/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
40 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
49 static rtx
neg_const_int (machine_mode
, const_rtx
);
50 static bool plus_minus_operand_p (const_rtx
);
51 static rtx
simplify_plus_minus (enum rtx_code
, machine_mode
, rtx
, rtx
);
52 static rtx
simplify_associative_operation (enum rtx_code
, machine_mode
,
54 static rtx
simplify_relational_operation_1 (enum rtx_code
, machine_mode
,
55 machine_mode
, rtx
, rtx
);
56 static rtx
simplify_unary_operation_1 (enum rtx_code
, machine_mode
, rtx
);
57 static rtx
simplify_binary_operation_1 (enum rtx_code
, machine_mode
,
60 /* Negate a CONST_INT rtx. */
62 neg_const_int (machine_mode mode
, const_rtx i
)
64 unsigned HOST_WIDE_INT val
= -UINTVAL (i
);
66 if (!HWI_COMPUTABLE_MODE_P (mode
)
67 && val
== UINTVAL (i
))
68 return simplify_const_unary_operation (NEG
, mode
, CONST_CAST_RTX (i
),
70 return gen_int_mode (val
, mode
);
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
77 mode_signbit_p (machine_mode mode
, const_rtx x
)
79 unsigned HOST_WIDE_INT val
;
81 scalar_int_mode int_mode
;
83 if (!is_int_mode (mode
, &int_mode
))
86 width
= GET_MODE_PRECISION (int_mode
);
90 if (width
<= HOST_BITS_PER_WIDE_INT
93 #if TARGET_SUPPORTS_WIDE_INT
94 else if (CONST_WIDE_INT_P (x
))
97 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
98 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
100 for (i
= 0; i
< elts
- 1; i
++)
101 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
103 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
104 width
%= HOST_BITS_PER_WIDE_INT
;
106 width
= HOST_BITS_PER_WIDE_INT
;
109 else if (width
<= HOST_BITS_PER_DOUBLE_INT
110 && CONST_DOUBLE_AS_INT_P (x
)
111 && CONST_DOUBLE_LOW (x
) == 0)
113 val
= CONST_DOUBLE_HIGH (x
);
114 width
-= HOST_BITS_PER_WIDE_INT
;
118 /* X is not an integer constant. */
121 if (width
< HOST_BITS_PER_WIDE_INT
)
122 val
&= (HOST_WIDE_INT_1U
<< width
) - 1;
123 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
126 /* Test whether VAL is equal to the most significant bit of mode MODE
127 (after masking with the mode mask of MODE). Returns false if the
128 precision of MODE is too large to handle. */
131 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
134 scalar_int_mode int_mode
;
136 if (!is_int_mode (mode
, &int_mode
))
139 width
= GET_MODE_PRECISION (int_mode
);
140 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
143 val
&= GET_MODE_MASK (int_mode
);
144 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
147 /* Test whether the most significant bit of mode MODE is set in VAL.
148 Returns false if the precision of MODE is too large to handle. */
150 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
154 scalar_int_mode int_mode
;
155 if (!is_int_mode (mode
, &int_mode
))
158 width
= GET_MODE_PRECISION (int_mode
);
159 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
162 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
166 /* Test whether the most significant bit of mode MODE is clear in VAL.
167 Returns false if the precision of MODE is too large to handle. */
169 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
173 scalar_int_mode int_mode
;
174 if (!is_int_mode (mode
, &int_mode
))
177 width
= GET_MODE_PRECISION (int_mode
);
178 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
181 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
185 /* Make a binary operation by properly ordering the operands and
186 seeing if the expression folds. */
189 simplify_gen_binary (enum rtx_code code
, machine_mode mode
, rtx op0
,
194 /* If this simplifies, do it. */
195 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
199 /* Put complex operands first and constants second if commutative. */
200 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
201 && swap_commutative_operands_p (op0
, op1
))
202 std::swap (op0
, op1
);
204 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
207 /* If X is a MEM referencing the constant pool, return the real value.
208 Otherwise return X. */
210 avoid_constant_pool_reference (rtx x
)
214 poly_int64 offset
= 0;
216 switch (GET_CODE (x
))
222 /* Handle float extensions of constant pool references. */
224 c
= avoid_constant_pool_reference (tmp
);
225 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
226 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c
),
234 if (GET_MODE (x
) == BLKmode
)
239 /* Call target hook to avoid the effects of -fpic etc.... */
240 addr
= targetm
.delegitimize_address (addr
);
242 /* Split the address into a base and integer offset. */
243 addr
= strip_offset (addr
, &offset
);
245 if (GET_CODE (addr
) == LO_SUM
)
246 addr
= XEXP (addr
, 1);
248 /* If this is a constant pool reference, we can turn it into its
249 constant and hope that simplifications happen. */
250 if (GET_CODE (addr
) == SYMBOL_REF
251 && CONSTANT_POOL_ADDRESS_P (addr
))
253 c
= get_pool_constant (addr
);
254 cmode
= get_pool_mode (addr
);
256 /* If we're accessing the constant in a different mode than it was
257 originally stored, attempt to fix that up via subreg simplifications.
258 If that fails we have no choice but to return the original memory. */
259 if (known_eq (offset
, 0) && cmode
== GET_MODE (x
))
261 else if (known_in_range_p (offset
, 0, GET_MODE_SIZE (cmode
)))
263 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
264 if (tem
&& CONSTANT_P (tem
))
272 /* Simplify a MEM based on its attributes. This is the default
273 delegitimize_address target hook, and it's recommended that every
274 overrider call it. */
277 delegitimize_mem_from_attrs (rtx x
)
279 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
280 use their base addresses as equivalent. */
283 && MEM_OFFSET_KNOWN_P (x
))
285 tree decl
= MEM_EXPR (x
);
286 machine_mode mode
= GET_MODE (x
);
287 poly_int64 offset
= 0;
289 switch (TREE_CODE (decl
))
299 case ARRAY_RANGE_REF
:
304 case VIEW_CONVERT_EXPR
:
306 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
308 int unsignedp
, reversep
, volatilep
= 0;
311 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
312 &unsignedp
, &reversep
, &volatilep
);
313 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
314 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
315 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
318 offset
+= bytepos
+ toffset_val
;
324 && mode
== GET_MODE (x
)
326 && (TREE_STATIC (decl
)
327 || DECL_THREAD_LOCAL_P (decl
))
328 && DECL_RTL_SET_P (decl
)
329 && MEM_P (DECL_RTL (decl
)))
333 offset
+= MEM_OFFSET (x
);
335 newx
= DECL_RTL (decl
);
339 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
340 poly_int64 n_offset
, o_offset
;
342 /* Avoid creating a new MEM needlessly if we already had
343 the same address. We do if there's no OFFSET and the
344 old address X is identical to NEWX, or if X is of the
345 form (plus NEWX OFFSET), or the NEWX is of the form
346 (plus Y (const_int Z)) and X is that with the offset
347 added: (plus Y (const_int Z+OFFSET)). */
348 n
= strip_offset (n
, &n_offset
);
349 o
= strip_offset (o
, &o_offset
);
350 if (!(known_eq (o_offset
, n_offset
+ offset
)
351 && rtx_equal_p (o
, n
)))
352 x
= adjust_address_nv (newx
, mode
, offset
);
354 else if (GET_MODE (x
) == GET_MODE (newx
)
355 && known_eq (offset
, 0))
363 /* Make a unary operation by first seeing if it folds and otherwise making
364 the specified operation. */
367 simplify_gen_unary (enum rtx_code code
, machine_mode mode
, rtx op
,
368 machine_mode op_mode
)
372 /* If this simplifies, use it. */
373 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
376 return gen_rtx_fmt_e (code
, mode
, op
);
379 /* Likewise for ternary operations. */
382 simplify_gen_ternary (enum rtx_code code
, machine_mode mode
,
383 machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
387 /* If this simplifies, use it. */
388 if ((tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
389 op0
, op1
, op2
)) != 0)
392 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
395 /* Likewise, for relational operations.
396 CMP_MODE specifies mode comparison is done in. */
399 simplify_gen_relational (enum rtx_code code
, machine_mode mode
,
400 machine_mode cmp_mode
, rtx op0
, rtx op1
)
404 if ((tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
408 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
411 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
412 and simplify the result. If FN is non-NULL, call this callback on each
413 X, if it returns non-NULL, replace X with its return value and simplify the
417 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
418 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
420 enum rtx_code code
= GET_CODE (x
);
421 machine_mode mode
= GET_MODE (x
);
422 machine_mode op_mode
;
424 rtx op0
, op1
, op2
, newx
, op
;
428 if (__builtin_expect (fn
!= NULL
, 0))
430 newx
= fn (x
, old_rtx
, data
);
434 else if (rtx_equal_p (x
, old_rtx
))
435 return copy_rtx ((rtx
) data
);
437 switch (GET_RTX_CLASS (code
))
441 op_mode
= GET_MODE (op0
);
442 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
443 if (op0
== XEXP (x
, 0))
445 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
449 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
450 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
451 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
453 return simplify_gen_binary (code
, mode
, op0
, op1
);
456 case RTX_COMM_COMPARE
:
459 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
460 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
461 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
462 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
464 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
467 case RTX_BITFIELD_OPS
:
469 op_mode
= GET_MODE (op0
);
470 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
471 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
472 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
473 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
475 if (op_mode
== VOIDmode
)
476 op_mode
= GET_MODE (op0
);
477 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
482 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
483 if (op0
== SUBREG_REG (x
))
485 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
486 GET_MODE (SUBREG_REG (x
)),
488 return op0
? op0
: x
;
495 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
496 if (op0
== XEXP (x
, 0))
498 return replace_equiv_address_nv (x
, op0
);
500 else if (code
== LO_SUM
)
502 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
503 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
505 /* (lo_sum (high x) y) -> y where x and y have the same base. */
506 if (GET_CODE (op0
) == HIGH
)
508 rtx base0
, base1
, offset0
, offset1
;
509 split_const (XEXP (op0
, 0), &base0
, &offset0
);
510 split_const (op1
, &base1
, &offset1
);
511 if (rtx_equal_p (base0
, base1
))
515 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
517 return gen_rtx_LO_SUM (mode
, op0
, op1
);
526 fmt
= GET_RTX_FORMAT (code
);
527 for (i
= 0; fmt
[i
]; i
++)
532 newvec
= XVEC (newx
, i
);
533 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
535 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
537 if (op
!= RTVEC_ELT (vec
, j
))
541 newvec
= shallow_copy_rtvec (vec
);
543 newx
= shallow_copy_rtx (x
);
544 XVEC (newx
, i
) = newvec
;
546 RTVEC_ELT (newvec
, j
) = op
;
554 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
555 if (op
!= XEXP (x
, i
))
558 newx
= shallow_copy_rtx (x
);
567 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
568 resulting RTX. Return a new RTX which is as simplified as possible. */
571 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
573 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
576 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
577 Only handle cases where the truncated value is inherently an rvalue.
579 RTL provides two ways of truncating a value:
581 1. a lowpart subreg. This form is only a truncation when both
582 the outer and inner modes (here MODE and OP_MODE respectively)
583 are scalar integers, and only then when the subreg is used as
586 It is only valid to form such truncating subregs if the
587 truncation requires no action by the target. The onus for
588 proving this is on the creator of the subreg -- e.g. the
589 caller to simplify_subreg or simplify_gen_subreg -- and typically
590 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
592 2. a TRUNCATE. This form handles both scalar and compound integers.
594 The first form is preferred where valid. However, the TRUNCATE
595 handling in simplify_unary_operation turns the second form into the
596 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
597 so it is generally safe to form rvalue truncations using:
599 simplify_gen_unary (TRUNCATE, ...)
601 and leave simplify_unary_operation to work out which representation
604 Because of the proof requirements on (1), simplify_truncation must
605 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
606 regardless of whether the outer truncation came from a SUBREG or a
607 TRUNCATE. For example, if the caller has proven that an SImode
612 is a no-op and can be represented as a subreg, it does not follow
613 that SImode truncations of X and Y are also no-ops. On a target
614 like 64-bit MIPS that requires SImode values to be stored in
615 sign-extended form, an SImode truncation of:
617 (and:DI (reg:DI X) (const_int 63))
619 is trivially a no-op because only the lower 6 bits can be set.
620 However, X is still an arbitrary 64-bit number and so we cannot
621 assume that truncating it too is a no-op. */
624 simplify_truncation (machine_mode mode
, rtx op
,
625 machine_mode op_mode
)
627 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
628 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
629 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
631 gcc_assert (precision
<= op_precision
);
633 /* Optimize truncations of zero and sign extended values. */
634 if (GET_CODE (op
) == ZERO_EXTEND
635 || GET_CODE (op
) == SIGN_EXTEND
)
637 /* There are three possibilities. If MODE is the same as the
638 origmode, we can omit both the extension and the subreg.
639 If MODE is not larger than the origmode, we can apply the
640 truncation without the extension. Finally, if the outermode
641 is larger than the origmode, we can just extend to the appropriate
643 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
644 if (mode
== origmode
)
646 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
647 return simplify_gen_unary (TRUNCATE
, mode
,
648 XEXP (op
, 0), origmode
);
650 return simplify_gen_unary (GET_CODE (op
), mode
,
651 XEXP (op
, 0), origmode
);
654 /* If the machine can perform operations in the truncated mode, distribute
655 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
656 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
658 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
659 && (GET_CODE (op
) == PLUS
660 || GET_CODE (op
) == MINUS
661 || GET_CODE (op
) == MULT
))
663 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
666 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
668 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
672 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
673 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
674 the outer subreg is effectively a truncation to the original mode. */
675 if ((GET_CODE (op
) == LSHIFTRT
676 || GET_CODE (op
) == ASHIFTRT
)
677 /* Ensure that OP_MODE is at least twice as wide as MODE
678 to avoid the possibility that an outer LSHIFTRT shifts by more
679 than the sign extension's sign_bit_copies and introduces zeros
680 into the high bits of the result. */
681 && 2 * precision
<= op_precision
682 && CONST_INT_P (XEXP (op
, 1))
683 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
684 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
685 && UINTVAL (XEXP (op
, 1)) < precision
)
686 return simplify_gen_binary (ASHIFTRT
, mode
,
687 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
689 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
690 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
691 the outer subreg is effectively a truncation to the original mode. */
692 if ((GET_CODE (op
) == LSHIFTRT
693 || GET_CODE (op
) == ASHIFTRT
)
694 && CONST_INT_P (XEXP (op
, 1))
695 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
696 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
697 && UINTVAL (XEXP (op
, 1)) < precision
)
698 return simplify_gen_binary (LSHIFTRT
, mode
,
699 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
701 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
702 to (ashift:QI (x:QI) C), where C is a suitable small constant and
703 the outer subreg is effectively a truncation to the original mode. */
704 if (GET_CODE (op
) == ASHIFT
705 && CONST_INT_P (XEXP (op
, 1))
706 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
707 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
708 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
709 && UINTVAL (XEXP (op
, 1)) < precision
)
710 return simplify_gen_binary (ASHIFT
, mode
,
711 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
713 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
714 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
716 if (GET_CODE (op
) == AND
717 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
718 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
719 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
720 && CONST_INT_P (XEXP (op
, 1)))
722 rtx op0
= (XEXP (XEXP (op
, 0), 0));
723 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
724 rtx mask_op
= XEXP (op
, 1);
725 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
726 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
728 if (shift
< precision
729 /* If doing this transform works for an X with all bits set,
730 it works for any X. */
731 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
732 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
733 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
734 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
736 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
737 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
741 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
742 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
744 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
745 && REG_P (XEXP (op
, 0))
746 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
747 && CONST_INT_P (XEXP (op
, 1))
748 && CONST_INT_P (XEXP (op
, 2)))
750 rtx op0
= XEXP (op
, 0);
751 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
752 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
753 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
755 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
758 pos
-= op_precision
- precision
;
759 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
760 XEXP (op
, 1), GEN_INT (pos
));
763 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
765 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
767 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
768 XEXP (op
, 1), XEXP (op
, 2));
772 /* Recognize a word extraction from a multi-word subreg. */
773 if ((GET_CODE (op
) == LSHIFTRT
774 || GET_CODE (op
) == ASHIFTRT
)
775 && SCALAR_INT_MODE_P (mode
)
776 && SCALAR_INT_MODE_P (op_mode
)
777 && precision
>= BITS_PER_WORD
778 && 2 * precision
<= op_precision
779 && CONST_INT_P (XEXP (op
, 1))
780 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
781 && UINTVAL (XEXP (op
, 1)) < op_precision
)
783 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
784 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
785 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
787 ? byte
- shifted_bytes
788 : byte
+ shifted_bytes
));
791 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
792 and try replacing the TRUNCATE and shift with it. Don't do this
793 if the MEM has a mode-dependent address. */
794 if ((GET_CODE (op
) == LSHIFTRT
795 || GET_CODE (op
) == ASHIFTRT
)
796 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
797 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
798 && MEM_P (XEXP (op
, 0))
799 && CONST_INT_P (XEXP (op
, 1))
800 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
801 && INTVAL (XEXP (op
, 1)) > 0
802 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
803 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
804 MEM_ADDR_SPACE (XEXP (op
, 0)))
805 && ! MEM_VOLATILE_P (XEXP (op
, 0))
806 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
807 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
809 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
810 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
811 return adjust_address_nv (XEXP (op
, 0), int_mode
,
813 ? byte
- shifted_bytes
814 : byte
+ shifted_bytes
));
817 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
818 (OP:SI foo:SI) if OP is NEG or ABS. */
819 if ((GET_CODE (op
) == ABS
820 || GET_CODE (op
) == NEG
)
821 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
822 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
823 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
824 return simplify_gen_unary (GET_CODE (op
), mode
,
825 XEXP (XEXP (op
, 0), 0), mode
);
827 /* (truncate:A (subreg:B (truncate:C X) 0)) is
829 if (GET_CODE (op
) == SUBREG
830 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
831 && SCALAR_INT_MODE_P (op_mode
)
832 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
833 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
834 && subreg_lowpart_p (op
))
836 rtx inner
= XEXP (SUBREG_REG (op
), 0);
837 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
838 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
841 /* If subreg above is paradoxical and C is narrower
842 than A, return (subreg:A (truncate:C X) 0). */
843 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
846 /* (truncate:A (truncate:B X)) is (truncate:A X). */
847 if (GET_CODE (op
) == TRUNCATE
)
848 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
849 GET_MODE (XEXP (op
, 0)));
851 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
853 if (GET_CODE (op
) == IOR
854 && SCALAR_INT_MODE_P (mode
)
855 && SCALAR_INT_MODE_P (op_mode
)
856 && CONST_INT_P (XEXP (op
, 1))
857 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
867 simplify_unary_operation (enum rtx_code code
, machine_mode mode
,
868 rtx op
, machine_mode op_mode
)
872 trueop
= avoid_constant_pool_reference (op
);
874 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
878 return simplify_unary_operation_1 (code
, mode
, op
);
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
885 exact_int_to_float_conversion_p (const_rtx op
)
887 int out_bits
= significand_size (GET_MODE_INNER (GET_MODE (op
)));
888 machine_mode op0_mode
= GET_MODE (XEXP (op
, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode
!= VOIDmode
);
891 int in_prec
= GET_MODE_UNIT_PRECISION (op0_mode
);
892 int in_bits
= in_prec
;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode
))
895 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (XEXP (op
, 0), op0_mode
);
896 if (GET_CODE (op
) == FLOAT
)
897 in_bits
-= num_sign_bit_copies (XEXP (op
, 0), op0_mode
);
898 else if (GET_CODE (op
) == UNSIGNED_FLOAT
)
899 in_bits
= wi::min_precision (wi::uhwi (nonzero
, in_prec
), UNSIGNED
);
902 in_bits
-= wi::ctz (wi::uhwi (nonzero
, in_prec
));
904 return in_bits
<= out_bits
;
907 /* Perform some simplifications we can do even if the operands
910 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
912 enum rtx_code reversed
;
913 rtx temp
, elt
, base
, step
;
914 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
919 /* (not (not X)) == X. */
920 if (GET_CODE (op
) == NOT
)
923 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
924 comparison is all ones. */
925 if (COMPARISON_P (op
)
926 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
927 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
928 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
929 XEXP (op
, 0), XEXP (op
, 1));
931 /* (not (plus X -1)) can become (neg X). */
932 if (GET_CODE (op
) == PLUS
933 && XEXP (op
, 1) == constm1_rtx
)
934 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
936 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
937 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
938 and MODE_VECTOR_INT. */
939 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
940 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
943 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
944 if (GET_CODE (op
) == XOR
945 && CONST_INT_P (XEXP (op
, 1))
946 && (temp
= simplify_unary_operation (NOT
, mode
,
947 XEXP (op
, 1), mode
)) != 0)
948 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
950 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
951 if (GET_CODE (op
) == PLUS
952 && CONST_INT_P (XEXP (op
, 1))
953 && mode_signbit_p (mode
, XEXP (op
, 1))
954 && (temp
= simplify_unary_operation (NOT
, mode
,
955 XEXP (op
, 1), mode
)) != 0)
956 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
959 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
960 operands other than 1, but that is not valid. We could do a
961 similar simplification for (not (lshiftrt C X)) where C is
962 just the sign bit, but this doesn't seem common enough to
964 if (GET_CODE (op
) == ASHIFT
965 && XEXP (op
, 0) == const1_rtx
)
967 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
968 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
971 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
972 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
973 so we can perform the above simplification. */
974 if (STORE_FLAG_VALUE
== -1
975 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
976 && GET_CODE (op
) == ASHIFTRT
977 && CONST_INT_P (XEXP (op
, 1))
978 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
979 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
980 XEXP (op
, 0), const0_rtx
);
983 if (partial_subreg_p (op
)
984 && subreg_lowpart_p (op
)
985 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
986 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
988 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
991 x
= gen_rtx_ROTATE (inner_mode
,
992 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
994 XEXP (SUBREG_REG (op
), 1));
995 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
1000 /* Apply De Morgan's laws to reduce number of patterns for machines
1001 with negating logical insns (and-not, nand, etc.). If result has
1002 only one NOT, put it first, since that is how the patterns are
1004 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1006 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1007 machine_mode op_mode
;
1009 op_mode
= GET_MODE (in1
);
1010 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1012 op_mode
= GET_MODE (in2
);
1013 if (op_mode
== VOIDmode
)
1015 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1017 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1018 std::swap (in1
, in2
);
1020 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1024 /* (not (bswap x)) -> (bswap (not x)). */
1025 if (GET_CODE (op
) == BSWAP
)
1027 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1028 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1033 /* (neg (neg X)) == X. */
1034 if (GET_CODE (op
) == NEG
)
1035 return XEXP (op
, 0);
1037 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1038 If comparison is not reversible use
1040 if (GET_CODE (op
) == IF_THEN_ELSE
)
1042 rtx cond
= XEXP (op
, 0);
1043 rtx true_rtx
= XEXP (op
, 1);
1044 rtx false_rtx
= XEXP (op
, 2);
1046 if ((GET_CODE (true_rtx
) == NEG
1047 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1048 || (GET_CODE (false_rtx
) == NEG
1049 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1051 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1052 temp
= reversed_comparison (cond
, mode
);
1056 std::swap (true_rtx
, false_rtx
);
1058 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1059 mode
, temp
, true_rtx
, false_rtx
);
1063 /* (neg (plus X 1)) can become (not X). */
1064 if (GET_CODE (op
) == PLUS
1065 && XEXP (op
, 1) == const1_rtx
)
1066 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1068 /* Similarly, (neg (not X)) is (plus X 1). */
1069 if (GET_CODE (op
) == NOT
)
1070 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1073 /* (neg (minus X Y)) can become (minus Y X). This transformation
1074 isn't safe for modes with signed zeros, since if X and Y are
1075 both +0, (minus Y X) is the same as (minus X Y). If the
1076 rounding mode is towards +infinity (or -infinity) then the two
1077 expressions will be rounded differently. */
1078 if (GET_CODE (op
) == MINUS
1079 && !HONOR_SIGNED_ZEROS (mode
)
1080 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1081 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1083 if (GET_CODE (op
) == PLUS
1084 && !HONOR_SIGNED_ZEROS (mode
)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1087 /* (neg (plus A C)) is simplified to (minus -C A). */
1088 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1089 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1091 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1093 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1096 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1097 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1098 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1101 /* (neg (mult A B)) becomes (mult A (neg B)).
1102 This works even for floating-point values. */
1103 if (GET_CODE (op
) == MULT
1104 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1106 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1107 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1110 /* NEG commutes with ASHIFT since it is multiplication. Only do
1111 this if we can then eliminate the NEG (e.g., if the operand
1113 if (GET_CODE (op
) == ASHIFT
)
1115 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1117 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1120 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1121 C is equal to the width of MODE minus 1. */
1122 if (GET_CODE (op
) == ASHIFTRT
1123 && CONST_INT_P (XEXP (op
, 1))
1124 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1125 return simplify_gen_binary (LSHIFTRT
, mode
,
1126 XEXP (op
, 0), XEXP (op
, 1));
1128 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1129 C is equal to the width of MODE minus 1. */
1130 if (GET_CODE (op
) == LSHIFTRT
1131 && CONST_INT_P (XEXP (op
, 1))
1132 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1133 return simplify_gen_binary (ASHIFTRT
, mode
,
1134 XEXP (op
, 0), XEXP (op
, 1));
1136 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1137 if (GET_CODE (op
) == XOR
1138 && XEXP (op
, 1) == const1_rtx
1139 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1140 return plus_constant (mode
, XEXP (op
, 0), -1);
1142 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1143 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1144 if (GET_CODE (op
) == LT
1145 && XEXP (op
, 1) == const0_rtx
1146 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1148 int_mode
= as_a
<scalar_int_mode
> (mode
);
1149 int isize
= GET_MODE_PRECISION (inner
);
1150 if (STORE_FLAG_VALUE
== 1)
1152 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1153 gen_int_shift_amount (inner
,
1155 if (int_mode
== inner
)
1157 if (GET_MODE_PRECISION (int_mode
) > isize
)
1158 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1159 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1161 else if (STORE_FLAG_VALUE
== -1)
1163 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1164 gen_int_shift_amount (inner
,
1166 if (int_mode
== inner
)
1168 if (GET_MODE_PRECISION (int_mode
) > isize
)
1169 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1170 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1174 if (vec_series_p (op
, &base
, &step
))
1176 /* Only create a new series if we can simplify both parts. In other
1177 cases this isn't really a simplification, and it's not necessarily
1178 a win to replace a vector operation with a scalar operation. */
1179 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1180 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1183 step
= simplify_unary_operation (NEG
, inner_mode
,
1186 return gen_vec_series (mode
, base
, step
);
1192 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1193 with the umulXi3_highpart patterns. */
1194 if (GET_CODE (op
) == LSHIFTRT
1195 && GET_CODE (XEXP (op
, 0)) == MULT
)
1198 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1200 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1202 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1206 /* We can't handle truncation to a partial integer mode here
1207 because we don't know the real bitsize of the partial
1212 if (GET_MODE (op
) != VOIDmode
)
1214 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1219 /* If we know that the value is already truncated, we can
1220 replace the TRUNCATE with a SUBREG. */
1221 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1222 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1223 || truncated_to_mode (mode
, op
)))
1225 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1230 /* A truncate of a comparison can be replaced with a subreg if
1231 STORE_FLAG_VALUE permits. This is like the previous test,
1232 but it works even if the comparison is done in a mode larger
1233 than HOST_BITS_PER_WIDE_INT. */
1234 if (HWI_COMPUTABLE_MODE_P (mode
)
1235 && COMPARISON_P (op
)
1236 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1238 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1243 /* A truncate of a memory is just loading the low part of the memory
1244 if we are not changing the meaning of the address. */
1245 if (GET_CODE (op
) == MEM
1246 && !VECTOR_MODE_P (mode
)
1247 && !MEM_VOLATILE_P (op
)
1248 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1250 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1257 case FLOAT_TRUNCATE
:
1258 if (DECIMAL_FLOAT_MODE_P (mode
))
1261 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1262 if (GET_CODE (op
) == FLOAT_EXTEND
1263 && GET_MODE (XEXP (op
, 0)) == mode
)
1264 return XEXP (op
, 0);
1266 /* (float_truncate:SF (float_truncate:DF foo:XF))
1267 = (float_truncate:SF foo:XF).
1268 This may eliminate double rounding, so it is unsafe.
1270 (float_truncate:SF (float_extend:XF foo:DF))
1271 = (float_truncate:SF foo:DF).
1273 (float_truncate:DF (float_extend:XF foo:SF))
1274 = (float_extend:DF foo:SF). */
1275 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1276 && flag_unsafe_math_optimizations
)
1277 || GET_CODE (op
) == FLOAT_EXTEND
)
1278 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1279 > GET_MODE_UNIT_SIZE (mode
)
1280 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1282 XEXP (op
, 0), mode
);
1284 /* (float_truncate (float x)) is (float x) */
1285 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1286 && (flag_unsafe_math_optimizations
1287 || exact_int_to_float_conversion_p (op
)))
1288 return simplify_gen_unary (GET_CODE (op
), mode
,
1290 GET_MODE (XEXP (op
, 0)));
1292 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1293 (OP:SF foo:SF) if OP is NEG or ABS. */
1294 if ((GET_CODE (op
) == ABS
1295 || GET_CODE (op
) == NEG
)
1296 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1297 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1298 return simplify_gen_unary (GET_CODE (op
), mode
,
1299 XEXP (XEXP (op
, 0), 0), mode
);
1301 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1302 is (float_truncate:SF x). */
1303 if (GET_CODE (op
) == SUBREG
1304 && subreg_lowpart_p (op
)
1305 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1306 return SUBREG_REG (op
);
1310 if (DECIMAL_FLOAT_MODE_P (mode
))
1313 /* (float_extend (float_extend x)) is (float_extend x)
1315 (float_extend (float x)) is (float x) assuming that double
1316 rounding can't happen.
1318 if (GET_CODE (op
) == FLOAT_EXTEND
1319 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1320 && exact_int_to_float_conversion_p (op
)))
1321 return simplify_gen_unary (GET_CODE (op
), mode
,
1323 GET_MODE (XEXP (op
, 0)));
1328 /* (abs (neg <foo>)) -> (abs <foo>) */
1329 if (GET_CODE (op
) == NEG
)
1330 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1331 GET_MODE (XEXP (op
, 0)));
1333 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1335 if (GET_MODE (op
) == VOIDmode
)
1338 /* If operand is something known to be positive, ignore the ABS. */
1339 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1340 || val_signbit_known_clear_p (GET_MODE (op
),
1341 nonzero_bits (op
, GET_MODE (op
))))
1344 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1345 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1346 && (num_sign_bit_copies (op
, int_mode
)
1347 == GET_MODE_PRECISION (int_mode
)))
1348 return gen_rtx_NEG (int_mode
, op
);
1353 /* (ffs (*_extend <X>)) = (ffs <X>) */
1354 if (GET_CODE (op
) == SIGN_EXTEND
1355 || GET_CODE (op
) == ZERO_EXTEND
)
1356 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1357 GET_MODE (XEXP (op
, 0)));
1361 switch (GET_CODE (op
))
1365 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1366 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1367 GET_MODE (XEXP (op
, 0)));
1371 /* Rotations don't affect popcount. */
1372 if (!side_effects_p (XEXP (op
, 1)))
1373 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1374 GET_MODE (XEXP (op
, 0)));
1383 switch (GET_CODE (op
))
1389 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1390 GET_MODE (XEXP (op
, 0)));
1394 /* Rotations don't affect parity. */
1395 if (!side_effects_p (XEXP (op
, 1)))
1396 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1397 GET_MODE (XEXP (op
, 0)));
1406 /* (bswap (bswap x)) -> x. */
1407 if (GET_CODE (op
) == BSWAP
)
1408 return XEXP (op
, 0);
1412 /* (float (sign_extend <X>)) = (float <X>). */
1413 if (GET_CODE (op
) == SIGN_EXTEND
)
1414 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1415 GET_MODE (XEXP (op
, 0)));
1419 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1420 becomes just the MINUS if its mode is MODE. This allows
1421 folding switch statements on machines using casesi (such as
1423 if (GET_CODE (op
) == TRUNCATE
1424 && GET_MODE (XEXP (op
, 0)) == mode
1425 && GET_CODE (XEXP (op
, 0)) == MINUS
1426 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1427 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1428 return XEXP (op
, 0);
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op
) == MULT
)
1434 rtx lhs
= XEXP (op
, 0);
1435 rtx rhs
= XEXP (op
, 1);
1436 enum rtx_code lcode
= GET_CODE (lhs
);
1437 enum rtx_code rcode
= GET_CODE (rhs
);
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode
== SIGN_EXTEND
1442 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1443 && (rcode
== SIGN_EXTEND
1444 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1446 machine_mode lmode
= GET_MODE (lhs
);
1447 machine_mode rmode
= GET_MODE (rhs
);
1450 if (lcode
== ASHIFTRT
)
1451 /* Number of bits not shifted off the end. */
1452 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1453 - INTVAL (XEXP (lhs
, 1)));
1454 else /* lcode == SIGN_EXTEND */
1455 /* Size of inner mode. */
1456 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1458 if (rcode
== ASHIFTRT
)
1459 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1460 - INTVAL (XEXP (rhs
, 1)));
1461 else /* rcode == SIGN_EXTEND */
1462 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1464 /* We can only widen multiplies if the result is mathematiclly
1465 equivalent. I.e. if overflow was impossible. */
1466 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1467 return simplify_gen_binary
1469 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1470 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1474 /* Check for a sign extension of a subreg of a promoted
1475 variable, where the promotion is sign-extended, and the
1476 target mode is the same as the variable's promotion. */
1477 if (GET_CODE (op
) == SUBREG
1478 && SUBREG_PROMOTED_VAR_P (op
)
1479 && SUBREG_PROMOTED_SIGNED_P (op
)
1480 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1482 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1487 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1488 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1489 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1491 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1492 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1493 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1494 GET_MODE (XEXP (op
, 0)));
1497 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is (sign_extend:M (subreg:O <X>)) if there is mode with
1499 GET_MODE_BITSIZE (N) - I bits.
1500 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1501 is similarly (zero_extend:M (subreg:O <X>)). */
1502 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1503 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1504 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1505 && CONST_INT_P (XEXP (op
, 1))
1506 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1507 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1508 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1510 scalar_int_mode tmode
;
1511 gcc_assert (GET_MODE_PRECISION (int_mode
)
1512 > GET_MODE_PRECISION (op_mode
));
1513 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1514 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1517 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1519 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1520 ? SIGN_EXTEND
: ZERO_EXTEND
,
1521 int_mode
, inner
, tmode
);
1525 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1526 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1527 if (GET_CODE (op
) == LSHIFTRT
1528 && CONST_INT_P (XEXP (op
, 1))
1529 && XEXP (op
, 1) != const0_rtx
)
1530 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1532 #if defined(POINTERS_EXTEND_UNSIGNED)
1533 /* As we do not know which address space the pointer is referring to,
1534 we can do this only if the target does not support different pointer
1535 or address modes depending on the address space. */
1536 if (target_default_pointer_address_modes_p ()
1537 && ! POINTERS_EXTEND_UNSIGNED
1538 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1540 || (GET_CODE (op
) == SUBREG
1541 && REG_P (SUBREG_REG (op
))
1542 && REG_POINTER (SUBREG_REG (op
))
1543 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1544 && !targetm
.have_ptr_extend ())
1547 = convert_memory_address_addr_space_1 (Pmode
, op
,
1548 ADDR_SPACE_GENERIC
, false,
1557 /* Check for a zero extension of a subreg of a promoted
1558 variable, where the promotion is zero-extended, and the
1559 target mode is the same as the variable's promotion. */
1560 if (GET_CODE (op
) == SUBREG
1561 && SUBREG_PROMOTED_VAR_P (op
)
1562 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1563 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1565 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1570 /* Extending a widening multiplication should be canonicalized to
1571 a wider widening multiplication. */
1572 if (GET_CODE (op
) == MULT
)
1574 rtx lhs
= XEXP (op
, 0);
1575 rtx rhs
= XEXP (op
, 1);
1576 enum rtx_code lcode
= GET_CODE (lhs
);
1577 enum rtx_code rcode
= GET_CODE (rhs
);
1579 /* Widening multiplies usually extend both operands, but sometimes
1580 they use a shift to extract a portion of a register. */
1581 if ((lcode
== ZERO_EXTEND
1582 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1583 && (rcode
== ZERO_EXTEND
1584 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1586 machine_mode lmode
= GET_MODE (lhs
);
1587 machine_mode rmode
= GET_MODE (rhs
);
1590 if (lcode
== LSHIFTRT
)
1591 /* Number of bits not shifted off the end. */
1592 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1593 - INTVAL (XEXP (lhs
, 1)));
1594 else /* lcode == ZERO_EXTEND */
1595 /* Size of inner mode. */
1596 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1598 if (rcode
== LSHIFTRT
)
1599 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1600 - INTVAL (XEXP (rhs
, 1)));
1601 else /* rcode == ZERO_EXTEND */
1602 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1604 /* We can only widen multiplies if the result is mathematiclly
1605 equivalent. I.e. if overflow was impossible. */
1606 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1607 return simplify_gen_binary
1609 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1610 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1614 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1615 if (GET_CODE (op
) == ZERO_EXTEND
)
1616 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1617 GET_MODE (XEXP (op
, 0)));
1619 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1620 is (zero_extend:M (subreg:O <X>)) if there is mode with
1621 GET_MODE_PRECISION (N) - I bits. */
1622 if (GET_CODE (op
) == LSHIFTRT
1623 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1624 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1625 && CONST_INT_P (XEXP (op
, 1))
1626 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1627 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1628 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1630 scalar_int_mode tmode
;
1631 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1632 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1635 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1637 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1642 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1643 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1645 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1646 (and:SI (reg:SI) (const_int 63)). */
1647 if (partial_subreg_p (op
)
1648 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1649 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1650 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1651 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1652 && subreg_lowpart_p (op
)
1653 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1654 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1656 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1657 return SUBREG_REG (op
);
1658 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1662 #if defined(POINTERS_EXTEND_UNSIGNED)
1663 /* As we do not know which address space the pointer is referring to,
1664 we can do this only if the target does not support different pointer
1665 or address modes depending on the address space. */
1666 if (target_default_pointer_address_modes_p ()
1667 && POINTERS_EXTEND_UNSIGNED
> 0
1668 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1670 || (GET_CODE (op
) == SUBREG
1671 && REG_P (SUBREG_REG (op
))
1672 && REG_POINTER (SUBREG_REG (op
))
1673 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1674 && !targetm
.have_ptr_extend ())
1677 = convert_memory_address_addr_space_1 (Pmode
, op
,
1678 ADDR_SPACE_GENERIC
, false,
1690 if (VECTOR_MODE_P (mode
)
1691 && vec_duplicate_p (op
, &elt
)
1692 && code
!= VEC_DUPLICATE
)
1694 /* Try applying the operator to ELT and see if that simplifies.
1695 We can duplicate the result if so.
1697 The reason we don't use simplify_gen_unary is that it isn't
1698 necessarily a win to convert things like:
1700 (neg:V (vec_duplicate:V (reg:S R)))
1704 (vec_duplicate:V (neg:S (reg:S R)))
1706 The first might be done entirely in vector registers while the
1707 second might need a move between register files. */
1708 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1709 elt
, GET_MODE_INNER (GET_MODE (op
)));
1711 return gen_vec_duplicate (mode
, temp
);
1717 /* Try to compute the value of a unary operation CODE whose output mode is to
1718 be MODE with input operand OP whose mode was originally OP_MODE.
1719 Return zero if the value cannot be computed. */
1721 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1722 rtx op
, machine_mode op_mode
)
1724 scalar_int_mode result_mode
;
1726 if (code
== VEC_DUPLICATE
)
1728 gcc_assert (VECTOR_MODE_P (mode
));
1729 if (GET_MODE (op
) != VOIDmode
)
1731 if (!VECTOR_MODE_P (GET_MODE (op
)))
1732 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1734 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1737 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1738 return gen_const_vec_duplicate (mode
, op
);
1739 unsigned int n_elts
;
1740 if (GET_CODE (op
) == CONST_VECTOR
1741 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
1743 /* This must be constant if we're duplicating it to a constant
1744 number of elements. */
1745 unsigned int in_n_elts
= CONST_VECTOR_NUNITS (op
).to_constant ();
1746 gcc_assert (in_n_elts
< n_elts
);
1747 gcc_assert ((n_elts
% in_n_elts
) == 0);
1748 rtvec v
= rtvec_alloc (n_elts
);
1749 for (unsigned i
= 0; i
< n_elts
; i
++)
1750 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1751 return gen_rtx_CONST_VECTOR (mode
, v
);
1755 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1757 gcc_assert (GET_MODE (op
) == op_mode
);
1759 rtx_vector_builder builder
;
1760 if (!builder
.new_unary_operation (mode
, op
, false))
1763 unsigned int count
= builder
.encoded_nelts ();
1764 for (unsigned int i
= 0; i
< count
; i
++)
1766 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1767 CONST_VECTOR_ELT (op
, i
),
1768 GET_MODE_INNER (op_mode
));
1769 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1771 builder
.quick_push (x
);
1773 return builder
.build ();
1776 /* The order of these tests is critical so that, for example, we don't
1777 check the wrong mode (input vs. output) for a conversion operation,
1778 such as FIX. At some point, this should be simplified. */
1780 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1784 if (op_mode
== VOIDmode
)
1786 /* CONST_INT have VOIDmode as the mode. We assume that all
1787 the bits of the constant are significant, though, this is
1788 a dangerous assumption as many times CONST_INTs are
1789 created and used with garbage in the bits outside of the
1790 precision of the implied mode of the const_int. */
1791 op_mode
= MAX_MODE_INT
;
1794 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1796 /* Avoid the folding if flag_signaling_nans is on and
1797 operand is a signaling NaN. */
1798 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1801 d
= real_value_truncate (mode
, d
);
1802 return const_double_from_real_value (d
, mode
);
1804 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1808 if (op_mode
== VOIDmode
)
1810 /* CONST_INT have VOIDmode as the mode. We assume that all
1811 the bits of the constant are significant, though, this is
1812 a dangerous assumption as many times CONST_INTs are
1813 created and used with garbage in the bits outside of the
1814 precision of the implied mode of the const_int. */
1815 op_mode
= MAX_MODE_INT
;
1818 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1820 /* Avoid the folding if flag_signaling_nans is on and
1821 operand is a signaling NaN. */
1822 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1825 d
= real_value_truncate (mode
, d
);
1826 return const_double_from_real_value (d
, mode
);
1829 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1831 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1833 scalar_int_mode imode
= (op_mode
== VOIDmode
1835 : as_a
<scalar_int_mode
> (op_mode
));
1836 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1839 #if TARGET_SUPPORTS_WIDE_INT == 0
1840 /* This assert keeps the simplification from producing a result
1841 that cannot be represented in a CONST_DOUBLE but a lot of
1842 upstream callers expect that this function never fails to
1843 simplify something and so you if you added this to the test
1844 above the code would die later anyway. If this assert
1845 happens, you just need to make the port support wide int. */
1846 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1852 result
= wi::bit_not (op0
);
1856 result
= wi::neg (op0
);
1860 result
= wi::abs (op0
);
1864 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1868 if (wi::ne_p (op0
, 0))
1869 int_value
= wi::clz (op0
);
1870 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1872 result
= wi::shwi (int_value
, result_mode
);
1876 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1880 if (wi::ne_p (op0
, 0))
1881 int_value
= wi::ctz (op0
);
1882 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1884 result
= wi::shwi (int_value
, result_mode
);
1888 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1892 result
= wi::shwi (wi::parity (op0
), result_mode
);
1896 result
= wide_int (op0
).bswap ();
1901 result
= wide_int::from (op0
, width
, UNSIGNED
);
1905 result
= wide_int::from (op0
, width
, SIGNED
);
1913 return immed_wide_int_const (result
, result_mode
);
1916 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1917 && SCALAR_FLOAT_MODE_P (mode
)
1918 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1920 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1926 d
= real_value_abs (&d
);
1929 d
= real_value_negate (&d
);
1931 case FLOAT_TRUNCATE
:
1932 /* Don't perform the operation if flag_signaling_nans is on
1933 and the operand is a signaling NaN. */
1934 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1936 d
= real_value_truncate (mode
, d
);
1939 /* Don't perform the operation if flag_signaling_nans is on
1940 and the operand is a signaling NaN. */
1941 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1943 /* All this does is change the mode, unless changing
1945 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1946 real_convert (&d
, mode
, &d
);
1949 /* Don't perform the operation if flag_signaling_nans is on
1950 and the operand is a signaling NaN. */
1951 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1953 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1960 real_to_target (tmp
, &d
, GET_MODE (op
));
1961 for (i
= 0; i
< 4; i
++)
1963 real_from_target (&d
, tmp
, mode
);
1969 return const_double_from_real_value (d
, mode
);
1971 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1972 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1973 && is_int_mode (mode
, &result_mode
))
1975 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1976 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1977 operators are intentionally left unspecified (to ease implementation
1978 by target backends), for consistency, this routine implements the
1979 same semantics for constant folding as used by the middle-end. */
1981 /* This was formerly used only for non-IEEE float.
1982 eggert@twinsun.com says it is safe for IEEE also. */
1984 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1985 wide_int wmax
, wmin
;
1986 /* This is part of the abi to real_to_integer, but we check
1987 things before making this call. */
1993 if (REAL_VALUE_ISNAN (*x
))
1996 /* Test against the signed upper bound. */
1997 wmax
= wi::max_value (width
, SIGNED
);
1998 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1999 if (real_less (&t
, x
))
2000 return immed_wide_int_const (wmax
, mode
);
2002 /* Test against the signed lower bound. */
2003 wmin
= wi::min_value (width
, SIGNED
);
2004 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
2005 if (real_less (x
, &t
))
2006 return immed_wide_int_const (wmin
, mode
);
2008 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2012 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
2015 /* Test against the unsigned upper bound. */
2016 wmax
= wi::max_value (width
, UNSIGNED
);
2017 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
2018 if (real_less (&t
, x
))
2019 return immed_wide_int_const (wmax
, mode
);
2021 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2029 /* Handle polynomial integers. */
2030 else if (CONST_POLY_INT_P (op
))
2032 poly_wide_int result
;
2036 result
= -const_poly_int_value (op
);
2040 result
= ~const_poly_int_value (op
);
2046 return immed_wide_int_const (result
, mode
);
2052 /* Subroutine of simplify_binary_operation to simplify a binary operation
2053 CODE that can commute with byte swapping, with result mode MODE and
2054 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2055 Return zero if no simplification or canonicalization is possible. */
2058 simplify_byte_swapping_operation (enum rtx_code code
, machine_mode mode
,
2063 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2064 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
2066 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
2067 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
2068 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2071 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2072 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
2074 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2075 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2081 /* Subroutine of simplify_binary_operation to simplify a commutative,
2082 associative binary operation CODE with result mode MODE, operating
2083 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2084 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2085 canonicalization is possible. */
2088 simplify_associative_operation (enum rtx_code code
, machine_mode mode
,
2093 /* Linearize the operator to the left. */
2094 if (GET_CODE (op1
) == code
)
2096 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2097 if (GET_CODE (op0
) == code
)
2099 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2100 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2103 /* "a op (b op c)" becomes "(b op c) op a". */
2104 if (! swap_commutative_operands_p (op1
, op0
))
2105 return simplify_gen_binary (code
, mode
, op1
, op0
);
2107 std::swap (op0
, op1
);
2110 if (GET_CODE (op0
) == code
)
2112 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2113 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2115 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2116 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2119 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2120 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2122 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2124 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2125 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2127 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2134 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2135 and OP1. Return 0 if no simplification is possible.
2137 Don't use this for relational operations such as EQ or LT.
2138 Use simplify_relational_operation instead. */
2140 simplify_binary_operation (enum rtx_code code
, machine_mode mode
,
2143 rtx trueop0
, trueop1
;
2146 /* Relational operations don't work here. We must know the mode
2147 of the operands in order to do the comparison correctly.
2148 Assuming a full word can give incorrect results.
2149 Consider comparing 128 with -128 in QImode. */
2150 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2151 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2153 /* Make sure the constant is second. */
2154 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2155 && swap_commutative_operands_p (op0
, op1
))
2156 std::swap (op0
, op1
);
2158 trueop0
= avoid_constant_pool_reference (op0
);
2159 trueop1
= avoid_constant_pool_reference (op1
);
2161 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2164 tem
= simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2169 /* If the above steps did not result in a simplification and op0 or op1
2170 were constant pool references, use the referenced constants directly. */
2171 if (trueop0
!= op0
|| trueop1
!= op1
)
2172 return simplify_gen_binary (code
, mode
, trueop0
, trueop1
);
2177 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2178 which OP0 and OP1 are both vector series or vector duplicates
2179 (which are really just series with a step of 0). If so, try to
2180 form a new series by applying CODE to the bases and to the steps.
2181 Return null if no simplification is possible.
2183 MODE is the mode of the operation and is known to be a vector
2187 simplify_binary_operation_series (rtx_code code
, machine_mode mode
,
2191 if (vec_duplicate_p (op0
, &base0
))
2193 else if (!vec_series_p (op0
, &base0
, &step0
))
2197 if (vec_duplicate_p (op1
, &base1
))
2199 else if (!vec_series_p (op1
, &base1
, &step1
))
2202 /* Only create a new series if we can simplify both parts. In other
2203 cases this isn't really a simplification, and it's not necessarily
2204 a win to replace a vector operation with a scalar operation. */
2205 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
2206 rtx new_base
= simplify_binary_operation (code
, inner_mode
, base0
, base1
);
2210 rtx new_step
= simplify_binary_operation (code
, inner_mode
, step0
, step1
);
2214 return gen_vec_series (mode
, new_base
, new_step
);
2217 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2218 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2219 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2220 actual constants. */
2223 simplify_binary_operation_1 (enum rtx_code code
, machine_mode mode
,
2224 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2226 rtx tem
, reversed
, opleft
, opright
, elt0
, elt1
;
2228 scalar_int_mode int_mode
, inner_mode
;
2231 /* Even if we can't compute a constant result,
2232 there are some cases worth simplifying. */
2237 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2238 when x is NaN, infinite, or finite and nonzero. They aren't
2239 when x is -0 and the rounding mode is not towards -infinity,
2240 since (-0) + 0 is then 0. */
2241 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2244 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2245 transformations are safe even for IEEE. */
2246 if (GET_CODE (op0
) == NEG
)
2247 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2248 else if (GET_CODE (op1
) == NEG
)
2249 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2251 /* (~a) + 1 -> -a */
2252 if (INTEGRAL_MODE_P (mode
)
2253 && GET_CODE (op0
) == NOT
2254 && trueop1
== const1_rtx
)
2255 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2257 /* Handle both-operands-constant cases. We can only add
2258 CONST_INTs to constants since the sum of relocatable symbols
2259 can't be handled by most assemblers. Don't add CONST_INT
2260 to CONST_INT since overflow won't be computed properly if wider
2261 than HOST_BITS_PER_WIDE_INT. */
2263 if ((GET_CODE (op0
) == CONST
2264 || GET_CODE (op0
) == SYMBOL_REF
2265 || GET_CODE (op0
) == LABEL_REF
)
2266 && poly_int_rtx_p (op1
, &offset
))
2267 return plus_constant (mode
, op0
, offset
);
2268 else if ((GET_CODE (op1
) == CONST
2269 || GET_CODE (op1
) == SYMBOL_REF
2270 || GET_CODE (op1
) == LABEL_REF
)
2271 && poly_int_rtx_p (op0
, &offset
))
2272 return plus_constant (mode
, op1
, offset
);
2274 /* See if this is something like X * C - X or vice versa or
2275 if the multiplication is written as a shift. If so, we can
2276 distribute and make a new multiply, shift, or maybe just
2277 have X (if C is 2 in the example above). But don't make
2278 something more expensive than we had before. */
2280 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2282 rtx lhs
= op0
, rhs
= op1
;
2284 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2285 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2287 if (GET_CODE (lhs
) == NEG
)
2289 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2290 lhs
= XEXP (lhs
, 0);
2292 else if (GET_CODE (lhs
) == MULT
2293 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2295 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2296 lhs
= XEXP (lhs
, 0);
2298 else if (GET_CODE (lhs
) == ASHIFT
2299 && CONST_INT_P (XEXP (lhs
, 1))
2300 && INTVAL (XEXP (lhs
, 1)) >= 0
2301 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2303 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2304 GET_MODE_PRECISION (int_mode
));
2305 lhs
= XEXP (lhs
, 0);
2308 if (GET_CODE (rhs
) == NEG
)
2310 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2311 rhs
= XEXP (rhs
, 0);
2313 else if (GET_CODE (rhs
) == MULT
2314 && CONST_INT_P (XEXP (rhs
, 1)))
2316 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2317 rhs
= XEXP (rhs
, 0);
2319 else if (GET_CODE (rhs
) == ASHIFT
2320 && CONST_INT_P (XEXP (rhs
, 1))
2321 && INTVAL (XEXP (rhs
, 1)) >= 0
2322 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2324 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2325 GET_MODE_PRECISION (int_mode
));
2326 rhs
= XEXP (rhs
, 0);
2329 if (rtx_equal_p (lhs
, rhs
))
2331 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2333 bool speed
= optimize_function_for_speed_p (cfun
);
2335 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2337 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2338 return (set_src_cost (tem
, int_mode
, speed
)
2339 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2343 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2344 if (CONST_SCALAR_INT_P (op1
)
2345 && GET_CODE (op0
) == XOR
2346 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2347 && mode_signbit_p (mode
, op1
))
2348 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2349 simplify_gen_binary (XOR
, mode
, op1
,
2352 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2353 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2354 && GET_CODE (op0
) == MULT
2355 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2359 in1
= XEXP (XEXP (op0
, 0), 0);
2360 in2
= XEXP (op0
, 1);
2361 return simplify_gen_binary (MINUS
, mode
, op1
,
2362 simplify_gen_binary (MULT
, mode
,
2366 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2367 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2369 if (COMPARISON_P (op0
)
2370 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2371 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2372 && (reversed
= reversed_comparison (op0
, mode
)))
2374 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2376 /* If one of the operands is a PLUS or a MINUS, see if we can
2377 simplify this by the associative law.
2378 Don't use the associative law for floating point.
2379 The inaccuracy makes it nonassociative,
2380 and subtle programs can break if operations are associated. */
2382 if (INTEGRAL_MODE_P (mode
)
2383 && (plus_minus_operand_p (op0
)
2384 || plus_minus_operand_p (op1
))
2385 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2388 /* Reassociate floating point addition only when the user
2389 specifies associative math operations. */
2390 if (FLOAT_MODE_P (mode
)
2391 && flag_associative_math
)
2393 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2398 /* Handle vector series. */
2399 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2401 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2408 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2409 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2410 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2411 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2413 rtx xop00
= XEXP (op0
, 0);
2414 rtx xop10
= XEXP (op1
, 0);
2416 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2419 if (REG_P (xop00
) && REG_P (xop10
)
2420 && REGNO (xop00
) == REGNO (xop10
)
2421 && GET_MODE (xop00
) == mode
2422 && GET_MODE (xop10
) == mode
2423 && GET_MODE_CLASS (mode
) == MODE_CC
)
2429 /* We can't assume x-x is 0 even with non-IEEE floating point,
2430 but since it is zero except in very strange circumstances, we
2431 will treat it as zero with -ffinite-math-only. */
2432 if (rtx_equal_p (trueop0
, trueop1
)
2433 && ! side_effects_p (op0
)
2434 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2435 return CONST0_RTX (mode
);
2437 /* Change subtraction from zero into negation. (0 - x) is the
2438 same as -x when x is NaN, infinite, or finite and nonzero.
2439 But if the mode has signed zeros, and does not round towards
2440 -infinity, then 0 - 0 is 0, not -0. */
2441 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2442 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2444 /* (-1 - a) is ~a, unless the expression contains symbolic
2445 constants, in which case not retaining additions and
2446 subtractions could cause invalid assembly to be produced. */
2447 if (trueop0
== constm1_rtx
2448 && !contains_symbolic_reference_p (op1
))
2449 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2451 /* Subtracting 0 has no effect unless the mode has signed zeros
2452 and supports rounding towards -infinity. In such a case,
2454 if (!(HONOR_SIGNED_ZEROS (mode
)
2455 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2456 && trueop1
== CONST0_RTX (mode
))
2459 /* See if this is something like X * C - X or vice versa or
2460 if the multiplication is written as a shift. If so, we can
2461 distribute and make a new multiply, shift, or maybe just
2462 have X (if C is 2 in the example above). But don't make
2463 something more expensive than we had before. */
2465 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2467 rtx lhs
= op0
, rhs
= op1
;
2469 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2470 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2472 if (GET_CODE (lhs
) == NEG
)
2474 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2475 lhs
= XEXP (lhs
, 0);
2477 else if (GET_CODE (lhs
) == MULT
2478 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2480 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2481 lhs
= XEXP (lhs
, 0);
2483 else if (GET_CODE (lhs
) == ASHIFT
2484 && CONST_INT_P (XEXP (lhs
, 1))
2485 && INTVAL (XEXP (lhs
, 1)) >= 0
2486 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2488 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2489 GET_MODE_PRECISION (int_mode
));
2490 lhs
= XEXP (lhs
, 0);
2493 if (GET_CODE (rhs
) == NEG
)
2495 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2496 rhs
= XEXP (rhs
, 0);
2498 else if (GET_CODE (rhs
) == MULT
2499 && CONST_INT_P (XEXP (rhs
, 1)))
2501 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2502 rhs
= XEXP (rhs
, 0);
2504 else if (GET_CODE (rhs
) == ASHIFT
2505 && CONST_INT_P (XEXP (rhs
, 1))
2506 && INTVAL (XEXP (rhs
, 1)) >= 0
2507 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2509 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2510 GET_MODE_PRECISION (int_mode
));
2511 negcoeff1
= -negcoeff1
;
2512 rhs
= XEXP (rhs
, 0);
2515 if (rtx_equal_p (lhs
, rhs
))
2517 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2519 bool speed
= optimize_function_for_speed_p (cfun
);
2521 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2523 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2524 return (set_src_cost (tem
, int_mode
, speed
)
2525 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2529 /* (a - (-b)) -> (a + b). True even for IEEE. */
2530 if (GET_CODE (op1
) == NEG
)
2531 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2533 /* (-x - c) may be simplified as (-c - x). */
2534 if (GET_CODE (op0
) == NEG
2535 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2537 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2539 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2542 if ((GET_CODE (op0
) == CONST
2543 || GET_CODE (op0
) == SYMBOL_REF
2544 || GET_CODE (op0
) == LABEL_REF
)
2545 && poly_int_rtx_p (op1
, &offset
))
2546 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2548 /* Don't let a relocatable value get a negative coeff. */
2549 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2550 return simplify_gen_binary (PLUS
, mode
,
2552 neg_const_int (mode
, op1
));
2554 /* (x - (x & y)) -> (x & ~y) */
2555 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2557 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2559 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2560 GET_MODE (XEXP (op1
, 1)));
2561 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2563 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2565 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2566 GET_MODE (XEXP (op1
, 0)));
2567 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2571 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2572 by reversing the comparison code if valid. */
2573 if (STORE_FLAG_VALUE
== 1
2574 && trueop0
== const1_rtx
2575 && COMPARISON_P (op1
)
2576 && (reversed
= reversed_comparison (op1
, mode
)))
2579 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2580 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2581 && GET_CODE (op1
) == MULT
2582 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2586 in1
= XEXP (XEXP (op1
, 0), 0);
2587 in2
= XEXP (op1
, 1);
2588 return simplify_gen_binary (PLUS
, mode
,
2589 simplify_gen_binary (MULT
, mode
,
2594 /* Canonicalize (minus (neg A) (mult B C)) to
2595 (minus (mult (neg B) C) A). */
2596 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2597 && GET_CODE (op1
) == MULT
2598 && GET_CODE (op0
) == NEG
)
2602 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2603 in2
= XEXP (op1
, 1);
2604 return simplify_gen_binary (MINUS
, mode
,
2605 simplify_gen_binary (MULT
, mode
,
2610 /* If one of the operands is a PLUS or a MINUS, see if we can
2611 simplify this by the associative law. This will, for example,
2612 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2613 Don't use the associative law for floating point.
2614 The inaccuracy makes it nonassociative,
2615 and subtle programs can break if operations are associated. */
2617 if (INTEGRAL_MODE_P (mode
)
2618 && (plus_minus_operand_p (op0
)
2619 || plus_minus_operand_p (op1
))
2620 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2623 /* Handle vector series. */
2624 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2626 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2633 if (trueop1
== constm1_rtx
)
2634 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2636 if (GET_CODE (op0
) == NEG
)
2638 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2639 /* If op1 is a MULT as well and simplify_unary_operation
2640 just moved the NEG to the second operand, simplify_gen_binary
2641 below could through simplify_associative_operation move
2642 the NEG around again and recurse endlessly. */
2644 && GET_CODE (op1
) == MULT
2645 && GET_CODE (temp
) == MULT
2646 && XEXP (op1
, 0) == XEXP (temp
, 0)
2647 && GET_CODE (XEXP (temp
, 1)) == NEG
2648 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2651 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2653 if (GET_CODE (op1
) == NEG
)
2655 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2656 /* If op0 is a MULT as well and simplify_unary_operation
2657 just moved the NEG to the second operand, simplify_gen_binary
2658 below could through simplify_associative_operation move
2659 the NEG around again and recurse endlessly. */
2661 && GET_CODE (op0
) == MULT
2662 && GET_CODE (temp
) == MULT
2663 && XEXP (op0
, 0) == XEXP (temp
, 0)
2664 && GET_CODE (XEXP (temp
, 1)) == NEG
2665 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2668 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2671 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2672 x is NaN, since x * 0 is then also NaN. Nor is it valid
2673 when the mode has signed zeros, since multiplying a negative
2674 number by 0 will give -0, not 0. */
2675 if (!HONOR_NANS (mode
)
2676 && !HONOR_SIGNED_ZEROS (mode
)
2677 && trueop1
== CONST0_RTX (mode
)
2678 && ! side_effects_p (op0
))
2681 /* In IEEE floating point, x*1 is not equivalent to x for
2683 if (!HONOR_SNANS (mode
)
2684 && trueop1
== CONST1_RTX (mode
))
2687 /* Convert multiply by constant power of two into shift. */
2688 if (CONST_SCALAR_INT_P (trueop1
))
2690 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
2692 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2693 gen_int_shift_amount (mode
, val
));
2696 /* x*2 is x+x and x*(-1) is -x */
2697 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2698 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2699 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2700 && GET_MODE (op0
) == mode
)
2702 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2704 if (real_equal (d1
, &dconst2
))
2705 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2707 if (!HONOR_SNANS (mode
)
2708 && real_equal (d1
, &dconstm1
))
2709 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2712 /* Optimize -x * -x as x * x. */
2713 if (FLOAT_MODE_P (mode
)
2714 && GET_CODE (op0
) == NEG
2715 && GET_CODE (op1
) == NEG
2716 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2717 && !side_effects_p (XEXP (op0
, 0)))
2718 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2720 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2721 if (SCALAR_FLOAT_MODE_P (mode
)
2722 && GET_CODE (op0
) == ABS
2723 && GET_CODE (op1
) == ABS
2724 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2725 && !side_effects_p (XEXP (op0
, 0)))
2726 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2728 /* Reassociate multiplication, but for floating point MULTs
2729 only when the user specifies unsafe math optimizations. */
2730 if (! FLOAT_MODE_P (mode
)
2731 || flag_unsafe_math_optimizations
)
2733 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2740 if (trueop1
== CONST0_RTX (mode
))
2742 if (INTEGRAL_MODE_P (mode
)
2743 && trueop1
== CONSTM1_RTX (mode
)
2744 && !side_effects_p (op0
))
2746 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2748 /* A | (~A) -> -1 */
2749 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2750 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2751 && ! side_effects_p (op0
)
2752 && SCALAR_INT_MODE_P (mode
))
2755 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2756 if (CONST_INT_P (op1
)
2757 && HWI_COMPUTABLE_MODE_P (mode
)
2758 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2759 && !side_effects_p (op0
))
2762 /* Canonicalize (X & C1) | C2. */
2763 if (GET_CODE (op0
) == AND
2764 && CONST_INT_P (trueop1
)
2765 && CONST_INT_P (XEXP (op0
, 1)))
2767 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2768 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2769 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2771 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2773 && !side_effects_p (XEXP (op0
, 0)))
2776 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2777 if (((c1
|c2
) & mask
) == mask
)
2778 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2781 /* Convert (A & B) | A to A. */
2782 if (GET_CODE (op0
) == AND
2783 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2784 || rtx_equal_p (XEXP (op0
, 1), op1
))
2785 && ! side_effects_p (XEXP (op0
, 0))
2786 && ! side_effects_p (XEXP (op0
, 1)))
2789 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2790 mode size to (rotate A CX). */
2792 if (GET_CODE (op1
) == ASHIFT
2793 || GET_CODE (op1
) == SUBREG
)
2804 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2805 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2806 && CONST_INT_P (XEXP (opleft
, 1))
2807 && CONST_INT_P (XEXP (opright
, 1))
2808 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2809 == GET_MODE_UNIT_PRECISION (mode
)))
2810 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2812 /* Same, but for ashift that has been "simplified" to a wider mode
2813 by simplify_shift_const. */
2815 if (GET_CODE (opleft
) == SUBREG
2816 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2817 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2819 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2820 && GET_CODE (opright
) == LSHIFTRT
2821 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2822 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2823 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2824 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2825 SUBREG_REG (XEXP (opright
, 0)))
2826 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2827 && CONST_INT_P (XEXP (opright
, 1))
2828 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2829 + INTVAL (XEXP (opright
, 1))
2830 == GET_MODE_PRECISION (int_mode
)))
2831 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2832 XEXP (SUBREG_REG (opleft
), 1));
2834 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2835 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2836 the PLUS does not affect any of the bits in OP1: then we can do
2837 the IOR as a PLUS and we can associate. This is valid if OP1
2838 can be safely shifted left C bits. */
2839 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2840 && GET_CODE (XEXP (op0
, 0)) == PLUS
2841 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2842 && CONST_INT_P (XEXP (op0
, 1))
2843 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2845 int count
= INTVAL (XEXP (op0
, 1));
2846 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2848 if (mask
>> count
== INTVAL (trueop1
)
2849 && trunc_int_for_mode (mask
, mode
) == mask
2850 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2851 return simplify_gen_binary (ASHIFTRT
, mode
,
2852 plus_constant (mode
, XEXP (op0
, 0),
2857 /* The following happens with bitfield merging.
2858 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2859 if (GET_CODE (op0
) == AND
2860 && GET_CODE (op1
) == AND
2861 && CONST_INT_P (XEXP (op0
, 1))
2862 && CONST_INT_P (XEXP (op1
, 1))
2863 && (INTVAL (XEXP (op0
, 1))
2864 == ~INTVAL (XEXP (op1
, 1))))
2866 /* The IOR may be on both sides. */
2867 rtx top0
= NULL_RTX
, top1
= NULL_RTX
;
2868 if (GET_CODE (XEXP (op1
, 0)) == IOR
)
2869 top0
= op0
, top1
= op1
;
2870 else if (GET_CODE (XEXP (op0
, 0)) == IOR
)
2871 top0
= op1
, top1
= op0
;
2874 /* X may be on either side of the inner IOR. */
2876 if (rtx_equal_p (XEXP (top0
, 0),
2877 XEXP (XEXP (top1
, 0), 0)))
2878 tem
= XEXP (XEXP (top1
, 0), 1);
2879 else if (rtx_equal_p (XEXP (top0
, 0),
2880 XEXP (XEXP (top1
, 0), 1)))
2881 tem
= XEXP (XEXP (top1
, 0), 0);
2883 return simplify_gen_binary (IOR
, mode
, XEXP (top0
, 0),
2885 (AND
, mode
, tem
, XEXP (top1
, 1)));
2889 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2893 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2899 if (trueop1
== CONST0_RTX (mode
))
2901 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2902 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2903 if (rtx_equal_p (trueop0
, trueop1
)
2904 && ! side_effects_p (op0
)
2905 && GET_MODE_CLASS (mode
) != MODE_CC
)
2906 return CONST0_RTX (mode
);
2908 /* Canonicalize XOR of the most significant bit to PLUS. */
2909 if (CONST_SCALAR_INT_P (op1
)
2910 && mode_signbit_p (mode
, op1
))
2911 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2912 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2913 if (CONST_SCALAR_INT_P (op1
)
2914 && GET_CODE (op0
) == PLUS
2915 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2916 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2917 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2918 simplify_gen_binary (XOR
, mode
, op1
,
2921 /* If we are XORing two things that have no bits in common,
2922 convert them into an IOR. This helps to detect rotation encoded
2923 using those methods and possibly other simplifications. */
2925 if (HWI_COMPUTABLE_MODE_P (mode
)
2926 && (nonzero_bits (op0
, mode
)
2927 & nonzero_bits (op1
, mode
)) == 0)
2928 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2930 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2931 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2934 int num_negated
= 0;
2936 if (GET_CODE (op0
) == NOT
)
2937 num_negated
++, op0
= XEXP (op0
, 0);
2938 if (GET_CODE (op1
) == NOT
)
2939 num_negated
++, op1
= XEXP (op1
, 0);
2941 if (num_negated
== 2)
2942 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2943 else if (num_negated
== 1)
2944 return simplify_gen_unary (NOT
, mode
,
2945 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2949 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2950 correspond to a machine insn or result in further simplifications
2951 if B is a constant. */
2953 if (GET_CODE (op0
) == AND
2954 && rtx_equal_p (XEXP (op0
, 1), op1
)
2955 && ! side_effects_p (op1
))
2956 return simplify_gen_binary (AND
, mode
,
2957 simplify_gen_unary (NOT
, mode
,
2958 XEXP (op0
, 0), mode
),
2961 else if (GET_CODE (op0
) == AND
2962 && rtx_equal_p (XEXP (op0
, 0), op1
)
2963 && ! side_effects_p (op1
))
2964 return simplify_gen_binary (AND
, mode
,
2965 simplify_gen_unary (NOT
, mode
,
2966 XEXP (op0
, 1), mode
),
2969 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2970 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2971 out bits inverted twice and not set by C. Similarly, given
2972 (xor (and (xor A B) C) D), simplify without inverting C in
2973 the xor operand: (xor (and A C) (B&C)^D).
2975 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2976 && GET_CODE (XEXP (op0
, 0)) == XOR
2977 && CONST_INT_P (op1
)
2978 && CONST_INT_P (XEXP (op0
, 1))
2979 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2981 enum rtx_code op
= GET_CODE (op0
);
2982 rtx a
= XEXP (XEXP (op0
, 0), 0);
2983 rtx b
= XEXP (XEXP (op0
, 0), 1);
2984 rtx c
= XEXP (op0
, 1);
2986 HOST_WIDE_INT bval
= INTVAL (b
);
2987 HOST_WIDE_INT cval
= INTVAL (c
);
2988 HOST_WIDE_INT dval
= INTVAL (d
);
2989 HOST_WIDE_INT xcval
;
2996 return simplify_gen_binary (XOR
, mode
,
2997 simplify_gen_binary (op
, mode
, a
, c
),
2998 gen_int_mode ((bval
& xcval
) ^ dval
,
3002 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3003 we can transform like this:
3004 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3005 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3006 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3007 Attempt a few simplifications when B and C are both constants. */
3008 if (GET_CODE (op0
) == AND
3009 && CONST_INT_P (op1
)
3010 && CONST_INT_P (XEXP (op0
, 1)))
3012 rtx a
= XEXP (op0
, 0);
3013 rtx b
= XEXP (op0
, 1);
3015 HOST_WIDE_INT bval
= INTVAL (b
);
3016 HOST_WIDE_INT cval
= INTVAL (c
);
3018 /* Instead of computing ~A&C, we compute its negated value,
3019 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3020 optimize for sure. If it does not simplify, we still try
3021 to compute ~A&C below, but since that always allocates
3022 RTL, we don't try that before committing to returning a
3023 simplified expression. */
3024 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3027 if ((~cval
& bval
) == 0)
3029 rtx na_c
= NULL_RTX
;
3031 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3034 /* If ~A does not simplify, don't bother: we don't
3035 want to simplify 2 operations into 3, and if na_c
3036 were to simplify with na, n_na_c would have
3037 simplified as well. */
3038 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3040 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3043 /* Try to simplify ~A&C | ~B&C. */
3044 if (na_c
!= NULL_RTX
)
3045 return simplify_gen_binary (IOR
, mode
, na_c
,
3046 gen_int_mode (~bval
& cval
, mode
));
3050 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3051 if (n_na_c
== CONSTM1_RTX (mode
))
3053 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3054 gen_int_mode (~cval
& bval
,
3056 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3057 gen_int_mode (~bval
& cval
,
3063 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3064 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3065 machines, and also has shorter instruction path length. */
3066 if (GET_CODE (op0
) == AND
3067 && GET_CODE (XEXP (op0
, 0)) == XOR
3068 && CONST_INT_P (XEXP (op0
, 1))
3069 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3072 rtx b
= XEXP (XEXP (op0
, 0), 1);
3073 rtx c
= XEXP (op0
, 1);
3074 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3075 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3076 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3077 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3079 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3080 else if (GET_CODE (op0
) == AND
3081 && GET_CODE (XEXP (op0
, 0)) == XOR
3082 && CONST_INT_P (XEXP (op0
, 1))
3083 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3085 rtx a
= XEXP (XEXP (op0
, 0), 0);
3087 rtx c
= XEXP (op0
, 1);
3088 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3089 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3090 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3091 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3094 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3095 comparison if STORE_FLAG_VALUE is 1. */
3096 if (STORE_FLAG_VALUE
== 1
3097 && trueop1
== const1_rtx
3098 && COMPARISON_P (op0
)
3099 && (reversed
= reversed_comparison (op0
, mode
)))
3102 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3103 is (lt foo (const_int 0)), so we can perform the above
3104 simplification if STORE_FLAG_VALUE is 1. */
3106 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3107 && STORE_FLAG_VALUE
== 1
3108 && trueop1
== const1_rtx
3109 && GET_CODE (op0
) == LSHIFTRT
3110 && CONST_INT_P (XEXP (op0
, 1))
3111 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3112 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3114 /* (xor (comparison foo bar) (const_int sign-bit))
3115 when STORE_FLAG_VALUE is the sign bit. */
3116 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3117 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3118 && trueop1
== const_true_rtx
3119 && COMPARISON_P (op0
)
3120 && (reversed
= reversed_comparison (op0
, int_mode
)))
3123 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3127 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3133 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3135 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3137 if (HWI_COMPUTABLE_MODE_P (mode
))
3139 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3140 HOST_WIDE_INT nzop1
;
3141 if (CONST_INT_P (trueop1
))
3143 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3144 /* If we are turning off bits already known off in OP0, we need
3146 if ((nzop0
& ~val1
) == 0)
3149 nzop1
= nonzero_bits (trueop1
, mode
);
3150 /* If we are clearing all the nonzero bits, the result is zero. */
3151 if ((nzop1
& nzop0
) == 0
3152 && !side_effects_p (op0
) && !side_effects_p (op1
))
3153 return CONST0_RTX (mode
);
3155 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3156 && GET_MODE_CLASS (mode
) != MODE_CC
)
3159 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3160 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3161 && ! side_effects_p (op0
)
3162 && GET_MODE_CLASS (mode
) != MODE_CC
)
3163 return CONST0_RTX (mode
);
3165 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3166 there are no nonzero bits of C outside of X's mode. */
3167 if ((GET_CODE (op0
) == SIGN_EXTEND
3168 || GET_CODE (op0
) == ZERO_EXTEND
)
3169 && CONST_INT_P (trueop1
)
3170 && HWI_COMPUTABLE_MODE_P (mode
)
3171 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3172 & UINTVAL (trueop1
)) == 0)
3174 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3175 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3176 gen_int_mode (INTVAL (trueop1
),
3178 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3181 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3182 we might be able to further simplify the AND with X and potentially
3183 remove the truncation altogether. */
3184 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3186 rtx x
= XEXP (op0
, 0);
3187 machine_mode xmode
= GET_MODE (x
);
3188 tem
= simplify_gen_binary (AND
, xmode
, x
,
3189 gen_int_mode (INTVAL (trueop1
), xmode
));
3190 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3193 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3194 if (GET_CODE (op0
) == IOR
3195 && CONST_INT_P (trueop1
)
3196 && CONST_INT_P (XEXP (op0
, 1)))
3198 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3199 return simplify_gen_binary (IOR
, mode
,
3200 simplify_gen_binary (AND
, mode
,
3201 XEXP (op0
, 0), op1
),
3202 gen_int_mode (tmp
, mode
));
3205 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3206 insn (and may simplify more). */
3207 if (GET_CODE (op0
) == XOR
3208 && rtx_equal_p (XEXP (op0
, 0), op1
)
3209 && ! side_effects_p (op1
))
3210 return simplify_gen_binary (AND
, mode
,
3211 simplify_gen_unary (NOT
, mode
,
3212 XEXP (op0
, 1), mode
),
3215 if (GET_CODE (op0
) == XOR
3216 && rtx_equal_p (XEXP (op0
, 1), op1
)
3217 && ! side_effects_p (op1
))
3218 return simplify_gen_binary (AND
, mode
,
3219 simplify_gen_unary (NOT
, mode
,
3220 XEXP (op0
, 0), mode
),
3223 /* Similarly for (~(A ^ B)) & A. */
3224 if (GET_CODE (op0
) == NOT
3225 && GET_CODE (XEXP (op0
, 0)) == XOR
3226 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3227 && ! side_effects_p (op1
))
3228 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3230 if (GET_CODE (op0
) == NOT
3231 && GET_CODE (XEXP (op0
, 0)) == XOR
3232 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3233 && ! side_effects_p (op1
))
3234 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3236 /* Convert (A | B) & A to A. */
3237 if (GET_CODE (op0
) == IOR
3238 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3239 || rtx_equal_p (XEXP (op0
, 1), op1
))
3240 && ! side_effects_p (XEXP (op0
, 0))
3241 && ! side_effects_p (XEXP (op0
, 1)))
3244 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3245 ((A & N) + B) & M -> (A + B) & M
3246 Similarly if (N & M) == 0,
3247 ((A | N) + B) & M -> (A + B) & M
3248 and for - instead of + and/or ^ instead of |.
3249 Also, if (N & M) == 0, then
3250 (A +- N) & M -> A & M. */
3251 if (CONST_INT_P (trueop1
)
3252 && HWI_COMPUTABLE_MODE_P (mode
)
3253 && ~UINTVAL (trueop1
)
3254 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3255 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3260 pmop
[0] = XEXP (op0
, 0);
3261 pmop
[1] = XEXP (op0
, 1);
3263 if (CONST_INT_P (pmop
[1])
3264 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3265 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3267 for (which
= 0; which
< 2; which
++)
3270 switch (GET_CODE (tem
))
3273 if (CONST_INT_P (XEXP (tem
, 1))
3274 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3275 == UINTVAL (trueop1
))
3276 pmop
[which
] = XEXP (tem
, 0);
3280 if (CONST_INT_P (XEXP (tem
, 1))
3281 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3282 pmop
[which
] = XEXP (tem
, 0);
3289 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3291 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3293 return simplify_gen_binary (code
, mode
, tem
, op1
);
3297 /* (and X (ior (not X) Y)) -> (and X Y) */
3298 if (GET_CODE (op1
) == IOR
3299 && GET_CODE (XEXP (op1
, 0)) == NOT
3300 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3301 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3303 /* (and (ior (not X) Y) X) -> (and X Y) */
3304 if (GET_CODE (op0
) == IOR
3305 && GET_CODE (XEXP (op0
, 0)) == NOT
3306 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3307 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3309 /* (and X (ior Y (not X))) -> (and X Y) */
3310 if (GET_CODE (op1
) == IOR
3311 && GET_CODE (XEXP (op1
, 1)) == NOT
3312 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3313 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3315 /* (and (ior Y (not X)) X) -> (and X Y) */
3316 if (GET_CODE (op0
) == IOR
3317 && GET_CODE (XEXP (op0
, 1)) == NOT
3318 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3319 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3321 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3325 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3331 /* 0/x is 0 (or x&0 if x has side-effects). */
3332 if (trueop0
== CONST0_RTX (mode
)
3333 && !cfun
->can_throw_non_call_exceptions
)
3335 if (side_effects_p (op1
))
3336 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3340 if (trueop1
== CONST1_RTX (mode
))
3342 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3346 /* Convert divide by power of two into shift. */
3347 if (CONST_INT_P (trueop1
)
3348 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3349 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3350 gen_int_shift_amount (mode
, val
));
3354 /* Handle floating point and integers separately. */
3355 if (SCALAR_FLOAT_MODE_P (mode
))
3357 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3358 safe for modes with NaNs, since 0.0 / 0.0 will then be
3359 NaN rather than 0.0. Nor is it safe for modes with signed
3360 zeros, since dividing 0 by a negative number gives -0.0 */
3361 if (trueop0
== CONST0_RTX (mode
)
3362 && !HONOR_NANS (mode
)
3363 && !HONOR_SIGNED_ZEROS (mode
)
3364 && ! side_effects_p (op1
))
3367 if (trueop1
== CONST1_RTX (mode
)
3368 && !HONOR_SNANS (mode
))
3371 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3372 && trueop1
!= CONST0_RTX (mode
))
3374 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3377 if (real_equal (d1
, &dconstm1
)
3378 && !HONOR_SNANS (mode
))
3379 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3381 /* Change FP division by a constant into multiplication.
3382 Only do this with -freciprocal-math. */
3383 if (flag_reciprocal_math
3384 && !real_equal (d1
, &dconst0
))
3387 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3388 tem
= const_double_from_real_value (d
, mode
);
3389 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3393 else if (SCALAR_INT_MODE_P (mode
))
3395 /* 0/x is 0 (or x&0 if x has side-effects). */
3396 if (trueop0
== CONST0_RTX (mode
)
3397 && !cfun
->can_throw_non_call_exceptions
)
3399 if (side_effects_p (op1
))
3400 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3404 if (trueop1
== CONST1_RTX (mode
))
3406 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3411 if (trueop1
== constm1_rtx
)
3413 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3415 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3421 /* 0%x is 0 (or x&0 if x has side-effects). */
3422 if (trueop0
== CONST0_RTX (mode
))
3424 if (side_effects_p (op1
))
3425 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3428 /* x%1 is 0 (or x&0 if x has side-effects). */
3429 if (trueop1
== CONST1_RTX (mode
))
3431 if (side_effects_p (op0
))
3432 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3433 return CONST0_RTX (mode
);
3435 /* Implement modulus by power of two as AND. */
3436 if (CONST_INT_P (trueop1
)
3437 && exact_log2 (UINTVAL (trueop1
)) > 0)
3438 return simplify_gen_binary (AND
, mode
, op0
,
3439 gen_int_mode (UINTVAL (trueop1
) - 1,
3444 /* 0%x is 0 (or x&0 if x has side-effects). */
3445 if (trueop0
== CONST0_RTX (mode
))
3447 if (side_effects_p (op1
))
3448 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3451 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3452 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3454 if (side_effects_p (op0
))
3455 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3456 return CONST0_RTX (mode
);
3462 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3463 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3464 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1 amount instead.
3466 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3467 if (CONST_INT_P (trueop1
)
3468 && IN_RANGE (INTVAL (trueop1
),
3469 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3470 GET_MODE_UNIT_PRECISION (mode
) - 1))
3472 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3473 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3474 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3475 mode
, op0
, new_amount_rtx
);
3480 if (trueop1
== CONST0_RTX (mode
))
3482 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3484 /* Rotating ~0 always results in ~0. */
3485 if (CONST_INT_P (trueop0
)
3486 && HWI_COMPUTABLE_MODE_P (mode
)
3487 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3488 && ! side_effects_p (op1
))
3494 scalar constants c1, c2
3495 size (M2) > size (M1)
3496 c1 == size (M2) - size (M1)
3498 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3502 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3504 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3505 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3507 && CONST_INT_P (op1
)
3508 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3509 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3511 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3512 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3513 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3514 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3515 && subreg_lowpart_p (op0
))
3517 rtx tmp
= gen_int_shift_amount
3518 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3519 tmp
= simplify_gen_binary (code
, inner_mode
,
3520 XEXP (SUBREG_REG (op0
), 0),
3522 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3525 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3527 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3528 if (val
!= INTVAL (op1
))
3529 return simplify_gen_binary (code
, mode
, op0
,
3530 gen_int_shift_amount (mode
, val
));
3537 if (trueop1
== CONST0_RTX (mode
))
3539 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3541 goto canonicalize_shift
;
3544 if (trueop1
== CONST0_RTX (mode
))
3546 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3548 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3549 if (GET_CODE (op0
) == CLZ
3550 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3551 && CONST_INT_P (trueop1
)
3552 && STORE_FLAG_VALUE
== 1
3553 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3555 unsigned HOST_WIDE_INT zero_val
= 0;
3557 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3558 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3559 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3560 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3561 XEXP (op0
, 0), const0_rtx
);
3563 goto canonicalize_shift
;
3566 if (HWI_COMPUTABLE_MODE_P (mode
)
3567 && mode_signbit_p (mode
, trueop1
)
3568 && ! side_effects_p (op0
))
3570 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3572 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3578 if (HWI_COMPUTABLE_MODE_P (mode
)
3579 && CONST_INT_P (trueop1
)
3580 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3581 && ! side_effects_p (op0
))
3583 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3585 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3591 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3593 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3595 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3601 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3603 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3605 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3618 /* ??? There are simplifications that can be done. */
3622 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3623 return gen_vec_duplicate (mode
, op0
);
3624 if (valid_for_const_vector_p (mode
, op0
)
3625 && valid_for_const_vector_p (mode
, op1
))
3626 return gen_const_vec_series (mode
, op0
, op1
);
3630 if (!VECTOR_MODE_P (mode
))
3632 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3633 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3634 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3635 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3637 /* We can't reason about selections made at runtime. */
3638 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3641 if (vec_duplicate_p (trueop0
, &elt0
))
3644 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3645 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3648 /* Extract a scalar element from a nested VEC_SELECT expression
3649 (with optional nested VEC_CONCAT expression). Some targets
3650 (i386) extract scalar element from a vector using chain of
3651 nested VEC_SELECT expressions. When input operand is a memory
3652 operand, this operation can be simplified to a simple scalar
3653 load from an offseted memory address. */
3655 if (GET_CODE (trueop0
) == VEC_SELECT
3656 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3657 .is_constant (&n_elts
)))
3659 rtx op0
= XEXP (trueop0
, 0);
3660 rtx op1
= XEXP (trueop0
, 1);
3662 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3668 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3669 gcc_assert (i
< n_elts
);
3671 /* Select element, pointed by nested selector. */
3672 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3674 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3675 if (GET_CODE (op0
) == VEC_CONCAT
)
3677 rtx op00
= XEXP (op0
, 0);
3678 rtx op01
= XEXP (op0
, 1);
3680 machine_mode mode00
, mode01
;
3681 int n_elts00
, n_elts01
;
3683 mode00
= GET_MODE (op00
);
3684 mode01
= GET_MODE (op01
);
3686 /* Find out the number of elements of each operand.
3687 Since the concatenated result has a constant number
3688 of elements, the operands must too. */
3689 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3690 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3692 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3694 /* Select correct operand of VEC_CONCAT
3695 and adjust selector. */
3696 if (elem
< n_elts01
)
3707 vec
= rtvec_alloc (1);
3708 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3710 tmp
= gen_rtx_fmt_ee (code
, mode
,
3711 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3717 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3718 gcc_assert (GET_MODE_INNER (mode
)
3719 == GET_MODE_INNER (GET_MODE (trueop0
)));
3720 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3722 if (vec_duplicate_p (trueop0
, &elt0
))
3723 /* It doesn't matter which elements are selected by trueop1,
3724 because they are all the same. */
3725 return gen_vec_duplicate (mode
, elt0
);
3727 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3729 unsigned n_elts
= XVECLEN (trueop1
, 0);
3730 rtvec v
= rtvec_alloc (n_elts
);
3733 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3734 for (i
= 0; i
< n_elts
; i
++)
3736 rtx x
= XVECEXP (trueop1
, 0, i
);
3738 if (!CONST_INT_P (x
))
3741 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3745 return gen_rtx_CONST_VECTOR (mode
, v
);
3748 /* Recognize the identity. */
3749 if (GET_MODE (trueop0
) == mode
)
3751 bool maybe_ident
= true;
3752 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3754 rtx j
= XVECEXP (trueop1
, 0, i
);
3755 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3757 maybe_ident
= false;
3765 /* If we build {a,b} then permute it, build the result directly. */
3766 if (XVECLEN (trueop1
, 0) == 2
3767 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3768 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3769 && GET_CODE (trueop0
) == VEC_CONCAT
3770 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3771 && GET_MODE (XEXP (trueop0
, 0)) == mode
3772 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3773 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3775 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3776 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3779 gcc_assert (i0
< 4 && i1
< 4);
3780 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3781 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3783 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3786 if (XVECLEN (trueop1
, 0) == 2
3787 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3788 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3789 && GET_CODE (trueop0
) == VEC_CONCAT
3790 && GET_MODE (trueop0
) == mode
)
3792 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3793 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3796 gcc_assert (i0
< 2 && i1
< 2);
3797 subop0
= XEXP (trueop0
, i0
);
3798 subop1
= XEXP (trueop0
, i1
);
3800 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3803 /* If we select one half of a vec_concat, return that. */
3805 if (GET_CODE (trueop0
) == VEC_CONCAT
3806 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3808 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3810 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3812 rtx subop0
= XEXP (trueop0
, 0);
3813 rtx subop1
= XEXP (trueop0
, 1);
3814 machine_mode mode0
= GET_MODE (subop0
);
3815 machine_mode mode1
= GET_MODE (subop1
);
3816 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3817 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3819 bool success
= true;
3820 for (int i
= 1; i
< l0
; ++i
)
3822 rtx j
= XVECEXP (trueop1
, 0, i
);
3823 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3832 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3834 bool success
= true;
3835 for (int i
= 1; i
< l1
; ++i
)
3837 rtx j
= XVECEXP (trueop1
, 0, i
);
3838 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3850 if (XVECLEN (trueop1
, 0) == 1
3851 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3852 && GET_CODE (trueop0
) == VEC_CONCAT
)
3855 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3857 /* Try to find the element in the VEC_CONCAT. */
3858 while (GET_MODE (vec
) != mode
3859 && GET_CODE (vec
) == VEC_CONCAT
)
3861 poly_int64 vec_size
;
3863 if (CONST_INT_P (XEXP (vec
, 0)))
3865 /* vec_concat of two const_ints doesn't make sense with
3866 respect to modes. */
3867 if (CONST_INT_P (XEXP (vec
, 1)))
3870 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3871 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3874 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3876 if (known_lt (offset
, vec_size
))
3877 vec
= XEXP (vec
, 0);
3878 else if (known_ge (offset
, vec_size
))
3881 vec
= XEXP (vec
, 1);
3885 vec
= avoid_constant_pool_reference (vec
);
3888 if (GET_MODE (vec
) == mode
)
3892 /* If we select elements in a vec_merge that all come from the same
3893 operand, select from that operand directly. */
3894 if (GET_CODE (op0
) == VEC_MERGE
)
3896 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3897 if (CONST_INT_P (trueop02
))
3899 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3900 bool all_operand0
= true;
3901 bool all_operand1
= true;
3902 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3904 rtx j
= XVECEXP (trueop1
, 0, i
);
3905 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3906 all_operand1
= false;
3908 all_operand0
= false;
3910 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3911 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3912 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3913 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3917 /* If we have two nested selects that are inverses of each
3918 other, replace them with the source operand. */
3919 if (GET_CODE (trueop0
) == VEC_SELECT
3920 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3922 rtx op0_subop1
= XEXP (trueop0
, 1);
3923 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3924 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
3926 /* Apply the outer ordering vector to the inner one. (The inner
3927 ordering vector is expressly permitted to be of a different
3928 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3929 then the two VEC_SELECTs cancel. */
3930 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3932 rtx x
= XVECEXP (trueop1
, 0, i
);
3933 if (!CONST_INT_P (x
))
3935 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3936 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3939 return XEXP (trueop0
, 0);
3945 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3946 ? GET_MODE (trueop0
)
3947 : GET_MODE_INNER (mode
));
3948 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3949 ? GET_MODE (trueop1
)
3950 : GET_MODE_INNER (mode
));
3952 gcc_assert (VECTOR_MODE_P (mode
));
3953 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
3954 + GET_MODE_SIZE (op1_mode
),
3955 GET_MODE_SIZE (mode
)));
3957 if (VECTOR_MODE_P (op0_mode
))
3958 gcc_assert (GET_MODE_INNER (mode
)
3959 == GET_MODE_INNER (op0_mode
));
3961 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3963 if (VECTOR_MODE_P (op1_mode
))
3964 gcc_assert (GET_MODE_INNER (mode
)
3965 == GET_MODE_INNER (op1_mode
));
3967 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3969 unsigned int n_elts
, in_n_elts
;
3970 if ((GET_CODE (trueop0
) == CONST_VECTOR
3971 || CONST_SCALAR_INT_P (trueop0
)
3972 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3973 && (GET_CODE (trueop1
) == CONST_VECTOR
3974 || CONST_SCALAR_INT_P (trueop1
)
3975 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
3976 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
3977 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
3979 rtvec v
= rtvec_alloc (n_elts
);
3981 for (i
= 0; i
< n_elts
; i
++)
3985 if (!VECTOR_MODE_P (op0_mode
))
3986 RTVEC_ELT (v
, i
) = trueop0
;
3988 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3992 if (!VECTOR_MODE_P (op1_mode
))
3993 RTVEC_ELT (v
, i
) = trueop1
;
3995 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4000 return gen_rtx_CONST_VECTOR (mode
, v
);
4003 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4004 Restrict the transformation to avoid generating a VEC_SELECT with a
4005 mode unrelated to its operand. */
4006 if (GET_CODE (trueop0
) == VEC_SELECT
4007 && GET_CODE (trueop1
) == VEC_SELECT
4008 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4009 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4011 rtx par0
= XEXP (trueop0
, 1);
4012 rtx par1
= XEXP (trueop1
, 1);
4013 int len0
= XVECLEN (par0
, 0);
4014 int len1
= XVECLEN (par1
, 0);
4015 rtvec vec
= rtvec_alloc (len0
+ len1
);
4016 for (int i
= 0; i
< len0
; i
++)
4017 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4018 for (int i
= 0; i
< len1
; i
++)
4019 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4020 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4021 gen_rtx_PARALLEL (VOIDmode
, vec
));
4030 if (mode
== GET_MODE (op0
)
4031 && mode
== GET_MODE (op1
)
4032 && vec_duplicate_p (op0
, &elt0
)
4033 && vec_duplicate_p (op1
, &elt1
))
4035 /* Try applying the operator to ELT and see if that simplifies.
4036 We can duplicate the result if so.
4038 The reason we don't use simplify_gen_binary is that it isn't
4039 necessarily a win to convert things like:
4041 (plus:V (vec_duplicate:V (reg:S R1))
4042 (vec_duplicate:V (reg:S R2)))
4046 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4048 The first might be done entirely in vector registers while the
4049 second might need a move between register files. */
4050 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4053 return gen_vec_duplicate (mode
, tem
);
4059 /* Return true if binary operation OP distributes over addition in operand
4060 OPNO, with the other operand being held constant. OPNO counts from 1. */
4063 distributes_over_addition_p (rtx_code op
, int opno
)
4081 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4084 if (VECTOR_MODE_P (mode
)
4085 && code
!= VEC_CONCAT
4086 && GET_CODE (op0
) == CONST_VECTOR
4087 && GET_CODE (op1
) == CONST_VECTOR
)
4090 if (CONST_VECTOR_STEPPED_P (op0
)
4091 && CONST_VECTOR_STEPPED_P (op1
))
4092 /* We can operate directly on the encoding if:
4094 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4096 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4098 Addition and subtraction are the supported operators
4099 for which this is true. */
4100 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4101 else if (CONST_VECTOR_STEPPED_P (op0
))
4102 /* We can operate directly on stepped encodings if:
4106 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4108 which is true if (x -> x op c) distributes over addition. */
4109 step_ok_p
= distributes_over_addition_p (code
, 1);
4111 /* Similarly in reverse. */
4112 step_ok_p
= distributes_over_addition_p (code
, 2);
4113 rtx_vector_builder builder
;
4114 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4117 unsigned int count
= builder
.encoded_nelts ();
4118 for (unsigned int i
= 0; i
< count
; i
++)
4120 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4121 CONST_VECTOR_ELT (op0
, i
),
4122 CONST_VECTOR_ELT (op1
, i
));
4123 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4125 builder
.quick_push (x
);
4127 return builder
.build ();
4130 if (VECTOR_MODE_P (mode
)
4131 && code
== VEC_CONCAT
4132 && (CONST_SCALAR_INT_P (op0
)
4133 || CONST_FIXED_P (op0
)
4134 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4135 && (CONST_SCALAR_INT_P (op1
)
4136 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4137 || CONST_FIXED_P (op1
)))
4139 /* Both inputs have a constant number of elements, so the result must too.
4141 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4142 rtvec v
= rtvec_alloc (n_elts
);
4144 gcc_assert (n_elts
>= 2);
4147 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4148 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4150 RTVEC_ELT (v
, 0) = op0
;
4151 RTVEC_ELT (v
, 1) = op1
;
4155 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4156 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4159 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4160 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4161 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4163 for (i
= 0; i
< op0_n_elts
; ++i
)
4164 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4165 for (i
= 0; i
< op1_n_elts
; ++i
)
4166 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4169 return gen_rtx_CONST_VECTOR (mode
, v
);
4172 if (SCALAR_FLOAT_MODE_P (mode
)
4173 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4174 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4175 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4186 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4188 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4190 for (i
= 0; i
< 4; i
++)
4207 real_from_target (&r
, tmp0
, mode
);
4208 return const_double_from_real_value (r
, mode
);
4212 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4213 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4216 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4217 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4219 if (HONOR_SNANS (mode
)
4220 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4221 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4224 real_convert (&f0
, mode
, opr0
);
4225 real_convert (&f1
, mode
, opr1
);
4228 && real_equal (&f1
, &dconst0
)
4229 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4232 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4233 && flag_trapping_math
4234 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4236 int s0
= REAL_VALUE_NEGATIVE (f0
);
4237 int s1
= REAL_VALUE_NEGATIVE (f1
);
4242 /* Inf + -Inf = NaN plus exception. */
4247 /* Inf - Inf = NaN plus exception. */
4252 /* Inf / Inf = NaN plus exception. */
4259 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4260 && flag_trapping_math
4261 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4262 || (REAL_VALUE_ISINF (f1
)
4263 && real_equal (&f0
, &dconst0
))))
4264 /* Inf * 0 = NaN plus exception. */
4267 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4269 real_convert (&result
, mode
, &value
);
4271 /* Don't constant fold this floating point operation if
4272 the result has overflowed and flag_trapping_math. */
4274 if (flag_trapping_math
4275 && MODE_HAS_INFINITIES (mode
)
4276 && REAL_VALUE_ISINF (result
)
4277 && !REAL_VALUE_ISINF (f0
)
4278 && !REAL_VALUE_ISINF (f1
))
4279 /* Overflow plus exception. */
4282 /* Don't constant fold this floating point operation if the
4283 result may dependent upon the run-time rounding mode and
4284 flag_rounding_math is set, or if GCC's software emulation
4285 is unable to accurately represent the result. */
4287 if ((flag_rounding_math
4288 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4289 && (inexact
|| !real_identical (&result
, &value
)))
4292 return const_double_from_real_value (result
, mode
);
4296 /* We can fold some multi-word operations. */
4297 scalar_int_mode int_mode
;
4298 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4299 && CONST_SCALAR_INT_P (op0
)
4300 && CONST_SCALAR_INT_P (op1
))
4303 wi::overflow_type overflow
;
4304 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4305 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4307 #if TARGET_SUPPORTS_WIDE_INT == 0
4308 /* This assert keeps the simplification from producing a result
4309 that cannot be represented in a CONST_DOUBLE but a lot of
4310 upstream callers expect that this function never fails to
4311 simplify something and so if you added this to the test
4312 above the code would die later anyway. If this assert
4313 happens, you just need to make the port support wide int. */
4314 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4319 result
= wi::sub (pop0
, pop1
);
4323 result
= wi::add (pop0
, pop1
);
4327 result
= wi::mul (pop0
, pop1
);
4331 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4337 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4343 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4349 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4355 result
= wi::bit_and (pop0
, pop1
);
4359 result
= wi::bit_or (pop0
, pop1
);
4363 result
= wi::bit_xor (pop0
, pop1
);
4367 result
= wi::smin (pop0
, pop1
);
4371 result
= wi::smax (pop0
, pop1
);
4375 result
= wi::umin (pop0
, pop1
);
4379 result
= wi::umax (pop0
, pop1
);
4386 wide_int wop1
= pop1
;
4387 if (SHIFT_COUNT_TRUNCATED
)
4388 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4389 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4395 result
= wi::lrshift (pop0
, wop1
);
4399 result
= wi::arshift (pop0
, wop1
);
4403 result
= wi::lshift (pop0
, wop1
);
4414 if (wi::neg_p (pop1
))
4420 result
= wi::lrotate (pop0
, pop1
);
4424 result
= wi::rrotate (pop0
, pop1
);
4435 return immed_wide_int_const (result
, int_mode
);
4438 /* Handle polynomial integers. */
4439 if (NUM_POLY_INT_COEFFS
> 1
4440 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4441 && poly_int_rtx_p (op0
)
4442 && poly_int_rtx_p (op1
))
4444 poly_wide_int result
;
4448 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4452 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4456 if (CONST_SCALAR_INT_P (op1
))
4457 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4463 if (CONST_SCALAR_INT_P (op1
))
4465 wide_int shift
= rtx_mode_t (op1
, mode
);
4466 if (SHIFT_COUNT_TRUNCATED
)
4467 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4468 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4470 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4477 if (!CONST_SCALAR_INT_P (op1
)
4478 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4479 rtx_mode_t (op1
, mode
), &result
))
4486 return immed_wide_int_const (result
, int_mode
);
4494 /* Return a positive integer if X should sort after Y. The value
4495 returned is 1 if and only if X and Y are both regs. */
4498 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4502 result
= (commutative_operand_precedence (y
)
4503 - commutative_operand_precedence (x
));
4505 return result
+ result
;
4507 /* Group together equal REGs to do more simplification. */
4508 if (REG_P (x
) && REG_P (y
))
4509 return REGNO (x
) > REGNO (y
);
4514 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4515 operands may be another PLUS or MINUS.
4517 Rather than test for specific case, we do this by a brute-force method
4518 and do all possible simplifications until no more changes occur. Then
4519 we rebuild the operation.
4521 May return NULL_RTX when no changes were made. */
4524 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4527 struct simplify_plus_minus_op_data
4534 int changed
, n_constants
, canonicalized
= 0;
4537 memset (ops
, 0, sizeof ops
);
4539 /* Set up the two operands and then expand them until nothing has been
4540 changed. If we run out of room in our array, give up; this should
4541 almost never happen. */
4546 ops
[1].neg
= (code
== MINUS
);
4553 for (i
= 0; i
< n_ops
; i
++)
4555 rtx this_op
= ops
[i
].op
;
4556 int this_neg
= ops
[i
].neg
;
4557 enum rtx_code this_code
= GET_CODE (this_op
);
4563 if (n_ops
== ARRAY_SIZE (ops
))
4566 ops
[n_ops
].op
= XEXP (this_op
, 1);
4567 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4570 ops
[i
].op
= XEXP (this_op
, 0);
4572 /* If this operand was negated then we will potentially
4573 canonicalize the expression. Similarly if we don't
4574 place the operands adjacent we're re-ordering the
4575 expression and thus might be performing a
4576 canonicalization. Ignore register re-ordering.
4577 ??? It might be better to shuffle the ops array here,
4578 but then (plus (plus (A, B), plus (C, D))) wouldn't
4579 be seen as non-canonical. */
4582 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4587 ops
[i
].op
= XEXP (this_op
, 0);
4588 ops
[i
].neg
= ! this_neg
;
4594 if (n_ops
!= ARRAY_SIZE (ops
)
4595 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4596 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4597 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4599 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4600 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4601 ops
[n_ops
].neg
= this_neg
;
4609 /* ~a -> (-a - 1) */
4610 if (n_ops
!= ARRAY_SIZE (ops
))
4612 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4613 ops
[n_ops
++].neg
= this_neg
;
4614 ops
[i
].op
= XEXP (this_op
, 0);
4615 ops
[i
].neg
= !this_neg
;
4625 ops
[i
].op
= neg_const_int (mode
, this_op
);
4639 if (n_constants
> 1)
4642 gcc_assert (n_ops
>= 2);
4644 /* If we only have two operands, we can avoid the loops. */
4647 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4650 /* Get the two operands. Be careful with the order, especially for
4651 the cases where code == MINUS. */
4652 if (ops
[0].neg
&& ops
[1].neg
)
4654 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4657 else if (ops
[0].neg
)
4668 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4671 /* Now simplify each pair of operands until nothing changes. */
4674 /* Insertion sort is good enough for a small array. */
4675 for (i
= 1; i
< n_ops
; i
++)
4677 struct simplify_plus_minus_op_data save
;
4681 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4684 /* Just swapping registers doesn't count as canonicalization. */
4690 ops
[j
+ 1] = ops
[j
];
4692 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4697 for (i
= n_ops
- 1; i
> 0; i
--)
4698 for (j
= i
- 1; j
>= 0; j
--)
4700 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4701 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4703 if (lhs
!= 0 && rhs
!= 0)
4705 enum rtx_code ncode
= PLUS
;
4711 std::swap (lhs
, rhs
);
4713 else if (swap_commutative_operands_p (lhs
, rhs
))
4714 std::swap (lhs
, rhs
);
4716 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4717 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4719 rtx tem_lhs
, tem_rhs
;
4721 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4722 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4723 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4726 if (tem
&& !CONSTANT_P (tem
))
4727 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4730 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4734 /* Reject "simplifications" that just wrap the two
4735 arguments in a CONST. Failure to do so can result
4736 in infinite recursion with simplify_binary_operation
4737 when it calls us to simplify CONST operations.
4738 Also, if we find such a simplification, don't try
4739 any more combinations with this rhs: We must have
4740 something like symbol+offset, ie. one of the
4741 trivial CONST expressions we handle later. */
4742 if (GET_CODE (tem
) == CONST
4743 && GET_CODE (XEXP (tem
, 0)) == ncode
4744 && XEXP (XEXP (tem
, 0), 0) == lhs
4745 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4748 if (GET_CODE (tem
) == NEG
)
4749 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4750 if (CONST_INT_P (tem
) && lneg
)
4751 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4755 ops
[j
].op
= NULL_RTX
;
4765 /* Pack all the operands to the lower-numbered entries. */
4766 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4775 /* If nothing changed, check that rematerialization of rtl instructions
4776 is still required. */
4779 /* Perform rematerialization if only all operands are registers and
4780 all operations are PLUS. */
4781 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4782 around rs6000 and how it uses the CA register. See PR67145. */
4783 for (i
= 0; i
< n_ops
; i
++)
4785 || !REG_P (ops
[i
].op
)
4786 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4787 && fixed_regs
[REGNO (ops
[i
].op
)]
4788 && !global_regs
[REGNO (ops
[i
].op
)]
4789 && ops
[i
].op
!= frame_pointer_rtx
4790 && ops
[i
].op
!= arg_pointer_rtx
4791 && ops
[i
].op
!= stack_pointer_rtx
))
4796 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4798 && CONST_INT_P (ops
[1].op
)
4799 && CONSTANT_P (ops
[0].op
)
4801 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4803 /* We suppressed creation of trivial CONST expressions in the
4804 combination loop to avoid recursion. Create one manually now.
4805 The combination loop should have ensured that there is exactly
4806 one CONST_INT, and the sort will have ensured that it is last
4807 in the array and that any other constant will be next-to-last. */
4810 && CONST_INT_P (ops
[n_ops
- 1].op
)
4811 && CONSTANT_P (ops
[n_ops
- 2].op
))
4813 rtx value
= ops
[n_ops
- 1].op
;
4814 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4815 value
= neg_const_int (mode
, value
);
4816 if (CONST_INT_P (value
))
4818 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4824 /* Put a non-negated operand first, if possible. */
4826 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4829 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4838 /* Now make the result by performing the requested operations. */
4841 for (i
= 1; i
< n_ops
; i
++)
4842 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4843 mode
, result
, ops
[i
].op
);
4848 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4850 plus_minus_operand_p (const_rtx x
)
4852 return GET_CODE (x
) == PLUS
4853 || GET_CODE (x
) == MINUS
4854 || (GET_CODE (x
) == CONST
4855 && GET_CODE (XEXP (x
, 0)) == PLUS
4856 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4857 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4860 /* Like simplify_binary_operation except used for relational operators.
4861 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4862 not also be VOIDmode.
4864 CMP_MODE specifies in which mode the comparison is done in, so it is
4865 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4866 the operands or, if both are VOIDmode, the operands are compared in
4867 "infinite precision". */
4869 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4870 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4872 rtx tem
, trueop0
, trueop1
;
4874 if (cmp_mode
== VOIDmode
)
4875 cmp_mode
= GET_MODE (op0
);
4876 if (cmp_mode
== VOIDmode
)
4877 cmp_mode
= GET_MODE (op1
);
4879 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4882 if (SCALAR_FLOAT_MODE_P (mode
))
4884 if (tem
== const0_rtx
)
4885 return CONST0_RTX (mode
);
4886 #ifdef FLOAT_STORE_FLAG_VALUE
4888 REAL_VALUE_TYPE val
;
4889 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4890 return const_double_from_real_value (val
, mode
);
4896 if (VECTOR_MODE_P (mode
))
4898 if (tem
== const0_rtx
)
4899 return CONST0_RTX (mode
);
4900 #ifdef VECTOR_STORE_FLAG_VALUE
4902 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4903 if (val
== NULL_RTX
)
4905 if (val
== const1_rtx
)
4906 return CONST1_RTX (mode
);
4908 return gen_const_vec_duplicate (mode
, val
);
4918 /* For the following tests, ensure const0_rtx is op1. */
4919 if (swap_commutative_operands_p (op0
, op1
)
4920 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4921 std::swap (op0
, op1
), code
= swap_condition (code
);
4923 /* If op0 is a compare, extract the comparison arguments from it. */
4924 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4925 return simplify_gen_relational (code
, mode
, VOIDmode
,
4926 XEXP (op0
, 0), XEXP (op0
, 1));
4928 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4932 trueop0
= avoid_constant_pool_reference (op0
);
4933 trueop1
= avoid_constant_pool_reference (op1
);
4934 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4938 /* This part of simplify_relational_operation is only used when CMP_MODE
4939 is not in class MODE_CC (i.e. it is a real comparison).
4941 MODE is the mode of the result, while CMP_MODE specifies in which
4942 mode the comparison is done in, so it is the mode of the operands. */
4945 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4946 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4948 enum rtx_code op0code
= GET_CODE (op0
);
4950 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4952 /* If op0 is a comparison, extract the comparison arguments
4956 if (GET_MODE (op0
) == mode
)
4957 return simplify_rtx (op0
);
4959 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4960 XEXP (op0
, 0), XEXP (op0
, 1));
4962 else if (code
== EQ
)
4964 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4965 if (new_code
!= UNKNOWN
)
4966 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4967 XEXP (op0
, 0), XEXP (op0
, 1));
4971 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4972 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4973 if ((code
== LTU
|| code
== GEU
)
4974 && GET_CODE (op0
) == PLUS
4975 && CONST_INT_P (XEXP (op0
, 1))
4976 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4977 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4978 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4979 && XEXP (op0
, 1) != const0_rtx
)
4982 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4983 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4984 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4987 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4988 transformed into (LTU a -C). */
4989 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4990 && CONST_INT_P (XEXP (op0
, 1))
4991 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4992 && XEXP (op0
, 1) != const0_rtx
)
4995 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4996 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4997 XEXP (op0
, 0), new_cmp
);
5000 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5001 if ((code
== LTU
|| code
== GEU
)
5002 && GET_CODE (op0
) == PLUS
5003 && rtx_equal_p (op1
, XEXP (op0
, 1))
5004 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5005 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5006 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5007 copy_rtx (XEXP (op0
, 0)));
5009 if (op1
== const0_rtx
)
5011 /* Canonicalize (GTU x 0) as (NE x 0). */
5013 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5014 /* Canonicalize (LEU x 0) as (EQ x 0). */
5016 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5018 else if (op1
== const1_rtx
)
5023 /* Canonicalize (GE x 1) as (GT x 0). */
5024 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5027 /* Canonicalize (GEU x 1) as (NE x 0). */
5028 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5031 /* Canonicalize (LT x 1) as (LE x 0). */
5032 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5035 /* Canonicalize (LTU x 1) as (EQ x 0). */
5036 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5042 else if (op1
== constm1_rtx
)
5044 /* Canonicalize (LE x -1) as (LT x 0). */
5046 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5047 /* Canonicalize (GT x -1) as (GE x 0). */
5049 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5052 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5053 if ((code
== EQ
|| code
== NE
)
5054 && (op0code
== PLUS
|| op0code
== MINUS
)
5056 && CONSTANT_P (XEXP (op0
, 1))
5057 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5059 rtx x
= XEXP (op0
, 0);
5060 rtx c
= XEXP (op0
, 1);
5061 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5062 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5064 /* Detect an infinite recursive condition, where we oscillate at this
5065 simplification case between:
5066 A + B == C <---> C - B == A,
5067 where A, B, and C are all constants with non-simplifiable expressions,
5068 usually SYMBOL_REFs. */
5069 if (GET_CODE (tem
) == invcode
5071 && rtx_equal_p (c
, XEXP (tem
, 1)))
5074 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5077 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5078 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5079 scalar_int_mode int_mode
, int_cmp_mode
;
5081 && op1
== const0_rtx
5082 && is_int_mode (mode
, &int_mode
)
5083 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5084 /* ??? Work-around BImode bugs in the ia64 backend. */
5085 && int_mode
!= BImode
5086 && int_cmp_mode
!= BImode
5087 && nonzero_bits (op0
, int_cmp_mode
) == 1
5088 && STORE_FLAG_VALUE
== 1)
5089 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5090 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5091 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5093 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5094 if ((code
== EQ
|| code
== NE
)
5095 && op1
== const0_rtx
5097 return simplify_gen_relational (code
, mode
, cmp_mode
,
5098 XEXP (op0
, 0), XEXP (op0
, 1));
5100 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5101 if ((code
== EQ
|| code
== NE
)
5103 && rtx_equal_p (XEXP (op0
, 0), op1
)
5104 && !side_effects_p (XEXP (op0
, 0)))
5105 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5108 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5109 if ((code
== EQ
|| code
== NE
)
5111 && rtx_equal_p (XEXP (op0
, 1), op1
)
5112 && !side_effects_p (XEXP (op0
, 1)))
5113 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5116 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5117 if ((code
== EQ
|| code
== NE
)
5119 && CONST_SCALAR_INT_P (op1
)
5120 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5121 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5122 simplify_gen_binary (XOR
, cmp_mode
,
5123 XEXP (op0
, 1), op1
));
5125 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5126 constant folding if x/y is a constant. */
5127 if ((code
== EQ
|| code
== NE
)
5128 && (op0code
== AND
|| op0code
== IOR
)
5129 && !side_effects_p (op1
)
5130 && op1
!= CONST0_RTX (cmp_mode
))
5132 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5133 (eq/ne (and (not y) x) 0). */
5134 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5135 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5137 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5139 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5141 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5142 CONST0_RTX (cmp_mode
));
5145 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5146 (eq/ne (and (not x) y) 0). */
5147 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5148 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5150 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5152 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5154 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5155 CONST0_RTX (cmp_mode
));
5159 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5160 if ((code
== EQ
|| code
== NE
)
5161 && GET_CODE (op0
) == BSWAP
5162 && CONST_SCALAR_INT_P (op1
))
5163 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5164 simplify_gen_unary (BSWAP
, cmp_mode
,
5167 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5168 if ((code
== EQ
|| code
== NE
)
5169 && GET_CODE (op0
) == BSWAP
5170 && GET_CODE (op1
) == BSWAP
)
5171 return simplify_gen_relational (code
, mode
, cmp_mode
,
5172 XEXP (op0
, 0), XEXP (op1
, 0));
5174 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5180 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5181 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5182 XEXP (op0
, 0), const0_rtx
);
5187 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5188 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5189 XEXP (op0
, 0), const0_rtx
);
5208 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5209 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5210 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5211 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5212 For floating-point comparisons, assume that the operands were ordered. */
5215 comparison_result (enum rtx_code code
, int known_results
)
5221 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5224 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5228 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5231 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5235 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5238 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5241 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5243 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5246 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5248 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5251 return const_true_rtx
;
5259 /* Check if the given comparison (done in the given MODE) is actually
5260 a tautology or a contradiction. If the mode is VOID_mode, the
5261 comparison is done in "infinite precision". If no simplification
5262 is possible, this function returns zero. Otherwise, it returns
5263 either const_true_rtx or const0_rtx. */
5266 simplify_const_relational_operation (enum rtx_code code
,
5274 gcc_assert (mode
!= VOIDmode
5275 || (GET_MODE (op0
) == VOIDmode
5276 && GET_MODE (op1
) == VOIDmode
));
5278 /* If op0 is a compare, extract the comparison arguments from it. */
5279 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5281 op1
= XEXP (op0
, 1);
5282 op0
= XEXP (op0
, 0);
5284 if (GET_MODE (op0
) != VOIDmode
)
5285 mode
= GET_MODE (op0
);
5286 else if (GET_MODE (op1
) != VOIDmode
)
5287 mode
= GET_MODE (op1
);
5292 /* We can't simplify MODE_CC values since we don't know what the
5293 actual comparison is. */
5294 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5297 /* Make sure the constant is second. */
5298 if (swap_commutative_operands_p (op0
, op1
))
5300 std::swap (op0
, op1
);
5301 code
= swap_condition (code
);
5304 trueop0
= avoid_constant_pool_reference (op0
);
5305 trueop1
= avoid_constant_pool_reference (op1
);
5307 /* For integer comparisons of A and B maybe we can simplify A - B and can
5308 then simplify a comparison of that with zero. If A and B are both either
5309 a register or a CONST_INT, this can't help; testing for these cases will
5310 prevent infinite recursion here and speed things up.
5312 We can only do this for EQ and NE comparisons as otherwise we may
5313 lose or introduce overflow which we cannot disregard as undefined as
5314 we do not know the signedness of the operation on either the left or
5315 the right hand side of the comparison. */
5317 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5318 && (code
== EQ
|| code
== NE
)
5319 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5320 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5321 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5322 /* We cannot do this if tem is a nonzero address. */
5323 && ! nonzero_address_p (tem
))
5324 return simplify_const_relational_operation (signed_condition (code
),
5325 mode
, tem
, const0_rtx
);
5327 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5328 return const_true_rtx
;
5330 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5333 /* For modes without NaNs, if the two operands are equal, we know the
5334 result except if they have side-effects. Even with NaNs we know
5335 the result of unordered comparisons and, if signaling NaNs are
5336 irrelevant, also the result of LT/GT/LTGT. */
5337 if ((! HONOR_NANS (trueop0
)
5338 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5339 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5340 && ! HONOR_SNANS (trueop0
)))
5341 && rtx_equal_p (trueop0
, trueop1
)
5342 && ! side_effects_p (trueop0
))
5343 return comparison_result (code
, CMP_EQ
);
5345 /* If the operands are floating-point constants, see if we can fold
5347 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5348 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5349 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5351 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5352 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5354 /* Comparisons are unordered iff at least one of the values is NaN. */
5355 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5365 return const_true_rtx
;
5378 return comparison_result (code
,
5379 (real_equal (d0
, d1
) ? CMP_EQ
:
5380 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5383 /* Otherwise, see if the operands are both integers. */
5384 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5385 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
5387 /* It would be nice if we really had a mode here. However, the
5388 largest int representable on the target is as good as
5390 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5391 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5392 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5394 if (wi::eq_p (ptrueop0
, ptrueop1
))
5395 return comparison_result (code
, CMP_EQ
);
5398 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5399 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5400 return comparison_result (code
, cr
);
5404 /* Optimize comparisons with upper and lower bounds. */
5405 scalar_int_mode int_mode
;
5406 if (CONST_INT_P (trueop1
)
5407 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5408 && HWI_COMPUTABLE_MODE_P (int_mode
)
5409 && !side_effects_p (trueop0
))
5412 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5413 HOST_WIDE_INT val
= INTVAL (trueop1
);
5414 HOST_WIDE_INT mmin
, mmax
;
5424 /* Get a reduced range if the sign bit is zero. */
5425 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5432 rtx mmin_rtx
, mmax_rtx
;
5433 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5435 mmin
= INTVAL (mmin_rtx
);
5436 mmax
= INTVAL (mmax_rtx
);
5439 unsigned int sign_copies
5440 = num_sign_bit_copies (trueop0
, int_mode
);
5442 mmin
>>= (sign_copies
- 1);
5443 mmax
>>= (sign_copies
- 1);
5449 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5451 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5452 return const_true_rtx
;
5453 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5458 return const_true_rtx
;
5463 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5465 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5466 return const_true_rtx
;
5467 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5472 return const_true_rtx
;
5478 /* x == y is always false for y out of range. */
5479 if (val
< mmin
|| val
> mmax
)
5483 /* x > y is always false for y >= mmax, always true for y < mmin. */
5485 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5487 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5488 return const_true_rtx
;
5494 return const_true_rtx
;
5497 /* x < y is always false for y <= mmin, always true for y > mmax. */
5499 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5501 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5502 return const_true_rtx
;
5508 return const_true_rtx
;
5512 /* x != y is always true for y out of range. */
5513 if (val
< mmin
|| val
> mmax
)
5514 return const_true_rtx
;
5522 /* Optimize integer comparisons with zero. */
5523 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5524 && trueop1
== const0_rtx
5525 && !side_effects_p (trueop0
))
5527 /* Some addresses are known to be nonzero. We don't know
5528 their sign, but equality comparisons are known. */
5529 if (nonzero_address_p (trueop0
))
5531 if (code
== EQ
|| code
== LEU
)
5533 if (code
== NE
|| code
== GTU
)
5534 return const_true_rtx
;
5537 /* See if the first operand is an IOR with a constant. If so, we
5538 may be able to determine the result of this comparison. */
5539 if (GET_CODE (op0
) == IOR
)
5541 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5542 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5544 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5545 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5546 && (UINTVAL (inner_const
)
5557 return const_true_rtx
;
5561 return const_true_rtx
;
5575 /* Optimize comparison of ABS with zero. */
5576 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5577 && (GET_CODE (trueop0
) == ABS
5578 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5579 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5584 /* Optimize abs(x) < 0.0. */
5585 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5590 /* Optimize abs(x) >= 0.0. */
5591 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5592 return const_true_rtx
;
5596 /* Optimize ! (abs(x) < 0.0). */
5597 return const_true_rtx
;
5607 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5608 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5609 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5610 can be simplified to that or NULL_RTX if not.
5611 Assume X is compared against zero with CMP_CODE and the true
5612 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5615 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5617 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5620 /* Result on X == 0 and X !=0 respectively. */
5621 rtx on_zero
, on_nonzero
;
5625 on_nonzero
= false_val
;
5629 on_zero
= false_val
;
5630 on_nonzero
= true_val
;
5633 rtx_code op_code
= GET_CODE (on_nonzero
);
5634 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5635 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5636 || !CONST_INT_P (on_zero
))
5639 HOST_WIDE_INT op_val
;
5640 scalar_int_mode mode ATTRIBUTE_UNUSED
5641 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5642 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5643 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5644 && op_val
== INTVAL (on_zero
))
5650 /* Try to simplify X given that it appears within operand OP of a
5651 VEC_MERGE operation whose mask is MASK. X need not use the same
5652 vector mode as the VEC_MERGE, but it must have the same number of
5655 Return the simplified X on success, otherwise return NULL_RTX. */
5658 simplify_merge_mask (rtx x
, rtx mask
, int op
)
5660 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
5661 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
5662 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
5664 if (side_effects_p (XEXP (x
, 1 - op
)))
5667 return XEXP (x
, op
);
5670 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5671 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
5673 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5675 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
5676 GET_MODE (XEXP (x
, 0)));
5679 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5680 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5681 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5682 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
5684 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5685 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5688 if (COMPARISON_P (x
))
5689 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
5690 GET_MODE (XEXP (x
, 0)) != VOIDmode
5691 ? GET_MODE (XEXP (x
, 0))
5692 : GET_MODE (XEXP (x
, 1)),
5693 top0
? top0
: XEXP (x
, 0),
5694 top1
? top1
: XEXP (x
, 1));
5696 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
5697 top0
? top0
: XEXP (x
, 0),
5698 top1
? top1
: XEXP (x
, 1));
5701 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
5702 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5703 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5704 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5705 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
5706 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
5707 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
5709 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5710 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5711 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
5712 if (top0
|| top1
|| top2
)
5713 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
5714 GET_MODE (XEXP (x
, 0)),
5715 top0
? top0
: XEXP (x
, 0),
5716 top1
? top1
: XEXP (x
, 1),
5717 top2
? top2
: XEXP (x
, 2));
5723 /* Simplify CODE, an operation with result mode MODE and three operands,
5724 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5725 a constant. Return 0 if no simplifications is possible. */
5728 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5729 machine_mode op0_mode
, rtx op0
, rtx op1
,
5732 bool any_change
= false;
5734 scalar_int_mode int_mode
, int_op0_mode
;
5735 unsigned int n_elts
;
5740 /* Simplify negations around the multiplication. */
5741 /* -a * -b + c => a * b + c. */
5742 if (GET_CODE (op0
) == NEG
)
5744 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5746 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5748 else if (GET_CODE (op1
) == NEG
)
5750 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5752 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5755 /* Canonicalize the two multiplication operands. */
5756 /* a * -b + c => -b * a + c. */
5757 if (swap_commutative_operands_p (op0
, op1
))
5758 std::swap (op0
, op1
), any_change
= true;
5761 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5766 if (CONST_INT_P (op0
)
5767 && CONST_INT_P (op1
)
5768 && CONST_INT_P (op2
)
5769 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5770 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5771 && HWI_COMPUTABLE_MODE_P (int_mode
))
5773 /* Extracting a bit-field from a constant */
5774 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5775 HOST_WIDE_INT op1val
= INTVAL (op1
);
5776 HOST_WIDE_INT op2val
= INTVAL (op2
);
5777 if (!BITS_BIG_ENDIAN
)
5779 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5780 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5782 /* Not enough information to calculate the bit position. */
5785 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5787 /* First zero-extend. */
5788 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5789 /* If desired, propagate sign bit. */
5790 if (code
== SIGN_EXTRACT
5791 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5793 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5796 return gen_int_mode (val
, int_mode
);
5801 if (CONST_INT_P (op0
))
5802 return op0
!= const0_rtx
? op1
: op2
;
5804 /* Convert c ? a : a into "a". */
5805 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5808 /* Convert a != b ? a : b into "a". */
5809 if (GET_CODE (op0
) == NE
5810 && ! side_effects_p (op0
)
5811 && ! HONOR_NANS (mode
)
5812 && ! HONOR_SIGNED_ZEROS (mode
)
5813 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5814 && rtx_equal_p (XEXP (op0
, 1), op2
))
5815 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5816 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5819 /* Convert a == b ? a : b into "b". */
5820 if (GET_CODE (op0
) == EQ
5821 && ! side_effects_p (op0
)
5822 && ! HONOR_NANS (mode
)
5823 && ! HONOR_SIGNED_ZEROS (mode
)
5824 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5825 && rtx_equal_p (XEXP (op0
, 1), op2
))
5826 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5827 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5830 /* Convert (!c) != {0,...,0} ? a : b into
5831 c != {0,...,0} ? b : a for vector modes. */
5832 if (VECTOR_MODE_P (GET_MODE (op1
))
5833 && GET_CODE (op0
) == NE
5834 && GET_CODE (XEXP (op0
, 0)) == NOT
5835 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5837 rtx cv
= XEXP (op0
, 1);
5840 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5843 for (int i
= 0; i
< nunits
; ++i
)
5844 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5851 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5852 XEXP (XEXP (op0
, 0), 0),
5854 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5859 /* Convert x == 0 ? N : clz (x) into clz (x) when
5860 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5861 Similarly for ctz (x). */
5862 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5863 && XEXP (op0
, 1) == const0_rtx
)
5866 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5872 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5874 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5875 ? GET_MODE (XEXP (op0
, 1))
5876 : GET_MODE (XEXP (op0
, 0)));
5879 /* Look for happy constants in op1 and op2. */
5880 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5882 HOST_WIDE_INT t
= INTVAL (op1
);
5883 HOST_WIDE_INT f
= INTVAL (op2
);
5885 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5886 code
= GET_CODE (op0
);
5887 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5890 tmp
= reversed_comparison_code (op0
, NULL
);
5898 return simplify_gen_relational (code
, mode
, cmp_mode
,
5899 XEXP (op0
, 0), XEXP (op0
, 1));
5902 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5903 cmp_mode
, XEXP (op0
, 0),
5906 /* See if any simplifications were possible. */
5909 if (CONST_INT_P (temp
))
5910 return temp
== const0_rtx
? op2
: op1
;
5912 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5918 gcc_assert (GET_MODE (op0
) == mode
);
5919 gcc_assert (GET_MODE (op1
) == mode
);
5920 gcc_assert (VECTOR_MODE_P (mode
));
5921 trueop2
= avoid_constant_pool_reference (op2
);
5922 if (CONST_INT_P (trueop2
)
5923 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
5925 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5926 unsigned HOST_WIDE_INT mask
;
5927 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5930 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5932 if (!(sel
& mask
) && !side_effects_p (op0
))
5934 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5937 rtx trueop0
= avoid_constant_pool_reference (op0
);
5938 rtx trueop1
= avoid_constant_pool_reference (op1
);
5939 if (GET_CODE (trueop0
) == CONST_VECTOR
5940 && GET_CODE (trueop1
) == CONST_VECTOR
)
5942 rtvec v
= rtvec_alloc (n_elts
);
5945 for (i
= 0; i
< n_elts
; i
++)
5946 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5947 ? CONST_VECTOR_ELT (trueop0
, i
)
5948 : CONST_VECTOR_ELT (trueop1
, i
));
5949 return gen_rtx_CONST_VECTOR (mode
, v
);
5952 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5953 if no element from a appears in the result. */
5954 if (GET_CODE (op0
) == VEC_MERGE
)
5956 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5957 if (CONST_INT_P (tem
))
5959 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5960 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5961 return simplify_gen_ternary (code
, mode
, mode
,
5962 XEXP (op0
, 1), op1
, op2
);
5963 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5964 return simplify_gen_ternary (code
, mode
, mode
,
5965 XEXP (op0
, 0), op1
, op2
);
5968 if (GET_CODE (op1
) == VEC_MERGE
)
5970 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5971 if (CONST_INT_P (tem
))
5973 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5974 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5975 return simplify_gen_ternary (code
, mode
, mode
,
5976 op0
, XEXP (op1
, 1), op2
);
5977 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5978 return simplify_gen_ternary (code
, mode
, mode
,
5979 op0
, XEXP (op1
, 0), op2
);
5983 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5985 if (GET_CODE (op0
) == VEC_DUPLICATE
5986 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5987 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5988 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
5990 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5991 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5993 if (XEXP (XEXP (op0
, 0), 0) == op1
5994 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5998 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6000 with (vec_concat (X) (B)) if N == 1 or
6001 (vec_concat (A) (X)) if N == 2. */
6002 if (GET_CODE (op0
) == VEC_DUPLICATE
6003 && GET_CODE (op1
) == CONST_VECTOR
6004 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6005 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6006 && IN_RANGE (sel
, 1, 2))
6008 rtx newop0
= XEXP (op0
, 0);
6009 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6011 std::swap (newop0
, newop1
);
6012 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6014 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6015 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6016 Only applies for vectors of two elements. */
6017 if (GET_CODE (op0
) == VEC_DUPLICATE
6018 && GET_CODE (op1
) == VEC_CONCAT
6019 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6020 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6021 && IN_RANGE (sel
, 1, 2))
6023 rtx newop0
= XEXP (op0
, 0);
6024 rtx newop1
= XEXP (op1
, 2 - sel
);
6025 rtx otherop
= XEXP (op1
, sel
- 1);
6027 std::swap (newop0
, newop1
);
6028 /* Don't want to throw away the other part of the vec_concat if
6029 it has side-effects. */
6030 if (!side_effects_p (otherop
))
6031 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6036 (vec_merge:outer (vec_duplicate:outer x:inner)
6037 (subreg:outer y:inner 0)
6040 with (vec_concat:outer x:inner y:inner) if N == 1,
6041 or (vec_concat:outer y:inner x:inner) if N == 2.
6043 Implicitly, this means we have a paradoxical subreg, but such
6044 a check is cheap, so make it anyway.
6046 Only applies for vectors of two elements. */
6047 if (GET_CODE (op0
) == VEC_DUPLICATE
6048 && GET_CODE (op1
) == SUBREG
6049 && GET_MODE (op1
) == GET_MODE (op0
)
6050 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6051 && paradoxical_subreg_p (op1
)
6052 && subreg_lowpart_p (op1
)
6053 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6054 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6055 && IN_RANGE (sel
, 1, 2))
6057 rtx newop0
= XEXP (op0
, 0);
6058 rtx newop1
= SUBREG_REG (op1
);
6060 std::swap (newop0
, newop1
);
6061 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6064 /* Same as above but with switched operands:
6065 Replace (vec_merge:outer (subreg:outer x:inner 0)
6066 (vec_duplicate:outer y:inner)
6069 with (vec_concat:outer x:inner y:inner) if N == 1,
6070 or (vec_concat:outer y:inner x:inner) if N == 2. */
6071 if (GET_CODE (op1
) == VEC_DUPLICATE
6072 && GET_CODE (op0
) == SUBREG
6073 && GET_MODE (op0
) == GET_MODE (op1
)
6074 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6075 && paradoxical_subreg_p (op0
)
6076 && subreg_lowpart_p (op0
)
6077 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6078 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6079 && IN_RANGE (sel
, 1, 2))
6081 rtx newop0
= SUBREG_REG (op0
);
6082 rtx newop1
= XEXP (op1
, 0);
6084 std::swap (newop0
, newop1
);
6085 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6088 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6090 with (vec_concat x y) or (vec_concat y x) depending on value
6092 if (GET_CODE (op0
) == VEC_DUPLICATE
6093 && GET_CODE (op1
) == VEC_DUPLICATE
6094 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6095 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6096 && IN_RANGE (sel
, 1, 2))
6098 rtx newop0
= XEXP (op0
, 0);
6099 rtx newop1
= XEXP (op1
, 0);
6101 std::swap (newop0
, newop1
);
6103 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6107 if (rtx_equal_p (op0
, op1
)
6108 && !side_effects_p (op2
) && !side_effects_p (op1
))
6111 if (!side_effects_p (op2
))
6114 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6116 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6118 return simplify_gen_ternary (code
, mode
, mode
,
6120 top1
? top1
: op1
, op2
);
6132 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
6133 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
6134 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
6136 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
6137 represented as a little-endian array of 'unsigned char', selecting by BYTE,
6138 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
6139 FIRST_ELEM is the number of the first element to extract, otherwise
6140 FIRST_ELEM is ignored. */
6143 simplify_immed_subreg (fixed_size_mode outermode
, rtx op
,
6144 machine_mode innermode
, unsigned int byte
,
6145 unsigned int first_elem
, unsigned int inner_bytes
)
6149 value_mask
= (1 << value_bit
) - 1
6151 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
6159 rtx result_s
= NULL
;
6160 rtvec result_v
= NULL
;
6161 enum mode_class outer_class
;
6162 scalar_mode outer_submode
;
6165 /* Some ports misuse CCmode. */
6166 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
6169 /* We have no way to represent a complex constant at the rtl level. */
6170 if (COMPLEX_MODE_P (outermode
))
6173 /* We support any size mode. */
6174 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
6175 inner_bytes
* BITS_PER_UNIT
);
6177 /* Unpack the value. */
6179 if (GET_CODE (op
) == CONST_VECTOR
)
6181 num_elem
= CEIL (inner_bytes
, GET_MODE_UNIT_SIZE (innermode
));
6182 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
6187 elem_bitsize
= max_bitsize
;
6189 /* If this asserts, it is too complicated; reducing value_bit may help. */
6190 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
6191 /* I don't know how to handle endianness of sub-units. */
6192 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
6194 for (elem
= 0; elem
< num_elem
; elem
++)
6197 rtx el
= (GET_CODE (op
) == CONST_VECTOR
6198 ? CONST_VECTOR_ELT (op
, first_elem
+ elem
)
6201 /* Vectors are kept in target memory order. (This is probably
6204 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6205 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6207 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6208 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6209 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6210 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6211 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6214 switch (GET_CODE (el
))
6218 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6220 *vp
++ = INTVAL (el
) >> i
;
6221 /* CONST_INTs are always logically sign-extended. */
6222 for (; i
< elem_bitsize
; i
+= value_bit
)
6223 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
6226 case CONST_WIDE_INT
:
6228 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
6229 unsigned char extend
= wi::sign_mask (val
);
6230 int prec
= wi::get_precision (val
);
6232 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
6233 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
6234 for (; i
< elem_bitsize
; i
+= value_bit
)
6240 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
6242 unsigned char extend
= 0;
6243 /* If this triggers, someone should have generated a
6244 CONST_INT instead. */
6245 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
6247 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6248 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
6249 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
6252 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
6256 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
6258 for (; i
< elem_bitsize
; i
+= value_bit
)
6263 /* This is big enough for anything on the platform. */
6264 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6265 scalar_float_mode el_mode
;
6267 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
6268 int bitsize
= GET_MODE_BITSIZE (el_mode
);
6270 gcc_assert (bitsize
<= elem_bitsize
);
6271 gcc_assert (bitsize
% value_bit
== 0);
6273 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
6276 /* real_to_target produces its result in words affected by
6277 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6278 and use WORDS_BIG_ENDIAN instead; see the documentation
6279 of SUBREG in rtl.texi. */
6280 for (i
= 0; i
< bitsize
; i
+= value_bit
)
6283 if (WORDS_BIG_ENDIAN
)
6284 ibase
= bitsize
- 1 - i
;
6287 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
6290 /* It shouldn't matter what's done here, so fill it with
6292 for (; i
< elem_bitsize
; i
+= value_bit
)
6298 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
6300 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6301 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6305 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6306 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6307 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
6309 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
6310 >> (i
- HOST_BITS_PER_WIDE_INT
);
6311 for (; i
< elem_bitsize
; i
+= value_bit
)
6321 /* Now, pick the right byte to start with. */
6322 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6323 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6324 will already have offset 0. */
6325 if (inner_bytes
>= GET_MODE_SIZE (outermode
))
6327 unsigned ibyte
= inner_bytes
- GET_MODE_SIZE (outermode
) - byte
;
6328 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6329 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6330 byte
= (subword_byte
% UNITS_PER_WORD
6331 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6334 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6335 so if it's become negative it will instead be very large.) */
6336 gcc_assert (byte
< inner_bytes
);
6338 /* Convert from bytes to chunks of size value_bit. */
6339 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
6341 /* Re-pack the value. */
6342 num_elem
= GET_MODE_NUNITS (outermode
);
6344 if (VECTOR_MODE_P (outermode
))
6346 result_v
= rtvec_alloc (num_elem
);
6347 elems
= &RTVEC_ELT (result_v
, 0);
6352 outer_submode
= GET_MODE_INNER (outermode
);
6353 outer_class
= GET_MODE_CLASS (outer_submode
);
6354 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
6356 gcc_assert (elem_bitsize
% value_bit
== 0);
6357 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
6359 for (elem
= 0; elem
< num_elem
; elem
++)
6363 /* Vectors are stored in target memory order. (This is probably
6366 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6367 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6369 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6370 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6371 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6372 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6373 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6376 switch (outer_class
)
6379 case MODE_PARTIAL_INT
:
6384 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
6385 / HOST_BITS_PER_WIDE_INT
;
6386 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
6389 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
6391 for (u
= 0; u
< units
; u
++)
6393 unsigned HOST_WIDE_INT buf
= 0;
6395 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
6397 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6400 base
+= HOST_BITS_PER_WIDE_INT
;
6402 r
= wide_int::from_array (tmp
, units
,
6403 GET_MODE_PRECISION (outer_submode
));
6404 #if TARGET_SUPPORTS_WIDE_INT == 0
6405 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6406 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
6409 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
6414 case MODE_DECIMAL_FLOAT
:
6417 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32] = { 0 };
6419 /* real_from_target wants its input in words affected by
6420 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6421 and use WORDS_BIG_ENDIAN instead; see the documentation
6422 of SUBREG in rtl.texi. */
6423 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6426 if (WORDS_BIG_ENDIAN
)
6427 ibase
= elem_bitsize
- 1 - i
;
6430 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
6433 real_from_target (&r
, tmp
, outer_submode
);
6434 elems
[elem
] = const_double_from_real_value (r
, outer_submode
);
6446 f
.mode
= outer_submode
;
6449 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6451 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6452 for (; i
< elem_bitsize
; i
+= value_bit
)
6453 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
6454 << (i
- HOST_BITS_PER_WIDE_INT
));
6456 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
6464 if (VECTOR_MODE_P (outermode
))
6465 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
6470 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6471 Return 0 if no simplifications are possible. */
6473 simplify_subreg (machine_mode outermode
, rtx op
,
6474 machine_mode innermode
, poly_uint64 byte
)
6476 /* Little bit of sanity checking. */
6477 gcc_assert (innermode
!= VOIDmode
);
6478 gcc_assert (outermode
!= VOIDmode
);
6479 gcc_assert (innermode
!= BLKmode
);
6480 gcc_assert (outermode
!= BLKmode
);
6482 gcc_assert (GET_MODE (op
) == innermode
6483 || GET_MODE (op
) == VOIDmode
);
6485 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
6486 if (!multiple_p (byte
, outersize
))
6489 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
6490 if (maybe_ge (byte
, innersize
))
6493 if (outermode
== innermode
&& known_eq (byte
, 0U))
6496 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
6500 if (VECTOR_MODE_P (outermode
)
6501 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
6502 && vec_duplicate_p (op
, &elt
))
6503 return gen_vec_duplicate (outermode
, elt
);
6505 if (outermode
== GET_MODE_INNER (innermode
)
6506 && vec_duplicate_p (op
, &elt
))
6510 if (CONST_SCALAR_INT_P (op
)
6511 || CONST_DOUBLE_AS_FLOAT_P (op
)
6512 || CONST_FIXED_P (op
)
6513 || GET_CODE (op
) == CONST_VECTOR
)
6515 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6516 the result from bytes, so it only works if the sizes of the modes
6517 and the value of the offset are known at compile time. Cases that
6518 that apply to general modes and offsets should be handled here
6519 before calling simplify_immed_subreg. */
6520 fixed_size_mode fs_outermode
, fs_innermode
;
6521 unsigned HOST_WIDE_INT cbyte
;
6522 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
)
6523 && is_a
<fixed_size_mode
> (innermode
, &fs_innermode
)
6524 && byte
.is_constant (&cbyte
))
6525 return simplify_immed_subreg (fs_outermode
, op
, fs_innermode
, cbyte
,
6526 0, GET_MODE_SIZE (fs_innermode
));
6528 /* Handle constant-sized outer modes and variable-sized inner modes. */
6529 unsigned HOST_WIDE_INT first_elem
;
6530 if (GET_CODE (op
) == CONST_VECTOR
6531 && is_a
<fixed_size_mode
> (outermode
, &fs_outermode
)
6532 && constant_multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
),
6534 return simplify_immed_subreg (fs_outermode
, op
, innermode
, 0,
6536 GET_MODE_SIZE (fs_outermode
));
6541 /* Changing mode twice with SUBREG => just change it once,
6542 or not at all if changing back op starting mode. */
6543 if (GET_CODE (op
) == SUBREG
)
6545 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
6546 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
6549 if (outermode
== innermostmode
6550 && known_eq (byte
, 0U)
6551 && known_eq (SUBREG_BYTE (op
), 0))
6552 return SUBREG_REG (op
);
6554 /* Work out the memory offset of the final OUTERMODE value relative
6555 to the inner value of OP. */
6556 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
6558 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
6559 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
6561 /* See whether resulting subreg will be paradoxical. */
6562 if (!paradoxical_subreg_p (outermode
, innermostmode
))
6564 /* Bail out in case resulting subreg would be incorrect. */
6565 if (maybe_lt (final_offset
, 0)
6566 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
6567 || !multiple_p (final_offset
, outersize
))
6572 poly_int64 required_offset
= subreg_memory_offset (outermode
,
6574 if (maybe_ne (final_offset
, required_offset
))
6576 /* Paradoxical subregs always have byte offset 0. */
6580 /* Recurse for further possible simplifications. */
6581 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
6585 if (validate_subreg (outermode
, innermostmode
,
6586 SUBREG_REG (op
), final_offset
))
6588 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
6589 if (SUBREG_PROMOTED_VAR_P (op
)
6590 && SUBREG_PROMOTED_SIGN (op
) >= 0
6591 && GET_MODE_CLASS (outermode
) == MODE_INT
6592 && known_ge (outersize
, innersize
)
6593 && known_le (outersize
, innermostsize
)
6594 && subreg_lowpart_p (newx
))
6596 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6597 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
6604 /* SUBREG of a hard register => just change the register number
6605 and/or mode. If the hard register is not valid in that mode,
6606 suppress this simplification. If the hard register is the stack,
6607 frame, or argument pointer, leave this as a SUBREG. */
6609 if (REG_P (op
) && HARD_REGISTER_P (op
))
6611 unsigned int regno
, final_regno
;
6614 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
6615 if (HARD_REGISTER_NUM_P (final_regno
))
6617 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
6618 subreg_memory_offset (outermode
,
6621 /* Propagate original regno. We don't have any way to specify
6622 the offset inside original regno, so do so only for lowpart.
6623 The information is used only by alias analysis that cannot
6624 grok partial register anyway. */
6626 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
6627 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
6632 /* If we have a SUBREG of a register that we are replacing and we are
6633 replacing it with a MEM, make a new MEM and try replacing the
6634 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6635 or if we would be widening it. */
6638 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
6639 /* Allow splitting of volatile memory references in case we don't
6640 have instruction to move the whole thing. */
6641 && (! MEM_VOLATILE_P (op
)
6642 || ! have_insn_for (SET
, innermode
))
6643 && known_le (outersize
, innersize
))
6644 return adjust_address_nv (op
, outermode
, byte
);
6646 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6648 if (GET_CODE (op
) == CONCAT
6649 || GET_CODE (op
) == VEC_CONCAT
)
6651 poly_uint64 final_offset
;
6654 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
6655 if (part_mode
== VOIDmode
)
6656 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6657 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
6658 if (known_lt (byte
, part_size
))
6660 part
= XEXP (op
, 0);
6661 final_offset
= byte
;
6663 else if (known_ge (byte
, part_size
))
6665 part
= XEXP (op
, 1);
6666 final_offset
= byte
- part_size
;
6671 if (maybe_gt (final_offset
+ outersize
, part_size
))
6674 part_mode
= GET_MODE (part
);
6675 if (part_mode
== VOIDmode
)
6676 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6677 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
6680 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
6681 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
6686 (subreg (vec_merge (X)
6688 (const_int ((1 << N) | M)))
6689 (N * sizeof (outermode)))
6691 (subreg (X) (N * sizeof (outermode)))
6694 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
6695 && idx
< HOST_BITS_PER_WIDE_INT
6696 && GET_CODE (op
) == VEC_MERGE
6697 && GET_MODE_INNER (innermode
) == outermode
6698 && CONST_INT_P (XEXP (op
, 2))
6699 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
6700 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
6702 /* A SUBREG resulting from a zero extension may fold to zero if
6703 it extracts higher bits that the ZERO_EXTEND's source bits. */
6704 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
6706 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
6707 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
6708 return CONST0_RTX (outermode
);
6711 scalar_int_mode int_outermode
, int_innermode
;
6712 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
6713 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
6714 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
6716 /* Handle polynomial integers. The upper bits of a paradoxical
6717 subreg are undefined, so this is safe regardless of whether
6718 we're truncating or extending. */
6719 if (CONST_POLY_INT_P (op
))
6722 = poly_wide_int::from (const_poly_int_value (op
),
6723 GET_MODE_PRECISION (int_outermode
),
6725 return immed_wide_int_const (val
, int_outermode
);
6728 if (GET_MODE_PRECISION (int_outermode
)
6729 < GET_MODE_PRECISION (int_innermode
))
6731 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
6737 /* If OP is a vector comparison and the subreg is not changing the
6738 number of elements or the size of the elements, change the result
6739 of the comparison to the new mode. */
6740 if (COMPARISON_P (op
)
6741 && VECTOR_MODE_P (outermode
)
6742 && VECTOR_MODE_P (innermode
)
6743 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
6744 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
6745 GET_MODE_UNIT_SIZE (innermode
)))
6746 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
6747 XEXP (op
, 0), XEXP (op
, 1));
/* Make a SUBREG operation or equivalent if it folds.

   OUTERMODE is the mode of the resulting subreg, OP the inner value,
   INNERMODE the mode OP is (or should be) in, and BYTE the byte offset
   of the subreg within OP.  Returns the folded rtx, a fresh SUBREG, or
   NULL_RTX when no valid subreg can be formed.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  /* First try to fold the subreg away entirely.  */
  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  /* Never wrap a SUBREG around another SUBREG or a CONCAT, and never
     take a SUBREG of a VOIDmode (mode-less constant) rtx.  */
  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  /* Only build the SUBREG if the target considers it well-formed.  */
  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.

   Thin convenience wrapper: delegates to simplify_gen_subreg with the
   byte offset of the lowpart, as computed by subreg_lowpart_offset
   (presumably endian-dependent -- not shown here; confirm against its
   definition).  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
6785 /* Simplify X, an rtx expression.
6787 Return the simplified expression or NULL if no simplifications
6790 This is the preferred entry point into the simplification routines;
6791 however, we still allow passes to call the more specific routines.
6793 Right now GCC has three (yes, three) major bodies of RTL simplification
6794 code that need to be unified.
6796 1. fold_rtx in cse.c. This code uses various CSE specific
6797 information to aid in RTL simplification.
6799 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6800 it uses combine specific information to aid in RTL
6803 3. The routines in this file.
6806 Long term we want to only have one body of simplification code; to
6807 get to that state I recommend the following steps:
6809 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6810 which are not pass dependent state into these routines.
6812 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6813 use this routine whenever possible.
6815 3. Allow for pass dependent state to be provided to these
6816 routines and add simplifications based on the pass dependent
6817 state. Remove code from cse.c & combine.c that becomes
6820 It will take time, but ultimately the compiler will be easier to
6821 maintain and improve. It's totally silly that when we add a
6822 simplification that it needs to be added to 4 places (3 for RTL
6823 simplification and 1 for tree simplification).  */
6826 simplify_rtx (const_rtx x
)
6828 const enum rtx_code code
= GET_CODE (x
);
6829 const machine_mode mode
= GET_MODE (x
);
6831 switch (GET_RTX_CLASS (code
))
6834 return simplify_unary_operation (code
, mode
,
6835 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
6836 case RTX_COMM_ARITH
:
6837 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
6838 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
6843 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
6846 case RTX_BITFIELD_OPS
:
6847 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
6848 XEXP (x
, 0), XEXP (x
, 1),
6852 case RTX_COMM_COMPARE
:
6853 return simplify_relational_operation (code
, mode
,
6854 ((GET_MODE (XEXP (x
, 0))
6856 ? GET_MODE (XEXP (x
, 0))
6857 : GET_MODE (XEXP (x
, 1))),
6863 return simplify_subreg (mode
, SUBREG_REG (x
),
6864 GET_MODE (SUBREG_REG (x
)),
6871 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6872 if (GET_CODE (XEXP (x
, 0)) == HIGH
6873 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
6886 namespace selftest
{
/* Make a unique pseudo REG of mode MODE for use by selftests.

   Each call hands out a fresh register number, counting upward from
   just above the virtual registers, so no two test registers ever
   alias one another.  */

static rtx
make_test_reg (machine_mode mode)
{
  /* Persistent counter: survives across calls so every returned REG
     gets a distinct regno.  */
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
6898 /* Test vector simplifications involving VEC_DUPLICATE in which the
6899 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6900 register that holds one element of MODE. */
6903 test_vector_ops_duplicate (machine_mode mode
, rtx scalar_reg
)
6905 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
6906 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
6907 poly_uint64 nunits
= GET_MODE_NUNITS (mode
);
6908 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
6910 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6911 rtx not_scalar_reg
= gen_rtx_NOT (inner_mode
, scalar_reg
);
6912 rtx duplicate_not
= gen_rtx_VEC_DUPLICATE (mode
, not_scalar_reg
);
6913 ASSERT_RTX_EQ (duplicate
,
6914 simplify_unary_operation (NOT
, mode
,
6915 duplicate_not
, mode
));
6917 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
6918 rtx duplicate_neg
= gen_rtx_VEC_DUPLICATE (mode
, neg_scalar_reg
);
6919 ASSERT_RTX_EQ (duplicate
,
6920 simplify_unary_operation (NEG
, mode
,
6921 duplicate_neg
, mode
));
6923 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6924 ASSERT_RTX_EQ (duplicate
,
6925 simplify_binary_operation (PLUS
, mode
, duplicate
,
6926 CONST0_RTX (mode
)));
6928 ASSERT_RTX_EQ (duplicate
,
6929 simplify_binary_operation (MINUS
, mode
, duplicate
,
6930 CONST0_RTX (mode
)));
6932 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode
),
6933 simplify_binary_operation (MINUS
, mode
, duplicate
,
6937 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6938 rtx zero_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, const0_rtx
));
6939 ASSERT_RTX_PTR_EQ (scalar_reg
,
6940 simplify_binary_operation (VEC_SELECT
, inner_mode
,
6941 duplicate
, zero_par
));
6943 unsigned HOST_WIDE_INT const_nunits
;
6944 if (nunits
.is_constant (&const_nunits
))
6946 /* And again with the final element. */
6947 rtx last_index
= gen_int_mode (const_nunits
- 1, word_mode
);
6948 rtx last_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, last_index
));
6949 ASSERT_RTX_PTR_EQ (scalar_reg
,
6950 simplify_binary_operation (VEC_SELECT
, inner_mode
,
6951 duplicate
, last_par
));
6953 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
6954 rtx vector_reg
= make_test_reg (mode
);
6955 for (unsigned HOST_WIDE_INT i
= 0; i
< const_nunits
; i
++)
6957 if (i
>= HOST_BITS_PER_WIDE_INT
)
6959 rtx mask
= GEN_INT ((HOST_WIDE_INT_1U
<< i
) | (i
+ 1));
6960 rtx vm
= gen_rtx_VEC_MERGE (mode
, duplicate
, vector_reg
, mask
);
6961 poly_uint64 offset
= i
* GET_MODE_SIZE (inner_mode
);
6962 ASSERT_RTX_EQ (scalar_reg
,
6963 simplify_gen_subreg (inner_mode
, vm
,
6968 /* Test a scalar subreg of a VEC_DUPLICATE. */
6969 poly_uint64 offset
= subreg_lowpart_offset (inner_mode
, mode
);
6970 ASSERT_RTX_EQ (scalar_reg
,
6971 simplify_gen_subreg (inner_mode
, duplicate
,
6974 machine_mode narrower_mode
;
6975 if (maybe_ne (nunits
, 2U)
6976 && multiple_p (nunits
, 2)
6977 && mode_for_vector (inner_mode
, 2).exists (&narrower_mode
)
6978 && VECTOR_MODE_P (narrower_mode
))
6980 /* Test VEC_SELECT of a vector. */
6982 = gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, const1_rtx
, const0_rtx
));
6983 rtx narrower_duplicate
6984 = gen_rtx_VEC_DUPLICATE (narrower_mode
, scalar_reg
);
6985 ASSERT_RTX_EQ (narrower_duplicate
,
6986 simplify_binary_operation (VEC_SELECT
, narrower_mode
,
6987 duplicate
, vec_par
));
6989 /* Test a vector subreg of a VEC_DUPLICATE. */
6990 poly_uint64 offset
= subreg_lowpart_offset (narrower_mode
, mode
);
6991 ASSERT_RTX_EQ (narrower_duplicate
,
6992 simplify_gen_subreg (narrower_mode
, duplicate
,
6997 /* Test vector simplifications involving VEC_SERIES in which the
6998 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6999 register that holds one element of MODE. */
7002 test_vector_ops_series (machine_mode mode
, rtx scalar_reg
)
7004 /* Test unary cases with VEC_SERIES arguments. */
7005 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7006 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7007 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7008 rtx series_0_r
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, scalar_reg
);
7009 rtx series_0_nr
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, neg_scalar_reg
);
7010 rtx series_nr_1
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
, const1_rtx
);
7011 rtx series_r_m1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, constm1_rtx
);
7012 rtx series_r_r
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, scalar_reg
);
7013 rtx series_nr_nr
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
,
7015 ASSERT_RTX_EQ (series_0_r
,
7016 simplify_unary_operation (NEG
, mode
, series_0_nr
, mode
));
7017 ASSERT_RTX_EQ (series_r_m1
,
7018 simplify_unary_operation (NEG
, mode
, series_nr_1
, mode
));
7019 ASSERT_RTX_EQ (series_r_r
,
7020 simplify_unary_operation (NEG
, mode
, series_nr_nr
, mode
));
7022 /* Test that a VEC_SERIES with a zero step is simplified away. */
7023 ASSERT_RTX_EQ (duplicate
,
7024 simplify_binary_operation (VEC_SERIES
, mode
,
7025 scalar_reg
, const0_rtx
));
7027 /* Test PLUS and MINUS with VEC_SERIES. */
7028 rtx series_0_1
= gen_const_vec_series (mode
, const0_rtx
, const1_rtx
);
7029 rtx series_0_m1
= gen_const_vec_series (mode
, const0_rtx
, constm1_rtx
);
7030 rtx series_r_1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, const1_rtx
);
7031 ASSERT_RTX_EQ (series_r_r
,
7032 simplify_binary_operation (PLUS
, mode
, series_0_r
,
7034 ASSERT_RTX_EQ (series_r_1
,
7035 simplify_binary_operation (PLUS
, mode
, duplicate
,
7037 ASSERT_RTX_EQ (series_r_m1
,
7038 simplify_binary_operation (PLUS
, mode
, duplicate
,
7040 ASSERT_RTX_EQ (series_0_r
,
7041 simplify_binary_operation (MINUS
, mode
, series_r_r
,
7043 ASSERT_RTX_EQ (series_r_m1
,
7044 simplify_binary_operation (MINUS
, mode
, duplicate
,
7046 ASSERT_RTX_EQ (series_r_1
,
7047 simplify_binary_operation (MINUS
, mode
, duplicate
,
7049 ASSERT_RTX_EQ (series_0_m1
,
7050 simplify_binary_operation (VEC_SERIES
, mode
, const0_rtx
,
7053 /* Test NEG on constant vector series. */
7054 ASSERT_RTX_EQ (series_0_m1
,
7055 simplify_unary_operation (NEG
, mode
, series_0_1
, mode
));
7056 ASSERT_RTX_EQ (series_0_1
,
7057 simplify_unary_operation (NEG
, mode
, series_0_m1
, mode
));
7059 /* Test PLUS and MINUS on constant vector series. */
7060 rtx scalar2
= gen_int_mode (2, inner_mode
);
7061 rtx scalar3
= gen_int_mode (3, inner_mode
);
7062 rtx series_1_1
= gen_const_vec_series (mode
, const1_rtx
, const1_rtx
);
7063 rtx series_0_2
= gen_const_vec_series (mode
, const0_rtx
, scalar2
);
7064 rtx series_1_3
= gen_const_vec_series (mode
, const1_rtx
, scalar3
);
7065 ASSERT_RTX_EQ (series_1_1
,
7066 simplify_binary_operation (PLUS
, mode
, series_0_1
,
7067 CONST1_RTX (mode
)));
7068 ASSERT_RTX_EQ (series_0_m1
,
7069 simplify_binary_operation (PLUS
, mode
, CONST0_RTX (mode
),
7071 ASSERT_RTX_EQ (series_1_3
,
7072 simplify_binary_operation (PLUS
, mode
, series_1_1
,
7074 ASSERT_RTX_EQ (series_0_1
,
7075 simplify_binary_operation (MINUS
, mode
, series_1_1
,
7076 CONST1_RTX (mode
)));
7077 ASSERT_RTX_EQ (series_1_1
,
7078 simplify_binary_operation (MINUS
, mode
, CONST1_RTX (mode
),
7080 ASSERT_RTX_EQ (series_1_1
,
7081 simplify_binary_operation (MINUS
, mode
, series_1_3
,
7084 /* Test MULT between constant vectors. */
7085 rtx vec2
= gen_const_vec_duplicate (mode
, scalar2
);
7086 rtx vec3
= gen_const_vec_duplicate (mode
, scalar3
);
7087 rtx scalar9
= gen_int_mode (9, inner_mode
);
7088 rtx series_3_9
= gen_const_vec_series (mode
, scalar3
, scalar9
);
7089 ASSERT_RTX_EQ (series_0_2
,
7090 simplify_binary_operation (MULT
, mode
, series_0_1
, vec2
));
7091 ASSERT_RTX_EQ (series_3_9
,
7092 simplify_binary_operation (MULT
, mode
, vec3
, series_1_3
));
7093 if (!GET_MODE_NUNITS (mode
).is_constant ())
7094 ASSERT_FALSE (simplify_binary_operation (MULT
, mode
, series_0_1
,
7097 /* Test ASHIFT between constant vectors. */
7098 ASSERT_RTX_EQ (series_0_2
,
7099 simplify_binary_operation (ASHIFT
, mode
, series_0_1
,
7100 CONST1_RTX (mode
)));
7101 if (!GET_MODE_NUNITS (mode
).is_constant ())
7102 ASSERT_FALSE (simplify_binary_operation (ASHIFT
, mode
, CONST1_RTX (mode
),
7106 /* Verify simplify_merge_mask works correctly. */
7109 test_vec_merge (machine_mode mode
)
7111 rtx op0
= make_test_reg (mode
);
7112 rtx op1
= make_test_reg (mode
);
7113 rtx op2
= make_test_reg (mode
);
7114 rtx op3
= make_test_reg (mode
);
7115 rtx op4
= make_test_reg (mode
);
7116 rtx op5
= make_test_reg (mode
);
7117 rtx mask1
= make_test_reg (SImode
);
7118 rtx mask2
= make_test_reg (SImode
);
7119 rtx vm1
= gen_rtx_VEC_MERGE (mode
, op0
, op1
, mask1
);
7120 rtx vm2
= gen_rtx_VEC_MERGE (mode
, op2
, op3
, mask1
);
7121 rtx vm3
= gen_rtx_VEC_MERGE (mode
, op4
, op5
, mask1
);
7123 /* Simple vec_merge. */
7124 ASSERT_EQ (op0
, simplify_merge_mask (vm1
, mask1
, 0));
7125 ASSERT_EQ (op1
, simplify_merge_mask (vm1
, mask1
, 1));
7126 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 0));
7127 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 1));
7129 /* Nested vec_merge.
7130 It's tempting to make this simplify right down to opN, but we don't
7131 because all the simplify_* functions assume that the operands have
7132 already been simplified. */
7133 rtx nvm
= gen_rtx_VEC_MERGE (mode
, vm1
, vm2
, mask1
);
7134 ASSERT_EQ (vm1
, simplify_merge_mask (nvm
, mask1
, 0));
7135 ASSERT_EQ (vm2
, simplify_merge_mask (nvm
, mask1
, 1));
7137 /* Intermediate unary op. */
7138 rtx unop
= gen_rtx_NOT (mode
, vm1
);
7139 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op0
),
7140 simplify_merge_mask (unop
, mask1
, 0));
7141 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op1
),
7142 simplify_merge_mask (unop
, mask1
, 1));
7144 /* Intermediate binary op. */
7145 rtx binop
= gen_rtx_PLUS (mode
, vm1
, vm2
);
7146 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op0
, op2
),
7147 simplify_merge_mask (binop
, mask1
, 0));
7148 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op1
, op3
),
7149 simplify_merge_mask (binop
, mask1
, 1));
7151 /* Intermediate ternary op. */
7152 rtx tenop
= gen_rtx_FMA (mode
, vm1
, vm2
, vm3
);
7153 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op0
, op2
, op4
),
7154 simplify_merge_mask (tenop
, mask1
, 0));
7155 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op1
, op3
, op5
),
7156 simplify_merge_mask (tenop
, mask1
, 1));
7159 rtx badop0
= gen_rtx_PRE_INC (mode
, op0
);
7160 rtx badvm
= gen_rtx_VEC_MERGE (mode
, badop0
, op1
, mask1
);
7161 ASSERT_EQ (badop0
, simplify_merge_mask (badvm
, mask1
, 0));
7162 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (badvm
, mask1
, 1));
7164 /* Called indirectly. */
7165 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode
, op0
, op3
, mask1
),
7166 simplify_rtx (nvm
));
7169 /* Verify some simplifications involving vectors. */
7174 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
7176 machine_mode mode
= (machine_mode
) i
;
7177 if (VECTOR_MODE_P (mode
))
7179 rtx scalar_reg
= make_test_reg (GET_MODE_INNER (mode
));
7180 test_vector_ops_duplicate (mode
, scalar_reg
);
7181 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
7182 && maybe_gt (GET_MODE_NUNITS (mode
), 2))
7183 test_vector_ops_series (mode
, scalar_reg
);
7184 test_vec_merge (mode
);
/* Dispatch on the number of polynomial coefficients: the tests below
   only make sense when there is more than one coefficient, so the
   single-coefficient specialization is a no-op.  */

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
7201 /* Test various CONST_POLY_INT properties. */
7203 template<unsigned int N
>
7205 simplify_const_poly_int_tests
<N
>::run ()
7207 rtx x1
= gen_int_mode (poly_int64 (1, 1), QImode
);
7208 rtx x2
= gen_int_mode (poly_int64 (-80, 127), QImode
);
7209 rtx x3
= gen_int_mode (poly_int64 (-79, -128), QImode
);
7210 rtx x4
= gen_int_mode (poly_int64 (5, 4), QImode
);
7211 rtx x5
= gen_int_mode (poly_int64 (30, 24), QImode
);
7212 rtx x6
= gen_int_mode (poly_int64 (20, 16), QImode
);
7213 rtx x7
= gen_int_mode (poly_int64 (7, 4), QImode
);
7214 rtx x8
= gen_int_mode (poly_int64 (30, 24), HImode
);
7215 rtx x9
= gen_int_mode (poly_int64 (-30, -24), HImode
);
7216 rtx x10
= gen_int_mode (poly_int64 (-31, -24), HImode
);
7217 rtx two
= GEN_INT (2);
7218 rtx six
= GEN_INT (6);
7219 poly_uint64 offset
= subreg_lowpart_offset (QImode
, HImode
);
7221 /* These tests only try limited operation combinations. Fuller arithmetic
7222 testing is done directly on poly_ints. */
7223 ASSERT_EQ (simplify_unary_operation (NEG
, HImode
, x8
, HImode
), x9
);
7224 ASSERT_EQ (simplify_unary_operation (NOT
, HImode
, x8
, HImode
), x10
);
7225 ASSERT_EQ (simplify_unary_operation (TRUNCATE
, QImode
, x8
, HImode
), x5
);
7226 ASSERT_EQ (simplify_binary_operation (PLUS
, QImode
, x1
, x2
), x3
);
7227 ASSERT_EQ (simplify_binary_operation (MINUS
, QImode
, x3
, x1
), x2
);
7228 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, x4
, six
), x5
);
7229 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, six
, x4
), x5
);
7230 ASSERT_EQ (simplify_binary_operation (ASHIFT
, QImode
, x4
, two
), x6
);
7231 ASSERT_EQ (simplify_binary_operation (IOR
, QImode
, x4
, two
), x7
);
7232 ASSERT_EQ (simplify_subreg (HImode
, x5
, QImode
, 0), x8
);
7233 ASSERT_EQ (simplify_subreg (QImode
, x8
, HImode
, offset
), x5
);
7236 /* Run all of the selftests within this file. */
7239 simplify_rtx_c_tests ()
7242 simplify_const_poly_int_tests
<NUM_POLY_INT_COEFFS
>::run ();
7245 } // namespace selftest
7247 #endif /* CHECKING_P */