/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
40 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
49 static bool plus_minus_operand_p (const_rtx
);
50 static rtx
simplify_plus_minus (enum rtx_code
, machine_mode
, rtx
, rtx
);
51 static rtx
simplify_associative_operation (enum rtx_code
, machine_mode
,
53 static rtx
simplify_relational_operation_1 (enum rtx_code
, machine_mode
,
54 machine_mode
, rtx
, rtx
);
55 static rtx
simplify_unary_operation_1 (enum rtx_code
, machine_mode
, rtx
);
56 static rtx
simplify_binary_operation_1 (enum rtx_code
, machine_mode
,
59 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
62 neg_poly_int_rtx (machine_mode mode
, const_rtx i
)
64 return immed_wide_int_const (-wi::to_poly_wide (i
, mode
), mode
);
67 /* Test whether expression, X, is an immediate constant that represents
68 the most significant bit of machine mode MODE. */
71 mode_signbit_p (machine_mode mode
, const_rtx x
)
73 unsigned HOST_WIDE_INT val
;
75 scalar_int_mode int_mode
;
77 if (!is_int_mode (mode
, &int_mode
))
80 width
= GET_MODE_PRECISION (int_mode
);
84 if (width
<= HOST_BITS_PER_WIDE_INT
87 #if TARGET_SUPPORTS_WIDE_INT
88 else if (CONST_WIDE_INT_P (x
))
91 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
92 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
94 for (i
= 0; i
< elts
- 1; i
++)
95 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
97 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
98 width
%= HOST_BITS_PER_WIDE_INT
;
100 width
= HOST_BITS_PER_WIDE_INT
;
103 else if (width
<= HOST_BITS_PER_DOUBLE_INT
104 && CONST_DOUBLE_AS_INT_P (x
)
105 && CONST_DOUBLE_LOW (x
) == 0)
107 val
= CONST_DOUBLE_HIGH (x
);
108 width
-= HOST_BITS_PER_WIDE_INT
;
112 /* X is not an integer constant. */
115 if (width
< HOST_BITS_PER_WIDE_INT
)
116 val
&= (HOST_WIDE_INT_1U
<< width
) - 1;
117 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
120 /* Test whether VAL is equal to the most significant bit of mode MODE
121 (after masking with the mode mask of MODE). Returns false if the
122 precision of MODE is too large to handle. */
125 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
128 scalar_int_mode int_mode
;
130 if (!is_int_mode (mode
, &int_mode
))
133 width
= GET_MODE_PRECISION (int_mode
);
134 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
137 val
&= GET_MODE_MASK (int_mode
);
138 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
141 /* Test whether the most significant bit of mode MODE is set in VAL.
142 Returns false if the precision of MODE is too large to handle. */
144 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
148 scalar_int_mode int_mode
;
149 if (!is_int_mode (mode
, &int_mode
))
152 width
= GET_MODE_PRECISION (int_mode
);
153 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
156 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
160 /* Test whether the most significant bit of mode MODE is clear in VAL.
161 Returns false if the precision of MODE is too large to handle. */
163 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
167 scalar_int_mode int_mode
;
168 if (!is_int_mode (mode
, &int_mode
))
171 width
= GET_MODE_PRECISION (int_mode
);
172 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
175 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
183 simplify_gen_binary (enum rtx_code code
, machine_mode mode
, rtx op0
,
188 /* If this simplifies, do it. */
189 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0
, op1
))
196 std::swap (op0
, op1
);
198 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
204 avoid_constant_pool_reference (rtx x
)
208 poly_int64 offset
= 0;
210 switch (GET_CODE (x
))
216 /* Handle float extensions of constant pool references. */
218 c
= avoid_constant_pool_reference (tmp
);
219 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
220 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c
),
228 if (GET_MODE (x
) == BLKmode
)
233 /* Call target hook to avoid the effects of -fpic etc.... */
234 addr
= targetm
.delegitimize_address (addr
);
236 /* Split the address into a base and integer offset. */
237 addr
= strip_offset (addr
, &offset
);
239 if (GET_CODE (addr
) == LO_SUM
)
240 addr
= XEXP (addr
, 1);
242 /* If this is a constant pool reference, we can turn it into its
243 constant and hope that simplifications happen. */
244 if (GET_CODE (addr
) == SYMBOL_REF
245 && CONSTANT_POOL_ADDRESS_P (addr
))
247 c
= get_pool_constant (addr
);
248 cmode
= get_pool_mode (addr
);
250 /* If we're accessing the constant in a different mode than it was
251 originally stored, attempt to fix that up via subreg simplifications.
252 If that fails we have no choice but to return the original memory. */
253 if (known_eq (offset
, 0) && cmode
== GET_MODE (x
))
255 else if (known_in_range_p (offset
, 0, GET_MODE_SIZE (cmode
)))
257 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
258 if (tem
&& CONSTANT_P (tem
))
266 /* Simplify a MEM based on its attributes. This is the default
267 delegitimize_address target hook, and it's recommended that every
268 overrider call it. */
271 delegitimize_mem_from_attrs (rtx x
)
273 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
274 use their base addresses as equivalent. */
277 && MEM_OFFSET_KNOWN_P (x
))
279 tree decl
= MEM_EXPR (x
);
280 machine_mode mode
= GET_MODE (x
);
281 poly_int64 offset
= 0;
283 switch (TREE_CODE (decl
))
293 case ARRAY_RANGE_REF
:
298 case VIEW_CONVERT_EXPR
:
300 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
302 int unsignedp
, reversep
, volatilep
= 0;
305 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
306 &unsignedp
, &reversep
, &volatilep
);
307 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
308 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
309 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
312 offset
+= bytepos
+ toffset_val
;
318 && mode
== GET_MODE (x
)
320 && (TREE_STATIC (decl
)
321 || DECL_THREAD_LOCAL_P (decl
))
322 && DECL_RTL_SET_P (decl
)
323 && MEM_P (DECL_RTL (decl
)))
327 offset
+= MEM_OFFSET (x
);
329 newx
= DECL_RTL (decl
);
333 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
334 poly_int64 n_offset
, o_offset
;
336 /* Avoid creating a new MEM needlessly if we already had
337 the same address. We do if there's no OFFSET and the
338 old address X is identical to NEWX, or if X is of the
339 form (plus NEWX OFFSET), or the NEWX is of the form
340 (plus Y (const_int Z)) and X is that with the offset
341 added: (plus Y (const_int Z+OFFSET)). */
342 n
= strip_offset (n
, &n_offset
);
343 o
= strip_offset (o
, &o_offset
);
344 if (!(known_eq (o_offset
, n_offset
+ offset
)
345 && rtx_equal_p (o
, n
)))
346 x
= adjust_address_nv (newx
, mode
, offset
);
348 else if (GET_MODE (x
) == GET_MODE (newx
)
349 && known_eq (offset
, 0))
357 /* Make a unary operation by first seeing if it folds and otherwise making
358 the specified operation. */
361 simplify_gen_unary (enum rtx_code code
, machine_mode mode
, rtx op
,
362 machine_mode op_mode
)
366 /* If this simplifies, use it. */
367 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
370 return gen_rtx_fmt_e (code
, mode
, op
);
373 /* Likewise for ternary operations. */
376 simplify_gen_ternary (enum rtx_code code
, machine_mode mode
,
377 machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
381 /* If this simplifies, use it. */
382 if ((tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
383 op0
, op1
, op2
)) != 0)
386 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
389 /* Likewise, for relational operations.
390 CMP_MODE specifies mode comparison is done in. */
393 simplify_gen_relational (enum rtx_code code
, machine_mode mode
,
394 machine_mode cmp_mode
, rtx op0
, rtx op1
)
398 if ((tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
402 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
405 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
406 and simplify the result. If FN is non-NULL, call this callback on each
407 X, if it returns non-NULL, replace X with its return value and simplify the
411 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
412 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
414 enum rtx_code code
= GET_CODE (x
);
415 machine_mode mode
= GET_MODE (x
);
416 machine_mode op_mode
;
418 rtx op0
, op1
, op2
, newx
, op
;
422 if (__builtin_expect (fn
!= NULL
, 0))
424 newx
= fn (x
, old_rtx
, data
);
428 else if (rtx_equal_p (x
, old_rtx
))
429 return copy_rtx ((rtx
) data
);
431 switch (GET_RTX_CLASS (code
))
435 op_mode
= GET_MODE (op0
);
436 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
437 if (op0
== XEXP (x
, 0))
439 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
443 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
444 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
445 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
447 return simplify_gen_binary (code
, mode
, op0
, op1
);
450 case RTX_COMM_COMPARE
:
453 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
454 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
455 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
456 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
458 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
461 case RTX_BITFIELD_OPS
:
463 op_mode
= GET_MODE (op0
);
464 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
465 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
466 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
467 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
469 if (op_mode
== VOIDmode
)
470 op_mode
= GET_MODE (op0
);
471 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
476 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
477 if (op0
== SUBREG_REG (x
))
479 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
480 GET_MODE (SUBREG_REG (x
)),
482 return op0
? op0
: x
;
489 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
490 if (op0
== XEXP (x
, 0))
492 return replace_equiv_address_nv (x
, op0
);
494 else if (code
== LO_SUM
)
496 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
497 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
499 /* (lo_sum (high x) y) -> y where x and y have the same base. */
500 if (GET_CODE (op0
) == HIGH
)
502 rtx base0
, base1
, offset0
, offset1
;
503 split_const (XEXP (op0
, 0), &base0
, &offset0
);
504 split_const (op1
, &base1
, &offset1
);
505 if (rtx_equal_p (base0
, base1
))
509 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
511 return gen_rtx_LO_SUM (mode
, op0
, op1
);
520 fmt
= GET_RTX_FORMAT (code
);
521 for (i
= 0; fmt
[i
]; i
++)
526 newvec
= XVEC (newx
, i
);
527 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
529 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
531 if (op
!= RTVEC_ELT (vec
, j
))
535 newvec
= shallow_copy_rtvec (vec
);
537 newx
= shallow_copy_rtx (x
);
538 XVEC (newx
, i
) = newvec
;
540 RTVEC_ELT (newvec
, j
) = op
;
548 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
549 if (op
!= XEXP (x
, i
))
552 newx
= shallow_copy_rtx (x
);
561 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
562 resulting RTX. Return a new RTX which is as simplified as possible. */
565 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
567 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
570 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
571 Only handle cases where the truncated value is inherently an rvalue.
573 RTL provides two ways of truncating a value:
575 1. a lowpart subreg. This form is only a truncation when both
576 the outer and inner modes (here MODE and OP_MODE respectively)
577 are scalar integers, and only then when the subreg is used as
580 It is only valid to form such truncating subregs if the
581 truncation requires no action by the target. The onus for
582 proving this is on the creator of the subreg -- e.g. the
583 caller to simplify_subreg or simplify_gen_subreg -- and typically
584 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
586 2. a TRUNCATE. This form handles both scalar and compound integers.
588 The first form is preferred where valid. However, the TRUNCATE
589 handling in simplify_unary_operation turns the second form into the
590 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
591 so it is generally safe to form rvalue truncations using:
593 simplify_gen_unary (TRUNCATE, ...)
595 and leave simplify_unary_operation to work out which representation
598 Because of the proof requirements on (1), simplify_truncation must
599 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
600 regardless of whether the outer truncation came from a SUBREG or a
601 TRUNCATE. For example, if the caller has proven that an SImode
606 is a no-op and can be represented as a subreg, it does not follow
607 that SImode truncations of X and Y are also no-ops. On a target
608 like 64-bit MIPS that requires SImode values to be stored in
609 sign-extended form, an SImode truncation of:
611 (and:DI (reg:DI X) (const_int 63))
613 is trivially a no-op because only the lower 6 bits can be set.
614 However, X is still an arbitrary 64-bit number and so we cannot
615 assume that truncating it too is a no-op. */
618 simplify_truncation (machine_mode mode
, rtx op
,
619 machine_mode op_mode
)
621 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
622 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
623 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
625 gcc_assert (precision
<= op_precision
);
627 /* Optimize truncations of zero and sign extended values. */
628 if (GET_CODE (op
) == ZERO_EXTEND
629 || GET_CODE (op
) == SIGN_EXTEND
)
631 /* There are three possibilities. If MODE is the same as the
632 origmode, we can omit both the extension and the subreg.
633 If MODE is not larger than the origmode, we can apply the
634 truncation without the extension. Finally, if the outermode
635 is larger than the origmode, we can just extend to the appropriate
637 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
638 if (mode
== origmode
)
640 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
641 return simplify_gen_unary (TRUNCATE
, mode
,
642 XEXP (op
, 0), origmode
);
644 return simplify_gen_unary (GET_CODE (op
), mode
,
645 XEXP (op
, 0), origmode
);
648 /* If the machine can perform operations in the truncated mode, distribute
649 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
650 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
652 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
653 && (GET_CODE (op
) == PLUS
654 || GET_CODE (op
) == MINUS
655 || GET_CODE (op
) == MULT
))
657 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
660 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
662 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
666 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
667 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
668 the outer subreg is effectively a truncation to the original mode. */
669 if ((GET_CODE (op
) == LSHIFTRT
670 || GET_CODE (op
) == ASHIFTRT
)
671 /* Ensure that OP_MODE is at least twice as wide as MODE
672 to avoid the possibility that an outer LSHIFTRT shifts by more
673 than the sign extension's sign_bit_copies and introduces zeros
674 into the high bits of the result. */
675 && 2 * precision
<= op_precision
676 && CONST_INT_P (XEXP (op
, 1))
677 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
678 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
679 && UINTVAL (XEXP (op
, 1)) < precision
)
680 return simplify_gen_binary (ASHIFTRT
, mode
,
681 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
683 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
684 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
685 the outer subreg is effectively a truncation to the original mode. */
686 if ((GET_CODE (op
) == LSHIFTRT
687 || GET_CODE (op
) == ASHIFTRT
)
688 && CONST_INT_P (XEXP (op
, 1))
689 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
690 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
691 && UINTVAL (XEXP (op
, 1)) < precision
)
692 return simplify_gen_binary (LSHIFTRT
, mode
,
693 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
695 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
696 to (ashift:QI (x:QI) C), where C is a suitable small constant and
697 the outer subreg is effectively a truncation to the original mode. */
698 if (GET_CODE (op
) == ASHIFT
699 && CONST_INT_P (XEXP (op
, 1))
700 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
701 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
702 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
703 && UINTVAL (XEXP (op
, 1)) < precision
)
704 return simplify_gen_binary (ASHIFT
, mode
,
705 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
707 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
708 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
710 if (GET_CODE (op
) == AND
711 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
712 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
713 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
714 && CONST_INT_P (XEXP (op
, 1)))
716 rtx op0
= (XEXP (XEXP (op
, 0), 0));
717 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
718 rtx mask_op
= XEXP (op
, 1);
719 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
720 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
722 if (shift
< precision
723 /* If doing this transform works for an X with all bits set,
724 it works for any X. */
725 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
726 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
727 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
728 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
730 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
731 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
735 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
736 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
738 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
739 && REG_P (XEXP (op
, 0))
740 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
741 && CONST_INT_P (XEXP (op
, 1))
742 && CONST_INT_P (XEXP (op
, 2)))
744 rtx op0
= XEXP (op
, 0);
745 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
746 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
747 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
749 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
752 pos
-= op_precision
- precision
;
753 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
754 XEXP (op
, 1), GEN_INT (pos
));
757 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
759 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
761 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
762 XEXP (op
, 1), XEXP (op
, 2));
766 /* Recognize a word extraction from a multi-word subreg. */
767 if ((GET_CODE (op
) == LSHIFTRT
768 || GET_CODE (op
) == ASHIFTRT
)
769 && SCALAR_INT_MODE_P (mode
)
770 && SCALAR_INT_MODE_P (op_mode
)
771 && precision
>= BITS_PER_WORD
772 && 2 * precision
<= op_precision
773 && CONST_INT_P (XEXP (op
, 1))
774 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
775 && UINTVAL (XEXP (op
, 1)) < op_precision
)
777 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
778 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
779 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
781 ? byte
- shifted_bytes
782 : byte
+ shifted_bytes
));
785 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
786 and try replacing the TRUNCATE and shift with it. Don't do this
787 if the MEM has a mode-dependent address. */
788 if ((GET_CODE (op
) == LSHIFTRT
789 || GET_CODE (op
) == ASHIFTRT
)
790 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
791 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
792 && MEM_P (XEXP (op
, 0))
793 && CONST_INT_P (XEXP (op
, 1))
794 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
795 && INTVAL (XEXP (op
, 1)) > 0
796 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
797 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
798 MEM_ADDR_SPACE (XEXP (op
, 0)))
799 && ! MEM_VOLATILE_P (XEXP (op
, 0))
800 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
801 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
803 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
804 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
805 return adjust_address_nv (XEXP (op
, 0), int_mode
,
807 ? byte
- shifted_bytes
808 : byte
+ shifted_bytes
));
811 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
812 (OP:SI foo:SI) if OP is NEG or ABS. */
813 if ((GET_CODE (op
) == ABS
814 || GET_CODE (op
) == NEG
)
815 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
816 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
817 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
818 return simplify_gen_unary (GET_CODE (op
), mode
,
819 XEXP (XEXP (op
, 0), 0), mode
);
821 /* (truncate:A (subreg:B (truncate:C X) 0)) is
823 if (GET_CODE (op
) == SUBREG
824 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
825 && SCALAR_INT_MODE_P (op_mode
)
826 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
827 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
828 && subreg_lowpart_p (op
))
830 rtx inner
= XEXP (SUBREG_REG (op
), 0);
831 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
832 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
835 /* If subreg above is paradoxical and C is narrower
836 than A, return (subreg:A (truncate:C X) 0). */
837 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
840 /* (truncate:A (truncate:B X)) is (truncate:A X). */
841 if (GET_CODE (op
) == TRUNCATE
)
842 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
843 GET_MODE (XEXP (op
, 0)));
845 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
847 if (GET_CODE (op
) == IOR
848 && SCALAR_INT_MODE_P (mode
)
849 && SCALAR_INT_MODE_P (op_mode
)
850 && CONST_INT_P (XEXP (op
, 1))
851 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
857 /* Try to simplify a unary operation CODE whose output mode is to be
858 MODE with input operand OP whose mode was originally OP_MODE.
859 Return zero if no simplification can be made. */
861 simplify_unary_operation (enum rtx_code code
, machine_mode mode
,
862 rtx op
, machine_mode op_mode
)
866 trueop
= avoid_constant_pool_reference (op
);
868 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
872 return simplify_unary_operation_1 (code
, mode
, op
);
875 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
879 exact_int_to_float_conversion_p (const_rtx op
)
881 int out_bits
= significand_size (GET_MODE_INNER (GET_MODE (op
)));
882 machine_mode op0_mode
= GET_MODE (XEXP (op
, 0));
883 /* Constants shouldn't reach here. */
884 gcc_assert (op0_mode
!= VOIDmode
);
885 int in_prec
= GET_MODE_UNIT_PRECISION (op0_mode
);
886 int in_bits
= in_prec
;
887 if (HWI_COMPUTABLE_MODE_P (op0_mode
))
889 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (XEXP (op
, 0), op0_mode
);
890 if (GET_CODE (op
) == FLOAT
)
891 in_bits
-= num_sign_bit_copies (XEXP (op
, 0), op0_mode
);
892 else if (GET_CODE (op
) == UNSIGNED_FLOAT
)
893 in_bits
= wi::min_precision (wi::uhwi (nonzero
, in_prec
), UNSIGNED
);
896 in_bits
-= wi::ctz (wi::uhwi (nonzero
, in_prec
));
898 return in_bits
<= out_bits
;
901 /* Perform some simplifications we can do even if the operands
904 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
906 enum rtx_code reversed
;
907 rtx temp
, elt
, base
, step
;
908 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
913 /* (not (not X)) == X. */
914 if (GET_CODE (op
) == NOT
)
917 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
918 comparison is all ones. */
919 if (COMPARISON_P (op
)
920 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
921 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
922 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
923 XEXP (op
, 0), XEXP (op
, 1));
925 /* (not (plus X -1)) can become (neg X). */
926 if (GET_CODE (op
) == PLUS
927 && XEXP (op
, 1) == constm1_rtx
)
928 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
930 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
931 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
932 and MODE_VECTOR_INT. */
933 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
934 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
937 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
938 if (GET_CODE (op
) == XOR
939 && CONST_INT_P (XEXP (op
, 1))
940 && (temp
= simplify_unary_operation (NOT
, mode
,
941 XEXP (op
, 1), mode
)) != 0)
942 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
944 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
945 if (GET_CODE (op
) == PLUS
946 && CONST_INT_P (XEXP (op
, 1))
947 && mode_signbit_p (mode
, XEXP (op
, 1))
948 && (temp
= simplify_unary_operation (NOT
, mode
,
949 XEXP (op
, 1), mode
)) != 0)
950 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
953 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
954 operands other than 1, but that is not valid. We could do a
955 similar simplification for (not (lshiftrt C X)) where C is
956 just the sign bit, but this doesn't seem common enough to
958 if (GET_CODE (op
) == ASHIFT
959 && XEXP (op
, 0) == const1_rtx
)
961 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
962 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
965 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
966 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
967 so we can perform the above simplification. */
968 if (STORE_FLAG_VALUE
== -1
969 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
970 && GET_CODE (op
) == ASHIFTRT
971 && CONST_INT_P (XEXP (op
, 1))
972 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
973 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
974 XEXP (op
, 0), const0_rtx
);
977 if (partial_subreg_p (op
)
978 && subreg_lowpart_p (op
)
979 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
980 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
982 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
985 x
= gen_rtx_ROTATE (inner_mode
,
986 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
988 XEXP (SUBREG_REG (op
), 1));
989 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
994 /* Apply De Morgan's laws to reduce number of patterns for machines
995 with negating logical insns (and-not, nand, etc.). If result has
996 only one NOT, put it first, since that is how the patterns are
998 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1000 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1001 machine_mode op_mode
;
1003 op_mode
= GET_MODE (in1
);
1004 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1006 op_mode
= GET_MODE (in2
);
1007 if (op_mode
== VOIDmode
)
1009 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1011 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1012 std::swap (in1
, in2
);
1014 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1018 /* (not (bswap x)) -> (bswap (not x)). */
1019 if (GET_CODE (op
) == BSWAP
)
1021 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1022 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1027 /* (neg (neg X)) == X. */
1028 if (GET_CODE (op
) == NEG
)
1029 return XEXP (op
, 0);
1031 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1032 If comparison is not reversible use
1034 if (GET_CODE (op
) == IF_THEN_ELSE
)
1036 rtx cond
= XEXP (op
, 0);
1037 rtx true_rtx
= XEXP (op
, 1);
1038 rtx false_rtx
= XEXP (op
, 2);
1040 if ((GET_CODE (true_rtx
) == NEG
1041 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1042 || (GET_CODE (false_rtx
) == NEG
1043 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1045 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1046 temp
= reversed_comparison (cond
, mode
);
1050 std::swap (true_rtx
, false_rtx
);
1052 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1053 mode
, temp
, true_rtx
, false_rtx
);
1057 /* (neg (plus X 1)) can become (not X). */
1058 if (GET_CODE (op
) == PLUS
1059 && XEXP (op
, 1) == const1_rtx
)
1060 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1062 /* Similarly, (neg (not X)) is (plus X 1). */
1063 if (GET_CODE (op
) == NOT
)
1064 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1067 /* (neg (minus X Y)) can become (minus Y X). This transformation
1068 isn't safe for modes with signed zeros, since if X and Y are
1069 both +0, (minus Y X) is the same as (minus X Y). If the
1070 rounding mode is towards +infinity (or -infinity) then the two
1071 expressions will be rounded differently. */
1072 if (GET_CODE (op
) == MINUS
1073 && !HONOR_SIGNED_ZEROS (mode
)
1074 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1075 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1077 if (GET_CODE (op
) == PLUS
1078 && !HONOR_SIGNED_ZEROS (mode
)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1081 /* (neg (plus A C)) is simplified to (minus -C A). */
1082 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1083 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1085 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1087 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1090 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1091 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1092 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1095 /* (neg (mult A B)) becomes (mult A (neg B)).
1096 This works even for floating-point values. */
1097 if (GET_CODE (op
) == MULT
1098 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1100 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1101 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1104 /* NEG commutes with ASHIFT since it is multiplication. Only do
1105 this if we can then eliminate the NEG (e.g., if the operand
1107 if (GET_CODE (op
) == ASHIFT
)
1109 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1111 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1114 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1115 C is equal to the width of MODE minus 1. */
1116 if (GET_CODE (op
) == ASHIFTRT
1117 && CONST_INT_P (XEXP (op
, 1))
1118 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1119 return simplify_gen_binary (LSHIFTRT
, mode
,
1120 XEXP (op
, 0), XEXP (op
, 1));
1122 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1123 C is equal to the width of MODE minus 1. */
1124 if (GET_CODE (op
) == LSHIFTRT
1125 && CONST_INT_P (XEXP (op
, 1))
1126 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1127 return simplify_gen_binary (ASHIFTRT
, mode
,
1128 XEXP (op
, 0), XEXP (op
, 1));
1130 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1131 if (GET_CODE (op
) == XOR
1132 && XEXP (op
, 1) == const1_rtx
1133 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1134 return plus_constant (mode
, XEXP (op
, 0), -1);
1136 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1137 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1138 if (GET_CODE (op
) == LT
1139 && XEXP (op
, 1) == const0_rtx
1140 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1142 int_mode
= as_a
<scalar_int_mode
> (mode
);
1143 int isize
= GET_MODE_PRECISION (inner
);
1144 if (STORE_FLAG_VALUE
== 1)
1146 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1147 gen_int_shift_amount (inner
,
1149 if (int_mode
== inner
)
1151 if (GET_MODE_PRECISION (int_mode
) > isize
)
1152 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1153 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1155 else if (STORE_FLAG_VALUE
== -1)
1157 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1158 gen_int_shift_amount (inner
,
1160 if (int_mode
== inner
)
1162 if (GET_MODE_PRECISION (int_mode
) > isize
)
1163 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1164 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1168 if (vec_series_p (op
, &base
, &step
))
1170 /* Only create a new series if we can simplify both parts. In other
1171 cases this isn't really a simplification, and it's not necessarily
1172 a win to replace a vector operation with a scalar operation. */
1173 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1174 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1177 step
= simplify_unary_operation (NEG
, inner_mode
,
1180 return gen_vec_series (mode
, base
, step
);
1186 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1187 with the umulXi3_highpart patterns. */
1188 if (GET_CODE (op
) == LSHIFTRT
1189 && GET_CODE (XEXP (op
, 0)) == MULT
)
1192 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1194 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1196 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1200 /* We can't handle truncation to a partial integer mode here
1201 because we don't know the real bitsize of the partial
1206 if (GET_MODE (op
) != VOIDmode
)
1208 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1213 /* If we know that the value is already truncated, we can
1214 replace the TRUNCATE with a SUBREG. */
1215 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1216 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1217 || truncated_to_mode (mode
, op
)))
1219 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1224 /* A truncate of a comparison can be replaced with a subreg if
1225 STORE_FLAG_VALUE permits. This is like the previous test,
1226 but it works even if the comparison is done in a mode larger
1227 than HOST_BITS_PER_WIDE_INT. */
1228 if (HWI_COMPUTABLE_MODE_P (mode
)
1229 && COMPARISON_P (op
)
1230 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1232 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1237 /* A truncate of a memory is just loading the low part of the memory
1238 if we are not changing the meaning of the address. */
1239 if (GET_CODE (op
) == MEM
1240 && !VECTOR_MODE_P (mode
)
1241 && !MEM_VOLATILE_P (op
)
1242 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1244 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1251 case FLOAT_TRUNCATE
:
1252 if (DECIMAL_FLOAT_MODE_P (mode
))
1255 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1256 if (GET_CODE (op
) == FLOAT_EXTEND
1257 && GET_MODE (XEXP (op
, 0)) == mode
)
1258 return XEXP (op
, 0);
1260 /* (float_truncate:SF (float_truncate:DF foo:XF))
1261 = (float_truncate:SF foo:XF).
1262 This may eliminate double rounding, so it is unsafe.
1264 (float_truncate:SF (float_extend:XF foo:DF))
1265 = (float_truncate:SF foo:DF).
1267 (float_truncate:DF (float_extend:XF foo:SF))
1268 = (float_extend:DF foo:SF). */
1269 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1270 && flag_unsafe_math_optimizations
)
1271 || GET_CODE (op
) == FLOAT_EXTEND
)
1272 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1273 > GET_MODE_UNIT_SIZE (mode
)
1274 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1276 XEXP (op
, 0), mode
);
1278 /* (float_truncate (float x)) is (float x) */
1279 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1280 && (flag_unsafe_math_optimizations
1281 || exact_int_to_float_conversion_p (op
)))
1282 return simplify_gen_unary (GET_CODE (op
), mode
,
1284 GET_MODE (XEXP (op
, 0)));
1286 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1287 (OP:SF foo:SF) if OP is NEG or ABS. */
1288 if ((GET_CODE (op
) == ABS
1289 || GET_CODE (op
) == NEG
)
1290 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1291 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1292 return simplify_gen_unary (GET_CODE (op
), mode
,
1293 XEXP (XEXP (op
, 0), 0), mode
);
1295 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1296 is (float_truncate:SF x). */
1297 if (GET_CODE (op
) == SUBREG
1298 && subreg_lowpart_p (op
)
1299 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1300 return SUBREG_REG (op
);
1304 if (DECIMAL_FLOAT_MODE_P (mode
))
1307 /* (float_extend (float_extend x)) is (float_extend x)
1309 (float_extend (float x)) is (float x) assuming that double
1310 rounding can't happen.
1312 if (GET_CODE (op
) == FLOAT_EXTEND
1313 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1314 && exact_int_to_float_conversion_p (op
)))
1315 return simplify_gen_unary (GET_CODE (op
), mode
,
1317 GET_MODE (XEXP (op
, 0)));
1322 /* (abs (neg <foo>)) -> (abs <foo>) */
1323 if (GET_CODE (op
) == NEG
)
1324 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1325 GET_MODE (XEXP (op
, 0)));
1327 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1329 if (GET_MODE (op
) == VOIDmode
)
1332 /* If operand is something known to be positive, ignore the ABS. */
1333 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1334 || val_signbit_known_clear_p (GET_MODE (op
),
1335 nonzero_bits (op
, GET_MODE (op
))))
1338 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1339 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1340 && (num_sign_bit_copies (op
, int_mode
)
1341 == GET_MODE_PRECISION (int_mode
)))
1342 return gen_rtx_NEG (int_mode
, op
);
1347 /* (ffs (*_extend <X>)) = (ffs <X>) */
1348 if (GET_CODE (op
) == SIGN_EXTEND
1349 || GET_CODE (op
) == ZERO_EXTEND
)
1350 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1351 GET_MODE (XEXP (op
, 0)));
1355 switch (GET_CODE (op
))
1359 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1360 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1361 GET_MODE (XEXP (op
, 0)));
1365 /* Rotations don't affect popcount. */
1366 if (!side_effects_p (XEXP (op
, 1)))
1367 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1368 GET_MODE (XEXP (op
, 0)));
1377 switch (GET_CODE (op
))
1383 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1384 GET_MODE (XEXP (op
, 0)));
1388 /* Rotations don't affect parity. */
1389 if (!side_effects_p (XEXP (op
, 1)))
1390 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1391 GET_MODE (XEXP (op
, 0)));
1400 /* (bswap (bswap x)) -> x. */
1401 if (GET_CODE (op
) == BSWAP
)
1402 return XEXP (op
, 0);
1406 /* (float (sign_extend <X>)) = (float <X>). */
1407 if (GET_CODE (op
) == SIGN_EXTEND
)
1408 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1409 GET_MODE (XEXP (op
, 0)));
1413 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1414 becomes just the MINUS if its mode is MODE. This allows
1415 folding switch statements on machines using casesi (such as
1417 if (GET_CODE (op
) == TRUNCATE
1418 && GET_MODE (XEXP (op
, 0)) == mode
1419 && GET_CODE (XEXP (op
, 0)) == MINUS
1420 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1421 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1422 return XEXP (op
, 0);
1424 /* Extending a widening multiplication should be canonicalized to
1425 a wider widening multiplication. */
1426 if (GET_CODE (op
) == MULT
)
1428 rtx lhs
= XEXP (op
, 0);
1429 rtx rhs
= XEXP (op
, 1);
1430 enum rtx_code lcode
= GET_CODE (lhs
);
1431 enum rtx_code rcode
= GET_CODE (rhs
);
1433 /* Widening multiplies usually extend both operands, but sometimes
1434 they use a shift to extract a portion of a register. */
1435 if ((lcode
== SIGN_EXTEND
1436 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1437 && (rcode
== SIGN_EXTEND
1438 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1440 machine_mode lmode
= GET_MODE (lhs
);
1441 machine_mode rmode
= GET_MODE (rhs
);
1444 if (lcode
== ASHIFTRT
)
1445 /* Number of bits not shifted off the end. */
1446 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1447 - INTVAL (XEXP (lhs
, 1)));
1448 else /* lcode == SIGN_EXTEND */
1449 /* Size of inner mode. */
1450 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1452 if (rcode
== ASHIFTRT
)
1453 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1454 - INTVAL (XEXP (rhs
, 1)));
1455 else /* rcode == SIGN_EXTEND */
1456 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1458 /* We can only widen multiplies if the result is mathematiclly
1459 equivalent. I.e. if overflow was impossible. */
1460 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1461 return simplify_gen_binary
1463 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1464 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1468 /* Check for a sign extension of a subreg of a promoted
1469 variable, where the promotion is sign-extended, and the
1470 target mode is the same as the variable's promotion. */
1471 if (GET_CODE (op
) == SUBREG
1472 && SUBREG_PROMOTED_VAR_P (op
)
1473 && SUBREG_PROMOTED_SIGNED_P (op
)
1474 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1476 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1481 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1482 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1483 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1485 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1486 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1487 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1488 GET_MODE (XEXP (op
, 0)));
1491 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1492 is (sign_extend:M (subreg:O <X>)) if there is mode with
1493 GET_MODE_BITSIZE (N) - I bits.
1494 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is similarly (zero_extend:M (subreg:O <X>)). */
1496 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1497 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1498 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1499 && CONST_INT_P (XEXP (op
, 1))
1500 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1501 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1502 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1504 scalar_int_mode tmode
;
1505 gcc_assert (GET_MODE_PRECISION (int_mode
)
1506 > GET_MODE_PRECISION (op_mode
));
1507 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1508 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1511 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1513 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1514 ? SIGN_EXTEND
: ZERO_EXTEND
,
1515 int_mode
, inner
, tmode
);
1519 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1520 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1521 if (GET_CODE (op
) == LSHIFTRT
1522 && CONST_INT_P (XEXP (op
, 1))
1523 && XEXP (op
, 1) != const0_rtx
)
1524 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1526 #if defined(POINTERS_EXTEND_UNSIGNED)
1527 /* As we do not know which address space the pointer is referring to,
1528 we can do this only if the target does not support different pointer
1529 or address modes depending on the address space. */
1530 if (target_default_pointer_address_modes_p ()
1531 && ! POINTERS_EXTEND_UNSIGNED
1532 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1534 || (GET_CODE (op
) == SUBREG
1535 && REG_P (SUBREG_REG (op
))
1536 && REG_POINTER (SUBREG_REG (op
))
1537 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1538 && !targetm
.have_ptr_extend ())
1541 = convert_memory_address_addr_space_1 (Pmode
, op
,
1542 ADDR_SPACE_GENERIC
, false,
1551 /* Check for a zero extension of a subreg of a promoted
1552 variable, where the promotion is zero-extended, and the
1553 target mode is the same as the variable's promotion. */
1554 if (GET_CODE (op
) == SUBREG
1555 && SUBREG_PROMOTED_VAR_P (op
)
1556 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1557 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1559 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1564 /* Extending a widening multiplication should be canonicalized to
1565 a wider widening multiplication. */
1566 if (GET_CODE (op
) == MULT
)
1568 rtx lhs
= XEXP (op
, 0);
1569 rtx rhs
= XEXP (op
, 1);
1570 enum rtx_code lcode
= GET_CODE (lhs
);
1571 enum rtx_code rcode
= GET_CODE (rhs
);
1573 /* Widening multiplies usually extend both operands, but sometimes
1574 they use a shift to extract a portion of a register. */
1575 if ((lcode
== ZERO_EXTEND
1576 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1577 && (rcode
== ZERO_EXTEND
1578 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1580 machine_mode lmode
= GET_MODE (lhs
);
1581 machine_mode rmode
= GET_MODE (rhs
);
1584 if (lcode
== LSHIFTRT
)
1585 /* Number of bits not shifted off the end. */
1586 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1587 - INTVAL (XEXP (lhs
, 1)));
1588 else /* lcode == ZERO_EXTEND */
1589 /* Size of inner mode. */
1590 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1592 if (rcode
== LSHIFTRT
)
1593 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1594 - INTVAL (XEXP (rhs
, 1)));
1595 else /* rcode == ZERO_EXTEND */
1596 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1598 /* We can only widen multiplies if the result is mathematiclly
1599 equivalent. I.e. if overflow was impossible. */
1600 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1601 return simplify_gen_binary
1603 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1604 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1608 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1609 if (GET_CODE (op
) == ZERO_EXTEND
)
1610 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1611 GET_MODE (XEXP (op
, 0)));
1613 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1614 is (zero_extend:M (subreg:O <X>)) if there is mode with
1615 GET_MODE_PRECISION (N) - I bits. */
1616 if (GET_CODE (op
) == LSHIFTRT
1617 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1618 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1619 && CONST_INT_P (XEXP (op
, 1))
1620 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1621 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1622 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1624 scalar_int_mode tmode
;
1625 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1626 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1629 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1631 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1636 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1637 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1639 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1640 (and:SI (reg:SI) (const_int 63)). */
1641 if (partial_subreg_p (op
)
1642 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1643 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1644 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1645 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1646 && subreg_lowpart_p (op
)
1647 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1648 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1650 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1651 return SUBREG_REG (op
);
1652 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1656 #if defined(POINTERS_EXTEND_UNSIGNED)
1657 /* As we do not know which address space the pointer is referring to,
1658 we can do this only if the target does not support different pointer
1659 or address modes depending on the address space. */
1660 if (target_default_pointer_address_modes_p ()
1661 && POINTERS_EXTEND_UNSIGNED
> 0
1662 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1664 || (GET_CODE (op
) == SUBREG
1665 && REG_P (SUBREG_REG (op
))
1666 && REG_POINTER (SUBREG_REG (op
))
1667 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1668 && !targetm
.have_ptr_extend ())
1671 = convert_memory_address_addr_space_1 (Pmode
, op
,
1672 ADDR_SPACE_GENERIC
, false,
1684 if (VECTOR_MODE_P (mode
)
1685 && vec_duplicate_p (op
, &elt
)
1686 && code
!= VEC_DUPLICATE
)
1688 /* Try applying the operator to ELT and see if that simplifies.
1689 We can duplicate the result if so.
1691 The reason we don't use simplify_gen_unary is that it isn't
1692 necessarily a win to convert things like:
1694 (neg:V (vec_duplicate:V (reg:S R)))
1698 (vec_duplicate:V (neg:S (reg:S R)))
1700 The first might be done entirely in vector registers while the
1701 second might need a move between register files. */
1702 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1703 elt
, GET_MODE_INNER (GET_MODE (op
)));
1705 return gen_vec_duplicate (mode
, temp
);
1711 /* Try to compute the value of a unary operation CODE whose output mode is to
1712 be MODE with input operand OP whose mode was originally OP_MODE.
1713 Return zero if the value cannot be computed. */
1715 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1716 rtx op
, machine_mode op_mode
)
1718 scalar_int_mode result_mode
;
1720 if (code
== VEC_DUPLICATE
)
1722 gcc_assert (VECTOR_MODE_P (mode
));
1723 if (GET_MODE (op
) != VOIDmode
)
1725 if (!VECTOR_MODE_P (GET_MODE (op
)))
1726 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1728 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1731 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1732 return gen_const_vec_duplicate (mode
, op
);
1733 if (GET_CODE (op
) == CONST_VECTOR
1734 && (CONST_VECTOR_DUPLICATE_P (op
)
1735 || CONST_VECTOR_NUNITS (op
).is_constant ()))
1737 unsigned int npatterns
= (CONST_VECTOR_DUPLICATE_P (op
)
1738 ? CONST_VECTOR_NPATTERNS (op
)
1739 : CONST_VECTOR_NUNITS (op
).to_constant ());
1740 gcc_assert (multiple_p (GET_MODE_NUNITS (mode
), npatterns
));
1741 rtx_vector_builder
builder (mode
, npatterns
, 1);
1742 for (unsigned i
= 0; i
< npatterns
; i
++)
1743 builder
.quick_push (CONST_VECTOR_ELT (op
, i
));
1744 return builder
.build ();
1748 if (VECTOR_MODE_P (mode
)
1749 && GET_CODE (op
) == CONST_VECTOR
1750 && known_eq (GET_MODE_NUNITS (mode
), CONST_VECTOR_NUNITS (op
)))
1752 gcc_assert (GET_MODE (op
) == op_mode
);
1754 rtx_vector_builder builder
;
1755 if (!builder
.new_unary_operation (mode
, op
, false))
1758 unsigned int count
= builder
.encoded_nelts ();
1759 for (unsigned int i
= 0; i
< count
; i
++)
1761 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1762 CONST_VECTOR_ELT (op
, i
),
1763 GET_MODE_INNER (op_mode
));
1764 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1766 builder
.quick_push (x
);
1768 return builder
.build ();
1771 /* The order of these tests is critical so that, for example, we don't
1772 check the wrong mode (input vs. output) for a conversion operation,
1773 such as FIX. At some point, this should be simplified. */
1775 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1779 if (op_mode
== VOIDmode
)
1781 /* CONST_INT have VOIDmode as the mode. We assume that all
1782 the bits of the constant are significant, though, this is
1783 a dangerous assumption as many times CONST_INTs are
1784 created and used with garbage in the bits outside of the
1785 precision of the implied mode of the const_int. */
1786 op_mode
= MAX_MODE_INT
;
1789 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1791 /* Avoid the folding if flag_signaling_nans is on and
1792 operand is a signaling NaN. */
1793 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1796 d
= real_value_truncate (mode
, d
);
1797 return const_double_from_real_value (d
, mode
);
1799 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1803 if (op_mode
== VOIDmode
)
1805 /* CONST_INT have VOIDmode as the mode. We assume that all
1806 the bits of the constant are significant, though, this is
1807 a dangerous assumption as many times CONST_INTs are
1808 created and used with garbage in the bits outside of the
1809 precision of the implied mode of the const_int. */
1810 op_mode
= MAX_MODE_INT
;
1813 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1815 /* Avoid the folding if flag_signaling_nans is on and
1816 operand is a signaling NaN. */
1817 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1820 d
= real_value_truncate (mode
, d
);
1821 return const_double_from_real_value (d
, mode
);
1824 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1826 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1828 scalar_int_mode imode
= (op_mode
== VOIDmode
1830 : as_a
<scalar_int_mode
> (op_mode
));
1831 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1834 #if TARGET_SUPPORTS_WIDE_INT == 0
1835 /* This assert keeps the simplification from producing a result
1836 that cannot be represented in a CONST_DOUBLE but a lot of
1837 upstream callers expect that this function never fails to
1838 simplify something and so you if you added this to the test
1839 above the code would die later anyway. If this assert
1840 happens, you just need to make the port support wide int. */
1841 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1847 result
= wi::bit_not (op0
);
1851 result
= wi::neg (op0
);
1855 result
= wi::abs (op0
);
1859 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1863 if (wi::ne_p (op0
, 0))
1864 int_value
= wi::clz (op0
);
1865 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1867 result
= wi::shwi (int_value
, result_mode
);
1871 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1875 if (wi::ne_p (op0
, 0))
1876 int_value
= wi::ctz (op0
);
1877 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1879 result
= wi::shwi (int_value
, result_mode
);
1883 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1887 result
= wi::shwi (wi::parity (op0
), result_mode
);
1891 result
= wide_int (op0
).bswap ();
1896 result
= wide_int::from (op0
, width
, UNSIGNED
);
1900 result
= wide_int::from (op0
, width
, SIGNED
);
1908 return immed_wide_int_const (result
, result_mode
);
1911 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1912 && SCALAR_FLOAT_MODE_P (mode
)
1913 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1915 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1921 d
= real_value_abs (&d
);
1924 d
= real_value_negate (&d
);
1926 case FLOAT_TRUNCATE
:
1927 /* Don't perform the operation if flag_signaling_nans is on
1928 and the operand is a signaling NaN. */
1929 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1931 d
= real_value_truncate (mode
, d
);
1934 /* Don't perform the operation if flag_signaling_nans is on
1935 and the operand is a signaling NaN. */
1936 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1938 /* All this does is change the mode, unless changing
1940 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1941 real_convert (&d
, mode
, &d
);
1944 /* Don't perform the operation if flag_signaling_nans is on
1945 and the operand is a signaling NaN. */
1946 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1948 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1955 real_to_target (tmp
, &d
, GET_MODE (op
));
1956 for (i
= 0; i
< 4; i
++)
1958 real_from_target (&d
, tmp
, mode
);
1964 return const_double_from_real_value (d
, mode
);
1966 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1967 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1968 && is_int_mode (mode
, &result_mode
))
1970 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1971 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1972 operators are intentionally left unspecified (to ease implementation
1973 by target backends), for consistency, this routine implements the
1974 same semantics for constant folding as used by the middle-end. */
1976 /* This was formerly used only for non-IEEE float.
1977 eggert@twinsun.com says it is safe for IEEE also. */
1979 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1980 wide_int wmax
, wmin
;
1981 /* This is part of the abi to real_to_integer, but we check
1982 things before making this call. */
1988 if (REAL_VALUE_ISNAN (*x
))
1991 /* Test against the signed upper bound. */
1992 wmax
= wi::max_value (width
, SIGNED
);
1993 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1994 if (real_less (&t
, x
))
1995 return immed_wide_int_const (wmax
, mode
);
1997 /* Test against the signed lower bound. */
1998 wmin
= wi::min_value (width
, SIGNED
);
1999 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
2000 if (real_less (x
, &t
))
2001 return immed_wide_int_const (wmin
, mode
);
2003 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2007 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
2010 /* Test against the unsigned upper bound. */
2011 wmax
= wi::max_value (width
, UNSIGNED
);
2012 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
2013 if (real_less (&t
, x
))
2014 return immed_wide_int_const (wmax
, mode
);
2016 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2024 /* Handle polynomial integers. */
2025 else if (CONST_POLY_INT_P (op
))
2027 poly_wide_int result
;
2031 result
= -const_poly_int_value (op
);
2035 result
= ~const_poly_int_value (op
);
2041 return immed_wide_int_const (result
, mode
);
2047 /* Subroutine of simplify_binary_operation to simplify a binary operation
2048 CODE that can commute with byte swapping, with result mode MODE and
2049 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2050 Return zero if no simplification or canonicalization is possible. */
2053 simplify_byte_swapping_operation (enum rtx_code code
, machine_mode mode
,
2058 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2059 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
2061 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
2062 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
2063 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2066 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2067 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
2069 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2070 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2076 /* Subroutine of simplify_binary_operation to simplify a commutative,
2077 associative binary operation CODE with result mode MODE, operating
2078 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2079 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2080 canonicalization is possible. */
2083 simplify_associative_operation (enum rtx_code code
, machine_mode mode
,
2088 /* Linearize the operator to the left. */
2089 if (GET_CODE (op1
) == code
)
2091 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2092 if (GET_CODE (op0
) == code
)
2094 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2095 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2098 /* "a op (b op c)" becomes "(b op c) op a". */
2099 if (! swap_commutative_operands_p (op1
, op0
))
2100 return simplify_gen_binary (code
, mode
, op1
, op0
);
2102 std::swap (op0
, op1
);
2105 if (GET_CODE (op0
) == code
)
2107 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2108 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2110 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2111 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2114 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2115 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2117 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2119 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2120 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2122 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2128 /* Return a mask describing the COMPARISON. */
2130 comparison_to_mask (enum rtx_code comparison
)
2170 /* Return a comparison corresponding to the MASK. */
2171 static enum rtx_code
2172 mask_to_comparison (int mask
)
2212 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2213 and OP1, which should be both relational operations. Return 0 if no such
2214 simplification is possible. */
2216 simplify_logical_relational_operation (enum rtx_code code
, machine_mode mode
,
2219 /* We only handle IOR of two relational operations. */
2223 if (!(COMPARISON_P (op0
) && COMPARISON_P (op1
)))
2226 if (!(rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2227 && rtx_equal_p (XEXP (op0
, 1), XEXP (op1
, 1))))
2230 enum rtx_code code0
= GET_CODE (op0
);
2231 enum rtx_code code1
= GET_CODE (op1
);
2233 /* We don't handle unsigned comparisons currently. */
2234 if (code0
== LTU
|| code0
== GTU
|| code0
== LEU
|| code0
== GEU
)
2236 if (code1
== LTU
|| code1
== GTU
|| code1
== LEU
|| code1
== GEU
)
2239 int mask0
= comparison_to_mask (code0
);
2240 int mask1
= comparison_to_mask (code1
);
2242 int mask
= mask0
| mask1
;
2245 return const_true_rtx
;
2247 code
= mask_to_comparison (mask
);
2249 op0
= XEXP (op1
, 0);
2250 op1
= XEXP (op1
, 1);
2252 return simplify_gen_relational (code
, mode
, VOIDmode
, op0
, op1
);
2255 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2256 and OP1. Return 0 if no simplification is possible.
2258 Don't use this for relational operations such as EQ or LT.
2259 Use simplify_relational_operation instead. */
2261 simplify_binary_operation (enum rtx_code code
, machine_mode mode
,
2264 rtx trueop0
, trueop1
;
2267 /* Relational operations don't work here. We must know the mode
2268 of the operands in order to do the comparison correctly.
2269 Assuming a full word can give incorrect results.
2270 Consider comparing 128 with -128 in QImode. */
2271 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2272 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2274 /* Make sure the constant is second. */
2275 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2276 && swap_commutative_operands_p (op0
, op1
))
2277 std::swap (op0
, op1
);
2279 trueop0
= avoid_constant_pool_reference (op0
);
2280 trueop1
= avoid_constant_pool_reference (op1
);
2282 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2285 tem
= simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2290 /* If the above steps did not result in a simplification and op0 or op1
2291 were constant pool references, use the referenced constants directly. */
2292 if (trueop0
!= op0
|| trueop1
!= op1
)
2293 return simplify_gen_binary (code
, mode
, trueop0
, trueop1
);
2298 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2299 which OP0 and OP1 are both vector series or vector duplicates
2300 (which are really just series with a step of 0). If so, try to
2301 form a new series by applying CODE to the bases and to the steps.
2302 Return null if no simplification is possible.
2304 MODE is the mode of the operation and is known to be a vector
2308 simplify_binary_operation_series (rtx_code code
, machine_mode mode
,
2312 if (vec_duplicate_p (op0
, &base0
))
2314 else if (!vec_series_p (op0
, &base0
, &step0
))
2318 if (vec_duplicate_p (op1
, &base1
))
2320 else if (!vec_series_p (op1
, &base1
, &step1
))
2323 /* Only create a new series if we can simplify both parts. In other
2324 cases this isn't really a simplification, and it's not necessarily
2325 a win to replace a vector operation with a scalar operation. */
2326 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
2327 rtx new_base
= simplify_binary_operation (code
, inner_mode
, base0
, base1
);
2331 rtx new_step
= simplify_binary_operation (code
, inner_mode
, step0
, step1
);
2335 return gen_vec_series (mode
, new_base
, new_step
);
2338 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2339 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2340 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2341 actual constants. */
2344 simplify_binary_operation_1 (enum rtx_code code
, machine_mode mode
,
2345 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2347 rtx tem
, reversed
, opleft
, opright
, elt0
, elt1
;
2349 scalar_int_mode int_mode
, inner_mode
;
2352 /* Even if we can't compute a constant result,
2353 there are some cases worth simplifying. */
2358 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2359 when x is NaN, infinite, or finite and nonzero. They aren't
2360 when x is -0 and the rounding mode is not towards -infinity,
2361 since (-0) + 0 is then 0. */
2362 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2365 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2366 transformations are safe even for IEEE. */
2367 if (GET_CODE (op0
) == NEG
)
2368 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2369 else if (GET_CODE (op1
) == NEG
)
2370 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2372 /* (~a) + 1 -> -a */
2373 if (INTEGRAL_MODE_P (mode
)
2374 && GET_CODE (op0
) == NOT
2375 && trueop1
== const1_rtx
)
2376 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2378 /* Handle both-operands-constant cases. We can only add
2379 CONST_INTs to constants since the sum of relocatable symbols
2380 can't be handled by most assemblers. Don't add CONST_INT
2381 to CONST_INT since overflow won't be computed properly if wider
2382 than HOST_BITS_PER_WIDE_INT. */
2384 if ((GET_CODE (op0
) == CONST
2385 || GET_CODE (op0
) == SYMBOL_REF
2386 || GET_CODE (op0
) == LABEL_REF
)
2387 && poly_int_rtx_p (op1
, &offset
))
2388 return plus_constant (mode
, op0
, offset
);
2389 else if ((GET_CODE (op1
) == CONST
2390 || GET_CODE (op1
) == SYMBOL_REF
2391 || GET_CODE (op1
) == LABEL_REF
)
2392 && poly_int_rtx_p (op0
, &offset
))
2393 return plus_constant (mode
, op1
, offset
);
2395 /* See if this is something like X * C - X or vice versa or
2396 if the multiplication is written as a shift. If so, we can
2397 distribute and make a new multiply, shift, or maybe just
2398 have X (if C is 2 in the example above). But don't make
2399 something more expensive than we had before. */
2401 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2403 rtx lhs
= op0
, rhs
= op1
;
2405 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2406 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2408 if (GET_CODE (lhs
) == NEG
)
2410 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2411 lhs
= XEXP (lhs
, 0);
2413 else if (GET_CODE (lhs
) == MULT
2414 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2416 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2417 lhs
= XEXP (lhs
, 0);
2419 else if (GET_CODE (lhs
) == ASHIFT
2420 && CONST_INT_P (XEXP (lhs
, 1))
2421 && INTVAL (XEXP (lhs
, 1)) >= 0
2422 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2424 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2425 GET_MODE_PRECISION (int_mode
));
2426 lhs
= XEXP (lhs
, 0);
2429 if (GET_CODE (rhs
) == NEG
)
2431 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2432 rhs
= XEXP (rhs
, 0);
2434 else if (GET_CODE (rhs
) == MULT
2435 && CONST_INT_P (XEXP (rhs
, 1)))
2437 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2438 rhs
= XEXP (rhs
, 0);
2440 else if (GET_CODE (rhs
) == ASHIFT
2441 && CONST_INT_P (XEXP (rhs
, 1))
2442 && INTVAL (XEXP (rhs
, 1)) >= 0
2443 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2445 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2446 GET_MODE_PRECISION (int_mode
));
2447 rhs
= XEXP (rhs
, 0);
2450 if (rtx_equal_p (lhs
, rhs
))
2452 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2454 bool speed
= optimize_function_for_speed_p (cfun
);
2456 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2458 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2459 return (set_src_cost (tem
, int_mode
, speed
)
2460 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2464 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2465 if (CONST_SCALAR_INT_P (op1
)
2466 && GET_CODE (op0
) == XOR
2467 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2468 && mode_signbit_p (mode
, op1
))
2469 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2470 simplify_gen_binary (XOR
, mode
, op1
,
2473 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2474 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2475 && GET_CODE (op0
) == MULT
2476 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2480 in1
= XEXP (XEXP (op0
, 0), 0);
2481 in2
= XEXP (op0
, 1);
2482 return simplify_gen_binary (MINUS
, mode
, op1
,
2483 simplify_gen_binary (MULT
, mode
,
2487 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2488 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2490 if (COMPARISON_P (op0
)
2491 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2492 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2493 && (reversed
= reversed_comparison (op0
, mode
)))
2495 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2497 /* If one of the operands is a PLUS or a MINUS, see if we can
2498 simplify this by the associative law.
2499 Don't use the associative law for floating point.
2500 The inaccuracy makes it nonassociative,
2501 and subtle programs can break if operations are associated. */
2503 if (INTEGRAL_MODE_P (mode
)
2504 && (plus_minus_operand_p (op0
)
2505 || plus_minus_operand_p (op1
))
2506 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2509 /* Reassociate floating point addition only when the user
2510 specifies associative math operations. */
2511 if (FLOAT_MODE_P (mode
)
2512 && flag_associative_math
)
2514 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2519 /* Handle vector series. */
2520 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2522 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2529 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2530 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2531 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2532 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2534 rtx xop00
= XEXP (op0
, 0);
2535 rtx xop10
= XEXP (op1
, 0);
2537 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2540 if (REG_P (xop00
) && REG_P (xop10
)
2541 && REGNO (xop00
) == REGNO (xop10
)
2542 && GET_MODE (xop00
) == mode
2543 && GET_MODE (xop10
) == mode
2544 && GET_MODE_CLASS (mode
) == MODE_CC
)
2550 /* We can't assume x-x is 0 even with non-IEEE floating point,
2551 but since it is zero except in very strange circumstances, we
2552 will treat it as zero with -ffinite-math-only. */
2553 if (rtx_equal_p (trueop0
, trueop1
)
2554 && ! side_effects_p (op0
)
2555 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2556 return CONST0_RTX (mode
);
2558 /* Change subtraction from zero into negation. (0 - x) is the
2559 same as -x when x is NaN, infinite, or finite and nonzero.
2560 But if the mode has signed zeros, and does not round towards
2561 -infinity, then 0 - 0 is 0, not -0. */
2562 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2563 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2565 /* (-1 - a) is ~a, unless the expression contains symbolic
2566 constants, in which case not retaining additions and
2567 subtractions could cause invalid assembly to be produced. */
2568 if (trueop0
== constm1_rtx
2569 && !contains_symbolic_reference_p (op1
))
2570 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2572 /* Subtracting 0 has no effect unless the mode has signed zeros
2573 and supports rounding towards -infinity. In such a case,
2575 if (!(HONOR_SIGNED_ZEROS (mode
)
2576 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2577 && trueop1
== CONST0_RTX (mode
))
2580 /* See if this is something like X * C - X or vice versa or
2581 if the multiplication is written as a shift. If so, we can
2582 distribute and make a new multiply, shift, or maybe just
2583 have X (if C is 2 in the example above). But don't make
2584 something more expensive than we had before. */
2586 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2588 rtx lhs
= op0
, rhs
= op1
;
2590 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2591 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2593 if (GET_CODE (lhs
) == NEG
)
2595 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2596 lhs
= XEXP (lhs
, 0);
2598 else if (GET_CODE (lhs
) == MULT
2599 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2601 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2602 lhs
= XEXP (lhs
, 0);
2604 else if (GET_CODE (lhs
) == ASHIFT
2605 && CONST_INT_P (XEXP (lhs
, 1))
2606 && INTVAL (XEXP (lhs
, 1)) >= 0
2607 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2609 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2610 GET_MODE_PRECISION (int_mode
));
2611 lhs
= XEXP (lhs
, 0);
2614 if (GET_CODE (rhs
) == NEG
)
2616 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2617 rhs
= XEXP (rhs
, 0);
2619 else if (GET_CODE (rhs
) == MULT
2620 && CONST_INT_P (XEXP (rhs
, 1)))
2622 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2623 rhs
= XEXP (rhs
, 0);
2625 else if (GET_CODE (rhs
) == ASHIFT
2626 && CONST_INT_P (XEXP (rhs
, 1))
2627 && INTVAL (XEXP (rhs
, 1)) >= 0
2628 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2630 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2631 GET_MODE_PRECISION (int_mode
));
2632 negcoeff1
= -negcoeff1
;
2633 rhs
= XEXP (rhs
, 0);
2636 if (rtx_equal_p (lhs
, rhs
))
2638 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2640 bool speed
= optimize_function_for_speed_p (cfun
);
2642 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2644 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2645 return (set_src_cost (tem
, int_mode
, speed
)
2646 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2650 /* (a - (-b)) -> (a + b). True even for IEEE. */
2651 if (GET_CODE (op1
) == NEG
)
2652 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2654 /* (-x - c) may be simplified as (-c - x). */
2655 if (GET_CODE (op0
) == NEG
2656 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2658 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2660 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2663 if ((GET_CODE (op0
) == CONST
2664 || GET_CODE (op0
) == SYMBOL_REF
2665 || GET_CODE (op0
) == LABEL_REF
)
2666 && poly_int_rtx_p (op1
, &offset
))
2667 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2669 /* Don't let a relocatable value get a negative coeff. */
2670 if (poly_int_rtx_p (op1
) && GET_MODE (op0
) != VOIDmode
)
2671 return simplify_gen_binary (PLUS
, mode
,
2673 neg_poly_int_rtx (mode
, op1
));
2675 /* (x - (x & y)) -> (x & ~y) */
2676 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2678 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2680 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2681 GET_MODE (XEXP (op1
, 1)));
2682 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2684 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2686 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2687 GET_MODE (XEXP (op1
, 0)));
2688 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2692 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2693 by reversing the comparison code if valid. */
2694 if (STORE_FLAG_VALUE
== 1
2695 && trueop0
== const1_rtx
2696 && COMPARISON_P (op1
)
2697 && (reversed
= reversed_comparison (op1
, mode
)))
2700 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2701 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2702 && GET_CODE (op1
) == MULT
2703 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2707 in1
= XEXP (XEXP (op1
, 0), 0);
2708 in2
= XEXP (op1
, 1);
2709 return simplify_gen_binary (PLUS
, mode
,
2710 simplify_gen_binary (MULT
, mode
,
2715 /* Canonicalize (minus (neg A) (mult B C)) to
2716 (minus (mult (neg B) C) A). */
2717 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2718 && GET_CODE (op1
) == MULT
2719 && GET_CODE (op0
) == NEG
)
2723 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2724 in2
= XEXP (op1
, 1);
2725 return simplify_gen_binary (MINUS
, mode
,
2726 simplify_gen_binary (MULT
, mode
,
2731 /* If one of the operands is a PLUS or a MINUS, see if we can
2732 simplify this by the associative law. This will, for example,
2733 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2734 Don't use the associative law for floating point.
2735 The inaccuracy makes it nonassociative,
2736 and subtle programs can break if operations are associated. */
2738 if (INTEGRAL_MODE_P (mode
)
2739 && (plus_minus_operand_p (op0
)
2740 || plus_minus_operand_p (op1
))
2741 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2744 /* Handle vector series. */
2745 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2747 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2754 if (trueop1
== constm1_rtx
)
2755 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2757 if (GET_CODE (op0
) == NEG
)
2759 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2760 /* If op1 is a MULT as well and simplify_unary_operation
2761 just moved the NEG to the second operand, simplify_gen_binary
2762 below could through simplify_associative_operation move
2763 the NEG around again and recurse endlessly. */
2765 && GET_CODE (op1
) == MULT
2766 && GET_CODE (temp
) == MULT
2767 && XEXP (op1
, 0) == XEXP (temp
, 0)
2768 && GET_CODE (XEXP (temp
, 1)) == NEG
2769 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2772 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2774 if (GET_CODE (op1
) == NEG
)
2776 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2777 /* If op0 is a MULT as well and simplify_unary_operation
2778 just moved the NEG to the second operand, simplify_gen_binary
2779 below could through simplify_associative_operation move
2780 the NEG around again and recurse endlessly. */
2782 && GET_CODE (op0
) == MULT
2783 && GET_CODE (temp
) == MULT
2784 && XEXP (op0
, 0) == XEXP (temp
, 0)
2785 && GET_CODE (XEXP (temp
, 1)) == NEG
2786 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2789 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2792 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2793 x is NaN, since x * 0 is then also NaN. Nor is it valid
2794 when the mode has signed zeros, since multiplying a negative
2795 number by 0 will give -0, not 0. */
2796 if (!HONOR_NANS (mode
)
2797 && !HONOR_SIGNED_ZEROS (mode
)
2798 && trueop1
== CONST0_RTX (mode
)
2799 && ! side_effects_p (op0
))
2802 /* In IEEE floating point, x*1 is not equivalent to x for
2804 if (!HONOR_SNANS (mode
)
2805 && trueop1
== CONST1_RTX (mode
))
2808 /* Convert multiply by constant power of two into shift. */
2809 if (CONST_SCALAR_INT_P (trueop1
))
2811 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
2813 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2814 gen_int_shift_amount (mode
, val
));
2817 /* x*2 is x+x and x*(-1) is -x */
2818 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2819 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2820 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2821 && GET_MODE (op0
) == mode
)
2823 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2825 if (real_equal (d1
, &dconst2
))
2826 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2828 if (!HONOR_SNANS (mode
)
2829 && real_equal (d1
, &dconstm1
))
2830 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2833 /* Optimize -x * -x as x * x. */
2834 if (FLOAT_MODE_P (mode
)
2835 && GET_CODE (op0
) == NEG
2836 && GET_CODE (op1
) == NEG
2837 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2838 && !side_effects_p (XEXP (op0
, 0)))
2839 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2841 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2842 if (SCALAR_FLOAT_MODE_P (mode
)
2843 && GET_CODE (op0
) == ABS
2844 && GET_CODE (op1
) == ABS
2845 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2846 && !side_effects_p (XEXP (op0
, 0)))
2847 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2849 /* Reassociate multiplication, but for floating point MULTs
2850 only when the user specifies unsafe math optimizations. */
2851 if (! FLOAT_MODE_P (mode
)
2852 || flag_unsafe_math_optimizations
)
2854 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2861 if (trueop1
== CONST0_RTX (mode
))
2863 if (INTEGRAL_MODE_P (mode
)
2864 && trueop1
== CONSTM1_RTX (mode
)
2865 && !side_effects_p (op0
))
2867 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2869 /* A | (~A) -> -1 */
2870 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2871 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2872 && ! side_effects_p (op0
)
2873 && SCALAR_INT_MODE_P (mode
))
2876 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2877 if (CONST_INT_P (op1
)
2878 && HWI_COMPUTABLE_MODE_P (mode
)
2879 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2880 && !side_effects_p (op0
))
2883 /* Canonicalize (X & C1) | C2. */
2884 if (GET_CODE (op0
) == AND
2885 && CONST_INT_P (trueop1
)
2886 && CONST_INT_P (XEXP (op0
, 1)))
2888 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2889 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2890 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2892 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2894 && !side_effects_p (XEXP (op0
, 0)))
2897 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2898 if (((c1
|c2
) & mask
) == mask
)
2899 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2902 /* Convert (A & B) | A to A. */
2903 if (GET_CODE (op0
) == AND
2904 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2905 || rtx_equal_p (XEXP (op0
, 1), op1
))
2906 && ! side_effects_p (XEXP (op0
, 0))
2907 && ! side_effects_p (XEXP (op0
, 1)))
2910 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2911 mode size to (rotate A CX). */
2913 if (GET_CODE (op1
) == ASHIFT
2914 || GET_CODE (op1
) == SUBREG
)
2925 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2926 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2927 && CONST_INT_P (XEXP (opleft
, 1))
2928 && CONST_INT_P (XEXP (opright
, 1))
2929 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2930 == GET_MODE_UNIT_PRECISION (mode
)))
2931 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2933 /* Same, but for ashift that has been "simplified" to a wider mode
2934 by simplify_shift_const. */
2936 if (GET_CODE (opleft
) == SUBREG
2937 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2938 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2940 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2941 && GET_CODE (opright
) == LSHIFTRT
2942 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2943 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2944 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2945 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2946 SUBREG_REG (XEXP (opright
, 0)))
2947 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2948 && CONST_INT_P (XEXP (opright
, 1))
2949 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2950 + INTVAL (XEXP (opright
, 1))
2951 == GET_MODE_PRECISION (int_mode
)))
2952 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2953 XEXP (SUBREG_REG (opleft
), 1));
2955 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2956 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2957 the PLUS does not affect any of the bits in OP1: then we can do
2958 the IOR as a PLUS and we can associate. This is valid if OP1
2959 can be safely shifted left C bits. */
2960 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2961 && GET_CODE (XEXP (op0
, 0)) == PLUS
2962 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2963 && CONST_INT_P (XEXP (op0
, 1))
2964 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2966 int count
= INTVAL (XEXP (op0
, 1));
2967 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2969 if (mask
>> count
== INTVAL (trueop1
)
2970 && trunc_int_for_mode (mask
, mode
) == mask
2971 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2972 return simplify_gen_binary (ASHIFTRT
, mode
,
2973 plus_constant (mode
, XEXP (op0
, 0),
2978 /* The following happens with bitfield merging.
2979 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2980 if (GET_CODE (op0
) == AND
2981 && GET_CODE (op1
) == AND
2982 && CONST_INT_P (XEXP (op0
, 1))
2983 && CONST_INT_P (XEXP (op1
, 1))
2984 && (INTVAL (XEXP (op0
, 1))
2985 == ~INTVAL (XEXP (op1
, 1))))
2987 /* The IOR may be on both sides. */
2988 rtx top0
= NULL_RTX
, top1
= NULL_RTX
;
2989 if (GET_CODE (XEXP (op1
, 0)) == IOR
)
2990 top0
= op0
, top1
= op1
;
2991 else if (GET_CODE (XEXP (op0
, 0)) == IOR
)
2992 top0
= op1
, top1
= op0
;
2995 /* X may be on either side of the inner IOR. */
2997 if (rtx_equal_p (XEXP (top0
, 0),
2998 XEXP (XEXP (top1
, 0), 0)))
2999 tem
= XEXP (XEXP (top1
, 0), 1);
3000 else if (rtx_equal_p (XEXP (top0
, 0),
3001 XEXP (XEXP (top1
, 0), 1)))
3002 tem
= XEXP (XEXP (top1
, 0), 0);
3004 return simplify_gen_binary (IOR
, mode
, XEXP (top0
, 0),
3006 (AND
, mode
, tem
, XEXP (top1
, 1)));
3010 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3014 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3018 tem
= simplify_logical_relational_operation (code
, mode
, op0
, op1
);
3024 if (trueop1
== CONST0_RTX (mode
))
3026 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3027 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
3028 if (rtx_equal_p (trueop0
, trueop1
)
3029 && ! side_effects_p (op0
)
3030 && GET_MODE_CLASS (mode
) != MODE_CC
)
3031 return CONST0_RTX (mode
);
3033 /* Canonicalize XOR of the most significant bit to PLUS. */
3034 if (CONST_SCALAR_INT_P (op1
)
3035 && mode_signbit_p (mode
, op1
))
3036 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
3037 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3038 if (CONST_SCALAR_INT_P (op1
)
3039 && GET_CODE (op0
) == PLUS
3040 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
3041 && mode_signbit_p (mode
, XEXP (op0
, 1)))
3042 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
3043 simplify_gen_binary (XOR
, mode
, op1
,
3046 /* If we are XORing two things that have no bits in common,
3047 convert them into an IOR. This helps to detect rotation encoded
3048 using those methods and possibly other simplifications. */
3050 if (HWI_COMPUTABLE_MODE_P (mode
)
3051 && (nonzero_bits (op0
, mode
)
3052 & nonzero_bits (op1
, mode
)) == 0)
3053 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
3055 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3056 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3059 int num_negated
= 0;
3061 if (GET_CODE (op0
) == NOT
)
3062 num_negated
++, op0
= XEXP (op0
, 0);
3063 if (GET_CODE (op1
) == NOT
)
3064 num_negated
++, op1
= XEXP (op1
, 0);
3066 if (num_negated
== 2)
3067 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
3068 else if (num_negated
== 1)
3069 return simplify_gen_unary (NOT
, mode
,
3070 simplify_gen_binary (XOR
, mode
, op0
, op1
),
3074 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3075 correspond to a machine insn or result in further simplifications
3076 if B is a constant. */
3078 if (GET_CODE (op0
) == AND
3079 && rtx_equal_p (XEXP (op0
, 1), op1
)
3080 && ! side_effects_p (op1
))
3081 return simplify_gen_binary (AND
, mode
,
3082 simplify_gen_unary (NOT
, mode
,
3083 XEXP (op0
, 0), mode
),
3086 else if (GET_CODE (op0
) == AND
3087 && rtx_equal_p (XEXP (op0
, 0), op1
)
3088 && ! side_effects_p (op1
))
3089 return simplify_gen_binary (AND
, mode
,
3090 simplify_gen_unary (NOT
, mode
,
3091 XEXP (op0
, 1), mode
),
3094 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3095 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3096 out bits inverted twice and not set by C. Similarly, given
3097 (xor (and (xor A B) C) D), simplify without inverting C in
3098 the xor operand: (xor (and A C) (B&C)^D).
3100 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
3101 && GET_CODE (XEXP (op0
, 0)) == XOR
3102 && CONST_INT_P (op1
)
3103 && CONST_INT_P (XEXP (op0
, 1))
3104 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
3106 enum rtx_code op
= GET_CODE (op0
);
3107 rtx a
= XEXP (XEXP (op0
, 0), 0);
3108 rtx b
= XEXP (XEXP (op0
, 0), 1);
3109 rtx c
= XEXP (op0
, 1);
3111 HOST_WIDE_INT bval
= INTVAL (b
);
3112 HOST_WIDE_INT cval
= INTVAL (c
);
3113 HOST_WIDE_INT dval
= INTVAL (d
);
3114 HOST_WIDE_INT xcval
;
3121 return simplify_gen_binary (XOR
, mode
,
3122 simplify_gen_binary (op
, mode
, a
, c
),
3123 gen_int_mode ((bval
& xcval
) ^ dval
,
3127 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3128 we can transform like this:
3129 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3130 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3131 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3132 Attempt a few simplifications when B and C are both constants. */
3133 if (GET_CODE (op0
) == AND
3134 && CONST_INT_P (op1
)
3135 && CONST_INT_P (XEXP (op0
, 1)))
3137 rtx a
= XEXP (op0
, 0);
3138 rtx b
= XEXP (op0
, 1);
3140 HOST_WIDE_INT bval
= INTVAL (b
);
3141 HOST_WIDE_INT cval
= INTVAL (c
);
3143 /* Instead of computing ~A&C, we compute its negated value,
3144 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3145 optimize for sure. If it does not simplify, we still try
3146 to compute ~A&C below, but since that always allocates
3147 RTL, we don't try that before committing to returning a
3148 simplified expression. */
3149 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3152 if ((~cval
& bval
) == 0)
3154 rtx na_c
= NULL_RTX
;
3156 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3159 /* If ~A does not simplify, don't bother: we don't
3160 want to simplify 2 operations into 3, and if na_c
3161 were to simplify with na, n_na_c would have
3162 simplified as well. */
3163 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3165 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3168 /* Try to simplify ~A&C | ~B&C. */
3169 if (na_c
!= NULL_RTX
)
3170 return simplify_gen_binary (IOR
, mode
, na_c
,
3171 gen_int_mode (~bval
& cval
, mode
));
3175 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3176 if (n_na_c
== CONSTM1_RTX (mode
))
3178 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3179 gen_int_mode (~cval
& bval
,
3181 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3182 gen_int_mode (~bval
& cval
,
3188 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3189 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3190 machines, and also has shorter instruction path length. */
3191 if (GET_CODE (op0
) == AND
3192 && GET_CODE (XEXP (op0
, 0)) == XOR
3193 && CONST_INT_P (XEXP (op0
, 1))
3194 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3197 rtx b
= XEXP (XEXP (op0
, 0), 1);
3198 rtx c
= XEXP (op0
, 1);
3199 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3200 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3201 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3202 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3204 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3205 else if (GET_CODE (op0
) == AND
3206 && GET_CODE (XEXP (op0
, 0)) == XOR
3207 && CONST_INT_P (XEXP (op0
, 1))
3208 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3210 rtx a
= XEXP (XEXP (op0
, 0), 0);
3212 rtx c
= XEXP (op0
, 1);
3213 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3214 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3215 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3216 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3219 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3220 comparison if STORE_FLAG_VALUE is 1. */
3221 if (STORE_FLAG_VALUE
== 1
3222 && trueop1
== const1_rtx
3223 && COMPARISON_P (op0
)
3224 && (reversed
= reversed_comparison (op0
, mode
)))
3227 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3228 is (lt foo (const_int 0)), so we can perform the above
3229 simplification if STORE_FLAG_VALUE is 1. */
3231 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3232 && STORE_FLAG_VALUE
== 1
3233 && trueop1
== const1_rtx
3234 && GET_CODE (op0
) == LSHIFTRT
3235 && CONST_INT_P (XEXP (op0
, 1))
3236 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3237 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3239 /* (xor (comparison foo bar) (const_int sign-bit))
3240 when STORE_FLAG_VALUE is the sign bit. */
3241 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3242 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3243 && trueop1
== const_true_rtx
3244 && COMPARISON_P (op0
)
3245 && (reversed
= reversed_comparison (op0
, int_mode
)))
3248 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3252 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3258 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3260 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3262 if (HWI_COMPUTABLE_MODE_P (mode
))
3264 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3265 HOST_WIDE_INT nzop1
;
3266 if (CONST_INT_P (trueop1
))
3268 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3269 /* If we are turning off bits already known off in OP0, we need
3271 if ((nzop0
& ~val1
) == 0)
3274 nzop1
= nonzero_bits (trueop1
, mode
);
3275 /* If we are clearing all the nonzero bits, the result is zero. */
3276 if ((nzop1
& nzop0
) == 0
3277 && !side_effects_p (op0
) && !side_effects_p (op1
))
3278 return CONST0_RTX (mode
);
3280 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3281 && GET_MODE_CLASS (mode
) != MODE_CC
)
3284 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3285 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3286 && ! side_effects_p (op0
)
3287 && GET_MODE_CLASS (mode
) != MODE_CC
)
3288 return CONST0_RTX (mode
);
3290 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3291 there are no nonzero bits of C outside of X's mode. */
3292 if ((GET_CODE (op0
) == SIGN_EXTEND
3293 || GET_CODE (op0
) == ZERO_EXTEND
)
3294 && CONST_INT_P (trueop1
)
3295 && HWI_COMPUTABLE_MODE_P (mode
)
3296 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3297 & UINTVAL (trueop1
)) == 0)
3299 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3300 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3301 gen_int_mode (INTVAL (trueop1
),
3303 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3306 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3307 we might be able to further simplify the AND with X and potentially
3308 remove the truncation altogether. */
3309 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3311 rtx x
= XEXP (op0
, 0);
3312 machine_mode xmode
= GET_MODE (x
);
3313 tem
= simplify_gen_binary (AND
, xmode
, x
,
3314 gen_int_mode (INTVAL (trueop1
), xmode
));
3315 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3318 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3319 if (GET_CODE (op0
) == IOR
3320 && CONST_INT_P (trueop1
)
3321 && CONST_INT_P (XEXP (op0
, 1)))
3323 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3324 return simplify_gen_binary (IOR
, mode
,
3325 simplify_gen_binary (AND
, mode
,
3326 XEXP (op0
, 0), op1
),
3327 gen_int_mode (tmp
, mode
));
3330 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3331 insn (and may simplify more). */
3332 if (GET_CODE (op0
) == XOR
3333 && rtx_equal_p (XEXP (op0
, 0), op1
)
3334 && ! side_effects_p (op1
))
3335 return simplify_gen_binary (AND
, mode
,
3336 simplify_gen_unary (NOT
, mode
,
3337 XEXP (op0
, 1), mode
),
3340 if (GET_CODE (op0
) == XOR
3341 && rtx_equal_p (XEXP (op0
, 1), op1
)
3342 && ! side_effects_p (op1
))
3343 return simplify_gen_binary (AND
, mode
,
3344 simplify_gen_unary (NOT
, mode
,
3345 XEXP (op0
, 0), mode
),
3348 /* Similarly for (~(A ^ B)) & A. */
3349 if (GET_CODE (op0
) == NOT
3350 && GET_CODE (XEXP (op0
, 0)) == XOR
3351 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3352 && ! side_effects_p (op1
))
3353 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3355 if (GET_CODE (op0
) == NOT
3356 && GET_CODE (XEXP (op0
, 0)) == XOR
3357 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3358 && ! side_effects_p (op1
))
3359 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3361 /* Convert (A | B) & A to A. */
3362 if (GET_CODE (op0
) == IOR
3363 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3364 || rtx_equal_p (XEXP (op0
, 1), op1
))
3365 && ! side_effects_p (XEXP (op0
, 0))
3366 && ! side_effects_p (XEXP (op0
, 1)))
3369 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3370 ((A & N) + B) & M -> (A + B) & M
3371 Similarly if (N & M) == 0,
3372 ((A | N) + B) & M -> (A + B) & M
3373 and for - instead of + and/or ^ instead of |.
3374 Also, if (N & M) == 0, then
3375 (A +- N) & M -> A & M. */
3376 if (CONST_INT_P (trueop1
)
3377 && HWI_COMPUTABLE_MODE_P (mode
)
3378 && ~UINTVAL (trueop1
)
3379 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3380 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3385 pmop
[0] = XEXP (op0
, 0);
3386 pmop
[1] = XEXP (op0
, 1);
3388 if (CONST_INT_P (pmop
[1])
3389 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3390 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3392 for (which
= 0; which
< 2; which
++)
3395 switch (GET_CODE (tem
))
3398 if (CONST_INT_P (XEXP (tem
, 1))
3399 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3400 == UINTVAL (trueop1
))
3401 pmop
[which
] = XEXP (tem
, 0);
3405 if (CONST_INT_P (XEXP (tem
, 1))
3406 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3407 pmop
[which
] = XEXP (tem
, 0);
3414 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3416 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3418 return simplify_gen_binary (code
, mode
, tem
, op1
);
3422 /* (and X (ior (not X) Y) -> (and X Y) */
3423 if (GET_CODE (op1
) == IOR
3424 && GET_CODE (XEXP (op1
, 0)) == NOT
3425 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3426 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3428 /* (and (ior (not X) Y) X) -> (and X Y) */
3429 if (GET_CODE (op0
) == IOR
3430 && GET_CODE (XEXP (op0
, 0)) == NOT
3431 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3432 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3434 /* (and X (ior Y (not X)) -> (and X Y) */
3435 if (GET_CODE (op1
) == IOR
3436 && GET_CODE (XEXP (op1
, 1)) == NOT
3437 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3438 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3440 /* (and (ior Y (not X)) X) -> (and X Y) */
3441 if (GET_CODE (op0
) == IOR
3442 && GET_CODE (XEXP (op0
, 1)) == NOT
3443 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3444 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3446 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3450 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3456 /* 0/x is 0 (or x&0 if x has side-effects). */
3457 if (trueop0
== CONST0_RTX (mode
)
3458 && !cfun
->can_throw_non_call_exceptions
)
3460 if (side_effects_p (op1
))
3461 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3465 if (trueop1
== CONST1_RTX (mode
))
3467 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3471 /* Convert divide by power of two into shift. */
3472 if (CONST_INT_P (trueop1
)
3473 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3474 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3475 gen_int_shift_amount (mode
, val
));
3479 /* Handle floating point and integers separately. */
3480 if (SCALAR_FLOAT_MODE_P (mode
))
3482 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3483 safe for modes with NaNs, since 0.0 / 0.0 will then be
3484 NaN rather than 0.0. Nor is it safe for modes with signed
3485 zeros, since dividing 0 by a negative number gives -0.0 */
3486 if (trueop0
== CONST0_RTX (mode
)
3487 && !HONOR_NANS (mode
)
3488 && !HONOR_SIGNED_ZEROS (mode
)
3489 && ! side_effects_p (op1
))
3492 if (trueop1
== CONST1_RTX (mode
)
3493 && !HONOR_SNANS (mode
))
3496 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3497 && trueop1
!= CONST0_RTX (mode
))
3499 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3502 if (real_equal (d1
, &dconstm1
)
3503 && !HONOR_SNANS (mode
))
3504 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3506 /* Change FP division by a constant into multiplication.
3507 Only do this with -freciprocal-math. */
3508 if (flag_reciprocal_math
3509 && !real_equal (d1
, &dconst0
))
3512 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3513 tem
= const_double_from_real_value (d
, mode
);
3514 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3518 else if (SCALAR_INT_MODE_P (mode
))
3520 /* 0/x is 0 (or x&0 if x has side-effects). */
3521 if (trueop0
== CONST0_RTX (mode
)
3522 && !cfun
->can_throw_non_call_exceptions
)
3524 if (side_effects_p (op1
))
3525 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3529 if (trueop1
== CONST1_RTX (mode
))
3531 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3536 if (trueop1
== constm1_rtx
)
3538 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3540 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3546 /* 0%x is 0 (or x&0 if x has side-effects). */
3547 if (trueop0
== CONST0_RTX (mode
))
3549 if (side_effects_p (op1
))
3550 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3553 /* x%1 is 0 (of x&0 if x has side-effects). */
3554 if (trueop1
== CONST1_RTX (mode
))
3556 if (side_effects_p (op0
))
3557 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3558 return CONST0_RTX (mode
);
3560 /* Implement modulus by power of two as AND. */
3561 if (CONST_INT_P (trueop1
)
3562 && exact_log2 (UINTVAL (trueop1
)) > 0)
3563 return simplify_gen_binary (AND
, mode
, op0
,
3564 gen_int_mode (UINTVAL (trueop1
) - 1,
3569 /* 0%x is 0 (or x&0 if x has side-effects). */
3570 if (trueop0
== CONST0_RTX (mode
))
3572 if (side_effects_p (op1
))
3573 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3576 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3577 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3579 if (side_effects_p (op0
))
3580 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3581 return CONST0_RTX (mode
);
3587 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3588 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3589 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3591 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3592 if (CONST_INT_P (trueop1
)
3593 && IN_RANGE (INTVAL (trueop1
),
3594 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3595 GET_MODE_UNIT_PRECISION (mode
) - 1))
3597 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3598 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3599 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3600 mode
, op0
, new_amount_rtx
);
3605 if (trueop1
== CONST0_RTX (mode
))
3607 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3609 /* Rotating ~0 always results in ~0. */
3610 if (CONST_INT_P (trueop0
)
3611 && HWI_COMPUTABLE_MODE_P (mode
)
3612 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3613 && ! side_effects_p (op1
))
3619 scalar constants c1, c2
3620 size (M2) > size (M1)
3621 c1 == size (M2) - size (M1)
3623 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3627 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3629 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3630 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3632 && CONST_INT_P (op1
)
3633 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3634 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3636 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3637 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3638 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3639 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3640 && subreg_lowpart_p (op0
))
3642 rtx tmp
= gen_int_shift_amount
3643 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3644 tmp
= simplify_gen_binary (code
, inner_mode
,
3645 XEXP (SUBREG_REG (op0
), 0),
3647 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3650 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3652 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3653 if (val
!= INTVAL (op1
))
3654 return simplify_gen_binary (code
, mode
, op0
,
3655 gen_int_shift_amount (mode
, val
));
3662 if (trueop1
== CONST0_RTX (mode
))
3664 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3666 goto canonicalize_shift
;
3669 if (trueop1
== CONST0_RTX (mode
))
3671 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3673 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3674 if (GET_CODE (op0
) == CLZ
3675 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3676 && CONST_INT_P (trueop1
)
3677 && STORE_FLAG_VALUE
== 1
3678 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3680 unsigned HOST_WIDE_INT zero_val
= 0;
3682 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3683 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3684 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3685 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3686 XEXP (op0
, 0), const0_rtx
);
3688 goto canonicalize_shift
;
3691 if (HWI_COMPUTABLE_MODE_P (mode
)
3692 && mode_signbit_p (mode
, trueop1
)
3693 && ! side_effects_p (op0
))
3695 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3697 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3703 if (HWI_COMPUTABLE_MODE_P (mode
)
3704 && CONST_INT_P (trueop1
)
3705 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3706 && ! side_effects_p (op0
))
3708 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3710 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3716 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3718 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3720 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3726 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3728 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3730 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3743 /* ??? There are simplifications that can be done. */
3747 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3748 return gen_vec_duplicate (mode
, op0
);
3749 if (valid_for_const_vector_p (mode
, op0
)
3750 && valid_for_const_vector_p (mode
, op1
))
3751 return gen_const_vec_series (mode
, op0
, op1
);
3755 if (!VECTOR_MODE_P (mode
))
3757 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3758 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3759 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3760 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3762 /* We can't reason about selections made at runtime. */
3763 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3766 if (vec_duplicate_p (trueop0
, &elt0
))
3769 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3770 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3773 /* Extract a scalar element from a nested VEC_SELECT expression
3774 (with optional nested VEC_CONCAT expression). Some targets
3775 (i386) extract scalar element from a vector using chain of
3776 nested VEC_SELECT expressions. When input operand is a memory
3777 operand, this operation can be simplified to a simple scalar
3778 load from an offseted memory address. */
3780 if (GET_CODE (trueop0
) == VEC_SELECT
3781 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3782 .is_constant (&n_elts
)))
3784 rtx op0
= XEXP (trueop0
, 0);
3785 rtx op1
= XEXP (trueop0
, 1);
3787 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3793 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3794 gcc_assert (i
< n_elts
);
3796 /* Select element, pointed by nested selector. */
3797 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3799 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3800 if (GET_CODE (op0
) == VEC_CONCAT
)
3802 rtx op00
= XEXP (op0
, 0);
3803 rtx op01
= XEXP (op0
, 1);
3805 machine_mode mode00
, mode01
;
3806 int n_elts00
, n_elts01
;
3808 mode00
= GET_MODE (op00
);
3809 mode01
= GET_MODE (op01
);
3811 /* Find out the number of elements of each operand.
3812 Since the concatenated result has a constant number
3813 of elements, the operands must too. */
3814 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3815 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3817 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3819 /* Select correct operand of VEC_CONCAT
3820 and adjust selector. */
3821 if (elem
< n_elts01
)
3832 vec
= rtvec_alloc (1);
3833 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3835 tmp
= gen_rtx_fmt_ee (code
, mode
,
3836 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3842 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3843 gcc_assert (GET_MODE_INNER (mode
)
3844 == GET_MODE_INNER (GET_MODE (trueop0
)));
3845 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3847 if (vec_duplicate_p (trueop0
, &elt0
))
3848 /* It doesn't matter which elements are selected by trueop1,
3849 because they are all the same. */
3850 return gen_vec_duplicate (mode
, elt0
);
3852 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3854 unsigned n_elts
= XVECLEN (trueop1
, 0);
3855 rtvec v
= rtvec_alloc (n_elts
);
3858 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3859 for (i
= 0; i
< n_elts
; i
++)
3861 rtx x
= XVECEXP (trueop1
, 0, i
);
3863 if (!CONST_INT_P (x
))
3866 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3870 return gen_rtx_CONST_VECTOR (mode
, v
);
3873 /* Recognize the identity. */
3874 if (GET_MODE (trueop0
) == mode
)
3876 bool maybe_ident
= true;
3877 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3879 rtx j
= XVECEXP (trueop1
, 0, i
);
3880 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3882 maybe_ident
= false;
3890 /* If we build {a,b} then permute it, build the result directly. */
3891 if (XVECLEN (trueop1
, 0) == 2
3892 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3893 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3894 && GET_CODE (trueop0
) == VEC_CONCAT
3895 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3896 && GET_MODE (XEXP (trueop0
, 0)) == mode
3897 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3898 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3900 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3901 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3904 gcc_assert (i0
< 4 && i1
< 4);
3905 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3906 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3908 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3911 if (XVECLEN (trueop1
, 0) == 2
3912 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3913 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3914 && GET_CODE (trueop0
) == VEC_CONCAT
3915 && GET_MODE (trueop0
) == mode
)
3917 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3918 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3921 gcc_assert (i0
< 2 && i1
< 2);
3922 subop0
= XEXP (trueop0
, i0
);
3923 subop1
= XEXP (trueop0
, i1
);
3925 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3928 /* If we select one half of a vec_concat, return that. */
3930 if (GET_CODE (trueop0
) == VEC_CONCAT
3931 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3933 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3935 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3937 rtx subop0
= XEXP (trueop0
, 0);
3938 rtx subop1
= XEXP (trueop0
, 1);
3939 machine_mode mode0
= GET_MODE (subop0
);
3940 machine_mode mode1
= GET_MODE (subop1
);
3941 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3942 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3944 bool success
= true;
3945 for (int i
= 1; i
< l0
; ++i
)
3947 rtx j
= XVECEXP (trueop1
, 0, i
);
3948 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3957 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3959 bool success
= true;
3960 for (int i
= 1; i
< l1
; ++i
)
3962 rtx j
= XVECEXP (trueop1
, 0, i
);
3963 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3975 if (XVECLEN (trueop1
, 0) == 1
3976 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3977 && GET_CODE (trueop0
) == VEC_CONCAT
)
3980 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3982 /* Try to find the element in the VEC_CONCAT. */
3983 while (GET_MODE (vec
) != mode
3984 && GET_CODE (vec
) == VEC_CONCAT
)
3986 poly_int64 vec_size
;
3988 if (CONST_INT_P (XEXP (vec
, 0)))
3990 /* vec_concat of two const_ints doesn't make sense with
3991 respect to modes. */
3992 if (CONST_INT_P (XEXP (vec
, 1)))
3995 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3996 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3999 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
4001 if (known_lt (offset
, vec_size
))
4002 vec
= XEXP (vec
, 0);
4003 else if (known_ge (offset
, vec_size
))
4006 vec
= XEXP (vec
, 1);
4010 vec
= avoid_constant_pool_reference (vec
);
4013 if (GET_MODE (vec
) == mode
)
4017 /* If we select elements in a vec_merge that all come from the same
4018 operand, select from that operand directly. */
4019 if (GET_CODE (op0
) == VEC_MERGE
)
4021 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
4022 if (CONST_INT_P (trueop02
))
4024 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
4025 bool all_operand0
= true;
4026 bool all_operand1
= true;
4027 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4029 rtx j
= XVECEXP (trueop1
, 0, i
);
4030 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
4031 all_operand1
= false;
4033 all_operand0
= false;
4035 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
4036 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
4037 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
4038 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
4042 /* If we have two nested selects that are inverses of each
4043 other, replace them with the source operand. */
4044 if (GET_CODE (trueop0
) == VEC_SELECT
4045 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4047 rtx op0_subop1
= XEXP (trueop0
, 1);
4048 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
4049 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
4051 /* Apply the outer ordering vector to the inner one. (The inner
4052 ordering vector is expressly permitted to be of a different
4053 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4054 then the two VEC_SELECTs cancel. */
4055 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
4057 rtx x
= XVECEXP (trueop1
, 0, i
);
4058 if (!CONST_INT_P (x
))
4060 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
4061 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
4064 return XEXP (trueop0
, 0);
4070 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
4071 ? GET_MODE (trueop0
)
4072 : GET_MODE_INNER (mode
));
4073 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
4074 ? GET_MODE (trueop1
)
4075 : GET_MODE_INNER (mode
));
4077 gcc_assert (VECTOR_MODE_P (mode
));
4078 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
4079 + GET_MODE_SIZE (op1_mode
),
4080 GET_MODE_SIZE (mode
)));
4082 if (VECTOR_MODE_P (op0_mode
))
4083 gcc_assert (GET_MODE_INNER (mode
)
4084 == GET_MODE_INNER (op0_mode
));
4086 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
4088 if (VECTOR_MODE_P (op1_mode
))
4089 gcc_assert (GET_MODE_INNER (mode
)
4090 == GET_MODE_INNER (op1_mode
));
4092 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
4094 unsigned int n_elts
, in_n_elts
;
4095 if ((GET_CODE (trueop0
) == CONST_VECTOR
4096 || CONST_SCALAR_INT_P (trueop0
)
4097 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
4098 && (GET_CODE (trueop1
) == CONST_VECTOR
4099 || CONST_SCALAR_INT_P (trueop1
)
4100 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
4101 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
4102 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
4104 rtvec v
= rtvec_alloc (n_elts
);
4106 for (i
= 0; i
< n_elts
; i
++)
4110 if (!VECTOR_MODE_P (op0_mode
))
4111 RTVEC_ELT (v
, i
) = trueop0
;
4113 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
4117 if (!VECTOR_MODE_P (op1_mode
))
4118 RTVEC_ELT (v
, i
) = trueop1
;
4120 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4125 return gen_rtx_CONST_VECTOR (mode
, v
);
4128 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4129 Restrict the transformation to avoid generating a VEC_SELECT with a
4130 mode unrelated to its operand. */
4131 if (GET_CODE (trueop0
) == VEC_SELECT
4132 && GET_CODE (trueop1
) == VEC_SELECT
4133 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4134 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4136 rtx par0
= XEXP (trueop0
, 1);
4137 rtx par1
= XEXP (trueop1
, 1);
4138 int len0
= XVECLEN (par0
, 0);
4139 int len1
= XVECLEN (par1
, 0);
4140 rtvec vec
= rtvec_alloc (len0
+ len1
);
4141 for (int i
= 0; i
< len0
; i
++)
4142 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4143 for (int i
= 0; i
< len1
; i
++)
4144 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4145 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4146 gen_rtx_PARALLEL (VOIDmode
, vec
));
4155 if (mode
== GET_MODE (op0
)
4156 && mode
== GET_MODE (op1
)
4157 && vec_duplicate_p (op0
, &elt0
)
4158 && vec_duplicate_p (op1
, &elt1
))
4160 /* Try applying the operator to ELT and see if that simplifies.
4161 We can duplicate the result if so.
4163 The reason we don't use simplify_gen_binary is that it isn't
4164 necessarily a win to convert things like:
4166 (plus:V (vec_duplicate:V (reg:S R1))
4167 (vec_duplicate:V (reg:S R2)))
4171 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4173 The first might be done entirely in vector registers while the
4174 second might need a move between register files. */
4175 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4178 return gen_vec_duplicate (mode
, tem
);
4184 /* Return true if binary operation OP distributes over addition in operand
4185 OPNO, with the other operand being held constant. OPNO counts from 1. */
4188 distributes_over_addition_p (rtx_code op
, int opno
)
4206 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4209 if (VECTOR_MODE_P (mode
)
4210 && code
!= VEC_CONCAT
4211 && GET_CODE (op0
) == CONST_VECTOR
4212 && GET_CODE (op1
) == CONST_VECTOR
)
4215 if (CONST_VECTOR_STEPPED_P (op0
)
4216 && CONST_VECTOR_STEPPED_P (op1
))
4217 /* We can operate directly on the encoding if:
4219 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4221 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4223 Addition and subtraction are the supported operators
4224 for which this is true. */
4225 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4226 else if (CONST_VECTOR_STEPPED_P (op0
))
4227 /* We can operate directly on stepped encodings if:
4231 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4233 which is true if (x -> x op c) distributes over addition. */
4234 step_ok_p
= distributes_over_addition_p (code
, 1);
4236 /* Similarly in reverse. */
4237 step_ok_p
= distributes_over_addition_p (code
, 2);
4238 rtx_vector_builder builder
;
4239 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4242 unsigned int count
= builder
.encoded_nelts ();
4243 for (unsigned int i
= 0; i
< count
; i
++)
4245 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4246 CONST_VECTOR_ELT (op0
, i
),
4247 CONST_VECTOR_ELT (op1
, i
));
4248 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4250 builder
.quick_push (x
);
4252 return builder
.build ();
4255 if (VECTOR_MODE_P (mode
)
4256 && code
== VEC_CONCAT
4257 && (CONST_SCALAR_INT_P (op0
)
4258 || CONST_FIXED_P (op0
)
4259 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4260 && (CONST_SCALAR_INT_P (op1
)
4261 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4262 || CONST_FIXED_P (op1
)))
4264 /* Both inputs have a constant number of elements, so the result
4266 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4267 rtvec v
= rtvec_alloc (n_elts
);
4269 gcc_assert (n_elts
>= 2);
4272 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4273 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4275 RTVEC_ELT (v
, 0) = op0
;
4276 RTVEC_ELT (v
, 1) = op1
;
4280 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4281 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4284 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4285 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4286 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4288 for (i
= 0; i
< op0_n_elts
; ++i
)
4289 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4290 for (i
= 0; i
< op1_n_elts
; ++i
)
4291 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4294 return gen_rtx_CONST_VECTOR (mode
, v
);
4297 if (SCALAR_FLOAT_MODE_P (mode
)
4298 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4299 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4300 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4311 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4313 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4315 for (i
= 0; i
< 4; i
++)
4332 real_from_target (&r
, tmp0
, mode
);
4333 return const_double_from_real_value (r
, mode
);
4337 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4338 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4341 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4342 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4344 if (HONOR_SNANS (mode
)
4345 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4346 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4349 real_convert (&f0
, mode
, opr0
);
4350 real_convert (&f1
, mode
, opr1
);
4353 && real_equal (&f1
, &dconst0
)
4354 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4357 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4358 && flag_trapping_math
4359 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4361 int s0
= REAL_VALUE_NEGATIVE (f0
);
4362 int s1
= REAL_VALUE_NEGATIVE (f1
);
4367 /* Inf + -Inf = NaN plus exception. */
4372 /* Inf - Inf = NaN plus exception. */
4377 /* Inf / Inf = NaN plus exception. */
4384 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4385 && flag_trapping_math
4386 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4387 || (REAL_VALUE_ISINF (f1
)
4388 && real_equal (&f0
, &dconst0
))))
4389 /* Inf * 0 = NaN plus exception. */
4392 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4394 real_convert (&result
, mode
, &value
);
4396 /* Don't constant fold this floating point operation if
4397 the result has overflowed and flag_trapping_math. */
4399 if (flag_trapping_math
4400 && MODE_HAS_INFINITIES (mode
)
4401 && REAL_VALUE_ISINF (result
)
4402 && !REAL_VALUE_ISINF (f0
)
4403 && !REAL_VALUE_ISINF (f1
))
4404 /* Overflow plus exception. */
4407 /* Don't constant fold this floating point operation if the
4408 result may dependent upon the run-time rounding mode and
4409 flag_rounding_math is set, or if GCC's software emulation
4410 is unable to accurately represent the result. */
4412 if ((flag_rounding_math
4413 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4414 && (inexact
|| !real_identical (&result
, &value
)))
4417 return const_double_from_real_value (result
, mode
);
4421 /* We can fold some multi-word operations. */
4422 scalar_int_mode int_mode
;
4423 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4424 && CONST_SCALAR_INT_P (op0
)
4425 && CONST_SCALAR_INT_P (op1
))
4428 wi::overflow_type overflow
;
4429 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4430 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4432 #if TARGET_SUPPORTS_WIDE_INT == 0
4433 /* This assert keeps the simplification from producing a result
4434 that cannot be represented in a CONST_DOUBLE but a lot of
4435 upstream callers expect that this function never fails to
4436 simplify something and so you if you added this to the test
4437 above the code would die later anyway. If this assert
4438 happens, you just need to make the port support wide int. */
4439 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4444 result
= wi::sub (pop0
, pop1
);
4448 result
= wi::add (pop0
, pop1
);
4452 result
= wi::mul (pop0
, pop1
);
4456 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4462 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4468 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4474 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4480 result
= wi::bit_and (pop0
, pop1
);
4484 result
= wi::bit_or (pop0
, pop1
);
4488 result
= wi::bit_xor (pop0
, pop1
);
4492 result
= wi::smin (pop0
, pop1
);
4496 result
= wi::smax (pop0
, pop1
);
4500 result
= wi::umin (pop0
, pop1
);
4504 result
= wi::umax (pop0
, pop1
);
4511 wide_int wop1
= pop1
;
4512 if (SHIFT_COUNT_TRUNCATED
)
4513 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4514 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4520 result
= wi::lrshift (pop0
, wop1
);
4524 result
= wi::arshift (pop0
, wop1
);
4528 result
= wi::lshift (pop0
, wop1
);
4539 if (wi::neg_p (pop1
))
4545 result
= wi::lrotate (pop0
, pop1
);
4549 result
= wi::rrotate (pop0
, pop1
);
4560 return immed_wide_int_const (result
, int_mode
);
4563 /* Handle polynomial integers. */
4564 if (NUM_POLY_INT_COEFFS
> 1
4565 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4566 && poly_int_rtx_p (op0
)
4567 && poly_int_rtx_p (op1
))
4569 poly_wide_int result
;
4573 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4577 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4581 if (CONST_SCALAR_INT_P (op1
))
4582 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4588 if (CONST_SCALAR_INT_P (op1
))
4590 wide_int shift
= rtx_mode_t (op1
, mode
);
4591 if (SHIFT_COUNT_TRUNCATED
)
4592 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4593 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4595 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4602 if (!CONST_SCALAR_INT_P (op1
)
4603 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4604 rtx_mode_t (op1
, mode
), &result
))
4611 return immed_wide_int_const (result
, int_mode
);
4619 /* Return a positive integer if X should sort after Y. The value
4620 returned is 1 if and only if X and Y are both regs. */
4623 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4627 result
= (commutative_operand_precedence (y
)
4628 - commutative_operand_precedence (x
));
4630 return result
+ result
;
4632 /* Group together equal REGs to do more simplification. */
4633 if (REG_P (x
) && REG_P (y
))
4634 return REGNO (x
) > REGNO (y
);
4639 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4640 operands may be another PLUS or MINUS.
4642 Rather than test for specific case, we do this by a brute-force method
4643 and do all possible simplifications until no more changes occur. Then
4644 we rebuild the operation.
4646 May return NULL_RTX when no changes were made. */
4649 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4652 struct simplify_plus_minus_op_data
4659 int changed
, n_constants
, canonicalized
= 0;
4662 memset (ops
, 0, sizeof ops
);
4664 /* Set up the two operands and then expand them until nothing has been
4665 changed. If we run out of room in our array, give up; this should
4666 almost never happen. */
4671 ops
[1].neg
= (code
== MINUS
);
4678 for (i
= 0; i
< n_ops
; i
++)
4680 rtx this_op
= ops
[i
].op
;
4681 int this_neg
= ops
[i
].neg
;
4682 enum rtx_code this_code
= GET_CODE (this_op
);
4688 if (n_ops
== ARRAY_SIZE (ops
))
4691 ops
[n_ops
].op
= XEXP (this_op
, 1);
4692 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4695 ops
[i
].op
= XEXP (this_op
, 0);
4697 /* If this operand was negated then we will potentially
4698 canonicalize the expression. Similarly if we don't
4699 place the operands adjacent we're re-ordering the
4700 expression and thus might be performing a
4701 canonicalization. Ignore register re-ordering.
4702 ??? It might be better to shuffle the ops array here,
4703 but then (plus (plus (A, B), plus (C, D))) wouldn't
4704 be seen as non-canonical. */
4707 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4712 ops
[i
].op
= XEXP (this_op
, 0);
4713 ops
[i
].neg
= ! this_neg
;
4719 if (n_ops
!= ARRAY_SIZE (ops
)
4720 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4721 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4722 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4724 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4725 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4726 ops
[n_ops
].neg
= this_neg
;
4734 /* ~a -> (-a - 1) */
4735 if (n_ops
!= ARRAY_SIZE (ops
))
4737 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4738 ops
[n_ops
++].neg
= this_neg
;
4739 ops
[i
].op
= XEXP (this_op
, 0);
4740 ops
[i
].neg
= !this_neg
;
4746 CASE_CONST_SCALAR_INT
:
4747 case CONST_POLY_INT
:
4751 ops
[i
].op
= neg_poly_int_rtx (mode
, this_op
);
4765 if (n_constants
> 1)
4768 gcc_assert (n_ops
>= 2);
4770 /* If we only have two operands, we can avoid the loops. */
4773 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4776 /* Get the two operands. Be careful with the order, especially for
4777 the cases where code == MINUS. */
4778 if (ops
[0].neg
&& ops
[1].neg
)
4780 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4783 else if (ops
[0].neg
)
4794 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4797 /* Now simplify each pair of operands until nothing changes. */
4800 /* Insertion sort is good enough for a small array. */
4801 for (i
= 1; i
< n_ops
; i
++)
4803 struct simplify_plus_minus_op_data save
;
4807 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4810 /* Just swapping registers doesn't count as canonicalization. */
4816 ops
[j
+ 1] = ops
[j
];
4818 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4823 for (i
= n_ops
- 1; i
> 0; i
--)
4824 for (j
= i
- 1; j
>= 0; j
--)
4826 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4827 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4829 if (lhs
!= 0 && rhs
!= 0)
4831 enum rtx_code ncode
= PLUS
;
4837 std::swap (lhs
, rhs
);
4839 else if (swap_commutative_operands_p (lhs
, rhs
))
4840 std::swap (lhs
, rhs
);
4842 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4843 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4845 rtx tem_lhs
, tem_rhs
;
4847 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4848 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4849 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4852 if (tem
&& !CONSTANT_P (tem
))
4853 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4856 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4860 /* Reject "simplifications" that just wrap the two
4861 arguments in a CONST. Failure to do so can result
4862 in infinite recursion with simplify_binary_operation
4863 when it calls us to simplify CONST operations.
4864 Also, if we find such a simplification, don't try
4865 any more combinations with this rhs: We must have
4866 something like symbol+offset, ie. one of the
4867 trivial CONST expressions we handle later. */
4868 if (GET_CODE (tem
) == CONST
4869 && GET_CODE (XEXP (tem
, 0)) == ncode
4870 && XEXP (XEXP (tem
, 0), 0) == lhs
4871 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4874 if (GET_CODE (tem
) == NEG
)
4875 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4876 if (poly_int_rtx_p (tem
) && lneg
)
4877 tem
= neg_poly_int_rtx (mode
, tem
), lneg
= 0;
4881 ops
[j
].op
= NULL_RTX
;
4891 /* Pack all the operands to the lower-numbered entries. */
4892 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4901 /* If nothing changed, check that rematerialization of rtl instructions
4902 is still required. */
4905 /* Perform rematerialization if only all operands are registers and
4906 all operations are PLUS. */
4907 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4908 around rs6000 and how it uses the CA register. See PR67145. */
4909 for (i
= 0; i
< n_ops
; i
++)
4911 || !REG_P (ops
[i
].op
)
4912 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4913 && fixed_regs
[REGNO (ops
[i
].op
)]
4914 && !global_regs
[REGNO (ops
[i
].op
)]
4915 && ops
[i
].op
!= frame_pointer_rtx
4916 && ops
[i
].op
!= arg_pointer_rtx
4917 && ops
[i
].op
!= stack_pointer_rtx
))
4922 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4924 && CONST_INT_P (ops
[1].op
)
4925 && CONSTANT_P (ops
[0].op
)
4927 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4929 /* We suppressed creation of trivial CONST expressions in the
4930 combination loop to avoid recursion. Create one manually now.
4931 The combination loop should have ensured that there is exactly
4932 one CONST_INT, and the sort will have ensured that it is last
4933 in the array and that any other constant will be next-to-last. */
4936 && poly_int_rtx_p (ops
[n_ops
- 1].op
)
4937 && CONSTANT_P (ops
[n_ops
- 2].op
))
4939 rtx value
= ops
[n_ops
- 1].op
;
4940 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4941 value
= neg_poly_int_rtx (mode
, value
);
4942 if (CONST_INT_P (value
))
4944 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4950 /* Put a non-negated operand first, if possible. */
4952 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4955 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4964 /* Now make the result by performing the requested operations. */
4967 for (i
= 1; i
< n_ops
; i
++)
4968 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4969 mode
, result
, ops
[i
].op
);
4974 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4976 plus_minus_operand_p (const_rtx x
)
4978 return GET_CODE (x
) == PLUS
4979 || GET_CODE (x
) == MINUS
4980 || (GET_CODE (x
) == CONST
4981 && GET_CODE (XEXP (x
, 0)) == PLUS
4982 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4983 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4986 /* Like simplify_binary_operation except used for relational operators.
4987 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4988 not also be VOIDmode.
4990 CMP_MODE specifies in which mode the comparison is done in, so it is
4991 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4992 the operands or, if both are VOIDmode, the operands are compared in
4993 "infinite precision". */
4995 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4996 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4998 rtx tem
, trueop0
, trueop1
;
5000 if (cmp_mode
== VOIDmode
)
5001 cmp_mode
= GET_MODE (op0
);
5002 if (cmp_mode
== VOIDmode
)
5003 cmp_mode
= GET_MODE (op1
);
5005 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
5008 if (SCALAR_FLOAT_MODE_P (mode
))
5010 if (tem
== const0_rtx
)
5011 return CONST0_RTX (mode
);
5012 #ifdef FLOAT_STORE_FLAG_VALUE
5014 REAL_VALUE_TYPE val
;
5015 val
= FLOAT_STORE_FLAG_VALUE (mode
);
5016 return const_double_from_real_value (val
, mode
);
5022 if (VECTOR_MODE_P (mode
))
5024 if (tem
== const0_rtx
)
5025 return CONST0_RTX (mode
);
5026 #ifdef VECTOR_STORE_FLAG_VALUE
5028 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
5029 if (val
== NULL_RTX
)
5031 if (val
== const1_rtx
)
5032 return CONST1_RTX (mode
);
5034 return gen_const_vec_duplicate (mode
, val
);
5044 /* For the following tests, ensure const0_rtx is op1. */
5045 if (swap_commutative_operands_p (op0
, op1
)
5046 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
5047 std::swap (op0
, op1
), code
= swap_condition (code
);
5049 /* If op0 is a compare, extract the comparison arguments from it. */
5050 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5051 return simplify_gen_relational (code
, mode
, VOIDmode
,
5052 XEXP (op0
, 0), XEXP (op0
, 1));
5054 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
5058 trueop0
= avoid_constant_pool_reference (op0
);
5059 trueop1
= avoid_constant_pool_reference (op1
);
5060 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
5064 /* This part of simplify_relational_operation is only used when CMP_MODE
5065 is not in class MODE_CC (i.e. it is a real comparison).
5067 MODE is the mode of the result, while CMP_MODE specifies in which
5068 mode the comparison is done in, so it is the mode of the operands. */
5071 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
5072 machine_mode cmp_mode
, rtx op0
, rtx op1
)
5074 enum rtx_code op0code
= GET_CODE (op0
);
5076 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
5078 /* If op0 is a comparison, extract the comparison arguments
5082 if (GET_MODE (op0
) == mode
)
5083 return simplify_rtx (op0
);
5085 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
5086 XEXP (op0
, 0), XEXP (op0
, 1));
5088 else if (code
== EQ
)
5090 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
5091 if (new_code
!= UNKNOWN
)
5092 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
5093 XEXP (op0
, 0), XEXP (op0
, 1));
5097 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5098 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5099 if ((code
== LTU
|| code
== GEU
)
5100 && GET_CODE (op0
) == PLUS
5101 && CONST_INT_P (XEXP (op0
, 1))
5102 && (rtx_equal_p (op1
, XEXP (op0
, 0))
5103 || rtx_equal_p (op1
, XEXP (op0
, 1)))
5104 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5105 && XEXP (op0
, 1) != const0_rtx
)
5108 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5109 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
5110 cmp_mode
, XEXP (op0
, 0), new_cmp
);
5113 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5114 transformed into (LTU a -C). */
5115 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
5116 && CONST_INT_P (XEXP (op0
, 1))
5117 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
5118 && XEXP (op0
, 1) != const0_rtx
)
5121 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5122 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
5123 XEXP (op0
, 0), new_cmp
);
5126 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5127 if ((code
== LTU
|| code
== GEU
)
5128 && GET_CODE (op0
) == PLUS
5129 && rtx_equal_p (op1
, XEXP (op0
, 1))
5130 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5131 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5132 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5133 copy_rtx (XEXP (op0
, 0)));
5135 if (op1
== const0_rtx
)
5137 /* Canonicalize (GTU x 0) as (NE x 0). */
5139 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5140 /* Canonicalize (LEU x 0) as (EQ x 0). */
5142 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5144 else if (op1
== const1_rtx
)
5149 /* Canonicalize (GE x 1) as (GT x 0). */
5150 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5153 /* Canonicalize (GEU x 1) as (NE x 0). */
5154 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5157 /* Canonicalize (LT x 1) as (LE x 0). */
5158 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5161 /* Canonicalize (LTU x 1) as (EQ x 0). */
5162 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5168 else if (op1
== constm1_rtx
)
5170 /* Canonicalize (LE x -1) as (LT x 0). */
5172 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5173 /* Canonicalize (GT x -1) as (GE x 0). */
5175 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5178 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5179 if ((code
== EQ
|| code
== NE
)
5180 && (op0code
== PLUS
|| op0code
== MINUS
)
5182 && CONSTANT_P (XEXP (op0
, 1))
5183 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5185 rtx x
= XEXP (op0
, 0);
5186 rtx c
= XEXP (op0
, 1);
5187 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5188 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5190 /* Detect an infinite recursive condition, where we oscillate at this
5191 simplification case between:
5192 A + B == C <---> C - B == A,
5193 where A, B, and C are all constants with non-simplifiable expressions,
5194 usually SYMBOL_REFs. */
5195 if (GET_CODE (tem
) == invcode
5197 && rtx_equal_p (c
, XEXP (tem
, 1)))
5200 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5203 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5204 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5205 scalar_int_mode int_mode
, int_cmp_mode
;
5207 && op1
== const0_rtx
5208 && is_int_mode (mode
, &int_mode
)
5209 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5210 /* ??? Work-around BImode bugs in the ia64 backend. */
5211 && int_mode
!= BImode
5212 && int_cmp_mode
!= BImode
5213 && nonzero_bits (op0
, int_cmp_mode
) == 1
5214 && STORE_FLAG_VALUE
== 1)
5215 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5216 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5217 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5219 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5220 if ((code
== EQ
|| code
== NE
)
5221 && op1
== const0_rtx
5223 return simplify_gen_relational (code
, mode
, cmp_mode
,
5224 XEXP (op0
, 0), XEXP (op0
, 1));
5226 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5227 if ((code
== EQ
|| code
== NE
)
5229 && rtx_equal_p (XEXP (op0
, 0), op1
)
5230 && !side_effects_p (XEXP (op0
, 0)))
5231 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5234 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5235 if ((code
== EQ
|| code
== NE
)
5237 && rtx_equal_p (XEXP (op0
, 1), op1
)
5238 && !side_effects_p (XEXP (op0
, 1)))
5239 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5242 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5243 if ((code
== EQ
|| code
== NE
)
5245 && CONST_SCALAR_INT_P (op1
)
5246 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5247 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5248 simplify_gen_binary (XOR
, cmp_mode
,
5249 XEXP (op0
, 1), op1
));
5251 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5252 constant folding if x/y is a constant. */
5253 if ((code
== EQ
|| code
== NE
)
5254 && (op0code
== AND
|| op0code
== IOR
)
5255 && !side_effects_p (op1
)
5256 && op1
!= CONST0_RTX (cmp_mode
))
5258 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5259 (eq/ne (and (not y) x) 0). */
5260 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5261 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5263 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5265 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5267 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5268 CONST0_RTX (cmp_mode
));
5271 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5272 (eq/ne (and (not x) y) 0). */
5273 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5274 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5276 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5278 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5280 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5281 CONST0_RTX (cmp_mode
));
5285 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5286 if ((code
== EQ
|| code
== NE
)
5287 && GET_CODE (op0
) == BSWAP
5288 && CONST_SCALAR_INT_P (op1
))
5289 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5290 simplify_gen_unary (BSWAP
, cmp_mode
,
5293 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5294 if ((code
== EQ
|| code
== NE
)
5295 && GET_CODE (op0
) == BSWAP
5296 && GET_CODE (op1
) == BSWAP
)
5297 return simplify_gen_relational (code
, mode
, cmp_mode
,
5298 XEXP (op0
, 0), XEXP (op1
, 0));
5300 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5306 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5307 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5308 XEXP (op0
, 0), const0_rtx
);
5313 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5314 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5315 XEXP (op0
, 0), const0_rtx
);
5334 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5335 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5336 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5337 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5338 For floating-point comparisons, assume that the operands were ordered. */
5341 comparison_result (enum rtx_code code
, int known_results
)
5347 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5350 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5354 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5357 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5361 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5364 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5367 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5369 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5372 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5374 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5377 return const_true_rtx
;
5385 /* Check if the given comparison (done in the given MODE) is actually
5386 a tautology or a contradiction. If the mode is VOID_mode, the
5387 comparison is done in "infinite precision". If no simplification
5388 is possible, this function returns zero. Otherwise, it returns
5389 either const_true_rtx or const0_rtx. */
5392 simplify_const_relational_operation (enum rtx_code code
,
5400 gcc_assert (mode
!= VOIDmode
5401 || (GET_MODE (op0
) == VOIDmode
5402 && GET_MODE (op1
) == VOIDmode
));
5404 /* If op0 is a compare, extract the comparison arguments from it. */
5405 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5407 op1
= XEXP (op0
, 1);
5408 op0
= XEXP (op0
, 0);
5410 if (GET_MODE (op0
) != VOIDmode
)
5411 mode
= GET_MODE (op0
);
5412 else if (GET_MODE (op1
) != VOIDmode
)
5413 mode
= GET_MODE (op1
);
5418 /* We can't simplify MODE_CC values since we don't know what the
5419 actual comparison is. */
5420 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5423 /* Make sure the constant is second. */
5424 if (swap_commutative_operands_p (op0
, op1
))
5426 std::swap (op0
, op1
);
5427 code
= swap_condition (code
);
5430 trueop0
= avoid_constant_pool_reference (op0
);
5431 trueop1
= avoid_constant_pool_reference (op1
);
5433 /* For integer comparisons of A and B maybe we can simplify A - B and can
5434 then simplify a comparison of that with zero. If A and B are both either
5435 a register or a CONST_INT, this can't help; testing for these cases will
5436 prevent infinite recursion here and speed things up.
5438 We can only do this for EQ and NE comparisons as otherwise we may
5439 lose or introduce overflow which we cannot disregard as undefined as
5440 we do not know the signedness of the operation on either the left or
5441 the right hand side of the comparison. */
5443 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5444 && (code
== EQ
|| code
== NE
)
5445 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5446 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5447 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5448 /* We cannot do this if tem is a nonzero address. */
5449 && ! nonzero_address_p (tem
))
5450 return simplify_const_relational_operation (signed_condition (code
),
5451 mode
, tem
, const0_rtx
);
5453 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5454 return const_true_rtx
;
5456 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5459 /* For modes without NaNs, if the two operands are equal, we know the
5460 result except if they have side-effects. Even with NaNs we know
5461 the result of unordered comparisons and, if signaling NaNs are
5462 irrelevant, also the result of LT/GT/LTGT. */
5463 if ((! HONOR_NANS (trueop0
)
5464 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5465 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5466 && ! HONOR_SNANS (trueop0
)))
5467 && rtx_equal_p (trueop0
, trueop1
)
5468 && ! side_effects_p (trueop0
))
5469 return comparison_result (code
, CMP_EQ
);
5471 /* If the operands are floating-point constants, see if we can fold
5473 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5474 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5475 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5477 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5478 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5480 /* Comparisons are unordered iff at least one of the values is NaN. */
5481 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5491 return const_true_rtx
;
5504 return comparison_result (code
,
5505 (real_equal (d0
, d1
) ? CMP_EQ
:
5506 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5509 /* Otherwise, see if the operands are both integers. */
5510 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5511 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
5513 /* It would be nice if we really had a mode here. However, the
5514 largest int representable on the target is as good as
5516 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5517 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5518 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5520 if (wi::eq_p (ptrueop0
, ptrueop1
))
5521 return comparison_result (code
, CMP_EQ
);
5524 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5525 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5526 return comparison_result (code
, cr
);
5530 /* Optimize comparisons with upper and lower bounds. */
5531 scalar_int_mode int_mode
;
5532 if (CONST_INT_P (trueop1
)
5533 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5534 && HWI_COMPUTABLE_MODE_P (int_mode
)
5535 && !side_effects_p (trueop0
))
5538 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5539 HOST_WIDE_INT val
= INTVAL (trueop1
);
5540 HOST_WIDE_INT mmin
, mmax
;
5550 /* Get a reduced range if the sign bit is zero. */
5551 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5558 rtx mmin_rtx
, mmax_rtx
;
5559 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5561 mmin
= INTVAL (mmin_rtx
);
5562 mmax
= INTVAL (mmax_rtx
);
5565 unsigned int sign_copies
5566 = num_sign_bit_copies (trueop0
, int_mode
);
5568 mmin
>>= (sign_copies
- 1);
5569 mmax
>>= (sign_copies
- 1);
5575 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5577 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5578 return const_true_rtx
;
5579 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5584 return const_true_rtx
;
5589 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5591 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5592 return const_true_rtx
;
5593 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5598 return const_true_rtx
;
5604 /* x == y is always false for y out of range. */
5605 if (val
< mmin
|| val
> mmax
)
5609 /* x > y is always false for y >= mmax, always true for y < mmin. */
5611 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5613 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5614 return const_true_rtx
;
5620 return const_true_rtx
;
5623 /* x < y is always false for y <= mmin, always true for y > mmax. */
5625 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5627 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5628 return const_true_rtx
;
5634 return const_true_rtx
;
5638 /* x != y is always true for y out of range. */
5639 if (val
< mmin
|| val
> mmax
)
5640 return const_true_rtx
;
5648 /* Optimize integer comparisons with zero. */
5649 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5650 && trueop1
== const0_rtx
5651 && !side_effects_p (trueop0
))
5653 /* Some addresses are known to be nonzero. We don't know
5654 their sign, but equality comparisons are known. */
5655 if (nonzero_address_p (trueop0
))
5657 if (code
== EQ
|| code
== LEU
)
5659 if (code
== NE
|| code
== GTU
)
5660 return const_true_rtx
;
5663 /* See if the first operand is an IOR with a constant. If so, we
5664 may be able to determine the result of this comparison. */
5665 if (GET_CODE (op0
) == IOR
)
5667 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5668 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5670 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5671 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5672 && (UINTVAL (inner_const
)
5683 return const_true_rtx
;
5687 return const_true_rtx
;
5701 /* Optimize comparison of ABS with zero. */
5702 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5703 && (GET_CODE (trueop0
) == ABS
5704 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5705 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5710 /* Optimize abs(x) < 0.0. */
5711 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5716 /* Optimize abs(x) >= 0.0. */
5717 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5718 return const_true_rtx
;
5722 /* Optimize ! (abs(x) < 0.0). */
5723 return const_true_rtx
;
5733 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5734 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5735 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5736 can be simplified to that or NULL_RTX if not.
5737 Assume X is compared against zero with CMP_CODE and the true
5738 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5741 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5743 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5746 /* Result on X == 0 and X !=0 respectively. */
5747 rtx on_zero
, on_nonzero
;
5751 on_nonzero
= false_val
;
5755 on_zero
= false_val
;
5756 on_nonzero
= true_val
;
5759 rtx_code op_code
= GET_CODE (on_nonzero
);
5760 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5761 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5762 || !CONST_INT_P (on_zero
))
5765 HOST_WIDE_INT op_val
;
5766 scalar_int_mode mode ATTRIBUTE_UNUSED
5767 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5768 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5769 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5770 && op_val
== INTVAL (on_zero
))
5776 /* Try to simplify X given that it appears within operand OP of a
5777 VEC_MERGE operation whose mask is MASK. X need not use the same
5778 vector mode as the VEC_MERGE, but it must have the same number of
5781 Return the simplified X on success, otherwise return NULL_RTX. */
5784 simplify_merge_mask (rtx x
, rtx mask
, int op
)
5786 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
5787 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
5788 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
5790 if (side_effects_p (XEXP (x
, 1 - op
)))
5793 return XEXP (x
, op
);
5796 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5797 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
5799 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5801 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
5802 GET_MODE (XEXP (x
, 0)));
5805 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5806 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5807 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5808 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
5810 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5811 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5814 if (COMPARISON_P (x
))
5815 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
5816 GET_MODE (XEXP (x
, 0)) != VOIDmode
5817 ? GET_MODE (XEXP (x
, 0))
5818 : GET_MODE (XEXP (x
, 1)),
5819 top0
? top0
: XEXP (x
, 0),
5820 top1
? top1
: XEXP (x
, 1));
5822 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
5823 top0
? top0
: XEXP (x
, 0),
5824 top1
? top1
: XEXP (x
, 1));
5827 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
5828 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5829 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5830 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5831 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
5832 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
5833 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
5835 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5836 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5837 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
5838 if (top0
|| top1
|| top2
)
5839 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
5840 GET_MODE (XEXP (x
, 0)),
5841 top0
? top0
: XEXP (x
, 0),
5842 top1
? top1
: XEXP (x
, 1),
5843 top2
? top2
: XEXP (x
, 2));
5849 /* Simplify CODE, an operation with result mode MODE and three operands,
5850 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5851 a constant. Return 0 if no simplifications is possible. */
5854 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5855 machine_mode op0_mode
, rtx op0
, rtx op1
,
/* NOTE(review): this listing is an elided extraction -- the switch on CODE,
   several case labels, braces and a few declarations (e.g. TEM, TEMP) are
   missing.  The comments added below mark section boundaries only; confirm
   structure against the complete source before relying on it. */
5858 bool any_change
= false;
5860 scalar_int_mode int_mode
, int_op0_mode
;
5861 unsigned int n_elts
;
/* Multiply-add handling; this section ends in gen_rtx_FMA below. */
5866 /* Simplify negations around the multiplication. */
5867 /* -a * -b + c => a * b + c. */
5868 if (GET_CODE (op0
) == NEG
)
5870 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5872 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5874 else if (GET_CODE (op1
) == NEG
)
5876 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5878 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5881 /* Canonicalize the two multiplication operands. */
5882 /* a * -b + c => -b * a + c. */
5883 if (swap_commutative_operands_p (op0
, op1
))
5884 std::swap (op0
, op1
), any_change
= true;
5887 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
/* Bit-field extraction (code == SIGN_EXTRACT or ZERO_EXTRACT, per the
   SIGN_EXTRACT test below) applied to all-constant operands: compute the
   extracted field value directly and return it as a CONST_INT. */
5892 if (CONST_INT_P (op0
)
5893 && CONST_INT_P (op1
)
5894 && CONST_INT_P (op2
)
5895 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5896 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5897 && HWI_COMPUTABLE_MODE_P (int_mode
))
5899 /* Extracting a bit-field from a constant */
5900 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5901 HOST_WIDE_INT op1val
= INTVAL (op1
);
5902 HOST_WIDE_INT op2val
= INTVAL (op2
);
5903 if (!BITS_BIG_ENDIAN
)
5905 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5906 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5908 /* Not enough information to calculate the bit position. */
5911 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5913 /* First zero-extend. */
5914 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5915 /* If desired, propagate sign bit. */
5916 if (code
== SIGN_EXTRACT
5917 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5919 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5922 return gen_int_mode (val
, int_mode
);
/* Conditional-select handling (IF_THEN_ELSE, per the gen_rtx_IF_THEN_ELSE
   calls below): a constant condition picks op1 or op2 outright. */
5927 if (CONST_INT_P (op0
))
5928 return op0
!= const0_rtx
? op1
: op2
;
5930 /* Convert c ? a : a into "a". */
5931 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5934 /* Convert a != b ? a : b into "a". */
5935 if (GET_CODE (op0
) == NE
5936 && ! side_effects_p (op0
)
5937 && ! HONOR_NANS (mode
)
5938 && ! HONOR_SIGNED_ZEROS (mode
)
5939 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5940 && rtx_equal_p (XEXP (op0
, 1), op2
))
5941 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5942 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5945 /* Convert a == b ? a : b into "b". */
5946 if (GET_CODE (op0
) == EQ
5947 && ! side_effects_p (op0
)
5948 && ! HONOR_NANS (mode
)
5949 && ! HONOR_SIGNED_ZEROS (mode
)
5950 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5951 && rtx_equal_p (XEXP (op0
, 1), op2
))
5952 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5953 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5956 /* Convert (!c) != {0,...,0} ? a : b into
5957 c != {0,...,0} ? b : a for vector modes. */
5958 if (VECTOR_MODE_P (GET_MODE (op1
))
5959 && GET_CODE (op0
) == NE
5960 && GET_CODE (XEXP (op0
, 0)) == NOT
5961 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5963 rtx cv
= XEXP (op0
, 1);
5966 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5969 for (int i
= 0; i
< nunits
; ++i
)
5970 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5977 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5978 XEXP (XEXP (op0
, 0), 0),
5980 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5985 /* Convert x == 0 ? N : clz (x) into clz (x) when
5986 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5987 Similarly for ctz (x). */
5988 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5989 && XEXP (op0
, 1) == const0_rtx
)
5992 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5998 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
6000 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
6001 ? GET_MODE (XEXP (op0
, 1))
6002 : GET_MODE (XEXP (op0
, 0)));
6005 /* Look for happy constants in op1 and op2. */
6006 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
6008 HOST_WIDE_INT t
= INTVAL (op1
);
6009 HOST_WIDE_INT f
= INTVAL (op2
);
6011 if (t
== STORE_FLAG_VALUE
&& f
== 0)
6012 code
= GET_CODE (op0
);
6013 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
6016 tmp
= reversed_comparison_code (op0
, NULL
);
6024 return simplify_gen_relational (code
, mode
, cmp_mode
,
6025 XEXP (op0
, 0), XEXP (op0
, 1));
6028 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
6029 cmp_mode
, XEXP (op0
, 0),
6032 /* See if any simplifications were possible. */
6035 if (CONST_INT_P (temp
))
6036 return temp
== const0_rtx
? op2
: op1
;
6038 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
/* Vector-merge handling (VEC_MERGE, per the comments below): both value
   operands must already have the vector result mode MODE. */
6044 gcc_assert (GET_MODE (op0
) == mode
);
6045 gcc_assert (GET_MODE (op1
) == mode
);
6046 gcc_assert (VECTOR_MODE_P (mode
));
6047 trueop2
= avoid_constant_pool_reference (op2
);
6048 if (CONST_INT_P (trueop2
)
6049 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
6051 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
6052 unsigned HOST_WIDE_INT mask
;
6053 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
6056 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
6058 if (!(sel
& mask
) && !side_effects_p (op0
))
6060 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
6063 rtx trueop0
= avoid_constant_pool_reference (op0
);
6064 rtx trueop1
= avoid_constant_pool_reference (op1
);
6065 if (GET_CODE (trueop0
) == CONST_VECTOR
6066 && GET_CODE (trueop1
) == CONST_VECTOR
)
6068 rtvec v
= rtvec_alloc (n_elts
);
6071 for (i
= 0; i
< n_elts
; i
++)
6072 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
6073 ? CONST_VECTOR_ELT (trueop0
, i
)
6074 : CONST_VECTOR_ELT (trueop1
, i
));
6075 return gen_rtx_CONST_VECTOR (mode
, v
);
6078 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6079 if no element from a appears in the result. */
6080 if (GET_CODE (op0
) == VEC_MERGE
)
6082 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
6083 if (CONST_INT_P (tem
))
6085 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
6086 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
6087 return simplify_gen_ternary (code
, mode
, mode
,
6088 XEXP (op0
, 1), op1
, op2
);
6089 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
6090 return simplify_gen_ternary (code
, mode
, mode
,
6091 XEXP (op0
, 0), op1
, op2
);
6094 if (GET_CODE (op1
) == VEC_MERGE
)
6096 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
6097 if (CONST_INT_P (tem
))
6099 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
6100 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
6101 return simplify_gen_ternary (code
, mode
, mode
,
6102 op0
, XEXP (op1
, 1), op2
);
6103 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
6104 return simplify_gen_ternary (code
, mode
, mode
,
6105 op0
, XEXP (op1
, 0), op2
);
6109 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6111 if (GET_CODE (op0
) == VEC_DUPLICATE
6112 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
6113 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
6114 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
6116 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
6117 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
6119 if (XEXP (XEXP (op0
, 0), 0) == op1
6120 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
6124 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6126 with (vec_concat (X) (B)) if N == 1 or
6127 (vec_concat (A) (X)) if N == 2. */
6128 if (GET_CODE (op0
) == VEC_DUPLICATE
6129 && GET_CODE (op1
) == CONST_VECTOR
6130 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6131 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6132 && IN_RANGE (sel
, 1, 2))
6134 rtx newop0
= XEXP (op0
, 0);
6135 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6137 std::swap (newop0
, newop1
);
6138 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6140 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6141 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6142 Only applies for vectors of two elements. */
6143 if (GET_CODE (op0
) == VEC_DUPLICATE
6144 && GET_CODE (op1
) == VEC_CONCAT
6145 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6146 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6147 && IN_RANGE (sel
, 1, 2))
6149 rtx newop0
= XEXP (op0
, 0);
6150 rtx newop1
= XEXP (op1
, 2 - sel
);
6151 rtx otherop
= XEXP (op1
, sel
- 1);
6153 std::swap (newop0
, newop1
);
6154 /* Don't want to throw away the other part of the vec_concat if
6155 it has side-effects. */
6156 if (!side_effects_p (otherop
))
6157 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6162 (vec_merge:outer (vec_duplicate:outer x:inner)
6163 (subreg:outer y:inner 0)
6166 with (vec_concat:outer x:inner y:inner) if N == 1,
6167 or (vec_concat:outer y:inner x:inner) if N == 2.
6169 Implicitly, this means we have a paradoxical subreg, but such
6170 a check is cheap, so make it anyway.
6172 Only applies for vectors of two elements. */
6173 if (GET_CODE (op0
) == VEC_DUPLICATE
6174 && GET_CODE (op1
) == SUBREG
6175 && GET_MODE (op1
) == GET_MODE (op0
)
6176 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6177 && paradoxical_subreg_p (op1
)
6178 && subreg_lowpart_p (op1
)
6179 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6180 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6181 && IN_RANGE (sel
, 1, 2))
6183 rtx newop0
= XEXP (op0
, 0);
6184 rtx newop1
= SUBREG_REG (op1
);
6186 std::swap (newop0
, newop1
);
6187 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6190 /* Same as above but with switched operands:
6191 Replace (vec_merge:outer (subreg:outer x:inner 0)
6192 (vec_duplicate:outer y:inner)
6195 with (vec_concat:outer x:inner y:inner) if N == 1,
6196 or (vec_concat:outer y:inner x:inner) if N == 2. */
6197 if (GET_CODE (op1
) == VEC_DUPLICATE
6198 && GET_CODE (op0
) == SUBREG
6199 && GET_MODE (op0
) == GET_MODE (op1
)
6200 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6201 && paradoxical_subreg_p (op0
)
6202 && subreg_lowpart_p (op0
)
6203 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6204 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6205 && IN_RANGE (sel
, 1, 2))
6207 rtx newop0
= SUBREG_REG (op0
);
6208 rtx newop1
= XEXP (op1
, 0);
6210 std::swap (newop0
, newop1
);
6211 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6214 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6216 with (vec_concat x y) or (vec_concat y x) depending on value
6218 if (GET_CODE (op0
) == VEC_DUPLICATE
6219 && GET_CODE (op1
) == VEC_DUPLICATE
6220 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6221 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6222 && IN_RANGE (sel
, 1, 2))
6224 rtx newop0
= XEXP (op0
, 0);
6225 rtx newop1
= XEXP (op1
, 0);
6227 std::swap (newop0
, newop1
);
6229 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
/* A merge of identical value operands is just that value, provided
   nothing discarded (the mask or op1) has side effects. */
6233 if (rtx_equal_p (op0
, op1
)
6234 && !side_effects_p (op2
) && !side_effects_p (op1
))
6237 if (!side_effects_p (op2
))
6240 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6242 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6244 return simplify_gen_ternary (code
, mode
, mode
,
6246 top1
? top1
: op1
, op2
);
6258 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6259 starting at byte FIRST_BYTE. Return true on success and add the
6260 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6261 that the bytes follow target memory order. Leave BYTES unmodified
6264 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6265 BYTES before calling this function. */
6268 native_encode_rtx (machine_mode mode
, rtx x
, vec
<target_unit
> &bytes
,
6269 unsigned int first_byte
, unsigned int num_bytes
)
/* NOTE(review): elided extraction -- some declarations (e.g. SMODE and the
   per-iteration LSB variables) and closing braces are missing from this
   listing; the remaining tokens are preserved verbatim. */
6271 /* Check the mode is sensible. */
6272 gcc_assert (GET_MODE (x
) == VOIDmode
6273 ? is_a
<scalar_int_mode
> (mode
)
6274 : mode
== GET_MODE (x
));
6276 if (GET_CODE (x
) == CONST_VECTOR
)
6278 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6279 is necessary. The only complication is that MODE_VECTOR_BOOL
6280 vectors can have several elements per byte. */
6281 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6282 GET_MODE_NUNITS (mode
));
6283 unsigned int elt
= first_byte
* BITS_PER_UNIT
/ elt_bits
;
6284 if (elt_bits
< BITS_PER_UNIT
)
6286 /* This is the only case in which elements can be smaller than
6288 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6289 for (unsigned int i
= 0; i
< num_bytes
; ++i
)
6291 target_unit value
= 0;
6292 for (unsigned int j
= 0; j
< BITS_PER_UNIT
; j
+= elt_bits
)
6294 value
|= (INTVAL (CONST_VECTOR_ELT (x
, elt
)) & 1) << j
;
6297 bytes
.quick_push (value
);
6302 unsigned int start
= bytes
.length ();
6303 unsigned int elt_bytes
= GET_MODE_UNIT_SIZE (mode
);
6304 /* Make FIRST_BYTE relative to ELT. */
6305 first_byte
%= elt_bytes
;
6306 while (num_bytes
> 0)
6308 /* Work out how many bytes we want from element ELT. */
6309 unsigned int chunk_bytes
= MIN (num_bytes
, elt_bytes
- first_byte
);
6310 if (!native_encode_rtx (GET_MODE_INNER (mode
),
6311 CONST_VECTOR_ELT (x
, elt
), bytes
,
6312 first_byte
, chunk_bytes
))
6314 bytes
.truncate (start
);
6319 num_bytes
-= chunk_bytes
;
6324 /* All subsequent cases are limited to scalars. */
6326 if (!is_a
<scalar_mode
> (mode
, &smode
))
6329 /* Make sure that the region is in range. */
6330 unsigned int end_byte
= first_byte
+ num_bytes
;
6331 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6332 gcc_assert (end_byte
<= mode_bytes
);
/* Scalar integer constants: emit each requested byte, locating its lsb
   via the subreg machinery so endianness is handled uniformly. */
6334 if (CONST_SCALAR_INT_P (x
))
6336 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6337 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6338 position of each byte. */
6339 rtx_mode_t
value (x
, smode
);
6340 wide_int_ref
value_wi (value
);
6341 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6343 /* Always constant because the inputs are. */
6345 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6346 /* Operate directly on the encoding rather than using
6347 wi::extract_uhwi, so that we preserve the sign or zero
6348 extension for modes that are not a whole number of bits in
6349 size. (Zero extension is only used for the combination of
6350 innermode == BImode && STORE_FLAG_VALUE == 1). */
6351 unsigned int elt
= lsb
/ HOST_BITS_PER_WIDE_INT
;
6352 unsigned int shift
= lsb
% HOST_BITS_PER_WIDE_INT
;
6353 unsigned HOST_WIDE_INT uhwi
= value_wi
.elt (elt
);
6354 bytes
.quick_push (uhwi
>> shift
);
6359 if (CONST_DOUBLE_P (x
))
6361 /* real_to_target produces an array of integers in target memory order.
6362 All integers before the last one have 32 bits; the last one may
6363 have 32 bits or fewer, depending on whether the mode bitsize
6364 is divisible by 32. Each of these integers is then laid out
6365 in target memory as any other integer would be. */
6366 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6367 real_to_target (el32
, CONST_DOUBLE_REAL_VALUE (x
), smode
);
6369 /* The (maximum) number of target bytes per element of el32. */
6370 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6371 gcc_assert (bytes_per_el32
!= 0);
6373 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6375 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6377 unsigned int index
= byte
/ bytes_per_el32
;
6378 unsigned int subbyte
= byte
% bytes_per_el32
;
6379 unsigned int int_bytes
= MIN (bytes_per_el32
,
6380 mode_bytes
- index
* bytes_per_el32
);
6381 /* Always constant because the inputs are. */
6383 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6384 bytes
.quick_push ((unsigned long) el32
[index
] >> lsb
);
/* Fixed-point constants: the low/high HOST_WIDE_INT halves of the
   CONST_FIXED value supply the bytes. */
6389 if (GET_CODE (x
) == CONST_FIXED
)
6391 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6393 /* Always constant because the inputs are. */
6395 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6396 unsigned HOST_WIDE_INT piece
= CONST_FIXED_VALUE_LOW (x
);
6397 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
6399 lsb
-= HOST_BITS_PER_WIDE_INT
;
6400 piece
= CONST_FIXED_VALUE_HIGH (x
);
6402 bytes
.quick_push (piece
>> lsb
);
6410 /* Read a vector of mode MODE from the target memory image given by BYTES,
6411 starting at byte FIRST_BYTE. The vector is known to be encodable using
6412 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6413 and BYTES is known to have enough bytes to supply NPATTERNS *
6414 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6415 BITS_PER_UNIT bits and the bytes are in target memory order.
6417 Return the vector on success, otherwise return NULL_RTX. */
6420 native_decode_vector_rtx (machine_mode mode
, vec
<target_unit
> bytes
,
6421 unsigned int first_byte
, unsigned int npatterns
,
6422 unsigned int nelts_per_pattern
)
/* NOTE(review): elided extraction -- some braces and a failure-return path
   are missing from this listing; the remaining tokens are verbatim. */
6424 rtx_vector_builder
builder (mode
, npatterns
, nelts_per_pattern
);
6426 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6427 GET_MODE_NUNITS (mode
));
6428 if (elt_bits
< BITS_PER_UNIT
)
6430 /* This is the only case in which elements can be smaller than a byte.
6431 Element 0 is always in the lsb of the containing byte. */
6432 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6433 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6435 unsigned int bit_index
= first_byte
* BITS_PER_UNIT
+ i
* elt_bits
;
6436 unsigned int byte_index
= bit_index
/ BITS_PER_UNIT
;
6437 unsigned int lsb
= bit_index
% BITS_PER_UNIT
;
6438 builder
.quick_push (bytes
[byte_index
] & (1 << lsb
)
6439 ? CONST1_RTX (BImode
)
6440 : CONST0_RTX (BImode
));
/* General case: decode each encoded element recursively as a scalar of
   the vector's inner mode, advancing FIRST_BYTE element by element. */
6445 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6447 rtx x
= native_decode_rtx (GET_MODE_INNER (mode
), bytes
, first_byte
);
6450 builder
.quick_push (x
);
6451 first_byte
+= elt_bits
/ BITS_PER_UNIT
;
6454 return builder
.build ();
6457 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6458 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
6459 bits and the bytes are in target memory order. The image has enough
6460 values to specify all bytes of MODE.
6462 Return the rtx on success, otherwise return NULL_RTX. */
6465 native_decode_rtx (machine_mode mode
, vec
<target_unit
> bytes
,
6466 unsigned int first_byte
)
/* NOTE(review): elided extraction -- the declarations of NELTS, R and F and
   several braces are missing from this listing; tokens are verbatim. */
6468 if (VECTOR_MODE_P (mode
))
6470 /* If we know at compile time how many elements there are,
6471 pull each element directly from BYTES. */
6473 if (GET_MODE_NUNITS (mode
).is_constant (&nelts
))
6474 return native_decode_vector_rtx (mode
, bytes
, first_byte
, nelts
, 1);
6478 scalar_int_mode imode
;
6479 if (is_a
<scalar_int_mode
> (mode
, &imode
)
6480 && GET_MODE_PRECISION (imode
) <= MAX_BITSIZE_MODE_ANY_INT
)
6482 /* Pull the bytes msb first, so that we can use simple
6483 shift-and-insert wide_int operations. */
6484 unsigned int size
= GET_MODE_SIZE (imode
);
6485 wide_int
result (wi::zero (GET_MODE_PRECISION (imode
)));
6486 for (unsigned int i
= 0; i
< size
; ++i
)
6488 unsigned int lsb
= (size
- i
- 1) * BITS_PER_UNIT
;
6489 /* Always constant because the inputs are. */
6490 unsigned int subbyte
6491 = subreg_size_offset_from_lsb (1, size
, lsb
).to_constant ();
6492 result
<<= BITS_PER_UNIT
;
6493 result
|= bytes
[first_byte
+ subbyte
];
6495 return immed_wide_int_const (result
, imode
);
6498 scalar_float_mode fmode
;
6499 if (is_a
<scalar_float_mode
> (mode
, &fmode
))
6501 /* We need to build an array of integers in target memory order.
6502 All integers before the last one have 32 bits; the last one may
6503 have 32 bits or fewer, depending on whether the mode bitsize
6504 is divisible by 32. */
6505 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6506 unsigned int num_el32
= CEIL (GET_MODE_BITSIZE (fmode
), 32);
6507 memset (el32
, 0, num_el32
* sizeof (long));
6509 /* The (maximum) number of target bytes per element of el32. */
6510 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6511 gcc_assert (bytes_per_el32
!= 0);
6513 unsigned int mode_bytes
= GET_MODE_SIZE (fmode
);
6514 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
6516 unsigned int index
= byte
/ bytes_per_el32
;
6517 unsigned int subbyte
= byte
% bytes_per_el32
;
6518 unsigned int int_bytes
= MIN (bytes_per_el32
,
6519 mode_bytes
- index
* bytes_per_el32
);
6520 /* Always constant because the inputs are. */
6522 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6523 el32
[index
] |= (unsigned long) bytes
[first_byte
+ byte
] << lsb
;
6526 real_from_target (&r
, el32
, fmode
);
6527 return const_double_from_real_value (r
, fmode
);
/* Fixed-point modes: rebuild the low/high halves of the fixed value (F,
   declared in elided text) byte by byte, then wrap it in a CONST_FIXED. */
6530 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
6532 scalar_mode smode
= as_a
<scalar_mode
> (mode
);
6538 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6539 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
6541 /* Always constant because the inputs are. */
6543 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6544 unsigned HOST_WIDE_INT unit
= bytes
[first_byte
+ byte
];
6545 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
6546 f
.data
.high
|= unit
<< (lsb
- HOST_BITS_PER_WIDE_INT
);
6548 f
.data
.low
|= unit
<< lsb
;
6550 return CONST_FIXED_FROM_FIXED_VALUE (f
, mode
);
6556 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
6557 is to convert a runtime BYTE value into a constant one. */
6560 simplify_const_vector_byte_offset (rtx x
, poly_uint64 byte
)
/* NOTE(review): elided extraction -- the final "return byte;" and some
   braces are missing from this listing; tokens are verbatim. */
6562 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6563 machine_mode mode
= GET_MODE (x
);
6564 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6565 GET_MODE_NUNITS (mode
));
6566 /* The number of bits needed to encode one element from each pattern. */
6567 unsigned int sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * elt_bits
;
6569 /* Identify the start point in terms of a sequence number and a byte offset
6570 within that sequence. */
6571 poly_uint64 first_sequence
;
6572 unsigned HOST_WIDE_INT subbit
;
6573 if (can_div_trunc_p (byte
* BITS_PER_UNIT
, sequence_bits
,
6574 &first_sequence
, &subbit
))
6576 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
6577 if (nelts_per_pattern
== 1)
6578 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
6580 byte
= subbit
/ BITS_PER_UNIT
;
6581 else if (nelts_per_pattern
== 2 && known_gt (first_sequence
, 0U))
6583 /* The subreg drops the first element from each pattern and
6584 only uses the second element. Find the first sequence
6585 that starts on a byte boundary. */
6586 subbit
+= least_common_multiple (sequence_bits
, BITS_PER_UNIT
);
6587 byte
= subbit
/ BITS_PER_UNIT
;
6593 /* Subroutine of simplify_subreg in which:
6595 - X is known to be a CONST_VECTOR
6596 - OUTERMODE is known to be a vector mode
6598 Try to handle the subreg by operating on the CONST_VECTOR encoding
6599 rather than on each individual element of the CONST_VECTOR.
6601 Return the simplified subreg on success, otherwise return NULL_RTX. */
6604 simplify_const_vector_subreg (machine_mode outermode
, rtx x
,
6605 machine_mode innermode
, unsigned int first_byte
)
/* NOTE(review): elided extraction -- early "return NULL_RTX;" statements
   and the final call's last argument are missing from this listing;
   tokens are verbatim. */
6607 /* Paradoxical subregs of vectors have dubious semantics. */
6608 if (paradoxical_subreg_p (outermode
, innermode
))
6611 /* We can only preserve the semantics of a stepped pattern if the new
6612 vector element is the same as the original one. */
6613 if (CONST_VECTOR_STEPPED_P (x
)
6614 && GET_MODE_INNER (outermode
) != GET_MODE_INNER (innermode
))
6617 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6618 unsigned int x_elt_bits
6619 = vector_element_size (GET_MODE_BITSIZE (innermode
),
6620 GET_MODE_NUNITS (innermode
));
6621 unsigned int out_elt_bits
6622 = vector_element_size (GET_MODE_BITSIZE (outermode
),
6623 GET_MODE_NUNITS (outermode
));
6625 /* The number of bits needed to encode one element from every pattern
6626 of the original vector. */
6627 unsigned int x_sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * x_elt_bits
;
6629 /* The number of bits needed to encode one element from every pattern
6631 unsigned int out_sequence_bits
6632 = least_common_multiple (x_sequence_bits
, out_elt_bits
);
6634 /* Work out the number of interleaved patterns in the output vector
6635 and the number of encoded elements per pattern. */
6636 unsigned int out_npatterns
= out_sequence_bits
/ out_elt_bits
;
6637 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
6639 /* The encoding scheme requires the number of elements to be a multiple
6640 of the number of patterns, so that each pattern appears at least once
6641 and so that the same number of elements appear from each pattern. */
6642 bool ok_p
= multiple_p (GET_MODE_NUNITS (outermode
), out_npatterns
);
6643 unsigned int const_nunits
;
6644 if (GET_MODE_NUNITS (outermode
).is_constant (&const_nunits
)
6645 && (!ok_p
|| out_npatterns
* nelts_per_pattern
> const_nunits
))
6647 /* Either the encoding is invalid, or applying it would give us
6648 more elements than we need. Just encode each element directly. */
6649 out_npatterns
= const_nunits
;
6650 nelts_per_pattern
= 1;
6655 /* Get enough bytes of X to form the new encoding. */
6656 unsigned int buffer_bits
= out_npatterns
* nelts_per_pattern
* out_elt_bits
;
6657 unsigned int buffer_bytes
= CEIL (buffer_bits
, BITS_PER_UNIT
);
6658 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
6659 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, buffer_bytes
))
6662 /* Reencode the bytes as OUTERMODE. */
6663 return native_decode_vector_rtx (outermode
, buffer
, 0, out_npatterns
,
6667 /* Try to simplify a subreg of a constant by encoding the subreg region
6668 as a sequence of target bytes and reading them back in the new mode.
6669 Return the new value on success, otherwise return null.
6671 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
6672 and byte offset FIRST_BYTE. */
6675 simplify_immed_subreg (fixed_size_mode outermode
, rtx x
,
6676 machine_mode innermode
, unsigned int first_byte
)
/* NOTE(review): elided extraction -- several early-return and brace lines
   are missing from this listing; tokens are verbatim. */
6678 unsigned int buffer_bytes
= GET_MODE_SIZE (outermode
);
6679 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
6681 /* Some ports misuse CCmode. */
6682 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (x
))
6685 /* Paradoxical subregs read undefined values for bytes outside of the
6686 inner value. However, we have traditionally always sign-extended
6687 integer constants and zero-extended others. */
6688 unsigned int inner_bytes
= buffer_bytes
;
6689 if (paradoxical_subreg_p (outermode
, innermode
))
6691 if (!GET_MODE_SIZE (innermode
).is_constant (&inner_bytes
))
6694 target_unit filler
= 0;
6695 if (CONST_SCALAR_INT_P (x
) && wi::neg_p (rtx_mode_t (x
, innermode
)))
6698 /* Add any leading bytes due to big-endian layout. The number of
6699 bytes must be constant because both modes have constant size. */
6700 unsigned int leading_bytes
6701 = -byte_lowpart_offset (outermode
, innermode
).to_constant ();
6702 for (unsigned int i
= 0; i
< leading_bytes
; ++i
)
6703 buffer
.quick_push (filler
);
6705 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
6708 /* Add any trailing bytes due to little-endian layout. */
6709 while (buffer
.length () < buffer_bytes
)
6710 buffer
.quick_push (filler
);
/* Second encode call: presumably the non-paradoxical branch, where the
   inner value alone fills the buffer -- confirm against full source. */
6714 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
6717 return native_decode_rtx (outermode
, buffer
, 0);
6720 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6721 Return 0 if no simplifications are possible. */
6723 simplify_subreg (machine_mode outermode
, rtx op
,
6724 machine_mode innermode
, poly_uint64 byte
)
6726 /* Little bit of sanity checking. */
6727 gcc_assert (innermode
!= VOIDmode
);
6728 gcc_assert (outermode
!= VOIDmode
);
6729 gcc_assert (innermode
!= BLKmode
);
6730 gcc_assert (outermode
!= BLKmode
);
6732 gcc_assert (GET_MODE (op
) == innermode
6733 || GET_MODE (op
) == VOIDmode
);
6735 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
6736 if (!multiple_p (byte
, outersize
))
6739 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
6740 if (maybe_ge (byte
, innersize
))
6743 if (outermode
== innermode
&& known_eq (byte
, 0U))
6746 if (GET_CODE (op
) == CONST_VECTOR
)
6747 byte
= simplify_const_vector_byte_offset (op
, byte
);
6749 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
6753 if (VECTOR_MODE_P (outermode
)
6754 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
6755 && vec_duplicate_p (op
, &elt
))
6756 return gen_vec_duplicate (outermode
, elt
);
6758 if (outermode
== GET_MODE_INNER (innermode
)
6759 && vec_duplicate_p (op
, &elt
))
6763 if (CONST_SCALAR_INT_P (op
)
6764 || CONST_DOUBLE_AS_FLOAT_P (op
)
6765 || CONST_FIXED_P (op
)
6766 || GET_CODE (op
) == CONST_VECTOR
)
6768 unsigned HOST_WIDE_INT cbyte
;
6769 if (byte
.is_constant (&cbyte
))
6771 if (GET_CODE (op
) == CONST_VECTOR
&& VECTOR_MODE_P (outermode
))
6773 rtx tmp
= simplify_const_vector_subreg (outermode
, op
,
6779 fixed_size_mode fs_outermode
;
6780 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
))
6781 return simplify_immed_subreg (fs_outermode
, op
, innermode
, cbyte
);
6785 /* Changing mode twice with SUBREG => just change it once,
6786 or not at all if changing back op starting mode. */
6787 if (GET_CODE (op
) == SUBREG
)
6789 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
6790 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
6793 if (outermode
== innermostmode
6794 && known_eq (byte
, 0U)
6795 && known_eq (SUBREG_BYTE (op
), 0))
6796 return SUBREG_REG (op
);
6798 /* Work out the memory offset of the final OUTERMODE value relative
6799 to the inner value of OP. */
6800 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
6802 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
6803 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
6805 /* See whether resulting subreg will be paradoxical. */
6806 if (!paradoxical_subreg_p (outermode
, innermostmode
))
6808 /* Bail out in case resulting subreg would be incorrect. */
6809 if (maybe_lt (final_offset
, 0)
6810 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
6811 || !multiple_p (final_offset
, outersize
))
6816 poly_int64 required_offset
= subreg_memory_offset (outermode
,
6818 if (maybe_ne (final_offset
, required_offset
))
6820 /* Paradoxical subregs always have byte offset 0. */
6824 /* Recurse for further possible simplifications. */
6825 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
6829 if (validate_subreg (outermode
, innermostmode
,
6830 SUBREG_REG (op
), final_offset
))
6832 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
6833 if (SUBREG_PROMOTED_VAR_P (op
)
6834 && SUBREG_PROMOTED_SIGN (op
) >= 0
6835 && GET_MODE_CLASS (outermode
) == MODE_INT
6836 && known_ge (outersize
, innersize
)
6837 && known_le (outersize
, innermostsize
)
6838 && subreg_lowpart_p (newx
))
6840 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6841 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
6848 /* SUBREG of a hard register => just change the register number
6849 and/or mode. If the hard register is not valid in that mode,
6850 suppress this simplification. If the hard register is the stack,
6851 frame, or argument pointer, leave this as a SUBREG. */
6853 if (REG_P (op
) && HARD_REGISTER_P (op
))
6855 unsigned int regno
, final_regno
;
6858 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
6859 if (HARD_REGISTER_NUM_P (final_regno
))
6861 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
6862 subreg_memory_offset (outermode
,
6865 /* Propagate original regno. We don't have any way to specify
6866 the offset inside original regno, so do so only for lowpart.
6867 The information is used only by alias analysis that cannot
6868 grog partial register anyway. */
6870 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
6871 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
6876 /* If we have a SUBREG of a register that we are replacing and we are
6877 replacing it with a MEM, make a new MEM and try replacing the
6878 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6879 or if we would be widening it. */
6882 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
6883 /* Allow splitting of volatile memory references in case we don't
6884 have instruction to move the whole thing. */
6885 && (! MEM_VOLATILE_P (op
)
6886 || ! have_insn_for (SET
, innermode
))
6887 && known_le (outersize
, innersize
))
6888 return adjust_address_nv (op
, outermode
, byte
);
6890 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6892 if (GET_CODE (op
) == CONCAT
6893 || GET_CODE (op
) == VEC_CONCAT
)
6895 poly_uint64 final_offset
;
6898 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
6899 if (part_mode
== VOIDmode
)
6900 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6901 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
6902 if (known_lt (byte
, part_size
))
6904 part
= XEXP (op
, 0);
6905 final_offset
= byte
;
6907 else if (known_ge (byte
, part_size
))
6909 part
= XEXP (op
, 1);
6910 final_offset
= byte
- part_size
;
6915 if (maybe_gt (final_offset
+ outersize
, part_size
))
6918 part_mode
= GET_MODE (part
);
6919 if (part_mode
== VOIDmode
)
6920 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6921 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
6924 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
6925 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
6930 (subreg (vec_merge (X)
6932 (const_int ((1 << N) | M)))
6933 (N * sizeof (outermode)))
6935 (subreg (X) (N * sizeof (outermode)))
6938 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
6939 && idx
< HOST_BITS_PER_WIDE_INT
6940 && GET_CODE (op
) == VEC_MERGE
6941 && GET_MODE_INNER (innermode
) == outermode
6942 && CONST_INT_P (XEXP (op
, 2))
6943 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
6944 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
6946 /* A SUBREG resulting from a zero extension may fold to zero if
6947 it extracts higher bits that the ZERO_EXTEND's source bits. */
6948 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
6950 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
6951 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
6952 return CONST0_RTX (outermode
);
6955 scalar_int_mode int_outermode
, int_innermode
;
6956 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
6957 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
6958 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
6960 /* Handle polynomial integers. The upper bits of a paradoxical
6961 subreg are undefined, so this is safe regardless of whether
6962 we're truncating or extending. */
6963 if (CONST_POLY_INT_P (op
))
6966 = poly_wide_int::from (const_poly_int_value (op
),
6967 GET_MODE_PRECISION (int_outermode
),
6969 return immed_wide_int_const (val
, int_outermode
);
6972 if (GET_MODE_PRECISION (int_outermode
)
6973 < GET_MODE_PRECISION (int_innermode
))
6975 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
6981 /* If OP is a vector comparison and the subreg is not changing the
6982 number of elements or the size of the elements, change the result
6983 of the comparison to the new mode. */
6984 if (COMPARISON_P (op
)
6985 && VECTOR_MODE_P (outermode
)
6986 && VECTOR_MODE_P (innermode
)
6987 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
6988 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
6989 GET_MODE_UNIT_SIZE (innermode
)))
6990 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
6991 XEXP (op
, 0), XEXP (op
, 1));
6995 /* Make a SUBREG operation or equivalent if it folds. */
6998 simplify_gen_subreg (machine_mode outermode
, rtx op
,
6999 machine_mode innermode
, poly_uint64 byte
)
7003 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
7007 if (GET_CODE (op
) == SUBREG
7008 || GET_CODE (op
) == CONCAT
7009 || GET_MODE (op
) == VOIDmode
)
7012 if (validate_subreg (outermode
, innermode
, op
, byte
))
7013 return gen_rtx_SUBREG (outermode
, op
, byte
);
7018 /* Generates a subreg to get the least significant part of EXPR (in mode
7019 INNER_MODE) to OUTER_MODE. */
7022 lowpart_subreg (machine_mode outer_mode
, rtx expr
,
7023 machine_mode inner_mode
)
7025 return simplify_gen_subreg (outer_mode
, expr
, inner_mode
,
7026 subreg_lowpart_offset (outer_mode
, inner_mode
));
7029 /* Simplify X, an rtx expression.
7031 Return the simplified expression or NULL if no simplifications
7034 This is the preferred entry point into the simplification routines;
7035 however, we still allow passes to call the more specific routines.
7037 Right now GCC has three (yes, three) major bodies of RTL simplification
7038 code that need to be unified.
7040 1. fold_rtx in cse.c. This code uses various CSE specific
7041 information to aid in RTL simplification.
7043 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7044 it uses combine specific information to aid in RTL
7047 3. The routines in this file.
7050 Long term we want to only have one body of simplification code; to
7051 get to that state I recommend the following steps:
7053 1. Pour over fold_rtx & simplify_rtx and move any simplifications
7054 which are not pass dependent state into these routines.
7056 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7057 use this routine whenever possible.
7059 3. Allow for pass dependent state to be provided to these
7060 routines and add simplifications based on the pass dependent
7061 state. Remove code from cse.c & combine.c that becomes
7064 It will take time, but ultimately the compiler will be easier to
7065 maintain and improve. It's totally silly that when we add a
7066 simplification that it needs to be added to 4 places (3 for RTL
7067 simplification and 1 for tree simplification. */
7070 simplify_rtx (const_rtx x
)
7072 const enum rtx_code code
= GET_CODE (x
);
7073 const machine_mode mode
= GET_MODE (x
);
7075 switch (GET_RTX_CLASS (code
))
7078 return simplify_unary_operation (code
, mode
,
7079 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
7080 case RTX_COMM_ARITH
:
7081 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
7082 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
7087 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
7090 case RTX_BITFIELD_OPS
:
7091 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
7092 XEXP (x
, 0), XEXP (x
, 1),
7096 case RTX_COMM_COMPARE
:
7097 return simplify_relational_operation (code
, mode
,
7098 ((GET_MODE (XEXP (x
, 0))
7100 ? GET_MODE (XEXP (x
, 0))
7101 : GET_MODE (XEXP (x
, 1))),
7107 return simplify_subreg (mode
, SUBREG_REG (x
),
7108 GET_MODE (SUBREG_REG (x
)),
7115 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7116 if (GET_CODE (XEXP (x
, 0)) == HIGH
7117 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
7130 namespace selftest
{
7132 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7135 make_test_reg (machine_mode mode
)
7137 static int test_reg_num
= LAST_VIRTUAL_REGISTER
+ 1;
7139 return gen_rtx_REG (mode
, test_reg_num
++);
7142 /* Test vector simplifications involving VEC_DUPLICATE in which the
7143 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7144 register that holds one element of MODE. */
7147 test_vector_ops_duplicate (machine_mode mode
, rtx scalar_reg
)
7149 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7150 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7151 poly_uint64 nunits
= GET_MODE_NUNITS (mode
);
7152 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
7154 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7155 rtx not_scalar_reg
= gen_rtx_NOT (inner_mode
, scalar_reg
);
7156 rtx duplicate_not
= gen_rtx_VEC_DUPLICATE (mode
, not_scalar_reg
);
7157 ASSERT_RTX_EQ (duplicate
,
7158 simplify_unary_operation (NOT
, mode
,
7159 duplicate_not
, mode
));
7161 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7162 rtx duplicate_neg
= gen_rtx_VEC_DUPLICATE (mode
, neg_scalar_reg
);
7163 ASSERT_RTX_EQ (duplicate
,
7164 simplify_unary_operation (NEG
, mode
,
7165 duplicate_neg
, mode
));
7167 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7168 ASSERT_RTX_EQ (duplicate
,
7169 simplify_binary_operation (PLUS
, mode
, duplicate
,
7170 CONST0_RTX (mode
)));
7172 ASSERT_RTX_EQ (duplicate
,
7173 simplify_binary_operation (MINUS
, mode
, duplicate
,
7174 CONST0_RTX (mode
)));
7176 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode
),
7177 simplify_binary_operation (MINUS
, mode
, duplicate
,
7181 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7182 rtx zero_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, const0_rtx
));
7183 ASSERT_RTX_PTR_EQ (scalar_reg
,
7184 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7185 duplicate
, zero_par
));
7187 unsigned HOST_WIDE_INT const_nunits
;
7188 if (nunits
.is_constant (&const_nunits
))
7190 /* And again with the final element. */
7191 rtx last_index
= gen_int_mode (const_nunits
- 1, word_mode
);
7192 rtx last_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, last_index
));
7193 ASSERT_RTX_PTR_EQ (scalar_reg
,
7194 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7195 duplicate
, last_par
));
7197 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7198 rtx vector_reg
= make_test_reg (mode
);
7199 for (unsigned HOST_WIDE_INT i
= 0; i
< const_nunits
; i
++)
7201 if (i
>= HOST_BITS_PER_WIDE_INT
)
7203 rtx mask
= GEN_INT ((HOST_WIDE_INT_1U
<< i
) | (i
+ 1));
7204 rtx vm
= gen_rtx_VEC_MERGE (mode
, duplicate
, vector_reg
, mask
);
7205 poly_uint64 offset
= i
* GET_MODE_SIZE (inner_mode
);
7206 ASSERT_RTX_EQ (scalar_reg
,
7207 simplify_gen_subreg (inner_mode
, vm
,
7212 /* Test a scalar subreg of a VEC_DUPLICATE. */
7213 poly_uint64 offset
= subreg_lowpart_offset (inner_mode
, mode
);
7214 ASSERT_RTX_EQ (scalar_reg
,
7215 simplify_gen_subreg (inner_mode
, duplicate
,
7218 machine_mode narrower_mode
;
7219 if (maybe_ne (nunits
, 2U)
7220 && multiple_p (nunits
, 2)
7221 && mode_for_vector (inner_mode
, 2).exists (&narrower_mode
)
7222 && VECTOR_MODE_P (narrower_mode
))
7224 /* Test VEC_DUPLICATE of a vector. */
7225 rtx_vector_builder
nbuilder (narrower_mode
, 2, 1);
7226 nbuilder
.quick_push (const0_rtx
);
7227 nbuilder
.quick_push (const1_rtx
);
7228 rtx_vector_builder
builder (mode
, 2, 1);
7229 builder
.quick_push (const0_rtx
);
7230 builder
.quick_push (const1_rtx
);
7231 ASSERT_RTX_EQ (builder
.build (),
7232 simplify_unary_operation (VEC_DUPLICATE
, mode
,
7236 /* Test VEC_SELECT of a vector. */
7238 = gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, const1_rtx
, const0_rtx
));
7239 rtx narrower_duplicate
7240 = gen_rtx_VEC_DUPLICATE (narrower_mode
, scalar_reg
);
7241 ASSERT_RTX_EQ (narrower_duplicate
,
7242 simplify_binary_operation (VEC_SELECT
, narrower_mode
,
7243 duplicate
, vec_par
));
7245 /* Test a vector subreg of a VEC_DUPLICATE. */
7246 poly_uint64 offset
= subreg_lowpart_offset (narrower_mode
, mode
);
7247 ASSERT_RTX_EQ (narrower_duplicate
,
7248 simplify_gen_subreg (narrower_mode
, duplicate
,
7253 /* Test vector simplifications involving VEC_SERIES in which the
7254 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7255 register that holds one element of MODE. */
7258 test_vector_ops_series (machine_mode mode
, rtx scalar_reg
)
7260 /* Test unary cases with VEC_SERIES arguments. */
7261 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7262 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7263 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7264 rtx series_0_r
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, scalar_reg
);
7265 rtx series_0_nr
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, neg_scalar_reg
);
7266 rtx series_nr_1
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
, const1_rtx
);
7267 rtx series_r_m1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, constm1_rtx
);
7268 rtx series_r_r
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, scalar_reg
);
7269 rtx series_nr_nr
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
,
7271 ASSERT_RTX_EQ (series_0_r
,
7272 simplify_unary_operation (NEG
, mode
, series_0_nr
, mode
));
7273 ASSERT_RTX_EQ (series_r_m1
,
7274 simplify_unary_operation (NEG
, mode
, series_nr_1
, mode
));
7275 ASSERT_RTX_EQ (series_r_r
,
7276 simplify_unary_operation (NEG
, mode
, series_nr_nr
, mode
));
7278 /* Test that a VEC_SERIES with a zero step is simplified away. */
7279 ASSERT_RTX_EQ (duplicate
,
7280 simplify_binary_operation (VEC_SERIES
, mode
,
7281 scalar_reg
, const0_rtx
));
7283 /* Test PLUS and MINUS with VEC_SERIES. */
7284 rtx series_0_1
= gen_const_vec_series (mode
, const0_rtx
, const1_rtx
);
7285 rtx series_0_m1
= gen_const_vec_series (mode
, const0_rtx
, constm1_rtx
);
7286 rtx series_r_1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, const1_rtx
);
7287 ASSERT_RTX_EQ (series_r_r
,
7288 simplify_binary_operation (PLUS
, mode
, series_0_r
,
7290 ASSERT_RTX_EQ (series_r_1
,
7291 simplify_binary_operation (PLUS
, mode
, duplicate
,
7293 ASSERT_RTX_EQ (series_r_m1
,
7294 simplify_binary_operation (PLUS
, mode
, duplicate
,
7296 ASSERT_RTX_EQ (series_0_r
,
7297 simplify_binary_operation (MINUS
, mode
, series_r_r
,
7299 ASSERT_RTX_EQ (series_r_m1
,
7300 simplify_binary_operation (MINUS
, mode
, duplicate
,
7302 ASSERT_RTX_EQ (series_r_1
,
7303 simplify_binary_operation (MINUS
, mode
, duplicate
,
7305 ASSERT_RTX_EQ (series_0_m1
,
7306 simplify_binary_operation (VEC_SERIES
, mode
, const0_rtx
,
7309 /* Test NEG on constant vector series. */
7310 ASSERT_RTX_EQ (series_0_m1
,
7311 simplify_unary_operation (NEG
, mode
, series_0_1
, mode
));
7312 ASSERT_RTX_EQ (series_0_1
,
7313 simplify_unary_operation (NEG
, mode
, series_0_m1
, mode
));
7315 /* Test PLUS and MINUS on constant vector series. */
7316 rtx scalar2
= gen_int_mode (2, inner_mode
);
7317 rtx scalar3
= gen_int_mode (3, inner_mode
);
7318 rtx series_1_1
= gen_const_vec_series (mode
, const1_rtx
, const1_rtx
);
7319 rtx series_0_2
= gen_const_vec_series (mode
, const0_rtx
, scalar2
);
7320 rtx series_1_3
= gen_const_vec_series (mode
, const1_rtx
, scalar3
);
7321 ASSERT_RTX_EQ (series_1_1
,
7322 simplify_binary_operation (PLUS
, mode
, series_0_1
,
7323 CONST1_RTX (mode
)));
7324 ASSERT_RTX_EQ (series_0_m1
,
7325 simplify_binary_operation (PLUS
, mode
, CONST0_RTX (mode
),
7327 ASSERT_RTX_EQ (series_1_3
,
7328 simplify_binary_operation (PLUS
, mode
, series_1_1
,
7330 ASSERT_RTX_EQ (series_0_1
,
7331 simplify_binary_operation (MINUS
, mode
, series_1_1
,
7332 CONST1_RTX (mode
)));
7333 ASSERT_RTX_EQ (series_1_1
,
7334 simplify_binary_operation (MINUS
, mode
, CONST1_RTX (mode
),
7336 ASSERT_RTX_EQ (series_1_1
,
7337 simplify_binary_operation (MINUS
, mode
, series_1_3
,
7340 /* Test MULT between constant vectors. */
7341 rtx vec2
= gen_const_vec_duplicate (mode
, scalar2
);
7342 rtx vec3
= gen_const_vec_duplicate (mode
, scalar3
);
7343 rtx scalar9
= gen_int_mode (9, inner_mode
);
7344 rtx series_3_9
= gen_const_vec_series (mode
, scalar3
, scalar9
);
7345 ASSERT_RTX_EQ (series_0_2
,
7346 simplify_binary_operation (MULT
, mode
, series_0_1
, vec2
));
7347 ASSERT_RTX_EQ (series_3_9
,
7348 simplify_binary_operation (MULT
, mode
, vec3
, series_1_3
));
7349 if (!GET_MODE_NUNITS (mode
).is_constant ())
7350 ASSERT_FALSE (simplify_binary_operation (MULT
, mode
, series_0_1
,
7353 /* Test ASHIFT between constant vectors. */
7354 ASSERT_RTX_EQ (series_0_2
,
7355 simplify_binary_operation (ASHIFT
, mode
, series_0_1
,
7356 CONST1_RTX (mode
)));
7357 if (!GET_MODE_NUNITS (mode
).is_constant ())
7358 ASSERT_FALSE (simplify_binary_operation (ASHIFT
, mode
, CONST1_RTX (mode
),
7362 /* Verify simplify_merge_mask works correctly. */
7365 test_vec_merge (machine_mode mode
)
7367 rtx op0
= make_test_reg (mode
);
7368 rtx op1
= make_test_reg (mode
);
7369 rtx op2
= make_test_reg (mode
);
7370 rtx op3
= make_test_reg (mode
);
7371 rtx op4
= make_test_reg (mode
);
7372 rtx op5
= make_test_reg (mode
);
7373 rtx mask1
= make_test_reg (SImode
);
7374 rtx mask2
= make_test_reg (SImode
);
7375 rtx vm1
= gen_rtx_VEC_MERGE (mode
, op0
, op1
, mask1
);
7376 rtx vm2
= gen_rtx_VEC_MERGE (mode
, op2
, op3
, mask1
);
7377 rtx vm3
= gen_rtx_VEC_MERGE (mode
, op4
, op5
, mask1
);
7379 /* Simple vec_merge. */
7380 ASSERT_EQ (op0
, simplify_merge_mask (vm1
, mask1
, 0));
7381 ASSERT_EQ (op1
, simplify_merge_mask (vm1
, mask1
, 1));
7382 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 0));
7383 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 1));
7385 /* Nested vec_merge.
7386 It's tempting to make this simplify right down to opN, but we don't
7387 because all the simplify_* functions assume that the operands have
7388 already been simplified. */
7389 rtx nvm
= gen_rtx_VEC_MERGE (mode
, vm1
, vm2
, mask1
);
7390 ASSERT_EQ (vm1
, simplify_merge_mask (nvm
, mask1
, 0));
7391 ASSERT_EQ (vm2
, simplify_merge_mask (nvm
, mask1
, 1));
7393 /* Intermediate unary op. */
7394 rtx unop
= gen_rtx_NOT (mode
, vm1
);
7395 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op0
),
7396 simplify_merge_mask (unop
, mask1
, 0));
7397 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op1
),
7398 simplify_merge_mask (unop
, mask1
, 1));
7400 /* Intermediate binary op. */
7401 rtx binop
= gen_rtx_PLUS (mode
, vm1
, vm2
);
7402 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op0
, op2
),
7403 simplify_merge_mask (binop
, mask1
, 0));
7404 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op1
, op3
),
7405 simplify_merge_mask (binop
, mask1
, 1));
7407 /* Intermediate ternary op. */
7408 rtx tenop
= gen_rtx_FMA (mode
, vm1
, vm2
, vm3
);
7409 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op0
, op2
, op4
),
7410 simplify_merge_mask (tenop
, mask1
, 0));
7411 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op1
, op3
, op5
),
7412 simplify_merge_mask (tenop
, mask1
, 1));
7415 rtx badop0
= gen_rtx_PRE_INC (mode
, op0
);
7416 rtx badvm
= gen_rtx_VEC_MERGE (mode
, badop0
, op1
, mask1
);
7417 ASSERT_EQ (badop0
, simplify_merge_mask (badvm
, mask1
, 0));
7418 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (badvm
, mask1
, 1));
7420 /* Called indirectly. */
7421 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode
, op0
, op3
, mask1
),
7422 simplify_rtx (nvm
));
7425 /* Test subregs of integer vector constant X, trying elements in
7426 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
7427 where NELTS is the number of elements in X. Subregs involving
7428 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
7431 test_vector_subregs_modes (rtx x
, poly_uint64 elt_bias
= 0,
7432 unsigned int first_valid
= 0)
7434 machine_mode inner_mode
= GET_MODE (x
);
7435 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7437 for (unsigned int modei
= 0; modei
< NUM_MACHINE_MODES
; ++modei
)
7439 machine_mode outer_mode
= (machine_mode
) modei
;
7440 if (!VECTOR_MODE_P (outer_mode
))
7443 unsigned int outer_nunits
;
7444 if (GET_MODE_INNER (outer_mode
) == int_mode
7445 && GET_MODE_NUNITS (outer_mode
).is_constant (&outer_nunits
)
7446 && multiple_p (GET_MODE_NUNITS (inner_mode
), outer_nunits
))
7448 /* Test subregs in which the outer mode is a smaller,
7449 constant-sized vector of the same element type. */
7451 = constant_lower_bound (GET_MODE_NUNITS (inner_mode
));
7452 for (unsigned int elt
= 0; elt
< limit
; elt
+= outer_nunits
)
7454 rtx expected
= NULL_RTX
;
7455 if (elt
>= first_valid
)
7457 rtx_vector_builder
builder (outer_mode
, outer_nunits
, 1);
7458 for (unsigned int i
= 0; i
< outer_nunits
; ++i
)
7459 builder
.quick_push (CONST_VECTOR_ELT (x
, elt
+ i
));
7460 expected
= builder
.build ();
7462 poly_uint64 byte
= (elt_bias
+ elt
) * GET_MODE_SIZE (int_mode
);
7463 ASSERT_RTX_EQ (expected
,
7464 simplify_subreg (outer_mode
, x
,
7468 else if (known_eq (GET_MODE_SIZE (outer_mode
),
7469 GET_MODE_SIZE (inner_mode
))
7470 && known_eq (elt_bias
, 0U)
7471 && (GET_MODE_CLASS (outer_mode
) != MODE_VECTOR_BOOL
7472 || known_eq (GET_MODE_BITSIZE (outer_mode
),
7473 GET_MODE_NUNITS (outer_mode
)))
7474 && (!FLOAT_MODE_P (outer_mode
)
7475 || (FLOAT_MODE_FORMAT (outer_mode
)->ieee_bits
7476 == GET_MODE_UNIT_PRECISION (outer_mode
)))
7477 && (GET_MODE_SIZE (inner_mode
).is_constant ()
7478 || !CONST_VECTOR_STEPPED_P (x
)))
7480 /* Try converting to OUTER_MODE and back. */
7481 rtx outer_x
= simplify_subreg (outer_mode
, x
, inner_mode
, 0);
7482 ASSERT_TRUE (outer_x
!= NULL_RTX
);
7483 ASSERT_RTX_EQ (x
, simplify_subreg (inner_mode
, outer_x
,
7488 if (BYTES_BIG_ENDIAN
== WORDS_BIG_ENDIAN
)
7490 /* Test each byte in the element range. */
7492 = constant_lower_bound (GET_MODE_SIZE (inner_mode
));
7493 for (unsigned int i
= 0; i
< limit
; ++i
)
7495 unsigned int elt
= i
/ GET_MODE_SIZE (int_mode
);
7496 rtx expected
= NULL_RTX
;
7497 if (elt
>= first_valid
)
7499 unsigned int byte_shift
= i
% GET_MODE_SIZE (int_mode
);
7500 if (BYTES_BIG_ENDIAN
)
7501 byte_shift
= GET_MODE_SIZE (int_mode
) - byte_shift
- 1;
7502 rtx_mode_t
vec_elt (CONST_VECTOR_ELT (x
, elt
), int_mode
);
7503 wide_int shifted_elt
7504 = wi::lrshift (vec_elt
, byte_shift
* BITS_PER_UNIT
);
7505 expected
= immed_wide_int_const (shifted_elt
, QImode
);
7507 poly_uint64 byte
= elt_bias
* GET_MODE_SIZE (int_mode
) + i
;
7508 ASSERT_RTX_EQ (expected
,
7509 simplify_subreg (QImode
, x
, inner_mode
, byte
));
7514 /* Test constant subregs of integer vector mode INNER_MODE, using 1
7515 element per pattern. */
7518 test_vector_subregs_repeating (machine_mode inner_mode
)
7520 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
7521 unsigned int min_nunits
= constant_lower_bound (nunits
);
7522 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7523 unsigned int count
= gcd (min_nunits
, 8);
7525 rtx_vector_builder
builder (inner_mode
, count
, 1);
7526 for (unsigned int i
= 0; i
< count
; ++i
)
7527 builder
.quick_push (gen_int_mode (8 - i
, int_mode
));
7528 rtx x
= builder
.build ();
7530 test_vector_subregs_modes (x
);
7531 if (!nunits
.is_constant ())
7532 test_vector_subregs_modes (x
, nunits
- min_nunits
);
7535 /* Test constant subregs of integer vector mode INNER_MODE, using 2
7536 elements per pattern. */
7539 test_vector_subregs_fore_back (machine_mode inner_mode
)
7541 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
7542 unsigned int min_nunits
= constant_lower_bound (nunits
);
7543 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7544 unsigned int count
= gcd (min_nunits
, 4);
7546 rtx_vector_builder
builder (inner_mode
, count
, 2);
7547 for (unsigned int i
= 0; i
< count
; ++i
)
7548 builder
.quick_push (gen_int_mode (i
, int_mode
));
7549 for (unsigned int i
= 0; i
< count
; ++i
)
7550 builder
.quick_push (gen_int_mode (-(int) i
, int_mode
));
7551 rtx x
= builder
.build ();
7553 test_vector_subregs_modes (x
);
7554 if (!nunits
.is_constant ())
7555 test_vector_subregs_modes (x
, nunits
- min_nunits
, count
);
7558 /* Test constant subregs of integer vector mode INNER_MODE, using 3
7559 elements per pattern. */
7562 test_vector_subregs_stepped (machine_mode inner_mode
)
7564 /* Build { 0, 1, 2, 3, ... }. */
7565 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7566 rtx_vector_builder
builder (inner_mode
, 1, 3);
7567 for (unsigned int i
= 0; i
< 3; ++i
)
7568 builder
.quick_push (gen_int_mode (i
, int_mode
));
7569 rtx x
= builder
.build ();
7571 test_vector_subregs_modes (x
);
7574 /* Test constant subregs of integer vector mode INNER_MODE. */
7577 test_vector_subregs (machine_mode inner_mode
)
7579 test_vector_subregs_repeating (inner_mode
);
7580 test_vector_subregs_fore_back (inner_mode
);
7581 test_vector_subregs_stepped (inner_mode
);
7584 /* Verify some simplifications involving vectors. */
7589 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
7591 machine_mode mode
= (machine_mode
) i
;
7592 if (VECTOR_MODE_P (mode
))
7594 rtx scalar_reg
= make_test_reg (GET_MODE_INNER (mode
));
7595 test_vector_ops_duplicate (mode
, scalar_reg
);
7596 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
7597 && maybe_gt (GET_MODE_NUNITS (mode
), 2))
7599 test_vector_ops_series (mode
, scalar_reg
);
7600 test_vector_subregs (mode
);
7602 test_vec_merge (mode
);
7607 template<unsigned int N
>
7608 struct simplify_const_poly_int_tests
7614 struct simplify_const_poly_int_tests
<1>
7616 static void run () {}
7619 /* Test various CONST_POLY_INT properties. */
7621 template<unsigned int N
>
7623 simplify_const_poly_int_tests
<N
>::run ()
7625 rtx x1
= gen_int_mode (poly_int64 (1, 1), QImode
);
7626 rtx x2
= gen_int_mode (poly_int64 (-80, 127), QImode
);
7627 rtx x3
= gen_int_mode (poly_int64 (-79, -128), QImode
);
7628 rtx x4
= gen_int_mode (poly_int64 (5, 4), QImode
);
7629 rtx x5
= gen_int_mode (poly_int64 (30, 24), QImode
);
7630 rtx x6
= gen_int_mode (poly_int64 (20, 16), QImode
);
7631 rtx x7
= gen_int_mode (poly_int64 (7, 4), QImode
);
7632 rtx x8
= gen_int_mode (poly_int64 (30, 24), HImode
);
7633 rtx x9
= gen_int_mode (poly_int64 (-30, -24), HImode
);
7634 rtx x10
= gen_int_mode (poly_int64 (-31, -24), HImode
);
7635 rtx two
= GEN_INT (2);
7636 rtx six
= GEN_INT (6);
7637 poly_uint64 offset
= subreg_lowpart_offset (QImode
, HImode
);
7639 /* These tests only try limited operation combinations. Fuller arithmetic
7640 testing is done directly on poly_ints. */
7641 ASSERT_EQ (simplify_unary_operation (NEG
, HImode
, x8
, HImode
), x9
);
7642 ASSERT_EQ (simplify_unary_operation (NOT
, HImode
, x8
, HImode
), x10
);
7643 ASSERT_EQ (simplify_unary_operation (TRUNCATE
, QImode
, x8
, HImode
), x5
);
7644 ASSERT_EQ (simplify_binary_operation (PLUS
, QImode
, x1
, x2
), x3
);
7645 ASSERT_EQ (simplify_binary_operation (MINUS
, QImode
, x3
, x1
), x2
);
7646 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, x4
, six
), x5
);
7647 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, six
, x4
), x5
);
7648 ASSERT_EQ (simplify_binary_operation (ASHIFT
, QImode
, x4
, two
), x6
);
7649 ASSERT_EQ (simplify_binary_operation (IOR
, QImode
, x4
, two
), x7
);
7650 ASSERT_EQ (simplify_subreg (HImode
, x5
, QImode
, 0), x8
);
7651 ASSERT_EQ (simplify_subreg (QImode
, x8
, HImode
, offset
), x5
);
7654 /* Run all of the selftests within this file. */
7657 simplify_rtx_c_tests ()
7660 simplify_const_poly_int_tests
<NUM_POLY_INT_COEFFS
>::run ();
7663 } // namespace selftest
7665 #endif /* CHECKING_P */