/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
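
/* Illustrative note (not in the original sources): HWI_SIGN_EXTEND yields
   the value the high half of a (low, high) pair must take for the pair to
   be the sign extension of LOW, e.g. assuming a 64-bit HOST_WIDE_INT:

     HWI_SIGN_EXTEND (0x7fffffffffffffff) ==  0   (all-zeros high word)
     HWI_SIGN_EXTEND (0x8000000000000000) == -1   (all-ones high word)  */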

static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
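
/* Illustrative sketch (not part of the original file): for QImode, whose
   precision is 8, only the constant 0x80 represents the sign bit, so

     mode_signbit_p (QImode, GEN_INT (0x80))   -> true
     mode_signbit_p (QImode, GEN_INT (0x40))   -> false

   would be the expected results.  */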

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
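
/* Usage sketch (an assumption-laden example, not from the file): callers
   typically build folded RTL with e.g.

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   which returns REG itself here, since (plus:SI reg (const_int 0))
   folds to its first operand.  */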

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
        return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
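
/* Illustrative note (not in the original sources): given a MEM whose
   address is a CONSTANT_POOL_ADDRESS_P SYMBOL_REF, e.g. a DFmode load of
   the pooled constant 1.0, avoid_constant_pool_reference returns the
   (const_double ...) itself, letting later folding see through the
   memory reference.  */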

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
                || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
                || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
              decl = NULL;
            else
              offset += bytepos + toffset_val;
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);
              poly_int64 n_offset, o_offset;

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              n = strip_offset (n, &n_offset);
              o = strip_offset (o, &o_offset);
              if (!(known_eq (o_offset, n_offset + offset)
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && known_eq (offset, 0))
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
                                         op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
                                            op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
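
/* Usage sketch (hypothetical operands, not from the file): comparing a
   register with itself in the integer domain,

     simplify_gen_relational (EQ, SImode, DImode, reg, reg)

   can fold to a constant-true result when REG has no side effects, since
   (eq x x) is known true; otherwise the EQ rtx is generated as given.  */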

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
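
/* Usage sketch (hypothetical REGs, not from the file): substituting one
   register for another inside an address calculation,

     rtx addr = gen_rtx_PLUS (Pmode, r1, GEN_INT (8));
     rtx new_addr = simplify_replace_rtx (addr, r1, r2);

   yields (plus:P r2 (const_int 8)), re-simplified along the way.  */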

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
        return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                   GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
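
/* Illustrative note (not in the original sources): the last transform
   above fires for e.g. (truncate:QI (ior:SI x (const_int 0xff))), where
   the constant already covers all of QImode, so the result is simply
   (const_int -1) in QImode.  */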

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
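
/* Illustrative note (not in the original sources): converting a SImode
   value to DFmode is always exact, because DFmode's 53 significand bits
   cover all 32 input bits; SImode to SFmode is only known exact when
   nonzero_bits/num_sign_bit_copies shrink the effective input below
   SFmode's 24 significand bits.  */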

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);


      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;
1028 if (GET_CODE (op
) == NEG
)
1029 return XEXP (op
, 0);
1031 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1032 If comparison is not reversible use
1034 if (GET_CODE (op
) == IF_THEN_ELSE
)
1036 rtx cond
= XEXP (op
, 0);
1037 rtx true_rtx
= XEXP (op
, 1);
1038 rtx false_rtx
= XEXP (op
, 2);
1040 if ((GET_CODE (true_rtx
) == NEG
1041 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1042 || (GET_CODE (false_rtx
) == NEG
1043 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1045 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1046 temp
= reversed_comparison (cond
, mode
);
1050 std::swap (true_rtx
, false_rtx
);
1052 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1053 mode
, temp
, true_rtx
, false_rtx
);
1057 /* (neg (plus X 1)) can become (not X). */
1058 if (GET_CODE (op
) == PLUS
1059 && XEXP (op
, 1) == const1_rtx
)
1060 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1062 /* Similarly, (neg (not X)) is (plus X 1). */
1063 if (GET_CODE (op
) == NOT
)
1064 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1067 /* (neg (minus X Y)) can become (minus Y X). This transformation
1068 isn't safe for modes with signed zeros, since if X and Y are
1069 both +0, (minus Y X) is the same as (minus X Y). If the
1070 rounding mode is towards +infinity (or -infinity) then the two
1071 expressions will be rounded differently. */
1072 if (GET_CODE (op
) == MINUS
1073 && !HONOR_SIGNED_ZEROS (mode
)
1074 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1075 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1077 if (GET_CODE (op
) == PLUS
1078 && !HONOR_SIGNED_ZEROS (mode
)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1081 /* (neg (plus A C)) is simplified to (minus -C A). */
1082 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1083 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1085 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1087 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1090 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1091 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1092 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1095 /* (neg (mult A B)) becomes (mult A (neg B)).
1096 This works even for floating-point values. */
1097 if (GET_CODE (op
) == MULT
1098 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1100 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1101 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1104 /* NEG commutes with ASHIFT since it is multiplication. Only do
1105 this if we can then eliminate the NEG (e.g., if the operand
1107 if (GET_CODE (op
) == ASHIFT
)
1109 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1111 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1114 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1115 C is equal to the width of MODE minus 1. */
1116 if (GET_CODE (op
) == ASHIFTRT
1117 && CONST_INT_P (XEXP (op
, 1))
1118 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1119 return simplify_gen_binary (LSHIFTRT
, mode
,
1120 XEXP (op
, 0), XEXP (op
, 1));
1122 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1123 C is equal to the width of MODE minus 1. */
1124 if (GET_CODE (op
) == LSHIFTRT
1125 && CONST_INT_P (XEXP (op
, 1))
1126 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1127 return simplify_gen_binary (ASHIFTRT
, mode
,
1128 XEXP (op
, 0), XEXP (op
, 1));
1130 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1131 if (GET_CODE (op
) == XOR
1132 && XEXP (op
, 1) == const1_rtx
1133 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1134 return plus_constant (mode
, XEXP (op
, 0), -1);
1136 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1137 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1138 if (GET_CODE (op
) == LT
1139 && XEXP (op
, 1) == const0_rtx
1140 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1142 int_mode
= as_a
<scalar_int_mode
> (mode
);
1143 int isize
= GET_MODE_PRECISION (inner
);
1144 if (STORE_FLAG_VALUE
== 1)
1146 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1147 gen_int_shift_amount (inner
,
1149 if (int_mode
== inner
)
1151 if (GET_MODE_PRECISION (int_mode
) > isize
)
1152 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1153 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1155 else if (STORE_FLAG_VALUE
== -1)
1157 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1158 gen_int_shift_amount (inner
,
1160 if (int_mode
== inner
)
1162 if (GET_MODE_PRECISION (int_mode
) > isize
)
1163 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1164 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1168 if (vec_series_p (op
, &base
, &step
))
1170 /* Only create a new series if we can simplify both parts. In other
1171 cases this isn't really a simplification, and it's not necessarily
1172 a win to replace a vector operation with a scalar operation. */
1173 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1174 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1177 step
= simplify_unary_operation (NEG
, inner_mode
,
1180 return gen_vec_series (mode
, base
, step
);

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_UNIT_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_UNIT_PRECISION (mode)
                      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_PRECISION (int_mode)
                      > GET_MODE_PRECISION (op_mode));
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      /* Try applying the operator to ELT and see if that simplifies.
         We can duplicate the result if so.

         The reason we don't use simplify_gen_unary is that it isn't
         necessarily a win to convert things like:

           (neg:V (vec_duplicate:V (reg:S R)))

         to:

           (vec_duplicate:V (neg:S (reg:S R)))

         The first might be done entirely in vector registers while the
         second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
        return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
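
/* Illustrative note (not in the original sources): a concrete instance of
   the NOT case above is (not:SI (not:SI x)) -> x, while the final
   vec_duplicate hook lets e.g. (neg:V4SI (vec_duplicate:V4SI (const_int 1)))
   fold to (vec_duplicate:V4SI (const_int -1)), because the scalar NEG
   simplifies to a constant.  */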
1711 /* Try to compute the value of a unary operation CODE whose output mode is to
1712 be MODE with input operand OP whose mode was originally OP_MODE.
1713 Return zero if the value cannot be computed. */
1715 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1716 rtx op
, machine_mode op_mode
)
1718 scalar_int_mode result_mode
;
1720 if (code
== VEC_DUPLICATE
)
1722 gcc_assert (VECTOR_MODE_P (mode
));
1723 if (GET_MODE (op
) != VOIDmode
)
1725 if (!VECTOR_MODE_P (GET_MODE (op
)))
1726 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1728 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1731 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1732 return gen_const_vec_duplicate (mode
, op
);
1733 if (GET_CODE (op
) == CONST_VECTOR
1734 && (CONST_VECTOR_DUPLICATE_P (op
)
1735 || CONST_VECTOR_NUNITS (op
).is_constant ()))
1737 unsigned int npatterns
= (CONST_VECTOR_DUPLICATE_P (op
)
1738 ? CONST_VECTOR_NPATTERNS (op
)
1739 : CONST_VECTOR_NUNITS (op
).to_constant ());
1740 gcc_assert (multiple_p (GET_MODE_NUNITS (mode
), npatterns
));
1741 rtx_vector_builder
builder (mode
, npatterns
, 1);
1742 for (unsigned i
= 0; i
< npatterns
; i
++)
1743 builder
.quick_push (CONST_VECTOR_ELT (op
, i
));
1744 return builder
.build ();
1748 if (VECTOR_MODE_P (mode
)
1749 && GET_CODE (op
) == CONST_VECTOR
1750 && known_eq (GET_MODE_NUNITS (mode
), CONST_VECTOR_NUNITS (op
)))
1752 gcc_assert (GET_MODE (op
) == op_mode
);
1754 rtx_vector_builder builder
;
1755 if (!builder
.new_unary_operation (mode
, op
, false))
1758 unsigned int count
= builder
.encoded_nelts ();
1759 for (unsigned int i
= 0; i
< count
; i
++)
1761 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1762 CONST_VECTOR_ELT (op
, i
),
1763 GET_MODE_INNER (op_mode
));
1764 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1766 builder
.quick_push (x
);
1768 return builder
.build ();
1771 /* The order of these tests is critical so that, for example, we don't
1772 check the wrong mode (input vs. output) for a conversion operation,
1773 such as FIX. At some point, this should be simplified. */
1775 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1779 if (op_mode
== VOIDmode
)
1781 /* CONST_INT have VOIDmode as the mode. We assume that all
1782 the bits of the constant are significant, though, this is
1783 a dangerous assumption as many times CONST_INTs are
1784 created and used with garbage in the bits outside of the
1785 precision of the implied mode of the const_int. */
1786 op_mode
= MAX_MODE_INT
;
1789 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1791 /* Avoid the folding if flag_signaling_nans is on and
1792 operand is a signaling NaN. */
1793 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1796 d
= real_value_truncate (mode
, d
);
1797 return const_double_from_real_value (d
, mode
);
1799 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1803 if (op_mode
== VOIDmode
)
1805 /* CONST_INT have VOIDmode as the mode. We assume that all
1806 the bits of the constant are significant, though, this is
1807 a dangerous assumption as many times CONST_INTs are
1808 created and used with garbage in the bits outside of the
1809 precision of the implied mode of the const_int. */
1810 op_mode
= MAX_MODE_INT
;
1813 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1815 /* Avoid the folding if flag_signaling_nans is on and
1816 operand is a signaling NaN. */
1817 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1820 d
= real_value_truncate (mode
, d
);
1821 return const_double_from_real_value (d
, mode
);
1824 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1826 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1827 if (width
> MAX_BITSIZE_MODE_ANY_INT
)
1831 scalar_int_mode imode
= (op_mode
== VOIDmode
1833 : as_a
<scalar_int_mode
> (op_mode
));
1834 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1837 #if TARGET_SUPPORTS_WIDE_INT == 0
1838 /* This assert keeps the simplification from producing a result
1839 that cannot be represented in a CONST_DOUBLE but a lot of
1840 upstream callers expect that this function never fails to
1841 simplify something and so you if you added this to the test
1842 above the code would die later anyway. If this assert
1843 happens, you just need to make the port support wide int. */
1844 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1850 result
= wi::bit_not (op0
);
1854 result
= wi::neg (op0
);
1858 result
= wi::abs (op0
);
1862 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1866 if (wi::ne_p (op0
, 0))
1867 int_value
= wi::clz (op0
);
1868 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1870 result
= wi::shwi (int_value
, result_mode
);
1874 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1878 if (wi::ne_p (op0
, 0))
1879 int_value
= wi::ctz (op0
);
1880 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1882 result
= wi::shwi (int_value
, result_mode
);
1886 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1890 result
= wi::shwi (wi::parity (op0
), result_mode
);
1894 result
= wide_int (op0
).bswap ();
1899 result
= wide_int::from (op0
, width
, UNSIGNED
);
1903 result
= wide_int::from (op0
, width
, SIGNED
);
1911 return immed_wide_int_const (result
, result_mode
);
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
        return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (*x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (real_less (x, &t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
        {
        case NEG:
          result = -const_poly_int_value (op);
          break;

        case NOT:
          result = ~const_poly_int_value (op);
          break;

        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
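
/* The FIX/UNSIGNED_FIX folding above saturates the same way the
   middle-end does: e.g. (fix:SI (const_double 1.0e10)) folds to
   0x7fffffff, the SImode signed maximum, (unsigned_fix:SI
   (const_double -2.5)) folds to 0, and a NaN operand folds to 0 for
   both operations.  */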
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
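
/* Example: in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), since the constant can be
   byte-swapped at compile time; the inner AND then has a chance to
   combine further with the definition of x.  */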
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Return a mask describing the COMPARISON.  */
static int
comparison_to_mask (enum rtx_code comparison)
{
  switch (comparison)
    {
    case LT:
      return 8;
    case GT:
      return 4;
    case EQ:
      return 2;
    case UNORDERED:
      return 1;

    case LTGT:
      return 12;
    case LE:
      return 10;
    case GE:
      return 6;
    case UNLT:
      return 9;
    case UNGT:
      return 5;
    case UNEQ:
      return 3;

    case ORDERED:
      return 14;
    case NE:
      return 13;
    case UNLE:
      return 11;
    case UNGE:
      return 7;

    default:
      gcc_unreachable ();
    }
}

/* Return a comparison corresponding to the MASK.  */
static enum rtx_code
mask_to_comparison (int mask)
{
  switch (mask)
    {
    case 8:
      return LT;
    case 4:
      return GT;
    case 2:
      return EQ;
    case 1:
      return UNORDERED;

    case 12:
      return LTGT;
    case 10:
      return LE;
    case 6:
      return GE;
    case 9:
      return UNLT;
    case 5:
      return UNGT;
    case 3:
      return UNEQ;

    case 14:
      return ORDERED;
    case 13:
      return NE;
    case 11:
      return UNLE;
    case 7:
      return UNGE;

    default:
      gcc_unreachable ();
    }
}

/* Return true if CODE is valid for comparisons of mode MODE, false
   otherwise.

   It is always safe to return false, even if the code was valid for the
   given mode, as that will merely suppress optimizations.  */

static bool
comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
{
  switch (code)
    {
      /* These are valid for integral, floating and vector modes.  */
    case NE:
    case EQ:
    case GE:
    case GT:
    case LE:
    case LT:
      return (INTEGRAL_MODE_P (mode)
              || FLOAT_MODE_P (mode)
              || VECTOR_MODE_P (mode));

      /* These are valid for floating point modes.  */
    case LTGT:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return FLOAT_MODE_P (mode);

      /* These are filtered out in simplify_logical_operation, but
         we check for them too as a matter of safety.  They are valid
         for integral and vector modes.  */
    case GEU:
    case GTU:
    case LEU:
    case LTU:
      return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);

    default:
      gcc_unreachable ();
    }
}
/* Simplify a logical operation CODE with result mode MODE, operating on OP0
   and OP1, which should both be relational operations.  Return 0 if no such
   simplification is possible.  */
rtx
simplify_logical_relational_operation (enum rtx_code code, machine_mode mode,
                                       rtx op0, rtx op1)
{
  /* We only handle IOR of two relational operations.  */
  if (code != IOR)
    return 0;

  if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
    return 0;

  if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
        && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
    return 0;

  enum rtx_code code0 = GET_CODE (op0);
  enum rtx_code code1 = GET_CODE (op1);

  /* We don't handle unsigned comparisons currently.  */
  if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
    return 0;
  if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
    return 0;

  int mask0 = comparison_to_mask (code0);
  int mask1 = comparison_to_mask (code1);

  int mask = mask0 | mask1;

  if (mask == 15)
    return const_true_rtx;

  code = mask_to_comparison (mask);

  /* Many comparison codes are only valid for certain mode classes.  */
  if (!comparison_code_valid_for_mode (code, mode))
    return 0;

  op0 = XEXP (op1, 0);
  op1 = XEXP (op1, 1);

  return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
}
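
/* Example: (ior (eq x y) (lt x y)) simplifies to (le x y), and
   (ior (ne x y) (eq x y)) covers every outcome and so yields
   const_true_rtx.  */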
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
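
/* Example: (plus (vec_series:V4SI (const_int 1) (const_int 2))
            (vec_duplicate:V4SI (const_int 3)))
   adds the bases and the steps separately and so simplifies to
   (vec_series:V4SI (const_int 4) (const_int 2)).  */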
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && poly_int_rtx_p (op1, &offset))
        return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && poly_int_rtx_p (op0, &offset))
        return plus_constant (mode, op1, offset);

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

              tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
              return (set_src_cost (tem, int_mode, speed)
                      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
            }
        }
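
      /* Example of the distribution above: (plus (ashift x (const_int 2)) x)
         gives coeff0 == 4 and coeff1 == 1, so it becomes
         (mult x (const_int 5)) provided the multiply is no more
         expensive than the original expression.  */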
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and
         STORE_FLAG_VALUE is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          tem = simplify_binary_operation_series (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
            return xop00;

          if (REG_P (xop00) && REG_P (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE (xop00) == mode
              && GET_MODE (xop10) == mode
              && GET_MODE_CLASS (mode) == MODE_CC)
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
         constants, in which case not retaining additions and
         subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
          && !contains_symbolic_reference_p (op1))
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (int_mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

              tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
              return (set_src_cost (tem, int_mode, speed)
                      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
            }
        }
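
      /* Example of the distribution above: (minus (mult x (const_int 6)) x)
         gives coeff0 == 6 and negcoeff1 == -1, so it becomes
         (mult x (const_int 5)) provided the multiply is no more
         expensive than the original expression.  */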
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && poly_int_rtx_p (op1, &offset))
        return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_poly_int_rtx (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          tem = simplify_binary_operation_series (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signaling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0,
                                        gen_int_shift_amount (mode, val));
        }

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

          if (real_equal (d1, &dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && real_equal (d1, &dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
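
      /* Examples of the MULT rules above: (mult x (const_int 8)) becomes
         (ashift x (const_int 3)), and (mult:DF x 2.0) becomes
         (plus:DF x x) for binary (non-decimal) float modes.  */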
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_UNIT_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for an ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
                                     &inner_mode)
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
          && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      /* The following happens with bitfield merging.
         (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
          && GET_CODE (op1) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (XEXP (op1, 1))
          && (INTVAL (XEXP (op0, 1))
              == ~INTVAL (XEXP (op1, 1))))
        {
          /* The IOR may be on both sides.  */
          rtx top0 = NULL_RTX, top1 = NULL_RTX;
          if (GET_CODE (XEXP (op1, 0)) == IOR)
            top0 = op0, top1 = op1;
          else if (GET_CODE (XEXP (op0, 0)) == IOR)
            top0 = op1, top1 = op0;
          if (top0 && top1)
            {
              /* X may be on either side of the inner IOR.  */
              rtx tem = NULL_RTX;
              if (rtx_equal_p (XEXP (top0, 0),
                               XEXP (XEXP (top1, 0), 0)))
                tem = XEXP (XEXP (top1, 0), 1);
              else if (rtx_equal_p (XEXP (top0, 0),
                                    XEXP (XEXP (top1, 0), 1)))
                tem = XEXP (XEXP (top1, 0), 0);
              if (tem)
                return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
                                            simplify_gen_binary
                                              (AND, mode, tem,
                                               XEXP (top1, 1)));
            }
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_logical_relational_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
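
      /* Example of the rotate recognition above: in SImode,
         (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
         has shift counts summing to the 32-bit precision and so becomes
         (rotate x (const_int 24)).  */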
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
         constants, simplify to (xor (ior A C) (B&~C)^D), canceling
         out bits inverted twice and not set by C.  Similarly, given
         (xor (and (xor A B) C) D), simplify without inverting C in
         the xor operand: (xor (and A C) (B&C)^D).  */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (op1)
               && CONST_INT_P (XEXP (op0, 1))
               && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
        {
          enum rtx_code op = GET_CODE (op0);
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx d = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);
          HOST_WIDE_INT dval = INTVAL (d);
          HOST_WIDE_INT xcval;

          if (op == IOR)
            xcval = ~cval;
          else
            xcval = cval;

          return simplify_gen_binary (XOR, mode,
                                      simplify_gen_binary (op, mode, a, c),
                                      gen_int_mode ((bval & xcval) ^ dval,
                                                    mode));
        }

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          /* Instead of computing ~A&C, we compute its negated value,
             ~(A|~C).  If it yields -1, ~A&C is zero, so we can
             optimize for sure.  If it does not simplify, we still try
             to compute ~A&C below, but since that always allocates
             RTL, we don't try that before committing to returning a
             simplified expression.  */
          rtx n_na_c = simplify_binary_operation (IOR, mode, a,
                                                  GEN_INT (~cval));

          if ((~cval & bval) == 0)
            {
              rtx na_c = NULL_RTX;
              if (n_na_c)
                na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
              else
                {
                  /* If ~A does not simplify, don't bother: we don't
                     want to simplify 2 operations into 3, and if na_c
                     were to simplify with na, n_na_c would have
                     simplified as well.  */
                  rtx na = simplify_unary_operation (NOT, mode, a, mode);
                  if (na)
                    na_c = simplify_gen_binary (AND, mode, na, c);
                }

              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval,
                                                          mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (n_na_c == CONSTM1_RTX (mode))
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval
                                                                  & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }

      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
         do (ior (and A ~C) (and B C)) which is a machine instruction on some
         machines, and also has a shorter instruction path length.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 0)) == XOR
          && CONST_INT_P (XEXP (op0, 1))
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
        {
          rtx a = trueop1;
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
          rtx bc = simplify_gen_binary (AND, mode, b, c);
          return simplify_gen_binary (IOR, mode, a_nc, bc);
        }
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
      else if (GET_CODE (op0) == AND
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (XEXP (op0, 1))
               && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
        {
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = trueop1;
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
          rtx ac = simplify_gen_binary (AND, mode, a, c);
          return simplify_gen_binary (IOR, mode, ac, b_nc);
        }

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
          && STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && val_signbit_p (int_mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, int_mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0),
                                                           op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 1)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 1)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
          && !cfun->can_throw_non_call_exceptions)
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0,
                                    gen_int_shift_amount (mode, val));
      break;
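
      /* Example: (udiv x (const_int 16)) becomes
         (lshiftrt x (const_int 4)).  */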
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

              /* x/-1.0 is -x.  */
              if (real_equal (d1, &dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;
                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (UINTVAL (trueop1) - 1,
                                                  mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
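
      /* Example: (umod x (const_int 8)) becomes (and x (const_int 7)),
         since the modulus is a power of two.  */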
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_UNIT_PRECISION (mode) - 1))
        {
          int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
          rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
          return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                      mode, op0, new_amount_rtx);
        }
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                                     <low_part>)
                          (const_int <c2>))
         to:
         (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
                                     &inner_mode)
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = gen_int_shift_amount
            (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));

          /* Combine would usually zero out the value when combining two
             local shifts and the range becomes larger or equal to the mode.
             However since we fold away one of the shifts here combine won't
             see it so we should immediately zero the result if it's out of
             range.  */
          if (code == LSHIFTRT
              && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
            tmp = const0_rtx;
          else
            tmp = simplify_gen_binary (code,
                                       inner_mode,
                                       XEXP (SUBREG_REG (op0), 0),
                                       tmp);

          return lowpart_subreg (int_mode, tmp, inner_mode);
        }

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0,
                                        gen_int_shift_amount (mode, val));
        }
      break;
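
      /* Example of the narrowing above, taking M1 = SImode and M2 = DImode
         (so c1 = 32):
         (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
                      (const_int 5))
         becomes (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 37)) 0).  */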
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
        {
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
              && zero_val == GET_MODE_PRECISION (inner_mode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, inner_mode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
    case SMIN:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
3816 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3817 return gen_vec_duplicate (mode
, op0
);
3818 if (valid_for_const_vector_p (mode
, op0
)
3819 && valid_for_const_vector_p (mode
, op1
))
3820 return gen_const_vec_series (mode
, op0
, op1
);
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);

          /* We can't reason about selections made at runtime.  */
          if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            return 0;

          if (vec_duplicate_p (trueop0, &elt0))
            return elt0;

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain of
             nested VEC_SELECT expressions.  When the input operand is a
             memory operand, this operation can be simplified to a simple
             scalar load from an offset memory address.  */
          int n_elts;
          if (GET_CODE (trueop0) == VEC_SELECT
              && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
                  .is_constant (&n_elts)))
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select the element pointed to by the nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.
                     Since the concatenated result has a constant number
                     of elements, the operands must too.  */
                  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
                  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
        }
      else
        {
3911 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3912 gcc_assert (GET_MODE_INNER (mode
)
3913 == GET_MODE_INNER (GET_MODE (trueop0
)));
3914 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3916 if (vec_duplicate_p (trueop0
, &elt0
))
3917 /* It doesn't matter which elements are selected by trueop1,
3918 because they are all the same. */
3919 return gen_vec_duplicate (mode
, elt0
);
3921 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3923 unsigned n_elts
= XVECLEN (trueop1
, 0);
3924 rtvec v
= rtvec_alloc (n_elts
);
3927 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3928 for (i
= 0; i
< n_elts
; i
++)
3930 rtx x
= XVECEXP (trueop1
, 0, i
);
3932 if (!CONST_INT_P (x
))
3935 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3939 return gen_rtx_CONST_VECTOR (mode
, v
);
3942 /* Recognize the identity. */
3943 if (GET_MODE (trueop0
) == mode
)
3945 bool maybe_ident
= true;
3946 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3948 rtx j
= XVECEXP (trueop1
, 0, i
);
3949 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3951 maybe_ident
= false;
	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
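	  /* Illustrative example (commentary only): with
	       trueop0 = (vec_concat:V4SF (vec_concat:V2SF a b)
					  (vec_concat:V2SF c d))
	     and selector (parallel [0 3]), the transformation above
	     rebuilds the result directly as (vec_concat:V2SF a d)
	     instead of materializing the permutation.  */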
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	  /* If we select one half of a vec_concat, return that.  */
	  int l0, l1;
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
		  .is_constant (&l0))
	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
		  .is_constant (&l1))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	  if (XVECLEN (trueop1, 0) == 1
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && GET_CODE (trueop0) == VEC_CONCAT)
	    {
	      rtx vec = trueop0;
	      poly_uint64 offset
		= INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	      /* Try to find the element in the VEC_CONCAT.  */
	      while (GET_MODE (vec) != mode
		     && GET_CODE (vec) == VEC_CONCAT)
		{
		  poly_int64 vec_size;

		  if (CONST_INT_P (XEXP (vec, 0)))
		    {
		      /* A vec_concat of two const_ints doesn't make sense
			 with respect to modes.  */
		      if (CONST_INT_P (XEXP (vec, 1)))
			return 0;

		      vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
				 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		    }
		  else
		    vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

		  if (known_lt (offset, vec_size))
		    vec = XEXP (vec, 0);
		  else if (known_ge (offset, vec_size))
		    {
		      offset -= vec_size;
		      vec = XEXP (vec, 1);
		    }
		  else
		    break;
		  vec = avoid_constant_pool_reference (vec);
		}

	      if (GET_MODE (vec) == mode)
		return vec;
	    }
	  /* If we select elements in a vec_merge that all come from the same
	     operand, select from that operand directly.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (trueop02))
		{
		  unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
		  bool all_operand0 = true;
		  bool all_operand1 = true;
		  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
			all_operand1 = false;
		      else
			all_operand0 = false;
		    }
		  if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_binary (VEC_SELECT, mode,
						XEXP (op0, 0), op1);
		  if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_binary (VEC_SELECT, mode,
						XEXP (op0, 1), op1);
		}
	    }
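	  /* For example (illustration): selecting elements {0, 1} from
	     (vec_merge x y (const_int 3)) reads only elements that the
	     mask takes from x, so the VEC_SELECT can be applied to x
	     directly.  */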
	  /* If we have two nested selects that are inverses of each
	     other, replace them with the source operand.  */
	  if (GET_CODE (trueop0) == VEC_SELECT
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    {
	      rtx op0_subop1 = XEXP (trueop0, 1);
	      gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	      gcc_assert (known_eq (XVECLEN (trueop1, 0),
				    GET_MODE_NUNITS (mode)));

	      /* Apply the outer ordering vector to the inner one.  (The inner
		 ordering vector is expressly permitted to be of a different
		 length than the outer one.)  If the result is { 0, 1, ...,
		 n-1 } then the two VEC_SELECTs cancel.  */
	      for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
		{
		  rtx x = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (x))
		    return 0;
		  rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
		  if (!CONST_INT_P (y) || i != INTVAL (y))
		    return 0;
		}
	      return XEXP (trueop0, 0);
	    }
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
			      + GET_MODE_SIZE (op1_mode),
			      GET_MODE_SIZE (mode)));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
	unsigned int n_elts, in_n_elts;
	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
	  {
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;

	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
	/* Try to merge two VEC_SELECTs from the same vector into a single
	   one.  Restrict the transformation to avoid generating a VEC_SELECT
	   with a mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }
  if (mode == GET_MODE (op0)
      && mode == GET_MODE (op1)
      && vec_duplicate_p (op0, &elt0)
      && vec_duplicate_p (op1, &elt1))
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
				       elt0, elt1);
      if (tem)
	return gen_vec_duplicate (mode, tem);
    }

  return 0;
}
/* Return true if binary operation OP distributes over addition in operand
   OPNO, with the other operand being held constant.  OPNO counts from 1.
   (The body below is a reconstruction of the elided original, under the
   assumption that it handles the operators used by the caller.)  */

static bool
distributes_over_addition_p (rtx_code op, int opno)
{
  switch (op)
    {
    case PLUS:
    case MINUS:
      return true;

    case MULT:
      return true;

    case ASHIFT:
      return opno == 1;

    default:
      return false;
    }
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      bool step_ok_p;
      if (CONST_VECTOR_STEPPED_P (op0)
	  && CONST_VECTOR_STEPPED_P (op1))
	/* We can operate directly on the encoding if:

	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
	    implies
	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)

	   Addition and subtraction are the supported operators
	   for which this is true.  */
	step_ok_p = (code == PLUS || code == MINUS);
      else if (CONST_VECTOR_STEPPED_P (op0))
	/* We can operate directly on stepped encodings if:

	     a3 - a2 == a2 - a1
	   implies:
	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)

	   which is true if (x -> x op c) distributes over addition.  */
	step_ok_p = distributes_over_addition_p (code, 1);
      else
	/* Similarly in reverse.  */
	step_ok_p = distributes_over_addition_p (code, 2);
      rtx_vector_builder builder;
      if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }
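  /* For example (illustration): for variable-length vectors, adding the
     stepped constant {0, 1, 2, ...} to the duplicated constant
     {10, 10, 10, ...} operates on the encoded elements only; PLUS keeps
     the series property ((a3 + c) - (a2 + c) == (a2 + c) - (a1 + c)), so
     {10, 11, 12, ...} is built without expanding every element.  */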
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || CONST_FIXED_P (op0)
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || CONST_FIXED_P (op1)))
    {
      /* Both inputs have a constant number of elements, so the result
	 must too.  */
      unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts
	    = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
	  unsigned op1_n_elts
	    = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = CONST_VECTOR_ELT (op1, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    switch (code)
	      {
	      case AND:
		tmp0[i] &= tmp1[i];
		break;
	      case IOR:
		tmp0[i] |= tmp1[i];
		break;
	      case XOR:
		tmp0[i] ^= tmp1[i];
		break;
	      default:
		gcc_unreachable ();
	      }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math is set.  */
	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      wide_int result;
      wi::overflow_type overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens, you
	 just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }

	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }
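  /* For example (illustration): with int_mode == SImode, folding
     (plus (const_int 7) (const_int -3)) goes through wi::add (pop0, pop1)
     above and returns (const_int 4) via immed_wide_int_const, which picks
     the canonical constant representation for the mode.  */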
  /* Handle polynomial integers.  */
  if (NUM_POLY_INT_COEFFS > 1
      && is_a <scalar_int_mode> (mode, &int_mode)
      && poly_int_rtx_p (op0)
      && poly_int_rtx_p (op1))
    {
      poly_wide_int result;
      switch (code)
	{
	case PLUS:
	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
	  break;

	case MINUS:
	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
	  break;

	case MULT:
	  if (CONST_SCALAR_INT_P (op1))
	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
	  else
	    return NULL_RTX;
	  break;

	case ASHIFT:
	  if (CONST_SCALAR_INT_P (op1))
	    {
	      wide_int shift = rtx_mode_t (op1, mode);
	      if (SHIFT_COUNT_TRUNCATED)
		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
		return NULL_RTX;
	      result = wi::to_poly_wide (op0, mode) << shift;
	    }
	  else
	    return NULL_RTX;
	  break;

	case IOR:
	  if (!CONST_SCALAR_INT_P (op1)
	      || !can_ior_p (wi::to_poly_wide (op0, mode),
			     rtx_mode_t (op1, mode), &result))
	    return NULL_RTX;
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }

  return NULL_RTX;
}
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
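/* Note (illustration): because the precedence difference is doubled
   above, it is always even, so a nonzero return value of exactly 1 can
   only come from the REG-versus-REG path.  This lets the caller in
   simplify_plus_minus distinguish a pure register reordering (which does
   not count as canonicalization) from a real canonicalization.  */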
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than testing for specific cases, we do this by a brute-force
   method: apply all possible simplifications until no more changes occur,
   then rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    CASE_CONST_SCALAR_INT:
	    case CONST_POLY_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_poly_int_rtx (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j--
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  we must have
		       something like symbol+offset, i.e. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (poly_int_rtx_p (tem) && lneg)
		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && poly_int_rtx_p (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_poly_int_rtx (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
      ops[0].neg = 0;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */

static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
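/* Illustrative example (commentary only): simplify_plus_minus flattens
   nested sums, so (minus:SI a (minus:SI b c)) expands to the operand
   list {+a, -b, +c}; after sorting and pairwise simplification it is
   rebuilt, e.g. as (plus:SI (minus:SI a b) c).  */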
/* Like simplify_binary_operation, except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands
   must not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    return gen_const_vec_duplicate (mode, val);
	  }
#else
	  return NULL_RTX;
#endif
	}
      /* For a vector comparison with a scalar int result, it is unknown
	 whether the target means a comparison into an integral bitmask,
	 or a comparison where all comparisons true mean const_true_rtx
	 for the whole result, or one where any comparison true means
	 const_true_rtx for the whole result.  For const0_rtx all the
	 cases are the same.  */
      if (VECTOR_MODE_P (cmp_mode)
	  && SCALAR_INT_MODE_P (mode)
	  && tem == const_true_rtx)
	return NULL_RTX;

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
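  /* For example (illustration): the rule above rewrites the unsigned
     overflow test (ltu:SI (plus:SI a (const_int 5)) (const_int 5)) as
     (geu:SI a (const_int -5)), since a + 5 wraps below 5 exactly when
     a is at least -5 viewed as an unsigned value.  */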
  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable
	 expressions, usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  scalar_int_mode int_mode, int_cmp_mode;
  if (code == NE
      && op1 == const0_rtx
      && is_int_mode (mode, &int_mode)
      && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && int_mode != BImode
      && int_cmp_mode != BImode
      && nonzero_bits (op0, int_cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (cmp_mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (cmp_mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction
     or constant folding if x/y is a constant.  */
  if ((code == EQ || code == NE)
      && (op0code == AND || op0code == IOR)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
	 (eq/ne (and (not y) x) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
	{
	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}

      /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
	 (eq/ne (and (not x) y) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
	{
	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
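/* For example (illustration): if constant folding proves
   CMP_LT | CMP_LTU for an operand pair, comparison_result maps LT, LE,
   LTU and LEU to const_true_rtx and GT, GE, GTU and GEU to const0_rtx,
   while EQ yields const0_rtx because CMP_EQ is not set.  */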
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }
  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;
  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, int_mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}
      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val
	      <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val
	      >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val
	      >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val
	      <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X), where OP
   is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO or
   CTZ_DEFINED_VALUE_AT_ZERO respectively; return OP (X) if the expression
   can be simplified to that, or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
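/* For example (illustration): on a target whose CLZ_DEFINED_VALUE_AT_ZERO
   stores 32 for SImode, the RTL equivalent of
   "x == 0 ? 32 : __builtin_clz (x)" collapses to (clz:SI x), because the
   value in the zero arm matches what CLZ already produces at zero.  */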
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */

rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
  poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
  if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
    {
      if (side_effects_p (XEXP (x, 1 - op)))
	return NULL_RTX;

      return XEXP (x, op);
    }
  if (UNARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      if (top0)
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
				   GET_MODE (XEXP (x, 0)));
    }
  if (BINARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      if (top0 || top1)
	{
	  if (COMPARISON_P (x))
	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					    GET_MODE (XEXP (x, 0)) != VOIDmode
					    ? GET_MODE (XEXP (x, 0))
					    : GET_MODE (XEXP (x, 1)),
					    top0 ? top0 : XEXP (x, 0),
					    top1 ? top1 : XEXP (x, 1));
	  else
	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
					top0 ? top0 : XEXP (x, 0),
					top1 ? top1 : XEXP (x, 1));
	}
    }
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
      if (top0 || top1 || top2)
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (XEXP (x, 0)),
				     top0 ? top0 : XEXP (x, 0),
				     top1 ? top1 : XEXP (x, 1),
				     top2 ? top2 : XEXP (x, 2));
    }

  return NULL_RTX;
}
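/* For example (illustration): in
     (vec_merge (plus (vec_merge a b m) c) d m)
   the inner vec_merge sits in operand 0 of the outer one with the same
   mask, so simplify_merge_mask rewrites the addend to a (when b has no
   side effects), giving (vec_merge (plus a c) d m).  */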
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;
  unsigned int n_elts;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (!BITS_BIG_ENDIAN)
	    val >>= op2val;
	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
	  else
	    /* Not enough information to calculate the bit position.  */
	    break;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate the sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
		     != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, int_mode);
	}
      break;
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits;
	  bool ok = true;
	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
	    ok = false;
	  else
	    for (int i = 0; i < nunits; ++i)
	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
		{
		  ok = false;
		  break;
		}
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}
      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2)
	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
	{
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask)
		      && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a (const_int 1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
	     (const_int N))
	     with (vec_concat (X) (B)) if N == 1 or
	     (vec_concat (A) (X)) if N == 2.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == CONST_VECTOR
	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z))
	     (const_int N))
	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == VEC_CONCAT
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = XEXP (op1, 2 - sel);
	      rtx otherop = XEXP (op1, sel - 1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      /* Don't want to throw away the other part of the vec_concat if
		 it has side-effects.  */
	      if (!side_effects_p (otherop))
		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace:

	      (vec_merge:outer (vec_duplicate:outer x:inner)
			       (subreg:outer y:inner 0)
			       (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.

	     Implicitly, this means we have a paradoxical subreg, but such
	     a check is cheap, so make it anyway.

	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == SUBREG
	      && GET_MODE (op1) == GET_MODE (op0)
	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
	      && paradoxical_subreg_p (op1)
	      && subreg_lowpart_p (op1)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = SUBREG_REG (op1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }

	  /* Same as above but with switched operands:
	     Replace (vec_merge:outer (subreg:outer x:inner 0)
				      (vec_duplicate:outer y:inner)
				      (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
	  if (GET_CODE (op1) == VEC_DUPLICATE
	      && GET_CODE (op0) == SUBREG
	      && GET_MODE (op0) == GET_MODE (op1)
	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
	      && paradoxical_subreg_p (op0)
	      && subreg_lowpart_p (op0)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = SUBREG_REG (op0);
	      rtx newop1 = XEXP (op1, 0);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
6293 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6295 with (vec_concat x y) or (vec_concat y x) depending on value
6297 if (GET_CODE (op0
) == VEC_DUPLICATE
6298 && GET_CODE (op1
) == VEC_DUPLICATE
6299 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6300 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6301 && IN_RANGE (sel
, 1, 2))
6303 rtx newop0
= XEXP (op0
, 0);
6304 rtx newop1
= XEXP (op1
, 0);
6306 std::swap (newop0
, newop1
);
6308 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6312 if (rtx_equal_p (op0
, op1
)
6313 && !side_effects_p (op2
) && !side_effects_p (op1
))
6316 if (!side_effects_p (op2
))
6319 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6321 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6323 return simplify_gen_ternary (code
, mode
, mode
,
6325 top1
? top1
: op1
, op2
);
/* Try to calculate NUM_BYTES bytes of the target memory image of X,
   starting at byte FIRST_BYTE.  Return true on success and add the
   bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
   that the bytes follow target memory order.  Leave BYTES unmodified
   on failure.

   MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
   BYTES before calling this function.  */
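/* As an illustration (the exact bytes depend on the target's endianness
   and assume BITS_PER_UNIT == 8): encoding (const_int 0x0102) in SImode
   with FIRST_BYTE == 0 and NUM_BYTES == 4 pushes { 02, 01, 00, 00 } on
   a little-endian target and { 00, 00, 01, 02 } on a big-endian one.  */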
bool
native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
		   unsigned int first_byte, unsigned int num_bytes)
{
  /* Check the mode is sensible.  */
  gcc_assert (GET_MODE (x) == VOIDmode
	      ? is_a <scalar_int_mode> (mode)
	      : mode == GET_MODE (x));

  if (GET_CODE (x) == CONST_VECTOR)
    {
      /* CONST_VECTOR_ELT follows target memory order, so no shuffling
	 is necessary.  The only complication is that MODE_VECTOR_BOOL
	 vectors can have several elements per byte.  */
      unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
						   GET_MODE_NUNITS (mode));
      unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
      if (elt_bits < BITS_PER_UNIT)
	{
	  /* This is the only case in which elements can be smaller than
	     a byte.  */
	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
	  for (unsigned int i = 0; i < num_bytes; ++i)
	    {
	      target_unit value = 0;
	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
		{
		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
		  elt += 1;
		}
	      bytes.quick_push (value);
	    }
	  return true;
	}

      unsigned int start = bytes.length ();
      unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
      /* Make FIRST_BYTE relative to ELT.  */
      first_byte %= elt_bytes;
      while (num_bytes > 0)
	{
	  /* Work out how many bytes we want from element ELT.  */
	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
	  if (!native_encode_rtx (GET_MODE_INNER (mode),
				  CONST_VECTOR_ELT (x, elt), bytes,
				  first_byte, chunk_bytes))
	    {
	      bytes.truncate (start);
	      return false;
	    }
	  elt += 1;
	  first_byte = 0;
	  num_bytes -= chunk_bytes;
	}
      return true;
    }

  /* All subsequent cases are limited to scalars.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  /* Make sure that the region is in range.  */
  unsigned int end_byte = first_byte + num_bytes;
  unsigned int mode_bytes = GET_MODE_SIZE (smode);
  gcc_assert (end_byte <= mode_bytes);

  if (CONST_SCALAR_INT_P (x))
    {
      /* The target memory layout is affected by both BYTES_BIG_ENDIAN
	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
	 position of each byte.  */
      rtx_mode_t value (x, smode);
      wide_int_ref value_wi (value);
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  /* Operate directly on the encoding rather than using
	     wi::extract_uhwi, so that we preserve the sign or zero
	     extension for modes that are not a whole number of bits in
	     size.  (Zero extension is only used for the combination of
	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
	  bytes.quick_push (uhwi >> shift);
	}
      return true;
    }
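  /* To make the loop above concrete (illustrative only): for a 4-byte
     value on a little-endian target, subreg_size_lsb (1, 4, byte)
     yields lsb positions 0, 8, 16 and 24 for bytes 0..3, so each
     pushed byte is the value shifted right by 8 * byte; big-endian
     targets visit the same positions in the reverse byte order.  */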
  if (CONST_DOUBLE_P (x))
    {
      /* real_to_target produces an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  Each of these integers is then laid out
	 in target memory as any other integer would be.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
	 handling above.  */
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
	}
      return true;
    }

  if (GET_CODE (x) == CONST_FIXED)
    {
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    {
	      lsb -= HOST_BITS_PER_WIDE_INT;
	      piece = CONST_FIXED_VALUE_HIGH (x);
	    }
	  bytes.quick_push (piece >> lsb);
	}
      return true;
    }

  return false;
}
/* Read a vector of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  The vector is known to be encodable using
   NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
   and BYTES is known to have enough bytes to supply NPATTERNS *
   NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
   BITS_PER_UNIT bits and the bytes are in target memory order.

   Return the vector on success, otherwise return NULL_RTX.  */

rtx
native_decode_vector_rtx (machine_mode mode, vec<target_unit> bytes,
			  unsigned int first_byte, unsigned int npatterns,
			  unsigned int nelts_per_pattern)
{
  rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);

  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
					       GET_MODE_NUNITS (mode));
  if (elt_bits < BITS_PER_UNIT)
    {
      /* This is the only case in which elements can be smaller than a byte.
	 Element 0 is always in the lsb of the containing byte.  */
      gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
	{
	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
	  unsigned int lsb = bit_index % BITS_PER_UNIT;
	  builder.quick_push (bytes[byte_index] & (1 << lsb)
			      ? CONST1_RTX (BImode)
			      : CONST0_RTX (BImode));
	}
    }
  else
    {
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
	{
	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
	  if (!x)
	    return NULL_RTX;
	  builder.quick_push (x);
	  first_byte += elt_bits / BITS_PER_UNIT;
	}
    }
  return builder.build ();
}
/* Read an rtx of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
   bits and the bytes are in target memory order.  The image has enough
   values to specify all bytes of MODE.

   Return the rtx on success, otherwise return NULL_RTX.  */
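/* For instance (illustrative, assuming BITS_PER_UNIT == 8): decoding
   SImode from bytes { 78, 56, 34, 12 } at FIRST_BYTE == 0 yields
   (const_int 0x12345678) on a little-endian target, since the loop
   below inserts the most significant byte first.  */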
rtx
native_decode_rtx (machine_mode mode, vec<target_unit> bytes,
		   unsigned int first_byte)
{
  if (VECTOR_MODE_P (mode))
    {
      /* If we know at compile time how many elements there are,
	 pull each element directly from BYTES.  */
      unsigned int nelts;
      if (GET_MODE_NUNITS (mode).is_constant (&nelts))
	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
      return NULL_RTX;
    }

  scalar_int_mode imode;
  if (is_a <scalar_int_mode> (mode, &imode)
      && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      /* Pull the bytes msb first, so that we can use simple
	 shift-and-insert wide_int operations.  */
      unsigned int size = GET_MODE_SIZE (imode);
      wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
      for (unsigned int i = 0; i < size; ++i)
	{
	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
	  /* Always constant because the inputs are.  */
	  unsigned int subbyte
	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
	  result <<= BITS_PER_UNIT;
	  result |= bytes[first_byte + subbyte];
	}
      return immed_wide_int_const (result, imode);
    }

  scalar_float_mode fmode;
  if (is_a <scalar_float_mode> (mode, &fmode))
    {
      /* We need to build an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
      memset (el32, 0, num_el32 * sizeof (long));

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      unsigned int mode_bytes = GET_MODE_SIZE (fmode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
	}
      REAL_VALUE_TYPE r;
      real_from_target (&r, el32, fmode);
      return const_double_from_real_value (r, fmode);
    }

  if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      scalar_mode smode = as_a <scalar_mode> (mode);
      FIXED_VALUE_TYPE f;
      f.data.low = 0;
      f.data.high = 0;
      f.mode = smode;

      unsigned int mode_bytes = GET_MODE_SIZE (smode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
	  else
	    f.data.low |= unit << lsb;
	}
      return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
    }

  return NULL_RTX;
}
/* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
   is to convert a runtime BYTE value into a constant one.  */
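/* For example (illustrative): if X is a duplicated vector, every
   sequence repeats the same values, so any whole number of sequences
   can be dropped from BYTE and the remainder used as a constant
   offset into the first sequence.  */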
static poly_uint64
simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
{
  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  machine_mode mode = GET_MODE (x);
  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
					       GET_MODE_NUNITS (mode));
  /* The number of bits needed to encode one element from each pattern.  */
  unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;

  /* Identify the start point in terms of a sequence number and a byte offset
     within that sequence.  */
  poly_uint64 first_sequence;
  unsigned HOST_WIDE_INT subbit;
  if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
		       &first_sequence, &subbit))
    {
      unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
      if (nelts_per_pattern == 1)
	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
	   doesn't matter.  */
	byte = subbit / BITS_PER_UNIT;
      else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
	{
	  /* The subreg drops the first element from each pattern and
	     only uses the second element.  Find the first sequence
	     that starts on a byte boundary.  */
	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
	  byte = subbit / BITS_PER_UNIT;
	}
    }
  return byte;
}
/* Subroutine of simplify_subreg in which:

   - X is known to be a CONST_VECTOR
   - OUTERMODE is known to be a vector mode

   Try to handle the subreg by operating on the CONST_VECTOR encoding
   rather than on each individual element of the CONST_VECTOR.

   Return the simplified subreg on success, otherwise return NULL_RTX.  */
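/* As a rough illustration (one possible case, not a full description):
   taking the low V2SI half of a V4SI constant that uses two interleaved
   patterns can reuse a two-pattern encoding for the result, so only the
   leading encoded elements need to be re-read rather than all four
   elements individually.  */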
static rtx
simplify_const_vector_subreg (machine_mode outermode, rtx x,
			      machine_mode innermode, unsigned int first_byte)
{
  /* Paradoxical subregs of vectors have dubious semantics.  */
  if (paradoxical_subreg_p (outermode, innermode))
    return NULL_RTX;

  /* We can only preserve the semantics of a stepped pattern if the new
     vector element is the same as the original one.  */
  if (CONST_VECTOR_STEPPED_P (x)
      && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
    return NULL_RTX;

  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  unsigned int x_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (innermode),
			   GET_MODE_NUNITS (innermode));
  unsigned int out_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (outermode),
			   GET_MODE_NUNITS (outermode));

  /* The number of bits needed to encode one element from every pattern
     of the original vector.  */
  unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;

  /* The number of bits needed to encode one element from every pattern
     of the result.  */
  unsigned int out_sequence_bits
    = least_common_multiple (x_sequence_bits, out_elt_bits);

  /* Work out the number of interleaved patterns in the output vector
     and the number of encoded elements per pattern.  */
  unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
  unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);

  /* The encoding scheme requires the number of elements to be a multiple
     of the number of patterns, so that each pattern appears at least once
     and so that the same number of elements appear from each pattern.  */
  bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
  unsigned int const_nunits;
  if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
      && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
    {
      /* Either the encoding is invalid, or applying it would give us
	 more elements than we need.  Just encode each element directly.  */
      out_npatterns = const_nunits;
      nelts_per_pattern = 1;
    }
  else if (!ok_p)
    return NULL_RTX;

  /* Get enough bytes of X to form the new encoding.  */
  unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
  unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
  auto_vec<target_unit, 128> buffer (buffer_bytes);
  if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
    return NULL_RTX;

  /* Reencode the bytes as OUTERMODE.  */
  return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
				   nelts_per_pattern);
}
/* Try to simplify a subreg of a constant by encoding the subreg region
   as a sequence of target bytes and reading them back in the new mode.
   Return the new value on success, otherwise return null.

   The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
   and byte offset FIRST_BYTE.  */
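/* For example (illustrative, little-endian target):
   (subreg:QI (const_int 0x1234) 0) with HImode inner value encodes the
   bytes { 34, 12 } and decodes byte 0 as QImode, giving
   (const_int 0x34).  */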
static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx x,
		       machine_mode innermode, unsigned int first_byte)
{
  unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
  auto_vec<target_unit, 128> buffer (buffer_bytes);

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
    return x;

  /* Paradoxical subregs read undefined values for bytes outside of the
     inner value.  However, we have traditionally always sign-extended
     integer constants and zero-extended others.  */
  unsigned int inner_bytes = buffer_bytes;
  if (paradoxical_subreg_p (outermode, innermode))
    {
      if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
	return NULL_RTX;

      target_unit filler = 0;
      if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
	filler = -1;

      /* Add any leading bytes due to big-endian layout.  The number of
	 bytes must be constant because both modes have constant size.  */
      unsigned int leading_bytes
	= -byte_lowpart_offset (outermode, innermode).to_constant ();
      for (unsigned int i = 0; i < leading_bytes; ++i)
	buffer.quick_push (filler);

      if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
	return NULL_RTX;

      /* Add any trailing bytes due to little-endian layout.  */
      while (buffer.length () < buffer_bytes)
	buffer.quick_push (filler);
    }
  else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
    return NULL_RTX;
  return native_decode_rtx (outermode, buffer, 0);
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (GET_CODE (op) == CONST_VECTOR)
    byte = simplify_const_vector_byte_offset (op, byte);

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      unsigned HOST_WIDE_INT cbyte;
      if (byte.is_constant (&cbyte))
	{
	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
	    {
	      rtx tmp = simplify_const_vector_subreg (outermode, op,
						      innermode, cbyte);
	      if (tmp)
		return tmp;
	    }

	  fixed_size_mode fs_outermode;
	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
	}
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial register anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Simplify
	(subreg (vec_merge (X)
			   (vector)
			   (const_int ((1 << N) | M)))
		(N * sizeof (outermode)))
     to
	(subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
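  /* Concretely (an illustrative instance): selector bit N set means
     element N comes from X, so on a little-endian target
     (subreg:SI (vec_merge:V4SI (X) (Y) (const_int 5)) 0) can read its
     SImode lowpart directly from X, because bit 0 of the selector
     is set.  */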
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  /* If OP is a vector comparison and the subreg is not changing the
     number of elements or the size of the elements, change the result
     of the comparison to the new mode.  */
  if (COMPARISON_P (op)
      && VECTOR_MODE_P (outermode)
      && VECTOR_MODE_P (innermode)
      && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
      && known_eq (GET_MODE_UNIT_SIZE (outermode),
		   GET_MODE_UNIT_SIZE (innermode)))
    return simplify_gen_relational (GET_CODE (op), outermode, innermode,
				    XEXP (op, 0), XEXP (op, 1));
  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */
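/* For instance (illustrative): lowpart_subreg (QImode, x, SImode)
   requests the least significant byte of X; it folds to a constant when
   X is one, and otherwise yields (subreg:QI x 0) on little-endian
   targets or (subreg:QI x 3) on big-endian ones.  */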
rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      rtx vector_reg = make_test_reg (mode);
      for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
	{
	  if (i >= HOST_BITS_PER_WIDE_INT)
	    break;
	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
	  ASSERT_RTX_EQ (scalar_reg,
			 simplify_gen_subreg (inner_mode, vm,
					      mode, offset));
	}
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
		     simplify_unary_operation (VEC_DUPLICATE, mode,
					       nbuilder.build (),
					       narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_unary_operation (NEG, mode, series_0_m1, mode));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (PLUS, mode, series_0_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
		 simplify_binary_operation (PLUS, mode, series_1_1,
					    series_0_2));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_binary_operation (MINUS, mode, series_1_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, series_1_3,
					    series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
					     series_0_1));

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (ASHIFT, mode, series_0_1,
					    CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
					     series_0_1));
}
/* Verify simplify_merge_mask works correctly.  */

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
		 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
		 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
		 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
		 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
		 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
		 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
		 simplify_rtx (nvm));
}
/* Test subregs of integer vector constant X, trying elements in
   the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   where NELTS is the number of elements in X.  Subregs involving
   elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */

static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
			   unsigned int first_valid = 0)
{
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
	continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
	{
	  /* Test subregs in which the outer mode is a smaller,
	     constant-sized vector of the same element type.  */
	  unsigned int limit
	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
	    {
	      rtx expected = NULL_RTX;
	      if (elt >= first_valid)
		{
		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
		  for (unsigned int i = 0; i < outer_nunits; ++i)
		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
		  expected = builder.build ();
		}
	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
	      ASSERT_RTX_EQ (expected,
			     simplify_subreg (outer_mode, x,
					      inner_mode, byte));
	    }
	}
      else if (known_eq (GET_MODE_SIZE (outer_mode),
			 GET_MODE_SIZE (inner_mode))
	       && known_eq (elt_bias, 0U)
	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
		   || known_eq (GET_MODE_BITSIZE (outer_mode),
				GET_MODE_NUNITS (outer_mode)))
	       && (!FLOAT_MODE_P (outer_mode)
		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
		       == GET_MODE_UNIT_PRECISION (outer_mode)))
	       && (GET_MODE_SIZE (inner_mode).is_constant ()
		   || !CONST_VECTOR_STEPPED_P (x)))
	{
	  /* Try converting to OUTER_MODE and back.  */
	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
	  ASSERT_TRUE (outer_x != NULL_RTX);
	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
					     outer_mode, 0));
	}
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range.  */
      unsigned int limit
	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
	{
	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
	  rtx expected = NULL_RTX;
	  if (elt >= first_valid)
	    {
	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
	      if (BYTES_BIG_ENDIAN)
		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
	      wide_int shifted_elt
		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
	      expected = immed_wide_int_const (shifted_elt, QImode);
	    }
	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
	  ASSERT_RTX_EQ (expected,
			 simplify_subreg (QImode, x, inner_mode, byte));
	}
    }
}
/* Test constant subregs of integer vector mode INNER_MODE, using 1
   element per pattern.  */

static void
test_vector_subregs_repeating (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 8);

  rtx_vector_builder builder (inner_mode, count, 1);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (8 - i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 2
   elements per pattern.  */

static void
test_vector_subregs_fore_back (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 4);

  rtx_vector_builder builder (inner_mode, count, 2);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (-(int) i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits, count);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 3
   elements per pattern.  */

static void
test_vector_subregs_stepped (machine_mode inner_mode)
{
  /* Build { 0, 1, 2, 3, ... }.  */
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  rtx_vector_builder builder (inner_mode, 1, 3);
  for (unsigned int i = 0; i < 3; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
}
/* Test constant subregs of integer vector mode INNER_MODE.  */

static void
test_vector_subregs (machine_mode inner_mode)
{
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    {
	      test_vector_ops_series (mode, scalar_reg);
	      test_vector_subregs (mode);
	    }
	  test_vec_merge (mode);
	}
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */