1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
49 static bool plus_minus_operand_p (const_rtx
);
50 static rtx
simplify_plus_minus (enum rtx_code
, machine_mode
, rtx
, rtx
);
51 static rtx
simplify_associative_operation (enum rtx_code
, machine_mode
,
53 static rtx
simplify_relational_operation_1 (enum rtx_code
, machine_mode
,
54 machine_mode
, rtx
, rtx
);
55 static rtx
simplify_unary_operation_1 (enum rtx_code
, machine_mode
, rtx
);
56 static rtx
simplify_binary_operation_1 (enum rtx_code
, machine_mode
,
59 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
62 neg_poly_int_rtx (machine_mode mode
, const_rtx i
)
64 return immed_wide_int_const (-wi::to_poly_wide (i
, mode
), mode
);
67 /* Test whether expression, X, is an immediate constant that represents
68 the most significant bit of machine mode MODE. */
71 mode_signbit_p (machine_mode mode
, const_rtx x
)
73 unsigned HOST_WIDE_INT val
;
75 scalar_int_mode int_mode
;
77 if (!is_int_mode (mode
, &int_mode
))
80 width
= GET_MODE_PRECISION (int_mode
);
84 if (width
<= HOST_BITS_PER_WIDE_INT
87 #if TARGET_SUPPORTS_WIDE_INT
88 else if (CONST_WIDE_INT_P (x
))
91 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
92 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
94 for (i
= 0; i
< elts
- 1; i
++)
95 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
97 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
98 width
%= HOST_BITS_PER_WIDE_INT
;
100 width
= HOST_BITS_PER_WIDE_INT
;
103 else if (width
<= HOST_BITS_PER_DOUBLE_INT
104 && CONST_DOUBLE_AS_INT_P (x
)
105 && CONST_DOUBLE_LOW (x
) == 0)
107 val
= CONST_DOUBLE_HIGH (x
);
108 width
-= HOST_BITS_PER_WIDE_INT
;
112 /* X is not an integer constant. */
115 if (width
< HOST_BITS_PER_WIDE_INT
)
116 val
&= (HOST_WIDE_INT_1U
<< width
) - 1;
117 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
120 /* Test whether VAL is equal to the most significant bit of mode MODE
121 (after masking with the mode mask of MODE). Returns false if the
122 precision of MODE is too large to handle. */
125 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
128 scalar_int_mode int_mode
;
130 if (!is_int_mode (mode
, &int_mode
))
133 width
= GET_MODE_PRECISION (int_mode
);
134 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
137 val
&= GET_MODE_MASK (int_mode
);
138 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
141 /* Test whether the most significant bit of mode MODE is set in VAL.
142 Returns false if the precision of MODE is too large to handle. */
144 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
148 scalar_int_mode int_mode
;
149 if (!is_int_mode (mode
, &int_mode
))
152 width
= GET_MODE_PRECISION (int_mode
);
153 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
156 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
160 /* Test whether the most significant bit of mode MODE is clear in VAL.
161 Returns false if the precision of MODE is too large to handle. */
163 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
167 scalar_int_mode int_mode
;
168 if (!is_int_mode (mode
, &int_mode
))
171 width
= GET_MODE_PRECISION (int_mode
);
172 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
175 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
183 simplify_gen_binary (enum rtx_code code
, machine_mode mode
, rtx op0
,
188 /* If this simplifies, do it. */
189 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0
, op1
))
196 std::swap (op0
, op1
);
198 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
204 avoid_constant_pool_reference (rtx x
)
208 poly_int64 offset
= 0;
210 switch (GET_CODE (x
))
216 /* Handle float extensions of constant pool references. */
218 c
= avoid_constant_pool_reference (tmp
);
219 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
220 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c
),
228 if (GET_MODE (x
) == BLKmode
)
233 /* Call target hook to avoid the effects of -fpic etc.... */
234 addr
= targetm
.delegitimize_address (addr
);
236 /* Split the address into a base and integer offset. */
237 addr
= strip_offset (addr
, &offset
);
239 if (GET_CODE (addr
) == LO_SUM
)
240 addr
= XEXP (addr
, 1);
242 /* If this is a constant pool reference, we can turn it into its
243 constant and hope that simplifications happen. */
244 if (GET_CODE (addr
) == SYMBOL_REF
245 && CONSTANT_POOL_ADDRESS_P (addr
))
247 c
= get_pool_constant (addr
);
248 cmode
= get_pool_mode (addr
);
250 /* If we're accessing the constant in a different mode than it was
251 originally stored, attempt to fix that up via subreg simplifications.
252 If that fails we have no choice but to return the original memory. */
253 if (known_eq (offset
, 0) && cmode
== GET_MODE (x
))
255 else if (known_in_range_p (offset
, 0, GET_MODE_SIZE (cmode
)))
257 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
258 if (tem
&& CONSTANT_P (tem
))
266 /* Simplify a MEM based on its attributes. This is the default
267 delegitimize_address target hook, and it's recommended that every
268 overrider call it. */
271 delegitimize_mem_from_attrs (rtx x
)
273 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
274 use their base addresses as equivalent. */
277 && MEM_OFFSET_KNOWN_P (x
))
279 tree decl
= MEM_EXPR (x
);
280 machine_mode mode
= GET_MODE (x
);
281 poly_int64 offset
= 0;
283 switch (TREE_CODE (decl
))
293 case ARRAY_RANGE_REF
:
298 case VIEW_CONVERT_EXPR
:
300 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
302 int unsignedp
, reversep
, volatilep
= 0;
305 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
306 &unsignedp
, &reversep
, &volatilep
);
307 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
308 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
309 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
312 offset
+= bytepos
+ toffset_val
;
318 && mode
== GET_MODE (x
)
320 && (TREE_STATIC (decl
)
321 || DECL_THREAD_LOCAL_P (decl
))
322 && DECL_RTL_SET_P (decl
)
323 && MEM_P (DECL_RTL (decl
)))
327 offset
+= MEM_OFFSET (x
);
329 newx
= DECL_RTL (decl
);
333 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
334 poly_int64 n_offset
, o_offset
;
336 /* Avoid creating a new MEM needlessly if we already had
337 the same address. We do if there's no OFFSET and the
338 old address X is identical to NEWX, or if X is of the
339 form (plus NEWX OFFSET), or the NEWX is of the form
340 (plus Y (const_int Z)) and X is that with the offset
341 added: (plus Y (const_int Z+OFFSET)). */
342 n
= strip_offset (n
, &n_offset
);
343 o
= strip_offset (o
, &o_offset
);
344 if (!(known_eq (o_offset
, n_offset
+ offset
)
345 && rtx_equal_p (o
, n
)))
346 x
= adjust_address_nv (newx
, mode
, offset
);
348 else if (GET_MODE (x
) == GET_MODE (newx
)
349 && known_eq (offset
, 0))
357 /* Make a unary operation by first seeing if it folds and otherwise making
358 the specified operation. */
361 simplify_gen_unary (enum rtx_code code
, machine_mode mode
, rtx op
,
362 machine_mode op_mode
)
366 /* If this simplifies, use it. */
367 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
370 return gen_rtx_fmt_e (code
, mode
, op
);
373 /* Likewise for ternary operations. */
376 simplify_gen_ternary (enum rtx_code code
, machine_mode mode
,
377 machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
381 /* If this simplifies, use it. */
382 if ((tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
383 op0
, op1
, op2
)) != 0)
386 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
389 /* Likewise, for relational operations.
390 CMP_MODE specifies mode comparison is done in. */
393 simplify_gen_relational (enum rtx_code code
, machine_mode mode
,
394 machine_mode cmp_mode
, rtx op0
, rtx op1
)
398 if ((tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
402 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
405 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
406 and simplify the result. If FN is non-NULL, call this callback on each
407 X, if it returns non-NULL, replace X with its return value and simplify the
411 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
412 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
414 enum rtx_code code
= GET_CODE (x
);
415 machine_mode mode
= GET_MODE (x
);
416 machine_mode op_mode
;
418 rtx op0
, op1
, op2
, newx
, op
;
422 if (__builtin_expect (fn
!= NULL
, 0))
424 newx
= fn (x
, old_rtx
, data
);
428 else if (rtx_equal_p (x
, old_rtx
))
429 return copy_rtx ((rtx
) data
);
431 switch (GET_RTX_CLASS (code
))
435 op_mode
= GET_MODE (op0
);
436 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
437 if (op0
== XEXP (x
, 0))
439 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
443 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
444 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
445 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
447 return simplify_gen_binary (code
, mode
, op0
, op1
);
450 case RTX_COMM_COMPARE
:
453 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
454 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
455 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
456 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
458 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
461 case RTX_BITFIELD_OPS
:
463 op_mode
= GET_MODE (op0
);
464 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
465 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
466 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
467 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
469 if (op_mode
== VOIDmode
)
470 op_mode
= GET_MODE (op0
);
471 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
476 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
477 if (op0
== SUBREG_REG (x
))
479 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
480 GET_MODE (SUBREG_REG (x
)),
482 return op0
? op0
: x
;
489 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
490 if (op0
== XEXP (x
, 0))
492 return replace_equiv_address_nv (x
, op0
);
494 else if (code
== LO_SUM
)
496 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
497 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
499 /* (lo_sum (high x) y) -> y where x and y have the same base. */
500 if (GET_CODE (op0
) == HIGH
)
502 rtx base0
, base1
, offset0
, offset1
;
503 split_const (XEXP (op0
, 0), &base0
, &offset0
);
504 split_const (op1
, &base1
, &offset1
);
505 if (rtx_equal_p (base0
, base1
))
509 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
511 return gen_rtx_LO_SUM (mode
, op0
, op1
);
520 fmt
= GET_RTX_FORMAT (code
);
521 for (i
= 0; fmt
[i
]; i
++)
526 newvec
= XVEC (newx
, i
);
527 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
529 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
531 if (op
!= RTVEC_ELT (vec
, j
))
535 newvec
= shallow_copy_rtvec (vec
);
537 newx
= shallow_copy_rtx (x
);
538 XVEC (newx
, i
) = newvec
;
540 RTVEC_ELT (newvec
, j
) = op
;
548 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
549 if (op
!= XEXP (x
, i
))
552 newx
= shallow_copy_rtx (x
);
561 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
562 resulting RTX. Return a new RTX which is as simplified as possible. */
565 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
567 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
570 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
571 Only handle cases where the truncated value is inherently an rvalue.
573 RTL provides two ways of truncating a value:
575 1. a lowpart subreg. This form is only a truncation when both
576 the outer and inner modes (here MODE and OP_MODE respectively)
577 are scalar integers, and only then when the subreg is used as
580 It is only valid to form such truncating subregs if the
581 truncation requires no action by the target. The onus for
582 proving this is on the creator of the subreg -- e.g. the
583 caller to simplify_subreg or simplify_gen_subreg -- and typically
584 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
586 2. a TRUNCATE. This form handles both scalar and compound integers.
588 The first form is preferred where valid. However, the TRUNCATE
589 handling in simplify_unary_operation turns the second form into the
590 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
591 so it is generally safe to form rvalue truncations using:
593 simplify_gen_unary (TRUNCATE, ...)
595 and leave simplify_unary_operation to work out which representation
598 Because of the proof requirements on (1), simplify_truncation must
599 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
600 regardless of whether the outer truncation came from a SUBREG or a
601 TRUNCATE. For example, if the caller has proven that an SImode
606 is a no-op and can be represented as a subreg, it does not follow
607 that SImode truncations of X and Y are also no-ops. On a target
608 like 64-bit MIPS that requires SImode values to be stored in
609 sign-extended form, an SImode truncation of:
611 (and:DI (reg:DI X) (const_int 63))
613 is trivially a no-op because only the lower 6 bits can be set.
614 However, X is still an arbitrary 64-bit number and so we cannot
615 assume that truncating it too is a no-op. */
618 simplify_truncation (machine_mode mode
, rtx op
,
619 machine_mode op_mode
)
621 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
622 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
623 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
625 gcc_assert (precision
<= op_precision
);
627 /* Optimize truncations of zero and sign extended values. */
628 if (GET_CODE (op
) == ZERO_EXTEND
629 || GET_CODE (op
) == SIGN_EXTEND
)
631 /* There are three possibilities. If MODE is the same as the
632 origmode, we can omit both the extension and the subreg.
633 If MODE is not larger than the origmode, we can apply the
634 truncation without the extension. Finally, if the outermode
635 is larger than the origmode, we can just extend to the appropriate
637 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
638 if (mode
== origmode
)
640 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
641 return simplify_gen_unary (TRUNCATE
, mode
,
642 XEXP (op
, 0), origmode
);
644 return simplify_gen_unary (GET_CODE (op
), mode
,
645 XEXP (op
, 0), origmode
);
648 /* If the machine can perform operations in the truncated mode, distribute
649 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
650 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
652 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
653 && (GET_CODE (op
) == PLUS
654 || GET_CODE (op
) == MINUS
655 || GET_CODE (op
) == MULT
))
657 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
660 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
662 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
666 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
667 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
668 the outer subreg is effectively a truncation to the original mode. */
669 if ((GET_CODE (op
) == LSHIFTRT
670 || GET_CODE (op
) == ASHIFTRT
)
671 /* Ensure that OP_MODE is at least twice as wide as MODE
672 to avoid the possibility that an outer LSHIFTRT shifts by more
673 than the sign extension's sign_bit_copies and introduces zeros
674 into the high bits of the result. */
675 && 2 * precision
<= op_precision
676 && CONST_INT_P (XEXP (op
, 1))
677 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
678 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
679 && UINTVAL (XEXP (op
, 1)) < precision
)
680 return simplify_gen_binary (ASHIFTRT
, mode
,
681 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
683 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
684 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
685 the outer subreg is effectively a truncation to the original mode. */
686 if ((GET_CODE (op
) == LSHIFTRT
687 || GET_CODE (op
) == ASHIFTRT
)
688 && CONST_INT_P (XEXP (op
, 1))
689 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
690 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
691 && UINTVAL (XEXP (op
, 1)) < precision
)
692 return simplify_gen_binary (LSHIFTRT
, mode
,
693 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
695 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
696 to (ashift:QI (x:QI) C), where C is a suitable small constant and
697 the outer subreg is effectively a truncation to the original mode. */
698 if (GET_CODE (op
) == ASHIFT
699 && CONST_INT_P (XEXP (op
, 1))
700 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
701 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
702 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
703 && UINTVAL (XEXP (op
, 1)) < precision
)
704 return simplify_gen_binary (ASHIFT
, mode
,
705 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
707 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
708 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
710 if (GET_CODE (op
) == AND
711 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
712 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
713 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
714 && CONST_INT_P (XEXP (op
, 1)))
716 rtx op0
= (XEXP (XEXP (op
, 0), 0));
717 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
718 rtx mask_op
= XEXP (op
, 1);
719 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
720 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
722 if (shift
< precision
723 /* If doing this transform works for an X with all bits set,
724 it works for any X. */
725 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
726 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
727 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
728 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
730 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
731 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
735 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
736 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
738 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
739 && REG_P (XEXP (op
, 0))
740 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
741 && CONST_INT_P (XEXP (op
, 1))
742 && CONST_INT_P (XEXP (op
, 2)))
744 rtx op0
= XEXP (op
, 0);
745 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
746 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
747 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
749 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
752 pos
-= op_precision
- precision
;
753 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
754 XEXP (op
, 1), GEN_INT (pos
));
757 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
759 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
761 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
762 XEXP (op
, 1), XEXP (op
, 2));
766 /* Recognize a word extraction from a multi-word subreg. */
767 if ((GET_CODE (op
) == LSHIFTRT
768 || GET_CODE (op
) == ASHIFTRT
)
769 && SCALAR_INT_MODE_P (mode
)
770 && SCALAR_INT_MODE_P (op_mode
)
771 && precision
>= BITS_PER_WORD
772 && 2 * precision
<= op_precision
773 && CONST_INT_P (XEXP (op
, 1))
774 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
775 && UINTVAL (XEXP (op
, 1)) < op_precision
)
777 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
778 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
779 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
781 ? byte
- shifted_bytes
782 : byte
+ shifted_bytes
));
785 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
786 and try replacing the TRUNCATE and shift with it. Don't do this
787 if the MEM has a mode-dependent address. */
788 if ((GET_CODE (op
) == LSHIFTRT
789 || GET_CODE (op
) == ASHIFTRT
)
790 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
791 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
792 && MEM_P (XEXP (op
, 0))
793 && CONST_INT_P (XEXP (op
, 1))
794 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
795 && INTVAL (XEXP (op
, 1)) > 0
796 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
797 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
798 MEM_ADDR_SPACE (XEXP (op
, 0)))
799 && ! MEM_VOLATILE_P (XEXP (op
, 0))
800 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
801 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
803 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
804 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
805 return adjust_address_nv (XEXP (op
, 0), int_mode
,
807 ? byte
- shifted_bytes
808 : byte
+ shifted_bytes
));
811 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
812 (OP:SI foo:SI) if OP is NEG or ABS. */
813 if ((GET_CODE (op
) == ABS
814 || GET_CODE (op
) == NEG
)
815 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
816 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
817 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
818 return simplify_gen_unary (GET_CODE (op
), mode
,
819 XEXP (XEXP (op
, 0), 0), mode
);
821 /* (truncate:A (subreg:B (truncate:C X) 0)) is
823 if (GET_CODE (op
) == SUBREG
824 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
825 && SCALAR_INT_MODE_P (op_mode
)
826 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
827 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
828 && subreg_lowpart_p (op
))
830 rtx inner
= XEXP (SUBREG_REG (op
), 0);
831 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
832 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
835 /* If subreg above is paradoxical and C is narrower
836 than A, return (subreg:A (truncate:C X) 0). */
837 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
840 /* (truncate:A (truncate:B X)) is (truncate:A X). */
841 if (GET_CODE (op
) == TRUNCATE
)
842 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
843 GET_MODE (XEXP (op
, 0)));
845 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
847 if (GET_CODE (op
) == IOR
848 && SCALAR_INT_MODE_P (mode
)
849 && SCALAR_INT_MODE_P (op_mode
)
850 && CONST_INT_P (XEXP (op
, 1))
851 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
857 /* Try to simplify a unary operation CODE whose output mode is to be
858 MODE with input operand OP whose mode was originally OP_MODE.
859 Return zero if no simplification can be made. */
861 simplify_unary_operation (enum rtx_code code
, machine_mode mode
,
862 rtx op
, machine_mode op_mode
)
866 trueop
= avoid_constant_pool_reference (op
);
868 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
872 return simplify_unary_operation_1 (code
, mode
, op
);
875 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
879 exact_int_to_float_conversion_p (const_rtx op
)
881 int out_bits
= significand_size (GET_MODE_INNER (GET_MODE (op
)));
882 machine_mode op0_mode
= GET_MODE (XEXP (op
, 0));
883 /* Constants shouldn't reach here. */
884 gcc_assert (op0_mode
!= VOIDmode
);
885 int in_prec
= GET_MODE_UNIT_PRECISION (op0_mode
);
886 int in_bits
= in_prec
;
887 if (HWI_COMPUTABLE_MODE_P (op0_mode
))
889 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (XEXP (op
, 0), op0_mode
);
890 if (GET_CODE (op
) == FLOAT
)
891 in_bits
-= num_sign_bit_copies (XEXP (op
, 0), op0_mode
);
892 else if (GET_CODE (op
) == UNSIGNED_FLOAT
)
893 in_bits
= wi::min_precision (wi::uhwi (nonzero
, in_prec
), UNSIGNED
);
896 in_bits
-= wi::ctz (wi::uhwi (nonzero
, in_prec
));
898 return in_bits
<= out_bits
;
901 /* Perform some simplifications we can do even if the operands
904 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
906 enum rtx_code reversed
;
907 rtx temp
, elt
, base
, step
;
908 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
913 /* (not (not X)) == X. */
914 if (GET_CODE (op
) == NOT
)
917 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
918 comparison is all ones. */
919 if (COMPARISON_P (op
)
920 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
921 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
922 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
923 XEXP (op
, 0), XEXP (op
, 1));
925 /* (not (plus X -1)) can become (neg X). */
926 if (GET_CODE (op
) == PLUS
927 && XEXP (op
, 1) == constm1_rtx
)
928 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
930 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
931 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
932 and MODE_VECTOR_INT. */
933 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
934 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
937 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
938 if (GET_CODE (op
) == XOR
939 && CONST_INT_P (XEXP (op
, 1))
940 && (temp
= simplify_unary_operation (NOT
, mode
,
941 XEXP (op
, 1), mode
)) != 0)
942 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
944 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
945 if (GET_CODE (op
) == PLUS
946 && CONST_INT_P (XEXP (op
, 1))
947 && mode_signbit_p (mode
, XEXP (op
, 1))
948 && (temp
= simplify_unary_operation (NOT
, mode
,
949 XEXP (op
, 1), mode
)) != 0)
950 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
953 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
954 operands other than 1, but that is not valid. We could do a
955 similar simplification for (not (lshiftrt C X)) where C is
956 just the sign bit, but this doesn't seem common enough to
958 if (GET_CODE (op
) == ASHIFT
959 && XEXP (op
, 0) == const1_rtx
)
961 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
962 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
965 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
966 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
967 so we can perform the above simplification. */
968 if (STORE_FLAG_VALUE
== -1
969 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
970 && GET_CODE (op
) == ASHIFTRT
971 && CONST_INT_P (XEXP (op
, 1))
972 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
973 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
974 XEXP (op
, 0), const0_rtx
);
977 if (partial_subreg_p (op
)
978 && subreg_lowpart_p (op
)
979 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
980 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
982 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
985 x
= gen_rtx_ROTATE (inner_mode
,
986 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
988 XEXP (SUBREG_REG (op
), 1));
989 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
994 /* Apply De Morgan's laws to reduce number of patterns for machines
995 with negating logical insns (and-not, nand, etc.). If result has
996 only one NOT, put it first, since that is how the patterns are
998 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1000 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1001 machine_mode op_mode
;
1003 op_mode
= GET_MODE (in1
);
1004 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1006 op_mode
= GET_MODE (in2
);
1007 if (op_mode
== VOIDmode
)
1009 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1011 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1012 std::swap (in1
, in2
);
1014 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1018 /* (not (bswap x)) -> (bswap (not x)). */
1019 if (GET_CODE (op
) == BSWAP
)
1021 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1022 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1027 /* (neg (neg X)) == X. */
1028 if (GET_CODE (op
) == NEG
)
1029 return XEXP (op
, 0);
1031 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1032 If comparison is not reversible use
1034 if (GET_CODE (op
) == IF_THEN_ELSE
)
1036 rtx cond
= XEXP (op
, 0);
1037 rtx true_rtx
= XEXP (op
, 1);
1038 rtx false_rtx
= XEXP (op
, 2);
1040 if ((GET_CODE (true_rtx
) == NEG
1041 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1042 || (GET_CODE (false_rtx
) == NEG
1043 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1045 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1046 temp
= reversed_comparison (cond
, mode
);
1050 std::swap (true_rtx
, false_rtx
);
1052 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1053 mode
, temp
, true_rtx
, false_rtx
);
1057 /* (neg (plus X 1)) can become (not X). */
1058 if (GET_CODE (op
) == PLUS
1059 && XEXP (op
, 1) == const1_rtx
)
1060 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1062 /* Similarly, (neg (not X)) is (plus X 1). */
1063 if (GET_CODE (op
) == NOT
)
1064 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1067 /* (neg (minus X Y)) can become (minus Y X). This transformation
1068 isn't safe for modes with signed zeros, since if X and Y are
1069 both +0, (minus Y X) is the same as (minus X Y). If the
1070 rounding mode is towards +infinity (or -infinity) then the two
1071 expressions will be rounded differently. */
1072 if (GET_CODE (op
) == MINUS
1073 && !HONOR_SIGNED_ZEROS (mode
)
1074 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1075 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1077 if (GET_CODE (op
) == PLUS
1078 && !HONOR_SIGNED_ZEROS (mode
)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1081 /* (neg (plus A C)) is simplified to (minus -C A). */
1082 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1083 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1085 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1087 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1090 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1091 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1092 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1095 /* (neg (mult A B)) becomes (mult A (neg B)).
1096 This works even for floating-point values. */
1097 if (GET_CODE (op
) == MULT
1098 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1100 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1101 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1104 /* NEG commutes with ASHIFT since it is multiplication. Only do
1105 this if we can then eliminate the NEG (e.g., if the operand
1107 if (GET_CODE (op
) == ASHIFT
)
1109 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1111 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1114 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1115 C is equal to the width of MODE minus 1. */
1116 if (GET_CODE (op
) == ASHIFTRT
1117 && CONST_INT_P (XEXP (op
, 1))
1118 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1119 return simplify_gen_binary (LSHIFTRT
, mode
,
1120 XEXP (op
, 0), XEXP (op
, 1));
1122 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1123 C is equal to the width of MODE minus 1. */
1124 if (GET_CODE (op
) == LSHIFTRT
1125 && CONST_INT_P (XEXP (op
, 1))
1126 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1127 return simplify_gen_binary (ASHIFTRT
, mode
,
1128 XEXP (op
, 0), XEXP (op
, 1));
1130 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1131 if (GET_CODE (op
) == XOR
1132 && XEXP (op
, 1) == const1_rtx
1133 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1134 return plus_constant (mode
, XEXP (op
, 0), -1);
1136 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1137 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1138 if (GET_CODE (op
) == LT
1139 && XEXP (op
, 1) == const0_rtx
1140 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1142 int_mode
= as_a
<scalar_int_mode
> (mode
);
1143 int isize
= GET_MODE_PRECISION (inner
);
1144 if (STORE_FLAG_VALUE
== 1)
1146 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1147 gen_int_shift_amount (inner
,
1149 if (int_mode
== inner
)
1151 if (GET_MODE_PRECISION (int_mode
) > isize
)
1152 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1153 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1155 else if (STORE_FLAG_VALUE
== -1)
1157 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1158 gen_int_shift_amount (inner
,
1160 if (int_mode
== inner
)
1162 if (GET_MODE_PRECISION (int_mode
) > isize
)
1163 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1164 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1168 if (vec_series_p (op
, &base
, &step
))
1170 /* Only create a new series if we can simplify both parts. In other
1171 cases this isn't really a simplification, and it's not necessarily
1172 a win to replace a vector operation with a scalar operation. */
1173 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1174 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1177 step
= simplify_unary_operation (NEG
, inner_mode
,
1180 return gen_vec_series (mode
, base
, step
);
1186 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1187 with the umulXi3_highpart patterns. */
1188 if (GET_CODE (op
) == LSHIFTRT
1189 && GET_CODE (XEXP (op
, 0)) == MULT
)
1192 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1194 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1196 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1200 /* We can't handle truncation to a partial integer mode here
1201 because we don't know the real bitsize of the partial
1206 if (GET_MODE (op
) != VOIDmode
)
1208 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1213 /* If we know that the value is already truncated, we can
1214 replace the TRUNCATE with a SUBREG. */
1215 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1216 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1217 || truncated_to_mode (mode
, op
)))
1219 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1224 /* A truncate of a comparison can be replaced with a subreg if
1225 STORE_FLAG_VALUE permits. This is like the previous test,
1226 but it works even if the comparison is done in a mode larger
1227 than HOST_BITS_PER_WIDE_INT. */
1228 if (HWI_COMPUTABLE_MODE_P (mode
)
1229 && COMPARISON_P (op
)
1230 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1232 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1237 /* A truncate of a memory is just loading the low part of the memory
1238 if we are not changing the meaning of the address. */
1239 if (GET_CODE (op
) == MEM
1240 && !VECTOR_MODE_P (mode
)
1241 && !MEM_VOLATILE_P (op
)
1242 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1244 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1251 case FLOAT_TRUNCATE
:
1252 if (DECIMAL_FLOAT_MODE_P (mode
))
1255 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1256 if (GET_CODE (op
) == FLOAT_EXTEND
1257 && GET_MODE (XEXP (op
, 0)) == mode
)
1258 return XEXP (op
, 0);
1260 /* (float_truncate:SF (float_truncate:DF foo:XF))
1261 = (float_truncate:SF foo:XF).
1262 This may eliminate double rounding, so it is unsafe.
1264 (float_truncate:SF (float_extend:XF foo:DF))
1265 = (float_truncate:SF foo:DF).
1267 (float_truncate:DF (float_extend:XF foo:SF))
1268 = (float_extend:DF foo:SF). */
1269 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1270 && flag_unsafe_math_optimizations
)
1271 || GET_CODE (op
) == FLOAT_EXTEND
)
1272 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1273 > GET_MODE_UNIT_SIZE (mode
)
1274 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1276 XEXP (op
, 0), mode
);
1278 /* (float_truncate (float x)) is (float x) */
1279 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1280 && (flag_unsafe_math_optimizations
1281 || exact_int_to_float_conversion_p (op
)))
1282 return simplify_gen_unary (GET_CODE (op
), mode
,
1284 GET_MODE (XEXP (op
, 0)));
1286 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1287 (OP:SF foo:SF) if OP is NEG or ABS. */
1288 if ((GET_CODE (op
) == ABS
1289 || GET_CODE (op
) == NEG
)
1290 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1291 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1292 return simplify_gen_unary (GET_CODE (op
), mode
,
1293 XEXP (XEXP (op
, 0), 0), mode
);
1295 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1296 is (float_truncate:SF x). */
1297 if (GET_CODE (op
) == SUBREG
1298 && subreg_lowpart_p (op
)
1299 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1300 return SUBREG_REG (op
);
1304 if (DECIMAL_FLOAT_MODE_P (mode
))
1307 /* (float_extend (float_extend x)) is (float_extend x)
1309 (float_extend (float x)) is (float x) assuming that double
1310 rounding can't happen.
1312 if (GET_CODE (op
) == FLOAT_EXTEND
1313 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1314 && exact_int_to_float_conversion_p (op
)))
1315 return simplify_gen_unary (GET_CODE (op
), mode
,
1317 GET_MODE (XEXP (op
, 0)));
1322 /* (abs (neg <foo>)) -> (abs <foo>) */
1323 if (GET_CODE (op
) == NEG
)
1324 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1325 GET_MODE (XEXP (op
, 0)));
1327 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1329 if (GET_MODE (op
) == VOIDmode
)
1332 /* If operand is something known to be positive, ignore the ABS. */
1333 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1334 || val_signbit_known_clear_p (GET_MODE (op
),
1335 nonzero_bits (op
, GET_MODE (op
))))
1338 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1339 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1340 && (num_sign_bit_copies (op
, int_mode
)
1341 == GET_MODE_PRECISION (int_mode
)))
1342 return gen_rtx_NEG (int_mode
, op
);
1347 /* (ffs (*_extend <X>)) = (ffs <X>) */
1348 if (GET_CODE (op
) == SIGN_EXTEND
1349 || GET_CODE (op
) == ZERO_EXTEND
)
1350 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1351 GET_MODE (XEXP (op
, 0)));
1355 switch (GET_CODE (op
))
1359 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1360 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1361 GET_MODE (XEXP (op
, 0)));
1365 /* Rotations don't affect popcount. */
1366 if (!side_effects_p (XEXP (op
, 1)))
1367 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1368 GET_MODE (XEXP (op
, 0)));
1377 switch (GET_CODE (op
))
1383 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1384 GET_MODE (XEXP (op
, 0)));
1388 /* Rotations don't affect parity. */
1389 if (!side_effects_p (XEXP (op
, 1)))
1390 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1391 GET_MODE (XEXP (op
, 0)));
1400 /* (bswap (bswap x)) -> x. */
1401 if (GET_CODE (op
) == BSWAP
)
1402 return XEXP (op
, 0);
1406 /* (float (sign_extend <X>)) = (float <X>). */
1407 if (GET_CODE (op
) == SIGN_EXTEND
)
1408 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1409 GET_MODE (XEXP (op
, 0)));
1413 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1414 becomes just the MINUS if its mode is MODE. This allows
1415 folding switch statements on machines using casesi (such as
1417 if (GET_CODE (op
) == TRUNCATE
1418 && GET_MODE (XEXP (op
, 0)) == mode
1419 && GET_CODE (XEXP (op
, 0)) == MINUS
1420 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1421 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1422 return XEXP (op
, 0);
1424 /* Extending a widening multiplication should be canonicalized to
1425 a wider widening multiplication. */
1426 if (GET_CODE (op
) == MULT
)
1428 rtx lhs
= XEXP (op
, 0);
1429 rtx rhs
= XEXP (op
, 1);
1430 enum rtx_code lcode
= GET_CODE (lhs
);
1431 enum rtx_code rcode
= GET_CODE (rhs
);
1433 /* Widening multiplies usually extend both operands, but sometimes
1434 they use a shift to extract a portion of a register. */
1435 if ((lcode
== SIGN_EXTEND
1436 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1437 && (rcode
== SIGN_EXTEND
1438 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1440 machine_mode lmode
= GET_MODE (lhs
);
1441 machine_mode rmode
= GET_MODE (rhs
);
1444 if (lcode
== ASHIFTRT
)
1445 /* Number of bits not shifted off the end. */
1446 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1447 - INTVAL (XEXP (lhs
, 1)));
1448 else /* lcode == SIGN_EXTEND */
1449 /* Size of inner mode. */
1450 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1452 if (rcode
== ASHIFTRT
)
1453 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1454 - INTVAL (XEXP (rhs
, 1)));
1455 else /* rcode == SIGN_EXTEND */
1456 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1458 /* We can only widen multiplies if the result is mathematiclly
1459 equivalent. I.e. if overflow was impossible. */
1460 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1461 return simplify_gen_binary
1463 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1464 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1468 /* Check for a sign extension of a subreg of a promoted
1469 variable, where the promotion is sign-extended, and the
1470 target mode is the same as the variable's promotion. */
1471 if (GET_CODE (op
) == SUBREG
1472 && SUBREG_PROMOTED_VAR_P (op
)
1473 && SUBREG_PROMOTED_SIGNED_P (op
)
1474 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1476 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1481 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1482 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1483 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1485 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1486 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1487 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1488 GET_MODE (XEXP (op
, 0)));
1491 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1492 is (sign_extend:M (subreg:O <X>)) if there is mode with
1493 GET_MODE_BITSIZE (N) - I bits.
1494 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is similarly (zero_extend:M (subreg:O <X>)). */
1496 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1497 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1498 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1499 && CONST_INT_P (XEXP (op
, 1))
1500 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1501 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1502 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1504 scalar_int_mode tmode
;
1505 gcc_assert (GET_MODE_PRECISION (int_mode
)
1506 > GET_MODE_PRECISION (op_mode
));
1507 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1508 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1511 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1513 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1514 ? SIGN_EXTEND
: ZERO_EXTEND
,
1515 int_mode
, inner
, tmode
);
1519 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1520 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1521 if (GET_CODE (op
) == LSHIFTRT
1522 && CONST_INT_P (XEXP (op
, 1))
1523 && XEXP (op
, 1) != const0_rtx
)
1524 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1526 #if defined(POINTERS_EXTEND_UNSIGNED)
1527 /* As we do not know which address space the pointer is referring to,
1528 we can do this only if the target does not support different pointer
1529 or address modes depending on the address space. */
1530 if (target_default_pointer_address_modes_p ()
1531 && ! POINTERS_EXTEND_UNSIGNED
1532 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1534 || (GET_CODE (op
) == SUBREG
1535 && REG_P (SUBREG_REG (op
))
1536 && REG_POINTER (SUBREG_REG (op
))
1537 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1538 && !targetm
.have_ptr_extend ())
1541 = convert_memory_address_addr_space_1 (Pmode
, op
,
1542 ADDR_SPACE_GENERIC
, false,
1551 /* Check for a zero extension of a subreg of a promoted
1552 variable, where the promotion is zero-extended, and the
1553 target mode is the same as the variable's promotion. */
1554 if (GET_CODE (op
) == SUBREG
1555 && SUBREG_PROMOTED_VAR_P (op
)
1556 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1557 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1559 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1564 /* Extending a widening multiplication should be canonicalized to
1565 a wider widening multiplication. */
1566 if (GET_CODE (op
) == MULT
)
1568 rtx lhs
= XEXP (op
, 0);
1569 rtx rhs
= XEXP (op
, 1);
1570 enum rtx_code lcode
= GET_CODE (lhs
);
1571 enum rtx_code rcode
= GET_CODE (rhs
);
1573 /* Widening multiplies usually extend both operands, but sometimes
1574 they use a shift to extract a portion of a register. */
1575 if ((lcode
== ZERO_EXTEND
1576 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1577 && (rcode
== ZERO_EXTEND
1578 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1580 machine_mode lmode
= GET_MODE (lhs
);
1581 machine_mode rmode
= GET_MODE (rhs
);
1584 if (lcode
== LSHIFTRT
)
1585 /* Number of bits not shifted off the end. */
1586 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1587 - INTVAL (XEXP (lhs
, 1)));
1588 else /* lcode == ZERO_EXTEND */
1589 /* Size of inner mode. */
1590 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1592 if (rcode
== LSHIFTRT
)
1593 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1594 - INTVAL (XEXP (rhs
, 1)));
1595 else /* rcode == ZERO_EXTEND */
1596 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1598 /* We can only widen multiplies if the result is mathematiclly
1599 equivalent. I.e. if overflow was impossible. */
1600 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1601 return simplify_gen_binary
1603 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1604 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1608 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1609 if (GET_CODE (op
) == ZERO_EXTEND
)
1610 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1611 GET_MODE (XEXP (op
, 0)));
1613 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1614 is (zero_extend:M (subreg:O <X>)) if there is mode with
1615 GET_MODE_PRECISION (N) - I bits. */
1616 if (GET_CODE (op
) == LSHIFTRT
1617 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1618 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1619 && CONST_INT_P (XEXP (op
, 1))
1620 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1621 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1622 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1624 scalar_int_mode tmode
;
1625 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1626 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1629 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1631 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1636 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1637 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1639 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1640 (and:SI (reg:SI) (const_int 63)). */
1641 if (partial_subreg_p (op
)
1642 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1643 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1644 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1645 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1646 && subreg_lowpart_p (op
)
1647 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1648 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1650 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1651 return SUBREG_REG (op
);
1652 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1656 #if defined(POINTERS_EXTEND_UNSIGNED)
1657 /* As we do not know which address space the pointer is referring to,
1658 we can do this only if the target does not support different pointer
1659 or address modes depending on the address space. */
1660 if (target_default_pointer_address_modes_p ()
1661 && POINTERS_EXTEND_UNSIGNED
> 0
1662 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1664 || (GET_CODE (op
) == SUBREG
1665 && REG_P (SUBREG_REG (op
))
1666 && REG_POINTER (SUBREG_REG (op
))
1667 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1668 && !targetm
.have_ptr_extend ())
1671 = convert_memory_address_addr_space_1 (Pmode
, op
,
1672 ADDR_SPACE_GENERIC
, false,
1684 if (VECTOR_MODE_P (mode
)
1685 && vec_duplicate_p (op
, &elt
)
1686 && code
!= VEC_DUPLICATE
)
1688 /* Try applying the operator to ELT and see if that simplifies.
1689 We can duplicate the result if so.
1691 The reason we don't use simplify_gen_unary is that it isn't
1692 necessarily a win to convert things like:
1694 (neg:V (vec_duplicate:V (reg:S R)))
1698 (vec_duplicate:V (neg:S (reg:S R)))
1700 The first might be done entirely in vector registers while the
1701 second might need a move between register files. */
1702 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1703 elt
, GET_MODE_INNER (GET_MODE (op
)));
1705 return gen_vec_duplicate (mode
, temp
);
1711 /* Try to compute the value of a unary operation CODE whose output mode is to
1712 be MODE with input operand OP whose mode was originally OP_MODE.
1713 Return zero if the value cannot be computed. */
1715 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1716 rtx op
, machine_mode op_mode
)
1718 scalar_int_mode result_mode
;
1720 if (code
== VEC_DUPLICATE
)
1722 gcc_assert (VECTOR_MODE_P (mode
));
1723 if (GET_MODE (op
) != VOIDmode
)
1725 if (!VECTOR_MODE_P (GET_MODE (op
)))
1726 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1728 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1731 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1732 return gen_const_vec_duplicate (mode
, op
);
1733 if (GET_CODE (op
) == CONST_VECTOR
1734 && (CONST_VECTOR_DUPLICATE_P (op
)
1735 || CONST_VECTOR_NUNITS (op
).is_constant ()))
1737 unsigned int npatterns
= (CONST_VECTOR_DUPLICATE_P (op
)
1738 ? CONST_VECTOR_NPATTERNS (op
)
1739 : CONST_VECTOR_NUNITS (op
).to_constant ());
1740 gcc_assert (multiple_p (GET_MODE_NUNITS (mode
), npatterns
));
1741 rtx_vector_builder
builder (mode
, npatterns
, 1);
1742 for (unsigned i
= 0; i
< npatterns
; i
++)
1743 builder
.quick_push (CONST_VECTOR_ELT (op
, i
));
1744 return builder
.build ();
1748 if (VECTOR_MODE_P (mode
)
1749 && GET_CODE (op
) == CONST_VECTOR
1750 && known_eq (GET_MODE_NUNITS (mode
), CONST_VECTOR_NUNITS (op
)))
1752 gcc_assert (GET_MODE (op
) == op_mode
);
1754 rtx_vector_builder builder
;
1755 if (!builder
.new_unary_operation (mode
, op
, false))
1758 unsigned int count
= builder
.encoded_nelts ();
1759 for (unsigned int i
= 0; i
< count
; i
++)
1761 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1762 CONST_VECTOR_ELT (op
, i
),
1763 GET_MODE_INNER (op_mode
));
1764 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1766 builder
.quick_push (x
);
1768 return builder
.build ();
1771 /* The order of these tests is critical so that, for example, we don't
1772 check the wrong mode (input vs. output) for a conversion operation,
1773 such as FIX. At some point, this should be simplified. */
1775 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1779 if (op_mode
== VOIDmode
)
1781 /* CONST_INT have VOIDmode as the mode. We assume that all
1782 the bits of the constant are significant, though, this is
1783 a dangerous assumption as many times CONST_INTs are
1784 created and used with garbage in the bits outside of the
1785 precision of the implied mode of the const_int. */
1786 op_mode
= MAX_MODE_INT
;
1789 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1791 /* Avoid the folding if flag_signaling_nans is on and
1792 operand is a signaling NaN. */
1793 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1796 d
= real_value_truncate (mode
, d
);
1797 return const_double_from_real_value (d
, mode
);
1799 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1803 if (op_mode
== VOIDmode
)
1805 /* CONST_INT have VOIDmode as the mode. We assume that all
1806 the bits of the constant are significant, though, this is
1807 a dangerous assumption as many times CONST_INTs are
1808 created and used with garbage in the bits outside of the
1809 precision of the implied mode of the const_int. */
1810 op_mode
= MAX_MODE_INT
;
1813 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1815 /* Avoid the folding if flag_signaling_nans is on and
1816 operand is a signaling NaN. */
1817 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1820 d
= real_value_truncate (mode
, d
);
1821 return const_double_from_real_value (d
, mode
);
1824 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1826 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1827 if (width
> MAX_BITSIZE_MODE_ANY_INT
)
1831 scalar_int_mode imode
= (op_mode
== VOIDmode
1833 : as_a
<scalar_int_mode
> (op_mode
));
1834 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1837 #if TARGET_SUPPORTS_WIDE_INT == 0
1838 /* This assert keeps the simplification from producing a result
1839 that cannot be represented in a CONST_DOUBLE but a lot of
1840 upstream callers expect that this function never fails to
1841 simplify something and so you if you added this to the test
1842 above the code would die later anyway. If this assert
1843 happens, you just need to make the port support wide int. */
1844 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1850 result
= wi::bit_not (op0
);
1854 result
= wi::neg (op0
);
1858 result
= wi::abs (op0
);
1862 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1866 if (wi::ne_p (op0
, 0))
1867 int_value
= wi::clz (op0
);
1868 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1870 result
= wi::shwi (int_value
, result_mode
);
1874 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1878 if (wi::ne_p (op0
, 0))
1879 int_value
= wi::ctz (op0
);
1880 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1882 result
= wi::shwi (int_value
, result_mode
);
1886 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1890 result
= wi::shwi (wi::parity (op0
), result_mode
);
1894 result
= wide_int (op0
).bswap ();
1899 result
= wide_int::from (op0
, width
, UNSIGNED
);
1903 result
= wide_int::from (op0
, width
, SIGNED
);
1911 return immed_wide_int_const (result
, result_mode
);
1914 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1915 && SCALAR_FLOAT_MODE_P (mode
)
1916 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1918 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1924 d
= real_value_abs (&d
);
1927 d
= real_value_negate (&d
);
1929 case FLOAT_TRUNCATE
:
1930 /* Don't perform the operation if flag_signaling_nans is on
1931 and the operand is a signaling NaN. */
1932 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1934 d
= real_value_truncate (mode
, d
);
1937 /* Don't perform the operation if flag_signaling_nans is on
1938 and the operand is a signaling NaN. */
1939 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1941 /* All this does is change the mode, unless changing
1943 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1944 real_convert (&d
, mode
, &d
);
1947 /* Don't perform the operation if flag_signaling_nans is on
1948 and the operand is a signaling NaN. */
1949 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1951 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1958 real_to_target (tmp
, &d
, GET_MODE (op
));
1959 for (i
= 0; i
< 4; i
++)
1961 real_from_target (&d
, tmp
, mode
);
1967 return const_double_from_real_value (d
, mode
);
1969 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1970 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1971 && is_int_mode (mode
, &result_mode
))
1973 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1974 if (width
> MAX_BITSIZE_MODE_ANY_INT
)
1977 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1978 operators are intentionally left unspecified (to ease implementation
1979 by target backends), for consistency, this routine implements the
1980 same semantics for constant folding as used by the middle-end. */
1982 /* This was formerly used only for non-IEEE float.
1983 eggert@twinsun.com says it is safe for IEEE also. */
1985 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1986 wide_int wmax
, wmin
;
1987 /* This is part of the abi to real_to_integer, but we check
1988 things before making this call. */
1994 if (REAL_VALUE_ISNAN (*x
))
1997 /* Test against the signed upper bound. */
1998 wmax
= wi::max_value (width
, SIGNED
);
1999 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
2000 if (real_less (&t
, x
))
2001 return immed_wide_int_const (wmax
, mode
);
2003 /* Test against the signed lower bound. */
2004 wmin
= wi::min_value (width
, SIGNED
);
2005 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
2006 if (real_less (x
, &t
))
2007 return immed_wide_int_const (wmin
, mode
);
2009 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2013 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
2016 /* Test against the unsigned upper bound. */
2017 wmax
= wi::max_value (width
, UNSIGNED
);
2018 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
2019 if (real_less (&t
, x
))
2020 return immed_wide_int_const (wmax
, mode
);
2022 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2030 /* Handle polynomial integers. */
2031 else if (CONST_POLY_INT_P (op
))
2033 poly_wide_int result
;
2037 result
= -const_poly_int_value (op
);
2041 result
= ~const_poly_int_value (op
);
2047 return immed_wide_int_const (result
, mode
);
2053 /* Subroutine of simplify_binary_operation to simplify a binary operation
2054 CODE that can commute with byte swapping, with result mode MODE and
2055 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2056 Return zero if no simplification or canonicalization is possible. */
2059 simplify_byte_swapping_operation (enum rtx_code code
, machine_mode mode
,
2064 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2065 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
2067 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
2068 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
2069 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2072 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2073 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
2075 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2076 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2082 /* Subroutine of simplify_binary_operation to simplify a commutative,
2083 associative binary operation CODE with result mode MODE, operating
2084 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2085 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2086 canonicalization is possible. */
2089 simplify_associative_operation (enum rtx_code code
, machine_mode mode
,
2094 /* Linearize the operator to the left. */
2095 if (GET_CODE (op1
) == code
)
2097 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2098 if (GET_CODE (op0
) == code
)
2100 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2101 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2104 /* "a op (b op c)" becomes "(b op c) op a". */
2105 if (! swap_commutative_operands_p (op1
, op0
))
2106 return simplify_gen_binary (code
, mode
, op1
, op0
);
2108 std::swap (op0
, op1
);
2111 if (GET_CODE (op0
) == code
)
2113 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2114 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2116 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2117 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2120 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2121 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2123 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2125 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2126 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2128 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2134 /* Return a mask describing the COMPARISON. */
2136 comparison_to_mask (enum rtx_code comparison
)
2176 /* Return a comparison corresponding to the MASK. */
2177 static enum rtx_code
2178 mask_to_comparison (int mask
)
2218 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2219 and OP1, which should be both relational operations. Return 0 if no such
2220 simplification is possible. */
2222 simplify_logical_relational_operation (enum rtx_code code
, machine_mode mode
,
2225 /* We only handle IOR of two relational operations. */
2229 if (!(COMPARISON_P (op0
) && COMPARISON_P (op1
)))
2232 if (!(rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2233 && rtx_equal_p (XEXP (op0
, 1), XEXP (op1
, 1))))
2236 enum rtx_code code0
= GET_CODE (op0
);
2237 enum rtx_code code1
= GET_CODE (op1
);
2239 /* We don't handle unsigned comparisons currently. */
2240 if (code0
== LTU
|| code0
== GTU
|| code0
== LEU
|| code0
== GEU
)
2242 if (code1
== LTU
|| code1
== GTU
|| code1
== LEU
|| code1
== GEU
)
2245 int mask0
= comparison_to_mask (code0
);
2246 int mask1
= comparison_to_mask (code1
);
2248 int mask
= mask0
| mask1
;
2251 return const_true_rtx
;
2253 code
= mask_to_comparison (mask
);
2255 op0
= XEXP (op1
, 0);
2256 op1
= XEXP (op1
, 1);
2258 return simplify_gen_relational (code
, mode
, VOIDmode
, op0
, op1
);
2261 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2262 and OP1. Return 0 if no simplification is possible.
2264 Don't use this for relational operations such as EQ or LT.
2265 Use simplify_relational_operation instead. */
2267 simplify_binary_operation (enum rtx_code code
, machine_mode mode
,
2270 rtx trueop0
, trueop1
;
2273 /* Relational operations don't work here. We must know the mode
2274 of the operands in order to do the comparison correctly.
2275 Assuming a full word can give incorrect results.
2276 Consider comparing 128 with -128 in QImode. */
2277 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2278 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2280 /* Make sure the constant is second. */
2281 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2282 && swap_commutative_operands_p (op0
, op1
))
2283 std::swap (op0
, op1
);
2285 trueop0
= avoid_constant_pool_reference (op0
);
2286 trueop1
= avoid_constant_pool_reference (op1
);
2288 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2291 tem
= simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2296 /* If the above steps did not result in a simplification and op0 or op1
2297 were constant pool references, use the referenced constants directly. */
2298 if (trueop0
!= op0
|| trueop1
!= op1
)
2299 return simplify_gen_binary (code
, mode
, trueop0
, trueop1
);
2304 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2305 which OP0 and OP1 are both vector series or vector duplicates
2306 (which are really just series with a step of 0). If so, try to
2307 form a new series by applying CODE to the bases and to the steps.
2308 Return null if no simplification is possible.
2310 MODE is the mode of the operation and is known to be a vector
2314 simplify_binary_operation_series (rtx_code code
, machine_mode mode
,
2318 if (vec_duplicate_p (op0
, &base0
))
2320 else if (!vec_series_p (op0
, &base0
, &step0
))
2324 if (vec_duplicate_p (op1
, &base1
))
2326 else if (!vec_series_p (op1
, &base1
, &step1
))
2329 /* Only create a new series if we can simplify both parts. In other
2330 cases this isn't really a simplification, and it's not necessarily
2331 a win to replace a vector operation with a scalar operation. */
2332 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
2333 rtx new_base
= simplify_binary_operation (code
, inner_mode
, base0
, base1
);
2337 rtx new_step
= simplify_binary_operation (code
, inner_mode
, step0
, step1
);
2341 return gen_vec_series (mode
, new_base
, new_step
);
2344 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2345 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2346 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2347 actual constants. */
2350 simplify_binary_operation_1 (enum rtx_code code
, machine_mode mode
,
2351 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2353 rtx tem
, reversed
, opleft
, opright
, elt0
, elt1
;
2355 scalar_int_mode int_mode
, inner_mode
;
2358 /* Even if we can't compute a constant result,
2359 there are some cases worth simplifying. */
2364 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2365 when x is NaN, infinite, or finite and nonzero. They aren't
2366 when x is -0 and the rounding mode is not towards -infinity,
2367 since (-0) + 0 is then 0. */
2368 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2371 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2372 transformations are safe even for IEEE. */
2373 if (GET_CODE (op0
) == NEG
)
2374 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2375 else if (GET_CODE (op1
) == NEG
)
2376 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2378 /* (~a) + 1 -> -a */
2379 if (INTEGRAL_MODE_P (mode
)
2380 && GET_CODE (op0
) == NOT
2381 && trueop1
== const1_rtx
)
2382 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2384 /* Handle both-operands-constant cases. We can only add
2385 CONST_INTs to constants since the sum of relocatable symbols
2386 can't be handled by most assemblers. Don't add CONST_INT
2387 to CONST_INT since overflow won't be computed properly if wider
2388 than HOST_BITS_PER_WIDE_INT. */
2390 if ((GET_CODE (op0
) == CONST
2391 || GET_CODE (op0
) == SYMBOL_REF
2392 || GET_CODE (op0
) == LABEL_REF
)
2393 && poly_int_rtx_p (op1
, &offset
))
2394 return plus_constant (mode
, op0
, offset
);
2395 else if ((GET_CODE (op1
) == CONST
2396 || GET_CODE (op1
) == SYMBOL_REF
2397 || GET_CODE (op1
) == LABEL_REF
)
2398 && poly_int_rtx_p (op0
, &offset
))
2399 return plus_constant (mode
, op1
, offset
);
2401 /* See if this is something like X * C - X or vice versa or
2402 if the multiplication is written as a shift. If so, we can
2403 distribute and make a new multiply, shift, or maybe just
2404 have X (if C is 2 in the example above). But don't make
2405 something more expensive than we had before. */
2407 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2409 rtx lhs
= op0
, rhs
= op1
;
2411 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2412 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2414 if (GET_CODE (lhs
) == NEG
)
2416 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2417 lhs
= XEXP (lhs
, 0);
2419 else if (GET_CODE (lhs
) == MULT
2420 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2422 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2423 lhs
= XEXP (lhs
, 0);
2425 else if (GET_CODE (lhs
) == ASHIFT
2426 && CONST_INT_P (XEXP (lhs
, 1))
2427 && INTVAL (XEXP (lhs
, 1)) >= 0
2428 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2430 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2431 GET_MODE_PRECISION (int_mode
));
2432 lhs
= XEXP (lhs
, 0);
2435 if (GET_CODE (rhs
) == NEG
)
2437 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2438 rhs
= XEXP (rhs
, 0);
2440 else if (GET_CODE (rhs
) == MULT
2441 && CONST_INT_P (XEXP (rhs
, 1)))
2443 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2444 rhs
= XEXP (rhs
, 0);
2446 else if (GET_CODE (rhs
) == ASHIFT
2447 && CONST_INT_P (XEXP (rhs
, 1))
2448 && INTVAL (XEXP (rhs
, 1)) >= 0
2449 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2451 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2452 GET_MODE_PRECISION (int_mode
));
2453 rhs
= XEXP (rhs
, 0);
2456 if (rtx_equal_p (lhs
, rhs
))
2458 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2460 bool speed
= optimize_function_for_speed_p (cfun
);
2462 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2464 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2465 return (set_src_cost (tem
, int_mode
, speed
)
2466 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2470 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2471 if (CONST_SCALAR_INT_P (op1
)
2472 && GET_CODE (op0
) == XOR
2473 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2474 && mode_signbit_p (mode
, op1
))
2475 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2476 simplify_gen_binary (XOR
, mode
, op1
,
2479 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2480 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2481 && GET_CODE (op0
) == MULT
2482 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2486 in1
= XEXP (XEXP (op0
, 0), 0);
2487 in2
= XEXP (op0
, 1);
2488 return simplify_gen_binary (MINUS
, mode
, op1
,
2489 simplify_gen_binary (MULT
, mode
,
2493 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2494 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2496 if (COMPARISON_P (op0
)
2497 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2498 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2499 && (reversed
= reversed_comparison (op0
, mode
)))
2501 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2503 /* If one of the operands is a PLUS or a MINUS, see if we can
2504 simplify this by the associative law.
2505 Don't use the associative law for floating point.
2506 The inaccuracy makes it nonassociative,
2507 and subtle programs can break if operations are associated. */
2509 if (INTEGRAL_MODE_P (mode
)
2510 && (plus_minus_operand_p (op0
)
2511 || plus_minus_operand_p (op1
))
2512 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2515 /* Reassociate floating point addition only when the user
2516 specifies associative math operations. */
2517 if (FLOAT_MODE_P (mode
)
2518 && flag_associative_math
)
2520 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2525 /* Handle vector series. */
2526 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2528 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2535 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2536 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2537 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2538 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2540 rtx xop00
= XEXP (op0
, 0);
2541 rtx xop10
= XEXP (op1
, 0);
2543 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2546 if (REG_P (xop00
) && REG_P (xop10
)
2547 && REGNO (xop00
) == REGNO (xop10
)
2548 && GET_MODE (xop00
) == mode
2549 && GET_MODE (xop10
) == mode
2550 && GET_MODE_CLASS (mode
) == MODE_CC
)
2556 /* We can't assume x-x is 0 even with non-IEEE floating point,
2557 but since it is zero except in very strange circumstances, we
2558 will treat it as zero with -ffinite-math-only. */
2559 if (rtx_equal_p (trueop0
, trueop1
)
2560 && ! side_effects_p (op0
)
2561 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2562 return CONST0_RTX (mode
);
2564 /* Change subtraction from zero into negation. (0 - x) is the
2565 same as -x when x is NaN, infinite, or finite and nonzero.
2566 But if the mode has signed zeros, and does not round towards
2567 -infinity, then 0 - 0 is 0, not -0. */
2568 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2569 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2571 /* (-1 - a) is ~a, unless the expression contains symbolic
2572 constants, in which case not retaining additions and
2573 subtractions could cause invalid assembly to be produced. */
2574 if (trueop0
== constm1_rtx
2575 && !contains_symbolic_reference_p (op1
))
2576 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2578 /* Subtracting 0 has no effect unless the mode has signed zeros
2579 and supports rounding towards -infinity. In such a case,
2581 if (!(HONOR_SIGNED_ZEROS (mode
)
2582 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2583 && trueop1
== CONST0_RTX (mode
))
2586 /* See if this is something like X * C - X or vice versa or
2587 if the multiplication is written as a shift. If so, we can
2588 distribute and make a new multiply, shift, or maybe just
2589 have X (if C is 2 in the example above). But don't make
2590 something more expensive than we had before. */
2592 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2594 rtx lhs
= op0
, rhs
= op1
;
2596 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2597 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2599 if (GET_CODE (lhs
) == NEG
)
2601 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2602 lhs
= XEXP (lhs
, 0);
2604 else if (GET_CODE (lhs
) == MULT
2605 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2607 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2608 lhs
= XEXP (lhs
, 0);
2610 else if (GET_CODE (lhs
) == ASHIFT
2611 && CONST_INT_P (XEXP (lhs
, 1))
2612 && INTVAL (XEXP (lhs
, 1)) >= 0
2613 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2615 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2616 GET_MODE_PRECISION (int_mode
));
2617 lhs
= XEXP (lhs
, 0);
2620 if (GET_CODE (rhs
) == NEG
)
2622 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2623 rhs
= XEXP (rhs
, 0);
2625 else if (GET_CODE (rhs
) == MULT
2626 && CONST_INT_P (XEXP (rhs
, 1)))
2628 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2629 rhs
= XEXP (rhs
, 0);
2631 else if (GET_CODE (rhs
) == ASHIFT
2632 && CONST_INT_P (XEXP (rhs
, 1))
2633 && INTVAL (XEXP (rhs
, 1)) >= 0
2634 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2636 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2637 GET_MODE_PRECISION (int_mode
));
2638 negcoeff1
= -negcoeff1
;
2639 rhs
= XEXP (rhs
, 0);
2642 if (rtx_equal_p (lhs
, rhs
))
2644 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2646 bool speed
= optimize_function_for_speed_p (cfun
);
2648 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2650 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2651 return (set_src_cost (tem
, int_mode
, speed
)
2652 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2656 /* (a - (-b)) -> (a + b). True even for IEEE. */
2657 if (GET_CODE (op1
) == NEG
)
2658 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2660 /* (-x - c) may be simplified as (-c - x). */
2661 if (GET_CODE (op0
) == NEG
2662 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2664 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2666 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2669 if ((GET_CODE (op0
) == CONST
2670 || GET_CODE (op0
) == SYMBOL_REF
2671 || GET_CODE (op0
) == LABEL_REF
)
2672 && poly_int_rtx_p (op1
, &offset
))
2673 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2675 /* Don't let a relocatable value get a negative coeff. */
2676 if (poly_int_rtx_p (op1
) && GET_MODE (op0
) != VOIDmode
)
2677 return simplify_gen_binary (PLUS
, mode
,
2679 neg_poly_int_rtx (mode
, op1
));
2681 /* (x - (x & y)) -> (x & ~y) */
2682 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2684 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2686 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2687 GET_MODE (XEXP (op1
, 1)));
2688 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2690 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2692 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2693 GET_MODE (XEXP (op1
, 0)));
2694 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2698 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2699 by reversing the comparison code if valid. */
2700 if (STORE_FLAG_VALUE
== 1
2701 && trueop0
== const1_rtx
2702 && COMPARISON_P (op1
)
2703 && (reversed
= reversed_comparison (op1
, mode
)))
2706 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2707 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2708 && GET_CODE (op1
) == MULT
2709 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2713 in1
= XEXP (XEXP (op1
, 0), 0);
2714 in2
= XEXP (op1
, 1);
2715 return simplify_gen_binary (PLUS
, mode
,
2716 simplify_gen_binary (MULT
, mode
,
2721 /* Canonicalize (minus (neg A) (mult B C)) to
2722 (minus (mult (neg B) C) A). */
2723 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2724 && GET_CODE (op1
) == MULT
2725 && GET_CODE (op0
) == NEG
)
2729 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2730 in2
= XEXP (op1
, 1);
2731 return simplify_gen_binary (MINUS
, mode
,
2732 simplify_gen_binary (MULT
, mode
,
2737 /* If one of the operands is a PLUS or a MINUS, see if we can
2738 simplify this by the associative law. This will, for example,
2739 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2740 Don't use the associative law for floating point.
2741 The inaccuracy makes it nonassociative,
2742 and subtle programs can break if operations are associated. */
2744 if (INTEGRAL_MODE_P (mode
)
2745 && (plus_minus_operand_p (op0
)
2746 || plus_minus_operand_p (op1
))
2747 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2750 /* Handle vector series. */
2751 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2753 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2760 if (trueop1
== constm1_rtx
)
2761 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2763 if (GET_CODE (op0
) == NEG
)
2765 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2766 /* If op1 is a MULT as well and simplify_unary_operation
2767 just moved the NEG to the second operand, simplify_gen_binary
2768 below could through simplify_associative_operation move
2769 the NEG around again and recurse endlessly. */
2771 && GET_CODE (op1
) == MULT
2772 && GET_CODE (temp
) == MULT
2773 && XEXP (op1
, 0) == XEXP (temp
, 0)
2774 && GET_CODE (XEXP (temp
, 1)) == NEG
2775 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2778 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2780 if (GET_CODE (op1
) == NEG
)
2782 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2783 /* If op0 is a MULT as well and simplify_unary_operation
2784 just moved the NEG to the second operand, simplify_gen_binary
2785 below could through simplify_associative_operation move
2786 the NEG around again and recurse endlessly. */
2788 && GET_CODE (op0
) == MULT
2789 && GET_CODE (temp
) == MULT
2790 && XEXP (op0
, 0) == XEXP (temp
, 0)
2791 && GET_CODE (XEXP (temp
, 1)) == NEG
2792 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2795 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2798 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2799 x is NaN, since x * 0 is then also NaN. Nor is it valid
2800 when the mode has signed zeros, since multiplying a negative
2801 number by 0 will give -0, not 0. */
2802 if (!HONOR_NANS (mode
)
2803 && !HONOR_SIGNED_ZEROS (mode
)
2804 && trueop1
== CONST0_RTX (mode
)
2805 && ! side_effects_p (op0
))
2808 /* In IEEE floating point, x*1 is not equivalent to x for
2810 if (!HONOR_SNANS (mode
)
2811 && trueop1
== CONST1_RTX (mode
))
2814 /* Convert multiply by constant power of two into shift. */
2815 if (CONST_SCALAR_INT_P (trueop1
))
2817 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
2819 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2820 gen_int_shift_amount (mode
, val
));
2823 /* x*2 is x+x and x*(-1) is -x */
2824 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2825 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2826 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2827 && GET_MODE (op0
) == mode
)
2829 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2831 if (real_equal (d1
, &dconst2
))
2832 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2834 if (!HONOR_SNANS (mode
)
2835 && real_equal (d1
, &dconstm1
))
2836 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2839 /* Optimize -x * -x as x * x. */
2840 if (FLOAT_MODE_P (mode
)
2841 && GET_CODE (op0
) == NEG
2842 && GET_CODE (op1
) == NEG
2843 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2844 && !side_effects_p (XEXP (op0
, 0)))
2845 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2847 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2848 if (SCALAR_FLOAT_MODE_P (mode
)
2849 && GET_CODE (op0
) == ABS
2850 && GET_CODE (op1
) == ABS
2851 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2852 && !side_effects_p (XEXP (op0
, 0)))
2853 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2855 /* Reassociate multiplication, but for floating point MULTs
2856 only when the user specifies unsafe math optimizations. */
2857 if (! FLOAT_MODE_P (mode
)
2858 || flag_unsafe_math_optimizations
)
2860 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2867 if (trueop1
== CONST0_RTX (mode
))
2869 if (INTEGRAL_MODE_P (mode
)
2870 && trueop1
== CONSTM1_RTX (mode
)
2871 && !side_effects_p (op0
))
2873 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2875 /* A | (~A) -> -1 */
2876 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2877 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2878 && ! side_effects_p (op0
)
2879 && SCALAR_INT_MODE_P (mode
))
2882 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2883 if (CONST_INT_P (op1
)
2884 && HWI_COMPUTABLE_MODE_P (mode
)
2885 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2886 && !side_effects_p (op0
))
2889 /* Canonicalize (X & C1) | C2. */
2890 if (GET_CODE (op0
) == AND
2891 && CONST_INT_P (trueop1
)
2892 && CONST_INT_P (XEXP (op0
, 1)))
2894 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2895 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2896 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2898 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2900 && !side_effects_p (XEXP (op0
, 0)))
2903 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2904 if (((c1
|c2
) & mask
) == mask
)
2905 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2908 /* Convert (A & B) | A to A. */
2909 if (GET_CODE (op0
) == AND
2910 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2911 || rtx_equal_p (XEXP (op0
, 1), op1
))
2912 && ! side_effects_p (XEXP (op0
, 0))
2913 && ! side_effects_p (XEXP (op0
, 1)))
2916 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2917 mode size to (rotate A CX). */
2919 if (GET_CODE (op1
) == ASHIFT
2920 || GET_CODE (op1
) == SUBREG
)
2931 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2932 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2933 && CONST_INT_P (XEXP (opleft
, 1))
2934 && CONST_INT_P (XEXP (opright
, 1))
2935 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2936 == GET_MODE_UNIT_PRECISION (mode
)))
2937 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2939 /* Same, but for ashift that has been "simplified" to a wider mode
2940 by simplify_shift_const. */
2942 if (GET_CODE (opleft
) == SUBREG
2943 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2944 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2946 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2947 && GET_CODE (opright
) == LSHIFTRT
2948 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2949 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2950 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2951 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2952 SUBREG_REG (XEXP (opright
, 0)))
2953 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2954 && CONST_INT_P (XEXP (opright
, 1))
2955 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2956 + INTVAL (XEXP (opright
, 1))
2957 == GET_MODE_PRECISION (int_mode
)))
2958 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2959 XEXP (SUBREG_REG (opleft
), 1));
2961 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2962 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2963 the PLUS does not affect any of the bits in OP1: then we can do
2964 the IOR as a PLUS and we can associate. This is valid if OP1
2965 can be safely shifted left C bits. */
2966 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2967 && GET_CODE (XEXP (op0
, 0)) == PLUS
2968 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2969 && CONST_INT_P (XEXP (op0
, 1))
2970 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2972 int count
= INTVAL (XEXP (op0
, 1));
2973 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2975 if (mask
>> count
== INTVAL (trueop1
)
2976 && trunc_int_for_mode (mask
, mode
) == mask
2977 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2978 return simplify_gen_binary (ASHIFTRT
, mode
,
2979 plus_constant (mode
, XEXP (op0
, 0),
2984 /* The following happens with bitfield merging.
2985 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2986 if (GET_CODE (op0
) == AND
2987 && GET_CODE (op1
) == AND
2988 && CONST_INT_P (XEXP (op0
, 1))
2989 && CONST_INT_P (XEXP (op1
, 1))
2990 && (INTVAL (XEXP (op0
, 1))
2991 == ~INTVAL (XEXP (op1
, 1))))
2993 /* The IOR may be on both sides. */
2994 rtx top0
= NULL_RTX
, top1
= NULL_RTX
;
2995 if (GET_CODE (XEXP (op1
, 0)) == IOR
)
2996 top0
= op0
, top1
= op1
;
2997 else if (GET_CODE (XEXP (op0
, 0)) == IOR
)
2998 top0
= op1
, top1
= op0
;
3001 /* X may be on either side of the inner IOR. */
3003 if (rtx_equal_p (XEXP (top0
, 0),
3004 XEXP (XEXP (top1
, 0), 0)))
3005 tem
= XEXP (XEXP (top1
, 0), 1);
3006 else if (rtx_equal_p (XEXP (top0
, 0),
3007 XEXP (XEXP (top1
, 0), 1)))
3008 tem
= XEXP (XEXP (top1
, 0), 0);
3010 return simplify_gen_binary (IOR
, mode
, XEXP (top0
, 0),
3012 (AND
, mode
, tem
, XEXP (top1
, 1)));
3016 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3020 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3024 tem
= simplify_logical_relational_operation (code
, mode
, op0
, op1
);
3030 if (trueop1
== CONST0_RTX (mode
))
3032 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3033 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
3034 if (rtx_equal_p (trueop0
, trueop1
)
3035 && ! side_effects_p (op0
)
3036 && GET_MODE_CLASS (mode
) != MODE_CC
)
3037 return CONST0_RTX (mode
);
3039 /* Canonicalize XOR of the most significant bit to PLUS. */
3040 if (CONST_SCALAR_INT_P (op1
)
3041 && mode_signbit_p (mode
, op1
))
3042 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
3043 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3044 if (CONST_SCALAR_INT_P (op1
)
3045 && GET_CODE (op0
) == PLUS
3046 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
3047 && mode_signbit_p (mode
, XEXP (op0
, 1)))
3048 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
3049 simplify_gen_binary (XOR
, mode
, op1
,
3052 /* If we are XORing two things that have no bits in common,
3053 convert them into an IOR. This helps to detect rotation encoded
3054 using those methods and possibly other simplifications. */
3056 if (HWI_COMPUTABLE_MODE_P (mode
)
3057 && (nonzero_bits (op0
, mode
)
3058 & nonzero_bits (op1
, mode
)) == 0)
3059 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
3061 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3062 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3065 int num_negated
= 0;
3067 if (GET_CODE (op0
) == NOT
)
3068 num_negated
++, op0
= XEXP (op0
, 0);
3069 if (GET_CODE (op1
) == NOT
)
3070 num_negated
++, op1
= XEXP (op1
, 0);
3072 if (num_negated
== 2)
3073 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
3074 else if (num_negated
== 1)
3075 return simplify_gen_unary (NOT
, mode
,
3076 simplify_gen_binary (XOR
, mode
, op0
, op1
),
3080 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3081 correspond to a machine insn or result in further simplifications
3082 if B is a constant. */
3084 if (GET_CODE (op0
) == AND
3085 && rtx_equal_p (XEXP (op0
, 1), op1
)
3086 && ! side_effects_p (op1
))
3087 return simplify_gen_binary (AND
, mode
,
3088 simplify_gen_unary (NOT
, mode
,
3089 XEXP (op0
, 0), mode
),
3092 else if (GET_CODE (op0
) == AND
3093 && rtx_equal_p (XEXP (op0
, 0), op1
)
3094 && ! side_effects_p (op1
))
3095 return simplify_gen_binary (AND
, mode
,
3096 simplify_gen_unary (NOT
, mode
,
3097 XEXP (op0
, 1), mode
),
3100 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3101 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3102 out bits inverted twice and not set by C. Similarly, given
3103 (xor (and (xor A B) C) D), simplify without inverting C in
3104 the xor operand: (xor (and A C) (B&C)^D).
3106 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
3107 && GET_CODE (XEXP (op0
, 0)) == XOR
3108 && CONST_INT_P (op1
)
3109 && CONST_INT_P (XEXP (op0
, 1))
3110 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
3112 enum rtx_code op
= GET_CODE (op0
);
3113 rtx a
= XEXP (XEXP (op0
, 0), 0);
3114 rtx b
= XEXP (XEXP (op0
, 0), 1);
3115 rtx c
= XEXP (op0
, 1);
3117 HOST_WIDE_INT bval
= INTVAL (b
);
3118 HOST_WIDE_INT cval
= INTVAL (c
);
3119 HOST_WIDE_INT dval
= INTVAL (d
);
3120 HOST_WIDE_INT xcval
;
3127 return simplify_gen_binary (XOR
, mode
,
3128 simplify_gen_binary (op
, mode
, a
, c
),
3129 gen_int_mode ((bval
& xcval
) ^ dval
,
3133 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3134 we can transform like this:
3135 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3136 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3137 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3138 Attempt a few simplifications when B and C are both constants. */
3139 if (GET_CODE (op0
) == AND
3140 && CONST_INT_P (op1
)
3141 && CONST_INT_P (XEXP (op0
, 1)))
3143 rtx a
= XEXP (op0
, 0);
3144 rtx b
= XEXP (op0
, 1);
3146 HOST_WIDE_INT bval
= INTVAL (b
);
3147 HOST_WIDE_INT cval
= INTVAL (c
);
3149 /* Instead of computing ~A&C, we compute its negated value,
3150 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3151 optimize for sure. If it does not simplify, we still try
3152 to compute ~A&C below, but since that always allocates
3153 RTL, we don't try that before committing to returning a
3154 simplified expression. */
3155 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3158 if ((~cval
& bval
) == 0)
3160 rtx na_c
= NULL_RTX
;
3162 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3165 /* If ~A does not simplify, don't bother: we don't
3166 want to simplify 2 operations into 3, and if na_c
3167 were to simplify with na, n_na_c would have
3168 simplified as well. */
3169 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3171 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3174 /* Try to simplify ~A&C | ~B&C. */
3175 if (na_c
!= NULL_RTX
)
3176 return simplify_gen_binary (IOR
, mode
, na_c
,
3177 gen_int_mode (~bval
& cval
, mode
));
3181 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3182 if (n_na_c
== CONSTM1_RTX (mode
))
3184 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3185 gen_int_mode (~cval
& bval
,
3187 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3188 gen_int_mode (~bval
& cval
,
3194 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3195 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3196 machines, and also has shorter instruction path length. */
3197 if (GET_CODE (op0
) == AND
3198 && GET_CODE (XEXP (op0
, 0)) == XOR
3199 && CONST_INT_P (XEXP (op0
, 1))
3200 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3203 rtx b
= XEXP (XEXP (op0
, 0), 1);
3204 rtx c
= XEXP (op0
, 1);
3205 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3206 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3207 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3208 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3210 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3211 else if (GET_CODE (op0
) == AND
3212 && GET_CODE (XEXP (op0
, 0)) == XOR
3213 && CONST_INT_P (XEXP (op0
, 1))
3214 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3216 rtx a
= XEXP (XEXP (op0
, 0), 0);
3218 rtx c
= XEXP (op0
, 1);
3219 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3220 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3221 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3222 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3225 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3226 comparison if STORE_FLAG_VALUE is 1. */
3227 if (STORE_FLAG_VALUE
== 1
3228 && trueop1
== const1_rtx
3229 && COMPARISON_P (op0
)
3230 && (reversed
= reversed_comparison (op0
, mode
)))
3233 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3234 is (lt foo (const_int 0)), so we can perform the above
3235 simplification if STORE_FLAG_VALUE is 1. */
3237 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3238 && STORE_FLAG_VALUE
== 1
3239 && trueop1
== const1_rtx
3240 && GET_CODE (op0
) == LSHIFTRT
3241 && CONST_INT_P (XEXP (op0
, 1))
3242 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3243 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3245 /* (xor (comparison foo bar) (const_int sign-bit))
3246 when STORE_FLAG_VALUE is the sign bit. */
3247 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3248 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3249 && trueop1
== const_true_rtx
3250 && COMPARISON_P (op0
)
3251 && (reversed
= reversed_comparison (op0
, int_mode
)))
3254 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3258 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3264 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3266 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3268 if (HWI_COMPUTABLE_MODE_P (mode
))
3270 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3271 HOST_WIDE_INT nzop1
;
3272 if (CONST_INT_P (trueop1
))
3274 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3275 /* If we are turning off bits already known off in OP0, we need
3277 if ((nzop0
& ~val1
) == 0)
3280 nzop1
= nonzero_bits (trueop1
, mode
);
3281 /* If we are clearing all the nonzero bits, the result is zero. */
3282 if ((nzop1
& nzop0
) == 0
3283 && !side_effects_p (op0
) && !side_effects_p (op1
))
3284 return CONST0_RTX (mode
);
3286 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3287 && GET_MODE_CLASS (mode
) != MODE_CC
)
3290 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3291 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3292 && ! side_effects_p (op0
)
3293 && GET_MODE_CLASS (mode
) != MODE_CC
)
3294 return CONST0_RTX (mode
);
3296 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3297 there are no nonzero bits of C outside of X's mode. */
3298 if ((GET_CODE (op0
) == SIGN_EXTEND
3299 || GET_CODE (op0
) == ZERO_EXTEND
)
3300 && CONST_INT_P (trueop1
)
3301 && HWI_COMPUTABLE_MODE_P (mode
)
3302 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3303 & UINTVAL (trueop1
)) == 0)
3305 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3306 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3307 gen_int_mode (INTVAL (trueop1
),
3309 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3312 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3313 we might be able to further simplify the AND with X and potentially
3314 remove the truncation altogether. */
3315 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3317 rtx x
= XEXP (op0
, 0);
3318 machine_mode xmode
= GET_MODE (x
);
3319 tem
= simplify_gen_binary (AND
, xmode
, x
,
3320 gen_int_mode (INTVAL (trueop1
), xmode
));
3321 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3324 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3325 if (GET_CODE (op0
) == IOR
3326 && CONST_INT_P (trueop1
)
3327 && CONST_INT_P (XEXP (op0
, 1)))
3329 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3330 return simplify_gen_binary (IOR
, mode
,
3331 simplify_gen_binary (AND
, mode
,
3332 XEXP (op0
, 0), op1
),
3333 gen_int_mode (tmp
, mode
));
3336 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3337 insn (and may simplify more). */
3338 if (GET_CODE (op0
) == XOR
3339 && rtx_equal_p (XEXP (op0
, 0), op1
)
3340 && ! side_effects_p (op1
))
3341 return simplify_gen_binary (AND
, mode
,
3342 simplify_gen_unary (NOT
, mode
,
3343 XEXP (op0
, 1), mode
),
3346 if (GET_CODE (op0
) == XOR
3347 && rtx_equal_p (XEXP (op0
, 1), op1
)
3348 && ! side_effects_p (op1
))
3349 return simplify_gen_binary (AND
, mode
,
3350 simplify_gen_unary (NOT
, mode
,
3351 XEXP (op0
, 0), mode
),
3354 /* Similarly for (~(A ^ B)) & A. */
3355 if (GET_CODE (op0
) == NOT
3356 && GET_CODE (XEXP (op0
, 0)) == XOR
3357 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3358 && ! side_effects_p (op1
))
3359 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3361 if (GET_CODE (op0
) == NOT
3362 && GET_CODE (XEXP (op0
, 0)) == XOR
3363 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3364 && ! side_effects_p (op1
))
3365 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3367 /* Convert (A | B) & A to A. */
3368 if (GET_CODE (op0
) == IOR
3369 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3370 || rtx_equal_p (XEXP (op0
, 1), op1
))
3371 && ! side_effects_p (XEXP (op0
, 0))
3372 && ! side_effects_p (XEXP (op0
, 1)))
3375 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3376 ((A & N) + B) & M -> (A + B) & M
3377 Similarly if (N & M) == 0,
3378 ((A | N) + B) & M -> (A + B) & M
3379 and for - instead of + and/or ^ instead of |.
3380 Also, if (N & M) == 0, then
3381 (A +- N) & M -> A & M. */
3382 if (CONST_INT_P (trueop1
)
3383 && HWI_COMPUTABLE_MODE_P (mode
)
3384 && ~UINTVAL (trueop1
)
3385 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3386 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3391 pmop
[0] = XEXP (op0
, 0);
3392 pmop
[1] = XEXP (op0
, 1);
3394 if (CONST_INT_P (pmop
[1])
3395 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3396 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3398 for (which
= 0; which
< 2; which
++)
3401 switch (GET_CODE (tem
))
3404 if (CONST_INT_P (XEXP (tem
, 1))
3405 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3406 == UINTVAL (trueop1
))
3407 pmop
[which
] = XEXP (tem
, 0);
3411 if (CONST_INT_P (XEXP (tem
, 1))
3412 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3413 pmop
[which
] = XEXP (tem
, 0);
3420 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3422 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3424 return simplify_gen_binary (code
, mode
, tem
, op1
);
3428 /* (and X (ior (not X) Y) -> (and X Y) */
3429 if (GET_CODE (op1
) == IOR
3430 && GET_CODE (XEXP (op1
, 0)) == NOT
3431 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3432 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3434 /* (and (ior (not X) Y) X) -> (and X Y) */
3435 if (GET_CODE (op0
) == IOR
3436 && GET_CODE (XEXP (op0
, 0)) == NOT
3437 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3438 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3440 /* (and X (ior Y (not X)) -> (and X Y) */
3441 if (GET_CODE (op1
) == IOR
3442 && GET_CODE (XEXP (op1
, 1)) == NOT
3443 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3444 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3446 /* (and (ior Y (not X)) X) -> (and X Y) */
3447 if (GET_CODE (op0
) == IOR
3448 && GET_CODE (XEXP (op0
, 1)) == NOT
3449 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3450 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3452 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3456 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3462 /* 0/x is 0 (or x&0 if x has side-effects). */
3463 if (trueop0
== CONST0_RTX (mode
)
3464 && !cfun
->can_throw_non_call_exceptions
)
3466 if (side_effects_p (op1
))
3467 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3471 if (trueop1
== CONST1_RTX (mode
))
3473 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3477 /* Convert divide by power of two into shift. */
3478 if (CONST_INT_P (trueop1
)
3479 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3480 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3481 gen_int_shift_amount (mode
, val
));
3485 /* Handle floating point and integers separately. */
3486 if (SCALAR_FLOAT_MODE_P (mode
))
3488 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3489 safe for modes with NaNs, since 0.0 / 0.0 will then be
3490 NaN rather than 0.0. Nor is it safe for modes with signed
3491 zeros, since dividing 0 by a negative number gives -0.0 */
3492 if (trueop0
== CONST0_RTX (mode
)
3493 && !HONOR_NANS (mode
)
3494 && !HONOR_SIGNED_ZEROS (mode
)
3495 && ! side_effects_p (op1
))
3498 if (trueop1
== CONST1_RTX (mode
)
3499 && !HONOR_SNANS (mode
))
3502 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3503 && trueop1
!= CONST0_RTX (mode
))
3505 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3508 if (real_equal (d1
, &dconstm1
)
3509 && !HONOR_SNANS (mode
))
3510 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3512 /* Change FP division by a constant into multiplication.
3513 Only do this with -freciprocal-math. */
3514 if (flag_reciprocal_math
3515 && !real_equal (d1
, &dconst0
))
3518 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3519 tem
= const_double_from_real_value (d
, mode
);
3520 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3524 else if (SCALAR_INT_MODE_P (mode
))
3526 /* 0/x is 0 (or x&0 if x has side-effects). */
3527 if (trueop0
== CONST0_RTX (mode
)
3528 && !cfun
->can_throw_non_call_exceptions
)
3530 if (side_effects_p (op1
))
3531 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3535 if (trueop1
== CONST1_RTX (mode
))
3537 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3542 if (trueop1
== constm1_rtx
)
3544 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3546 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3552 /* 0%x is 0 (or x&0 if x has side-effects). */
3553 if (trueop0
== CONST0_RTX (mode
))
3555 if (side_effects_p (op1
))
3556 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3559 /* x%1 is 0 (of x&0 if x has side-effects). */
3560 if (trueop1
== CONST1_RTX (mode
))
3562 if (side_effects_p (op0
))
3563 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3564 return CONST0_RTX (mode
);
3566 /* Implement modulus by power of two as AND. */
3567 if (CONST_INT_P (trueop1
)
3568 && exact_log2 (UINTVAL (trueop1
)) > 0)
3569 return simplify_gen_binary (AND
, mode
, op0
,
3570 gen_int_mode (UINTVAL (trueop1
) - 1,
3575 /* 0%x is 0 (or x&0 if x has side-effects). */
3576 if (trueop0
== CONST0_RTX (mode
))
3578 if (side_effects_p (op1
))
3579 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3582 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3583 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3585 if (side_effects_p (op0
))
3586 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3587 return CONST0_RTX (mode
);
3593 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3594 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3595 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3597 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3598 if (CONST_INT_P (trueop1
)
3599 && IN_RANGE (INTVAL (trueop1
),
3600 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3601 GET_MODE_UNIT_PRECISION (mode
) - 1))
3603 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3604 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3605 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3606 mode
, op0
, new_amount_rtx
);
3611 if (trueop1
== CONST0_RTX (mode
))
3613 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3615 /* Rotating ~0 always results in ~0. */
3616 if (CONST_INT_P (trueop0
)
3617 && HWI_COMPUTABLE_MODE_P (mode
)
3618 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3619 && ! side_effects_p (op1
))
3625 scalar constants c1, c2
3626 size (M2) > size (M1)
3627 c1 == size (M2) - size (M1)
3629 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3633 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3635 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3636 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3638 && CONST_INT_P (op1
)
3639 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3640 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3642 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3643 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3644 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3645 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3646 && subreg_lowpart_p (op0
))
3648 rtx tmp
= gen_int_shift_amount
3649 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3650 tmp
= simplify_gen_binary (code
, inner_mode
,
3651 XEXP (SUBREG_REG (op0
), 0),
3653 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3656 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3658 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3659 if (val
!= INTVAL (op1
))
3660 return simplify_gen_binary (code
, mode
, op0
,
3661 gen_int_shift_amount (mode
, val
));
3668 if (trueop1
== CONST0_RTX (mode
))
3670 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3672 goto canonicalize_shift
;
3675 if (trueop1
== CONST0_RTX (mode
))
3677 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3679 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3680 if (GET_CODE (op0
) == CLZ
3681 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3682 && CONST_INT_P (trueop1
)
3683 && STORE_FLAG_VALUE
== 1
3684 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3686 unsigned HOST_WIDE_INT zero_val
= 0;
3688 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3689 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3690 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3691 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3692 XEXP (op0
, 0), const0_rtx
);
3694 goto canonicalize_shift
;
3697 if (HWI_COMPUTABLE_MODE_P (mode
)
3698 && mode_signbit_p (mode
, trueop1
)
3699 && ! side_effects_p (op0
))
3701 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3703 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3709 if (HWI_COMPUTABLE_MODE_P (mode
)
3710 && CONST_INT_P (trueop1
)
3711 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3712 && ! side_effects_p (op0
))
3714 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3716 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3722 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3724 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3726 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3732 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3734 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3736 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3749 /* ??? There are simplifications that can be done. */
3753 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3754 return gen_vec_duplicate (mode
, op0
);
3755 if (valid_for_const_vector_p (mode
, op0
)
3756 && valid_for_const_vector_p (mode
, op1
))
3757 return gen_const_vec_series (mode
, op0
, op1
);
3761 if (!VECTOR_MODE_P (mode
))
3763 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3764 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3765 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3766 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3768 /* We can't reason about selections made at runtime. */
3769 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3772 if (vec_duplicate_p (trueop0
, &elt0
))
3775 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3776 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3779 /* Extract a scalar element from a nested VEC_SELECT expression
3780 (with optional nested VEC_CONCAT expression). Some targets
3781 (i386) extract scalar element from a vector using chain of
3782 nested VEC_SELECT expressions. When input operand is a memory
3783 operand, this operation can be simplified to a simple scalar
3784 load from an offseted memory address. */
3786 if (GET_CODE (trueop0
) == VEC_SELECT
3787 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3788 .is_constant (&n_elts
)))
3790 rtx op0
= XEXP (trueop0
, 0);
3791 rtx op1
= XEXP (trueop0
, 1);
3793 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3799 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3800 gcc_assert (i
< n_elts
);
3802 /* Select element, pointed by nested selector. */
3803 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3805 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3806 if (GET_CODE (op0
) == VEC_CONCAT
)
3808 rtx op00
= XEXP (op0
, 0);
3809 rtx op01
= XEXP (op0
, 1);
3811 machine_mode mode00
, mode01
;
3812 int n_elts00
, n_elts01
;
3814 mode00
= GET_MODE (op00
);
3815 mode01
= GET_MODE (op01
);
3817 /* Find out the number of elements of each operand.
3818 Since the concatenated result has a constant number
3819 of elements, the operands must too. */
3820 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3821 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3823 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3825 /* Select correct operand of VEC_CONCAT
3826 and adjust selector. */
3827 if (elem
< n_elts01
)
3838 vec
= rtvec_alloc (1);
3839 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3841 tmp
= gen_rtx_fmt_ee (code
, mode
,
3842 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3848 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3849 gcc_assert (GET_MODE_INNER (mode
)
3850 == GET_MODE_INNER (GET_MODE (trueop0
)));
3851 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3853 if (vec_duplicate_p (trueop0
, &elt0
))
3854 /* It doesn't matter which elements are selected by trueop1,
3855 because they are all the same. */
3856 return gen_vec_duplicate (mode
, elt0
);
3858 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3860 unsigned n_elts
= XVECLEN (trueop1
, 0);
3861 rtvec v
= rtvec_alloc (n_elts
);
3864 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3865 for (i
= 0; i
< n_elts
; i
++)
3867 rtx x
= XVECEXP (trueop1
, 0, i
);
3869 if (!CONST_INT_P (x
))
3872 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3876 return gen_rtx_CONST_VECTOR (mode
, v
);
3879 /* Recognize the identity. */
3880 if (GET_MODE (trueop0
) == mode
)
3882 bool maybe_ident
= true;
3883 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3885 rtx j
= XVECEXP (trueop1
, 0, i
);
3886 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3888 maybe_ident
= false;
3896 /* If we build {a,b} then permute it, build the result directly. */
3897 if (XVECLEN (trueop1
, 0) == 2
3898 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3899 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3900 && GET_CODE (trueop0
) == VEC_CONCAT
3901 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3902 && GET_MODE (XEXP (trueop0
, 0)) == mode
3903 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3904 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3906 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3907 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3910 gcc_assert (i0
< 4 && i1
< 4);
3911 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3912 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3914 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3917 if (XVECLEN (trueop1
, 0) == 2
3918 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3919 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3920 && GET_CODE (trueop0
) == VEC_CONCAT
3921 && GET_MODE (trueop0
) == mode
)
3923 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3924 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3927 gcc_assert (i0
< 2 && i1
< 2);
3928 subop0
= XEXP (trueop0
, i0
);
3929 subop1
= XEXP (trueop0
, i1
);
3931 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3934 /* If we select one half of a vec_concat, return that. */
3936 if (GET_CODE (trueop0
) == VEC_CONCAT
3937 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3939 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3941 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3943 rtx subop0
= XEXP (trueop0
, 0);
3944 rtx subop1
= XEXP (trueop0
, 1);
3945 machine_mode mode0
= GET_MODE (subop0
);
3946 machine_mode mode1
= GET_MODE (subop1
);
3947 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3948 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3950 bool success
= true;
3951 for (int i
= 1; i
< l0
; ++i
)
3953 rtx j
= XVECEXP (trueop1
, 0, i
);
3954 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3963 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3965 bool success
= true;
3966 for (int i
= 1; i
< l1
; ++i
)
3968 rtx j
= XVECEXP (trueop1
, 0, i
);
3969 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3981 if (XVECLEN (trueop1
, 0) == 1
3982 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3983 && GET_CODE (trueop0
) == VEC_CONCAT
)
3986 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3988 /* Try to find the element in the VEC_CONCAT. */
3989 while (GET_MODE (vec
) != mode
3990 && GET_CODE (vec
) == VEC_CONCAT
)
3992 poly_int64 vec_size
;
3994 if (CONST_INT_P (XEXP (vec
, 0)))
3996 /* vec_concat of two const_ints doesn't make sense with
3997 respect to modes. */
3998 if (CONST_INT_P (XEXP (vec
, 1)))
4001 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
4002 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
4005 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
4007 if (known_lt (offset
, vec_size
))
4008 vec
= XEXP (vec
, 0);
4009 else if (known_ge (offset
, vec_size
))
4012 vec
= XEXP (vec
, 1);
4016 vec
= avoid_constant_pool_reference (vec
);
4019 if (GET_MODE (vec
) == mode
)
4023 /* If we select elements in a vec_merge that all come from the same
4024 operand, select from that operand directly. */
4025 if (GET_CODE (op0
) == VEC_MERGE
)
4027 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
4028 if (CONST_INT_P (trueop02
))
4030 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
4031 bool all_operand0
= true;
4032 bool all_operand1
= true;
4033 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4035 rtx j
= XVECEXP (trueop1
, 0, i
);
4036 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
4037 all_operand1
= false;
4039 all_operand0
= false;
4041 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
4042 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
4043 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
4044 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
4048 /* If we have two nested selects that are inverses of each
4049 other, replace them with the source operand. */
4050 if (GET_CODE (trueop0
) == VEC_SELECT
4051 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4053 rtx op0_subop1
= XEXP (trueop0
, 1);
4054 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
4055 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
4057 /* Apply the outer ordering vector to the inner one. (The inner
4058 ordering vector is expressly permitted to be of a different
4059 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4060 then the two VEC_SELECTs cancel. */
4061 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
4063 rtx x
= XVECEXP (trueop1
, 0, i
);
4064 if (!CONST_INT_P (x
))
4066 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
4067 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
4070 return XEXP (trueop0
, 0);
4076 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
4077 ? GET_MODE (trueop0
)
4078 : GET_MODE_INNER (mode
));
4079 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
4080 ? GET_MODE (trueop1
)
4081 : GET_MODE_INNER (mode
));
4083 gcc_assert (VECTOR_MODE_P (mode
));
4084 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
4085 + GET_MODE_SIZE (op1_mode
),
4086 GET_MODE_SIZE (mode
)));
4088 if (VECTOR_MODE_P (op0_mode
))
4089 gcc_assert (GET_MODE_INNER (mode
)
4090 == GET_MODE_INNER (op0_mode
));
4092 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
4094 if (VECTOR_MODE_P (op1_mode
))
4095 gcc_assert (GET_MODE_INNER (mode
)
4096 == GET_MODE_INNER (op1_mode
));
4098 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
4100 unsigned int n_elts
, in_n_elts
;
4101 if ((GET_CODE (trueop0
) == CONST_VECTOR
4102 || CONST_SCALAR_INT_P (trueop0
)
4103 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
4104 && (GET_CODE (trueop1
) == CONST_VECTOR
4105 || CONST_SCALAR_INT_P (trueop1
)
4106 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
4107 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
4108 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
4110 rtvec v
= rtvec_alloc (n_elts
);
4112 for (i
= 0; i
< n_elts
; i
++)
4116 if (!VECTOR_MODE_P (op0_mode
))
4117 RTVEC_ELT (v
, i
) = trueop0
;
4119 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
4123 if (!VECTOR_MODE_P (op1_mode
))
4124 RTVEC_ELT (v
, i
) = trueop1
;
4126 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4131 return gen_rtx_CONST_VECTOR (mode
, v
);
4134 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4135 Restrict the transformation to avoid generating a VEC_SELECT with a
4136 mode unrelated to its operand. */
4137 if (GET_CODE (trueop0
) == VEC_SELECT
4138 && GET_CODE (trueop1
) == VEC_SELECT
4139 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4140 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4142 rtx par0
= XEXP (trueop0
, 1);
4143 rtx par1
= XEXP (trueop1
, 1);
4144 int len0
= XVECLEN (par0
, 0);
4145 int len1
= XVECLEN (par1
, 0);
4146 rtvec vec
= rtvec_alloc (len0
+ len1
);
4147 for (int i
= 0; i
< len0
; i
++)
4148 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4149 for (int i
= 0; i
< len1
; i
++)
4150 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4151 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4152 gen_rtx_PARALLEL (VOIDmode
, vec
));
4161 if (mode
== GET_MODE (op0
)
4162 && mode
== GET_MODE (op1
)
4163 && vec_duplicate_p (op0
, &elt0
)
4164 && vec_duplicate_p (op1
, &elt1
))
4166 /* Try applying the operator to ELT and see if that simplifies.
4167 We can duplicate the result if so.
4169 The reason we don't use simplify_gen_binary is that it isn't
4170 necessarily a win to convert things like:
4172 (plus:V (vec_duplicate:V (reg:S R1))
4173 (vec_duplicate:V (reg:S R2)))
4177 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4179 The first might be done entirely in vector registers while the
4180 second might need a move between register files. */
4181 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4184 return gen_vec_duplicate (mode
, tem
);
4190 /* Return true if binary operation OP distributes over addition in operand
4191 OPNO, with the other operand being held constant. OPNO counts from 1. */
4194 distributes_over_addition_p (rtx_code op
, int opno
)
4212 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4215 if (VECTOR_MODE_P (mode
)
4216 && code
!= VEC_CONCAT
4217 && GET_CODE (op0
) == CONST_VECTOR
4218 && GET_CODE (op1
) == CONST_VECTOR
)
4221 if (CONST_VECTOR_STEPPED_P (op0
)
4222 && CONST_VECTOR_STEPPED_P (op1
))
4223 /* We can operate directly on the encoding if:
4225 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4227 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4229 Addition and subtraction are the supported operators
4230 for which this is true. */
4231 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4232 else if (CONST_VECTOR_STEPPED_P (op0
))
4233 /* We can operate directly on stepped encodings if:
4237 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4239 which is true if (x -> x op c) distributes over addition. */
4240 step_ok_p
= distributes_over_addition_p (code
, 1);
4242 /* Similarly in reverse. */
4243 step_ok_p
= distributes_over_addition_p (code
, 2);
4244 rtx_vector_builder builder
;
4245 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4248 unsigned int count
= builder
.encoded_nelts ();
4249 for (unsigned int i
= 0; i
< count
; i
++)
4251 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4252 CONST_VECTOR_ELT (op0
, i
),
4253 CONST_VECTOR_ELT (op1
, i
));
4254 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4256 builder
.quick_push (x
);
4258 return builder
.build ();
4261 if (VECTOR_MODE_P (mode
)
4262 && code
== VEC_CONCAT
4263 && (CONST_SCALAR_INT_P (op0
)
4264 || CONST_FIXED_P (op0
)
4265 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4266 && (CONST_SCALAR_INT_P (op1
)
4267 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4268 || CONST_FIXED_P (op1
)))
4270 /* Both inputs have a constant number of elements, so the result
4272 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4273 rtvec v
= rtvec_alloc (n_elts
);
4275 gcc_assert (n_elts
>= 2);
4278 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4279 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4281 RTVEC_ELT (v
, 0) = op0
;
4282 RTVEC_ELT (v
, 1) = op1
;
4286 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4287 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4290 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4291 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4292 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4294 for (i
= 0; i
< op0_n_elts
; ++i
)
4295 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4296 for (i
= 0; i
< op1_n_elts
; ++i
)
4297 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4300 return gen_rtx_CONST_VECTOR (mode
, v
);
4303 if (SCALAR_FLOAT_MODE_P (mode
)
4304 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4305 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4306 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4317 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4319 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4321 for (i
= 0; i
< 4; i
++)
4338 real_from_target (&r
, tmp0
, mode
);
4339 return const_double_from_real_value (r
, mode
);
4343 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4344 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4347 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4348 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4350 if (HONOR_SNANS (mode
)
4351 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4352 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4355 real_convert (&f0
, mode
, opr0
);
4356 real_convert (&f1
, mode
, opr1
);
4359 && real_equal (&f1
, &dconst0
)
4360 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4363 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4364 && flag_trapping_math
4365 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4367 int s0
= REAL_VALUE_NEGATIVE (f0
);
4368 int s1
= REAL_VALUE_NEGATIVE (f1
);
4373 /* Inf + -Inf = NaN plus exception. */
4378 /* Inf - Inf = NaN plus exception. */
4383 /* Inf / Inf = NaN plus exception. */
4390 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4391 && flag_trapping_math
4392 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4393 || (REAL_VALUE_ISINF (f1
)
4394 && real_equal (&f0
, &dconst0
))))
4395 /* Inf * 0 = NaN plus exception. */
4398 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4400 real_convert (&result
, mode
, &value
);
4402 /* Don't constant fold this floating point operation if
4403 the result has overflowed and flag_trapping_math. */
4405 if (flag_trapping_math
4406 && MODE_HAS_INFINITIES (mode
)
4407 && REAL_VALUE_ISINF (result
)
4408 && !REAL_VALUE_ISINF (f0
)
4409 && !REAL_VALUE_ISINF (f1
))
4410 /* Overflow plus exception. */
4413 /* Don't constant fold this floating point operation if the
4414 result may dependent upon the run-time rounding mode and
4415 flag_rounding_math is set, or if GCC's software emulation
4416 is unable to accurately represent the result. */
4418 if ((flag_rounding_math
4419 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4420 && (inexact
|| !real_identical (&result
, &value
)))
4423 return const_double_from_real_value (result
, mode
);
4427 /* We can fold some multi-word operations. */
4428 scalar_int_mode int_mode
;
4429 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4430 && CONST_SCALAR_INT_P (op0
)
4431 && CONST_SCALAR_INT_P (op1
)
4432 && GET_MODE_PRECISION (int_mode
) <= MAX_BITSIZE_MODE_ANY_INT
)
4435 wi::overflow_type overflow
;
4436 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4437 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4439 #if TARGET_SUPPORTS_WIDE_INT == 0
4440 /* This assert keeps the simplification from producing a result
4441 that cannot be represented in a CONST_DOUBLE but a lot of
4442 upstream callers expect that this function never fails to
4443 simplify something and so you if you added this to the test
4444 above the code would die later anyway. If this assert
4445 happens, you just need to make the port support wide int. */
4446 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4451 result
= wi::sub (pop0
, pop1
);
4455 result
= wi::add (pop0
, pop1
);
4459 result
= wi::mul (pop0
, pop1
);
4463 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4469 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4475 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4481 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4487 result
= wi::bit_and (pop0
, pop1
);
4491 result
= wi::bit_or (pop0
, pop1
);
4495 result
= wi::bit_xor (pop0
, pop1
);
4499 result
= wi::smin (pop0
, pop1
);
4503 result
= wi::smax (pop0
, pop1
);
4507 result
= wi::umin (pop0
, pop1
);
4511 result
= wi::umax (pop0
, pop1
);
4518 wide_int wop1
= pop1
;
4519 if (SHIFT_COUNT_TRUNCATED
)
4520 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4521 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4527 result
= wi::lrshift (pop0
, wop1
);
4531 result
= wi::arshift (pop0
, wop1
);
4535 result
= wi::lshift (pop0
, wop1
);
4546 if (wi::neg_p (pop1
))
4552 result
= wi::lrotate (pop0
, pop1
);
4556 result
= wi::rrotate (pop0
, pop1
);
4567 return immed_wide_int_const (result
, int_mode
);
4570 /* Handle polynomial integers. */
4571 if (NUM_POLY_INT_COEFFS
> 1
4572 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4573 && poly_int_rtx_p (op0
)
4574 && poly_int_rtx_p (op1
))
4576 poly_wide_int result
;
4580 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4584 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4588 if (CONST_SCALAR_INT_P (op1
))
4589 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4595 if (CONST_SCALAR_INT_P (op1
))
4597 wide_int shift
= rtx_mode_t (op1
, mode
);
4598 if (SHIFT_COUNT_TRUNCATED
)
4599 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4600 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4602 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4609 if (!CONST_SCALAR_INT_P (op1
)
4610 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4611 rtx_mode_t (op1
, mode
), &result
))
4618 return immed_wide_int_const (result
, int_mode
);
4626 /* Return a positive integer if X should sort after Y. The value
4627 returned is 1 if and only if X and Y are both regs. */
4630 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4634 result
= (commutative_operand_precedence (y
)
4635 - commutative_operand_precedence (x
));
4637 return result
+ result
;
4639 /* Group together equal REGs to do more simplification. */
4640 if (REG_P (x
) && REG_P (y
))
4641 return REGNO (x
) > REGNO (y
);
4646 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4647 operands may be another PLUS or MINUS.
4649 Rather than test for specific case, we do this by a brute-force method
4650 and do all possible simplifications until no more changes occur. Then
4651 we rebuild the operation.
4653 May return NULL_RTX when no changes were made. */
4656 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4659 struct simplify_plus_minus_op_data
4666 int changed
, n_constants
, canonicalized
= 0;
4669 memset (ops
, 0, sizeof ops
);
4671 /* Set up the two operands and then expand them until nothing has been
4672 changed. If we run out of room in our array, give up; this should
4673 almost never happen. */
4678 ops
[1].neg
= (code
== MINUS
);
4685 for (i
= 0; i
< n_ops
; i
++)
4687 rtx this_op
= ops
[i
].op
;
4688 int this_neg
= ops
[i
].neg
;
4689 enum rtx_code this_code
= GET_CODE (this_op
);
4695 if (n_ops
== ARRAY_SIZE (ops
))
4698 ops
[n_ops
].op
= XEXP (this_op
, 1);
4699 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4702 ops
[i
].op
= XEXP (this_op
, 0);
4704 /* If this operand was negated then we will potentially
4705 canonicalize the expression. Similarly if we don't
4706 place the operands adjacent we're re-ordering the
4707 expression and thus might be performing a
4708 canonicalization. Ignore register re-ordering.
4709 ??? It might be better to shuffle the ops array here,
4710 but then (plus (plus (A, B), plus (C, D))) wouldn't
4711 be seen as non-canonical. */
4714 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4719 ops
[i
].op
= XEXP (this_op
, 0);
4720 ops
[i
].neg
= ! this_neg
;
4726 if (n_ops
!= ARRAY_SIZE (ops
)
4727 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4728 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4729 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4731 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4732 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4733 ops
[n_ops
].neg
= this_neg
;
4741 /* ~a -> (-a - 1) */
4742 if (n_ops
!= ARRAY_SIZE (ops
))
4744 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4745 ops
[n_ops
++].neg
= this_neg
;
4746 ops
[i
].op
= XEXP (this_op
, 0);
4747 ops
[i
].neg
= !this_neg
;
4753 CASE_CONST_SCALAR_INT
:
4754 case CONST_POLY_INT
:
4758 ops
[i
].op
= neg_poly_int_rtx (mode
, this_op
);
4772 if (n_constants
> 1)
4775 gcc_assert (n_ops
>= 2);
4777 /* If we only have two operands, we can avoid the loops. */
4780 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4783 /* Get the two operands. Be careful with the order, especially for
4784 the cases where code == MINUS. */
4785 if (ops
[0].neg
&& ops
[1].neg
)
4787 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4790 else if (ops
[0].neg
)
4801 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4804 /* Now simplify each pair of operands until nothing changes. */
4807 /* Insertion sort is good enough for a small array. */
4808 for (i
= 1; i
< n_ops
; i
++)
4810 struct simplify_plus_minus_op_data save
;
4814 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4817 /* Just swapping registers doesn't count as canonicalization. */
4823 ops
[j
+ 1] = ops
[j
];
4825 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4830 for (i
= n_ops
- 1; i
> 0; i
--)
4831 for (j
= i
- 1; j
>= 0; j
--)
4833 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4834 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4836 if (lhs
!= 0 && rhs
!= 0)
4838 enum rtx_code ncode
= PLUS
;
4844 std::swap (lhs
, rhs
);
4846 else if (swap_commutative_operands_p (lhs
, rhs
))
4847 std::swap (lhs
, rhs
);
4849 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4850 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4852 rtx tem_lhs
, tem_rhs
;
4854 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4855 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4856 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4859 if (tem
&& !CONSTANT_P (tem
))
4860 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4863 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4867 /* Reject "simplifications" that just wrap the two
4868 arguments in a CONST. Failure to do so can result
4869 in infinite recursion with simplify_binary_operation
4870 when it calls us to simplify CONST operations.
4871 Also, if we find such a simplification, don't try
4872 any more combinations with this rhs: We must have
4873 something like symbol+offset, ie. one of the
4874 trivial CONST expressions we handle later. */
4875 if (GET_CODE (tem
) == CONST
4876 && GET_CODE (XEXP (tem
, 0)) == ncode
4877 && XEXP (XEXP (tem
, 0), 0) == lhs
4878 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4881 if (GET_CODE (tem
) == NEG
)
4882 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4883 if (poly_int_rtx_p (tem
) && lneg
)
4884 tem
= neg_poly_int_rtx (mode
, tem
), lneg
= 0;
4888 ops
[j
].op
= NULL_RTX
;
4898 /* Pack all the operands to the lower-numbered entries. */
4899 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4908 /* If nothing changed, check that rematerialization of rtl instructions
4909 is still required. */
4912 /* Perform rematerialization if only all operands are registers and
4913 all operations are PLUS. */
4914 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4915 around rs6000 and how it uses the CA register. See PR67145. */
4916 for (i
= 0; i
< n_ops
; i
++)
4918 || !REG_P (ops
[i
].op
)
4919 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4920 && fixed_regs
[REGNO (ops
[i
].op
)]
4921 && !global_regs
[REGNO (ops
[i
].op
)]
4922 && ops
[i
].op
!= frame_pointer_rtx
4923 && ops
[i
].op
!= arg_pointer_rtx
4924 && ops
[i
].op
!= stack_pointer_rtx
))
4929 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4931 && CONST_INT_P (ops
[1].op
)
4932 && CONSTANT_P (ops
[0].op
)
4934 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4936 /* We suppressed creation of trivial CONST expressions in the
4937 combination loop to avoid recursion. Create one manually now.
4938 The combination loop should have ensured that there is exactly
4939 one CONST_INT, and the sort will have ensured that it is last
4940 in the array and that any other constant will be next-to-last. */
4943 && poly_int_rtx_p (ops
[n_ops
- 1].op
)
4944 && CONSTANT_P (ops
[n_ops
- 2].op
))
4946 rtx value
= ops
[n_ops
- 1].op
;
4947 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4948 value
= neg_poly_int_rtx (mode
, value
);
4949 if (CONST_INT_P (value
))
4951 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4957 /* Put a non-negated operand first, if possible. */
4959 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4962 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4971 /* Now make the result by performing the requested operations. */
4974 for (i
= 1; i
< n_ops
; i
++)
4975 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4976 mode
, result
, ops
[i
].op
);
4981 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4983 plus_minus_operand_p (const_rtx x
)
4985 return GET_CODE (x
) == PLUS
4986 || GET_CODE (x
) == MINUS
4987 || (GET_CODE (x
) == CONST
4988 && GET_CODE (XEXP (x
, 0)) == PLUS
4989 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4990 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4993 /* Like simplify_binary_operation except used for relational operators.
4994 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4995 not also be VOIDmode.
4997 CMP_MODE specifies in which mode the comparison is done in, so it is
4998 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4999 the operands or, if both are VOIDmode, the operands are compared in
5000 "infinite precision". */
5002 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
5003 machine_mode cmp_mode
, rtx op0
, rtx op1
)
5005 rtx tem
, trueop0
, trueop1
;
5007 if (cmp_mode
== VOIDmode
)
5008 cmp_mode
= GET_MODE (op0
);
5009 if (cmp_mode
== VOIDmode
)
5010 cmp_mode
= GET_MODE (op1
);
5012 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
5015 if (SCALAR_FLOAT_MODE_P (mode
))
5017 if (tem
== const0_rtx
)
5018 return CONST0_RTX (mode
);
5019 #ifdef FLOAT_STORE_FLAG_VALUE
5021 REAL_VALUE_TYPE val
;
5022 val
= FLOAT_STORE_FLAG_VALUE (mode
);
5023 return const_double_from_real_value (val
, mode
);
5029 if (VECTOR_MODE_P (mode
))
5031 if (tem
== const0_rtx
)
5032 return CONST0_RTX (mode
);
5033 #ifdef VECTOR_STORE_FLAG_VALUE
5035 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
5036 if (val
== NULL_RTX
)
5038 if (val
== const1_rtx
)
5039 return CONST1_RTX (mode
);
5041 return gen_const_vec_duplicate (mode
, val
);
5047 /* For vector comparison with scalar int result, it is unknown
5048 if the target means here a comparison into an integral bitmask,
5049 or comparison where all comparisons true mean const_true_rtx
5050 whole result, or where any comparisons true mean const_true_rtx
5051 whole result. For const0_rtx all the cases are the same. */
5052 if (VECTOR_MODE_P (cmp_mode
)
5053 && SCALAR_INT_MODE_P (mode
)
5054 && tem
== const_true_rtx
)
5060 /* For the following tests, ensure const0_rtx is op1. */
5061 if (swap_commutative_operands_p (op0
, op1
)
5062 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
5063 std::swap (op0
, op1
), code
= swap_condition (code
);
5065 /* If op0 is a compare, extract the comparison arguments from it. */
5066 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5067 return simplify_gen_relational (code
, mode
, VOIDmode
,
5068 XEXP (op0
, 0), XEXP (op0
, 1));
5070 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
5074 trueop0
= avoid_constant_pool_reference (op0
);
5075 trueop1
= avoid_constant_pool_reference (op1
);
5076 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
5080 /* This part of simplify_relational_operation is only used when CMP_MODE
5081 is not in class MODE_CC (i.e. it is a real comparison).
5083 MODE is the mode of the result, while CMP_MODE specifies in which
5084 mode the comparison is done in, so it is the mode of the operands. */
5087 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
5088 machine_mode cmp_mode
, rtx op0
, rtx op1
)
5090 enum rtx_code op0code
= GET_CODE (op0
);
5092 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
5094 /* If op0 is a comparison, extract the comparison arguments
5098 if (GET_MODE (op0
) == mode
)
5099 return simplify_rtx (op0
);
5101 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
5102 XEXP (op0
, 0), XEXP (op0
, 1));
5104 else if (code
== EQ
)
5106 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
5107 if (new_code
!= UNKNOWN
)
5108 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
5109 XEXP (op0
, 0), XEXP (op0
, 1));
5113 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5114 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5115 if ((code
== LTU
|| code
== GEU
)
5116 && GET_CODE (op0
) == PLUS
5117 && CONST_INT_P (XEXP (op0
, 1))
5118 && (rtx_equal_p (op1
, XEXP (op0
, 0))
5119 || rtx_equal_p (op1
, XEXP (op0
, 1)))
5120 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5121 && XEXP (op0
, 1) != const0_rtx
)
5124 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5125 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
5126 cmp_mode
, XEXP (op0
, 0), new_cmp
);
5129 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5130 transformed into (LTU a -C). */
5131 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
5132 && CONST_INT_P (XEXP (op0
, 1))
5133 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
5134 && XEXP (op0
, 1) != const0_rtx
)
5137 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5138 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
5139 XEXP (op0
, 0), new_cmp
);
5142 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5143 if ((code
== LTU
|| code
== GEU
)
5144 && GET_CODE (op0
) == PLUS
5145 && rtx_equal_p (op1
, XEXP (op0
, 1))
5146 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5147 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5148 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5149 copy_rtx (XEXP (op0
, 0)));
5151 if (op1
== const0_rtx
)
5153 /* Canonicalize (GTU x 0) as (NE x 0). */
5155 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5156 /* Canonicalize (LEU x 0) as (EQ x 0). */
5158 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5160 else if (op1
== const1_rtx
)
5165 /* Canonicalize (GE x 1) as (GT x 0). */
5166 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5169 /* Canonicalize (GEU x 1) as (NE x 0). */
5170 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5173 /* Canonicalize (LT x 1) as (LE x 0). */
5174 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5177 /* Canonicalize (LTU x 1) as (EQ x 0). */
5178 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5184 else if (op1
== constm1_rtx
)
5186 /* Canonicalize (LE x -1) as (LT x 0). */
5188 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5189 /* Canonicalize (GT x -1) as (GE x 0). */
5191 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5194 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5195 if ((code
== EQ
|| code
== NE
)
5196 && (op0code
== PLUS
|| op0code
== MINUS
)
5198 && CONSTANT_P (XEXP (op0
, 1))
5199 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5201 rtx x
= XEXP (op0
, 0);
5202 rtx c
= XEXP (op0
, 1);
5203 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5204 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5206 /* Detect an infinite recursive condition, where we oscillate at this
5207 simplification case between:
5208 A + B == C <---> C - B == A,
5209 where A, B, and C are all constants with non-simplifiable expressions,
5210 usually SYMBOL_REFs. */
5211 if (GET_CODE (tem
) == invcode
5213 && rtx_equal_p (c
, XEXP (tem
, 1)))
5216 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5219 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5220 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5221 scalar_int_mode int_mode
, int_cmp_mode
;
5223 && op1
== const0_rtx
5224 && is_int_mode (mode
, &int_mode
)
5225 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5226 /* ??? Work-around BImode bugs in the ia64 backend. */
5227 && int_mode
!= BImode
5228 && int_cmp_mode
!= BImode
5229 && nonzero_bits (op0
, int_cmp_mode
) == 1
5230 && STORE_FLAG_VALUE
== 1)
5231 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5232 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5233 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5235 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5236 if ((code
== EQ
|| code
== NE
)
5237 && op1
== const0_rtx
5239 return simplify_gen_relational (code
, mode
, cmp_mode
,
5240 XEXP (op0
, 0), XEXP (op0
, 1));
5242 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5243 if ((code
== EQ
|| code
== NE
)
5245 && rtx_equal_p (XEXP (op0
, 0), op1
)
5246 && !side_effects_p (XEXP (op0
, 0)))
5247 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5250 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5251 if ((code
== EQ
|| code
== NE
)
5253 && rtx_equal_p (XEXP (op0
, 1), op1
)
5254 && !side_effects_p (XEXP (op0
, 1)))
5255 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5258 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5259 if ((code
== EQ
|| code
== NE
)
5261 && CONST_SCALAR_INT_P (op1
)
5262 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5263 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5264 simplify_gen_binary (XOR
, cmp_mode
,
5265 XEXP (op0
, 1), op1
));
5267 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5268 constant folding if x/y is a constant. */
5269 if ((code
== EQ
|| code
== NE
)
5270 && (op0code
== AND
|| op0code
== IOR
)
5271 && !side_effects_p (op1
)
5272 && op1
!= CONST0_RTX (cmp_mode
))
5274 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5275 (eq/ne (and (not y) x) 0). */
5276 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5277 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5279 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5281 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5283 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5284 CONST0_RTX (cmp_mode
));
5287 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5288 (eq/ne (and (not x) y) 0). */
5289 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5290 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5292 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5294 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5296 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5297 CONST0_RTX (cmp_mode
));
5301 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5302 if ((code
== EQ
|| code
== NE
)
5303 && GET_CODE (op0
) == BSWAP
5304 && CONST_SCALAR_INT_P (op1
))
5305 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5306 simplify_gen_unary (BSWAP
, cmp_mode
,
5309 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5310 if ((code
== EQ
|| code
== NE
)
5311 && GET_CODE (op0
) == BSWAP
5312 && GET_CODE (op1
) == BSWAP
)
5313 return simplify_gen_relational (code
, mode
, cmp_mode
,
5314 XEXP (op0
, 0), XEXP (op1
, 0));
5316 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5322 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5323 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5324 XEXP (op0
, 0), const0_rtx
);
5329 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5330 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5331 XEXP (op0
, 0), const0_rtx
);
5350 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5351 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5352 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5353 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5354 For floating-point comparisons, assume that the operands were ordered. */
5357 comparison_result (enum rtx_code code
, int known_results
)
5363 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5366 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5370 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5373 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5377 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5380 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5383 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5385 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5388 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5390 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5393 return const_true_rtx
;
5401 /* Check if the given comparison (done in the given MODE) is actually
5402 a tautology or a contradiction. If the mode is VOIDmode, the
5403 comparison is done in "infinite precision". If no simplification
5404 is possible, this function returns zero. Otherwise, it returns
5405 either const_true_rtx or const0_rtx. */
5408 simplify_const_relational_operation (enum rtx_code code
,
5416 gcc_assert (mode
!= VOIDmode
5417 || (GET_MODE (op0
) == VOIDmode
5418 && GET_MODE (op1
) == VOIDmode
));
5420 /* If op0 is a compare, extract the comparison arguments from it. */
5421 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5423 op1
= XEXP (op0
, 1);
5424 op0
= XEXP (op0
, 0);
5426 if (GET_MODE (op0
) != VOIDmode
)
5427 mode
= GET_MODE (op0
);
5428 else if (GET_MODE (op1
) != VOIDmode
)
5429 mode
= GET_MODE (op1
);
5434 /* We can't simplify MODE_CC values since we don't know what the
5435 actual comparison is. */
5436 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5439 /* Make sure the constant is second. */
5440 if (swap_commutative_operands_p (op0
, op1
))
5442 std::swap (op0
, op1
);
5443 code
= swap_condition (code
);
5446 trueop0
= avoid_constant_pool_reference (op0
);
5447 trueop1
= avoid_constant_pool_reference (op1
);
5449 /* For integer comparisons of A and B maybe we can simplify A - B and can
5450 then simplify a comparison of that with zero. If A and B are both either
5451 a register or a CONST_INT, this can't help; testing for these cases will
5452 prevent infinite recursion here and speed things up.
5454 We can only do this for EQ and NE comparisons as otherwise we may
5455 lose or introduce overflow which we cannot disregard as undefined as
5456 we do not know the signedness of the operation on either the left or
5457 the right hand side of the comparison. */
5459 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5460 && (code
== EQ
|| code
== NE
)
5461 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5462 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5463 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5464 /* We cannot do this if tem is a nonzero address. */
5465 && ! nonzero_address_p (tem
))
5466 return simplify_const_relational_operation (signed_condition (code
),
5467 mode
, tem
, const0_rtx
);
5469 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5470 return const_true_rtx
;
5472 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5475 /* For modes without NaNs, if the two operands are equal, we know the
5476 result except if they have side-effects. Even with NaNs we know
5477 the result of unordered comparisons and, if signaling NaNs are
5478 irrelevant, also the result of LT/GT/LTGT. */
5479 if ((! HONOR_NANS (trueop0
)
5480 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5481 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5482 && ! HONOR_SNANS (trueop0
)))
5483 && rtx_equal_p (trueop0
, trueop1
)
5484 && ! side_effects_p (trueop0
))
5485 return comparison_result (code
, CMP_EQ
);
5487 /* If the operands are floating-point constants, see if we can fold
5489 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5490 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5491 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5493 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5494 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5496 /* Comparisons are unordered iff at least one of the values is NaN. */
5497 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5507 return const_true_rtx
;
5520 return comparison_result (code
,
5521 (real_equal (d0
, d1
) ? CMP_EQ
:
5522 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5525 /* Otherwise, see if the operands are both integers. */
5526 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5527 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
5529 /* It would be nice if we really had a mode here. However, the
5530 largest int representable on the target is as good as
5532 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5533 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5534 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5536 if (wi::eq_p (ptrueop0
, ptrueop1
))
5537 return comparison_result (code
, CMP_EQ
);
5540 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5541 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5542 return comparison_result (code
, cr
);
5546 /* Optimize comparisons with upper and lower bounds. */
5547 scalar_int_mode int_mode
;
5548 if (CONST_INT_P (trueop1
)
5549 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5550 && HWI_COMPUTABLE_MODE_P (int_mode
)
5551 && !side_effects_p (trueop0
))
5554 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5555 HOST_WIDE_INT val
= INTVAL (trueop1
);
5556 HOST_WIDE_INT mmin
, mmax
;
5566 /* Get a reduced range if the sign bit is zero. */
5567 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5574 rtx mmin_rtx
, mmax_rtx
;
5575 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5577 mmin
= INTVAL (mmin_rtx
);
5578 mmax
= INTVAL (mmax_rtx
);
5581 unsigned int sign_copies
5582 = num_sign_bit_copies (trueop0
, int_mode
);
5584 mmin
>>= (sign_copies
- 1);
5585 mmax
>>= (sign_copies
- 1);
5591 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5593 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5594 return const_true_rtx
;
5595 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5600 return const_true_rtx
;
5605 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5607 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5608 return const_true_rtx
;
5609 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5614 return const_true_rtx
;
5620 /* x == y is always false for y out of range. */
5621 if (val
< mmin
|| val
> mmax
)
5625 /* x > y is always false for y >= mmax, always true for y < mmin. */
5627 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5629 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5630 return const_true_rtx
;
5636 return const_true_rtx
;
5639 /* x < y is always false for y <= mmin, always true for y > mmax. */
5641 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5643 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5644 return const_true_rtx
;
5650 return const_true_rtx
;
5654 /* x != y is always true for y out of range. */
5655 if (val
< mmin
|| val
> mmax
)
5656 return const_true_rtx
;
5664 /* Optimize integer comparisons with zero. */
5665 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5666 && trueop1
== const0_rtx
5667 && !side_effects_p (trueop0
))
5669 /* Some addresses are known to be nonzero. We don't know
5670 their sign, but equality comparisons are known. */
5671 if (nonzero_address_p (trueop0
))
5673 if (code
== EQ
|| code
== LEU
)
5675 if (code
== NE
|| code
== GTU
)
5676 return const_true_rtx
;
5679 /* See if the first operand is an IOR with a constant. If so, we
5680 may be able to determine the result of this comparison. */
5681 if (GET_CODE (op0
) == IOR
)
5683 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5684 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5686 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5687 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5688 && (UINTVAL (inner_const
)
5699 return const_true_rtx
;
5703 return const_true_rtx
;
5717 /* Optimize comparison of ABS with zero. */
5718 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5719 && (GET_CODE (trueop0
) == ABS
5720 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5721 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5726 /* Optimize abs(x) < 0.0. */
5727 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5732 /* Optimize abs(x) >= 0.0. */
5733 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5734 return const_true_rtx
;
5738 /* Optimize ! (abs(x) < 0.0). */
5739 return const_true_rtx
;
5749 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5750 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5751 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5752 can be simplified to that or NULL_RTX if not.
5753 Assume X is compared against zero with CMP_CODE and the true
5754 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5757 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5759 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5762 /* Result on X == 0 and X !=0 respectively. */
5763 rtx on_zero
, on_nonzero
;
5767 on_nonzero
= false_val
;
5771 on_zero
= false_val
;
5772 on_nonzero
= true_val
;
5775 rtx_code op_code
= GET_CODE (on_nonzero
);
5776 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5777 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5778 || !CONST_INT_P (on_zero
))
5781 HOST_WIDE_INT op_val
;
5782 scalar_int_mode mode ATTRIBUTE_UNUSED
5783 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5784 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5785 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5786 && op_val
== INTVAL (on_zero
))
5792 /* Try to simplify X given that it appears within operand OP of a
5793 VEC_MERGE operation whose mask is MASK. X need not use the same
5794 vector mode as the VEC_MERGE, but it must have the same number of
5797 Return the simplified X on success, otherwise return NULL_RTX. */
5800 simplify_merge_mask (rtx x
, rtx mask
, int op
)
5802 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
5803 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
5804 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
5806 if (side_effects_p (XEXP (x
, 1 - op
)))
5809 return XEXP (x
, op
);
5812 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5813 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
5815 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5817 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
5818 GET_MODE (XEXP (x
, 0)));
5821 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5822 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5823 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5824 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
5826 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5827 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5830 if (COMPARISON_P (x
))
5831 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
5832 GET_MODE (XEXP (x
, 0)) != VOIDmode
5833 ? GET_MODE (XEXP (x
, 0))
5834 : GET_MODE (XEXP (x
, 1)),
5835 top0
? top0
: XEXP (x
, 0),
5836 top1
? top1
: XEXP (x
, 1));
5838 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
5839 top0
? top0
: XEXP (x
, 0),
5840 top1
? top1
: XEXP (x
, 1));
5843 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
5844 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5845 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5846 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5847 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
5848 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
5849 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
5851 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5852 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5853 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
5854 if (top0
|| top1
|| top2
)
5855 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
5856 GET_MODE (XEXP (x
, 0)),
5857 top0
? top0
: XEXP (x
, 0),
5858 top1
? top1
: XEXP (x
, 1),
5859 top2
? top2
: XEXP (x
, 2));
5865 /* Simplify CODE, an operation with result mode MODE and three operands,
5866 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5867 a constant. Return 0 if no simplifications is possible. */
5870 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5871 machine_mode op0_mode
, rtx op0
, rtx op1
,
5874 bool any_change
= false;
5876 scalar_int_mode int_mode
, int_op0_mode
;
5877 unsigned int n_elts
;
5882 /* Simplify negations around the multiplication. */
5883 /* -a * -b + c => a * b + c. */
5884 if (GET_CODE (op0
) == NEG
)
5886 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5888 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5890 else if (GET_CODE (op1
) == NEG
)
5892 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5894 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5897 /* Canonicalize the two multiplication operands. */
5898 /* a * -b + c => -b * a + c. */
5899 if (swap_commutative_operands_p (op0
, op1
))
5900 std::swap (op0
, op1
), any_change
= true;
5903 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5908 if (CONST_INT_P (op0
)
5909 && CONST_INT_P (op1
)
5910 && CONST_INT_P (op2
)
5911 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5912 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5913 && HWI_COMPUTABLE_MODE_P (int_mode
))
5915 /* Extracting a bit-field from a constant */
5916 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5917 HOST_WIDE_INT op1val
= INTVAL (op1
);
5918 HOST_WIDE_INT op2val
= INTVAL (op2
);
5919 if (!BITS_BIG_ENDIAN
)
5921 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5922 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5924 /* Not enough information to calculate the bit position. */
5927 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5929 /* First zero-extend. */
5930 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5931 /* If desired, propagate sign bit. */
5932 if (code
== SIGN_EXTRACT
5933 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5935 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5938 return gen_int_mode (val
, int_mode
);
5943 if (CONST_INT_P (op0
))
5944 return op0
!= const0_rtx
? op1
: op2
;
5946 /* Convert c ? a : a into "a". */
5947 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5950 /* Convert a != b ? a : b into "a". */
5951 if (GET_CODE (op0
) == NE
5952 && ! side_effects_p (op0
)
5953 && ! HONOR_NANS (mode
)
5954 && ! HONOR_SIGNED_ZEROS (mode
)
5955 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5956 && rtx_equal_p (XEXP (op0
, 1), op2
))
5957 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5958 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5961 /* Convert a == b ? a : b into "b". */
5962 if (GET_CODE (op0
) == EQ
5963 && ! side_effects_p (op0
)
5964 && ! HONOR_NANS (mode
)
5965 && ! HONOR_SIGNED_ZEROS (mode
)
5966 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5967 && rtx_equal_p (XEXP (op0
, 1), op2
))
5968 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5969 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5972 /* Convert (!c) != {0,...,0} ? a : b into
5973 c != {0,...,0} ? b : a for vector modes. */
5974 if (VECTOR_MODE_P (GET_MODE (op1
))
5975 && GET_CODE (op0
) == NE
5976 && GET_CODE (XEXP (op0
, 0)) == NOT
5977 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5979 rtx cv
= XEXP (op0
, 1);
5982 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5985 for (int i
= 0; i
< nunits
; ++i
)
5986 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5993 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5994 XEXP (XEXP (op0
, 0), 0),
5996 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
6001 /* Convert x == 0 ? N : clz (x) into clz (x) when
6002 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6003 Similarly for ctz (x). */
6004 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
6005 && XEXP (op0
, 1) == const0_rtx
)
6008 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
6014 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
6016 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
6017 ? GET_MODE (XEXP (op0
, 1))
6018 : GET_MODE (XEXP (op0
, 0)));
6021 /* Look for happy constants in op1 and op2. */
6022 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
6024 HOST_WIDE_INT t
= INTVAL (op1
);
6025 HOST_WIDE_INT f
= INTVAL (op2
);
6027 if (t
== STORE_FLAG_VALUE
&& f
== 0)
6028 code
= GET_CODE (op0
);
6029 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
6032 tmp
= reversed_comparison_code (op0
, NULL
);
6040 return simplify_gen_relational (code
, mode
, cmp_mode
,
6041 XEXP (op0
, 0), XEXP (op0
, 1));
6044 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
6045 cmp_mode
, XEXP (op0
, 0),
6048 /* See if any simplifications were possible. */
6051 if (CONST_INT_P (temp
))
6052 return temp
== const0_rtx
? op2
: op1
;
6054 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
6060 gcc_assert (GET_MODE (op0
) == mode
);
6061 gcc_assert (GET_MODE (op1
) == mode
);
6062 gcc_assert (VECTOR_MODE_P (mode
));
6063 trueop2
= avoid_constant_pool_reference (op2
);
6064 if (CONST_INT_P (trueop2
)
6065 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
6067 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
6068 unsigned HOST_WIDE_INT mask
;
6069 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
6072 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
6074 if (!(sel
& mask
) && !side_effects_p (op0
))
6076 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
6079 rtx trueop0
= avoid_constant_pool_reference (op0
);
6080 rtx trueop1
= avoid_constant_pool_reference (op1
);
6081 if (GET_CODE (trueop0
) == CONST_VECTOR
6082 && GET_CODE (trueop1
) == CONST_VECTOR
)
6084 rtvec v
= rtvec_alloc (n_elts
);
6087 for (i
= 0; i
< n_elts
; i
++)
6088 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
6089 ? CONST_VECTOR_ELT (trueop0
, i
)
6090 : CONST_VECTOR_ELT (trueop1
, i
));
6091 return gen_rtx_CONST_VECTOR (mode
, v
);
6094 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6095 if no element from a appears in the result. */
6096 if (GET_CODE (op0
) == VEC_MERGE
)
6098 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
6099 if (CONST_INT_P (tem
))
6101 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
6102 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
6103 return simplify_gen_ternary (code
, mode
, mode
,
6104 XEXP (op0
, 1), op1
, op2
);
6105 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
6106 return simplify_gen_ternary (code
, mode
, mode
,
6107 XEXP (op0
, 0), op1
, op2
);
6110 if (GET_CODE (op1
) == VEC_MERGE
)
6112 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
6113 if (CONST_INT_P (tem
))
6115 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
6116 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
6117 return simplify_gen_ternary (code
, mode
, mode
,
6118 op0
, XEXP (op1
, 1), op2
);
6119 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
6120 return simplify_gen_ternary (code
, mode
, mode
,
6121 op0
, XEXP (op1
, 0), op2
);
6125 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6127 if (GET_CODE (op0
) == VEC_DUPLICATE
6128 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
6129 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
6130 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
6132 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
6133 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
6135 if (XEXP (XEXP (op0
, 0), 0) == op1
6136 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
6140 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6142 with (vec_concat (X) (B)) if N == 1 or
6143 (vec_concat (A) (X)) if N == 2. */
6144 if (GET_CODE (op0
) == VEC_DUPLICATE
6145 && GET_CODE (op1
) == CONST_VECTOR
6146 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6147 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6148 && IN_RANGE (sel
, 1, 2))
6150 rtx newop0
= XEXP (op0
, 0);
6151 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6153 std::swap (newop0
, newop1
);
6154 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6156 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6157 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6158 Only applies for vectors of two elements. */
6159 if (GET_CODE (op0
) == VEC_DUPLICATE
6160 && GET_CODE (op1
) == VEC_CONCAT
6161 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6162 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6163 && IN_RANGE (sel
, 1, 2))
6165 rtx newop0
= XEXP (op0
, 0);
6166 rtx newop1
= XEXP (op1
, 2 - sel
);
6167 rtx otherop
= XEXP (op1
, sel
- 1);
6169 std::swap (newop0
, newop1
);
6170 /* Don't want to throw away the other part of the vec_concat if
6171 it has side-effects. */
6172 if (!side_effects_p (otherop
))
6173 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6178 (vec_merge:outer (vec_duplicate:outer x:inner)
6179 (subreg:outer y:inner 0)
6182 with (vec_concat:outer x:inner y:inner) if N == 1,
6183 or (vec_concat:outer y:inner x:inner) if N == 2.
6185 Implicitly, this means we have a paradoxical subreg, but such
6186 a check is cheap, so make it anyway.
6188 Only applies for vectors of two elements. */
6189 if (GET_CODE (op0
) == VEC_DUPLICATE
6190 && GET_CODE (op1
) == SUBREG
6191 && GET_MODE (op1
) == GET_MODE (op0
)
6192 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6193 && paradoxical_subreg_p (op1
)
6194 && subreg_lowpart_p (op1
)
6195 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6196 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6197 && IN_RANGE (sel
, 1, 2))
6199 rtx newop0
= XEXP (op0
, 0);
6200 rtx newop1
= SUBREG_REG (op1
);
6202 std::swap (newop0
, newop1
);
6203 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6206 /* Same as above but with switched operands:
6207 Replace (vec_merge:outer (subreg:outer x:inner 0)
6208 (vec_duplicate:outer y:inner)
6211 with (vec_concat:outer x:inner y:inner) if N == 1,
6212 or (vec_concat:outer y:inner x:inner) if N == 2. */
6213 if (GET_CODE (op1
) == VEC_DUPLICATE
6214 && GET_CODE (op0
) == SUBREG
6215 && GET_MODE (op0
) == GET_MODE (op1
)
6216 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6217 && paradoxical_subreg_p (op0
)
6218 && subreg_lowpart_p (op0
)
6219 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6220 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6221 && IN_RANGE (sel
, 1, 2))
6223 rtx newop0
= SUBREG_REG (op0
);
6224 rtx newop1
= XEXP (op1
, 0);
6226 std::swap (newop0
, newop1
);
6227 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6230 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6232 with (vec_concat x y) or (vec_concat y x) depending on value
6234 if (GET_CODE (op0
) == VEC_DUPLICATE
6235 && GET_CODE (op1
) == VEC_DUPLICATE
6236 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6237 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6238 && IN_RANGE (sel
, 1, 2))
6240 rtx newop0
= XEXP (op0
, 0);
6241 rtx newop1
= XEXP (op1
, 0);
6243 std::swap (newop0
, newop1
);
6245 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6249 if (rtx_equal_p (op0
, op1
)
6250 && !side_effects_p (op2
) && !side_effects_p (op1
))
6253 if (!side_effects_p (op2
))
6256 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6258 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6260 return simplify_gen_ternary (code
, mode
, mode
,
6262 top1
? top1
: op1
, op2
);
6274 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6275 starting at byte FIRST_BYTE. Return true on success and add the
6276 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6277 that the bytes follow target memory order. Leave BYTES unmodified
6280 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6281 BYTES before calling this function. */
6284 native_encode_rtx (machine_mode mode
, rtx x
, vec
<target_unit
> &bytes
,
6285 unsigned int first_byte
, unsigned int num_bytes
)
6287 /* Check the mode is sensible. */
6288 gcc_assert (GET_MODE (x
) == VOIDmode
6289 ? is_a
<scalar_int_mode
> (mode
)
6290 : mode
== GET_MODE (x
));
6292 if (GET_CODE (x
) == CONST_VECTOR
)
6294 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6295 is necessary. The only complication is that MODE_VECTOR_BOOL
6296 vectors can have several elements per byte. */
6297 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6298 GET_MODE_NUNITS (mode
));
6299 unsigned int elt
= first_byte
* BITS_PER_UNIT
/ elt_bits
;
6300 if (elt_bits
< BITS_PER_UNIT
)
6302 /* This is the only case in which elements can be smaller than
6304 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6305 for (unsigned int i
= 0; i
< num_bytes
; ++i
)
6307 target_unit value
= 0;
6308 for (unsigned int j
= 0; j
< BITS_PER_UNIT
; j
+= elt_bits
)
6310 value
|= (INTVAL (CONST_VECTOR_ELT (x
, elt
)) & 1) << j
;
6313 bytes
.quick_push (value
);
6318 unsigned int start
= bytes
.length ();
6319 unsigned int elt_bytes
= GET_MODE_UNIT_SIZE (mode
);
6320 /* Make FIRST_BYTE relative to ELT. */
6321 first_byte
%= elt_bytes
;
6322 while (num_bytes
> 0)
6324 /* Work out how many bytes we want from element ELT. */
6325 unsigned int chunk_bytes
= MIN (num_bytes
, elt_bytes
- first_byte
);
6326 if (!native_encode_rtx (GET_MODE_INNER (mode
),
6327 CONST_VECTOR_ELT (x
, elt
), bytes
,
6328 first_byte
, chunk_bytes
))
6330 bytes
.truncate (start
);
6335 num_bytes
-= chunk_bytes
;
6340 /* All subsequent cases are limited to scalars. */
6342 if (!is_a
<scalar_mode
> (mode
, &smode
))
6345 /* Make sure that the region is in range. */
6346 unsigned int end_byte
= first_byte
+ num_bytes
;
6347 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6348 gcc_assert (end_byte
<= mode_bytes
);
6350 if (CONST_SCALAR_INT_P (x
))
6352 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6353 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6354 position of each byte. */
6355 rtx_mode_t
value (x
, smode
);
6356 wide_int_ref
value_wi (value
);
6357 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6359 /* Always constant because the inputs are. */
6361 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6362 /* Operate directly on the encoding rather than using
6363 wi::extract_uhwi, so that we preserve the sign or zero
6364 extension for modes that are not a whole number of bits in
6365 size. (Zero extension is only used for the combination of
6366 innermode == BImode && STORE_FLAG_VALUE == 1). */
6367 unsigned int elt
= lsb
/ HOST_BITS_PER_WIDE_INT
;
6368 unsigned int shift
= lsb
% HOST_BITS_PER_WIDE_INT
;
6369 unsigned HOST_WIDE_INT uhwi
= value_wi
.elt (elt
);
6370 bytes
.quick_push (uhwi
>> shift
);
6375 if (CONST_DOUBLE_P (x
))
6377 /* real_to_target produces an array of integers in target memory order.
6378 All integers before the last one have 32 bits; the last one may
6379 have 32 bits or fewer, depending on whether the mode bitsize
6380 is divisible by 32. Each of these integers is then laid out
6381 in target memory as any other integer would be. */
6382 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6383 real_to_target (el32
, CONST_DOUBLE_REAL_VALUE (x
), smode
);
6385 /* The (maximum) number of target bytes per element of el32. */
6386 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6387 gcc_assert (bytes_per_el32
!= 0);
6389 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6391 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6393 unsigned int index
= byte
/ bytes_per_el32
;
6394 unsigned int subbyte
= byte
% bytes_per_el32
;
6395 unsigned int int_bytes
= MIN (bytes_per_el32
,
6396 mode_bytes
- index
* bytes_per_el32
);
6397 /* Always constant because the inputs are. */
6399 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6400 bytes
.quick_push ((unsigned long) el32
[index
] >> lsb
);
6405 if (GET_CODE (x
) == CONST_FIXED
)
6407 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6409 /* Always constant because the inputs are. */
6411 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6412 unsigned HOST_WIDE_INT piece
= CONST_FIXED_VALUE_LOW (x
);
6413 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
6415 lsb
-= HOST_BITS_PER_WIDE_INT
;
6416 piece
= CONST_FIXED_VALUE_HIGH (x
);
6418 bytes
.quick_push (piece
>> lsb
);
6426 /* Read a vector of mode MODE from the target memory image given by BYTES,
6427 starting at byte FIRST_BYTE. The vector is known to be encodable using
6428 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6429 and BYTES is known to have enough bytes to supply NPATTERNS *
6430 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6431 BITS_PER_UNIT bits and the bytes are in target memory order.
6433 Return the vector on success, otherwise return NULL_RTX. */
6436 native_decode_vector_rtx (machine_mode mode
, vec
<target_unit
> bytes
,
6437 unsigned int first_byte
, unsigned int npatterns
,
6438 unsigned int nelts_per_pattern
)
6440 rtx_vector_builder
builder (mode
, npatterns
, nelts_per_pattern
);
6442 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6443 GET_MODE_NUNITS (mode
));
6444 if (elt_bits
< BITS_PER_UNIT
)
6446 /* This is the only case in which elements can be smaller than a byte.
6447 Element 0 is always in the lsb of the containing byte. */
6448 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6449 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6451 unsigned int bit_index
= first_byte
* BITS_PER_UNIT
+ i
* elt_bits
;
6452 unsigned int byte_index
= bit_index
/ BITS_PER_UNIT
;
6453 unsigned int lsb
= bit_index
% BITS_PER_UNIT
;
6454 builder
.quick_push (bytes
[byte_index
] & (1 << lsb
)
6455 ? CONST1_RTX (BImode
)
6456 : CONST0_RTX (BImode
));
6461 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6463 rtx x
= native_decode_rtx (GET_MODE_INNER (mode
), bytes
, first_byte
);
6466 builder
.quick_push (x
);
6467 first_byte
+= elt_bits
/ BITS_PER_UNIT
;
6470 return builder
.build ();
6473 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6474 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
6475 bits and the bytes are in target memory order. The image has enough
6476 values to specify all bytes of MODE.
6478 Return the rtx on success, otherwise return NULL_RTX. */
6481 native_decode_rtx (machine_mode mode
, vec
<target_unit
> bytes
,
6482 unsigned int first_byte
)
6484 if (VECTOR_MODE_P (mode
))
6486 /* If we know at compile time how many elements there are,
6487 pull each element directly from BYTES. */
6489 if (GET_MODE_NUNITS (mode
).is_constant (&nelts
))
6490 return native_decode_vector_rtx (mode
, bytes
, first_byte
, nelts
, 1);
6494 scalar_int_mode imode
;
6495 if (is_a
<scalar_int_mode
> (mode
, &imode
)
6496 && GET_MODE_PRECISION (imode
) <= MAX_BITSIZE_MODE_ANY_INT
)
6498 /* Pull the bytes msb first, so that we can use simple
6499 shift-and-insert wide_int operations. */
6500 unsigned int size
= GET_MODE_SIZE (imode
);
6501 wide_int
result (wi::zero (GET_MODE_PRECISION (imode
)));
6502 for (unsigned int i
= 0; i
< size
; ++i
)
6504 unsigned int lsb
= (size
- i
- 1) * BITS_PER_UNIT
;
6505 /* Always constant because the inputs are. */
6506 unsigned int subbyte
6507 = subreg_size_offset_from_lsb (1, size
, lsb
).to_constant ();
6508 result
<<= BITS_PER_UNIT
;
6509 result
|= bytes
[first_byte
+ subbyte
];
6511 return immed_wide_int_const (result
, imode
);
6514 scalar_float_mode fmode
;
6515 if (is_a
<scalar_float_mode
> (mode
, &fmode
))
6517 /* We need to build an array of integers in target memory order.
6518 All integers before the last one have 32 bits; the last one may
6519 have 32 bits or fewer, depending on whether the mode bitsize
6520 is divisible by 32. */
6521 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6522 unsigned int num_el32
= CEIL (GET_MODE_BITSIZE (fmode
), 32);
6523 memset (el32
, 0, num_el32
* sizeof (long));
6525 /* The (maximum) number of target bytes per element of el32. */
6526 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6527 gcc_assert (bytes_per_el32
!= 0);
6529 unsigned int mode_bytes
= GET_MODE_SIZE (fmode
);
6530 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
6532 unsigned int index
= byte
/ bytes_per_el32
;
6533 unsigned int subbyte
= byte
% bytes_per_el32
;
6534 unsigned int int_bytes
= MIN (bytes_per_el32
,
6535 mode_bytes
- index
* bytes_per_el32
);
6536 /* Always constant because the inputs are. */
6538 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6539 el32
[index
] |= (unsigned long) bytes
[first_byte
+ byte
] << lsb
;
6542 real_from_target (&r
, el32
, fmode
);
6543 return const_double_from_real_value (r
, fmode
);
6546 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
6548 scalar_mode smode
= as_a
<scalar_mode
> (mode
);
6554 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6555 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
6557 /* Always constant because the inputs are. */
6559 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6560 unsigned HOST_WIDE_INT unit
= bytes
[first_byte
+ byte
];
6561 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
6562 f
.data
.high
|= unit
<< (lsb
- HOST_BITS_PER_WIDE_INT
);
6564 f
.data
.low
|= unit
<< lsb
;
6566 return CONST_FIXED_FROM_FIXED_VALUE (f
, mode
);
6572 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
6573 is to convert a runtime BYTE value into a constant one. */
6576 simplify_const_vector_byte_offset (rtx x
, poly_uint64 byte
)
6578 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6579 machine_mode mode
= GET_MODE (x
);
6580 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6581 GET_MODE_NUNITS (mode
));
6582 /* The number of bits needed to encode one element from each pattern. */
6583 unsigned int sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * elt_bits
;
6585 /* Identify the start point in terms of a sequence number and a byte offset
6586 within that sequence. */
6587 poly_uint64 first_sequence
;
6588 unsigned HOST_WIDE_INT subbit
;
6589 if (can_div_trunc_p (byte
* BITS_PER_UNIT
, sequence_bits
,
6590 &first_sequence
, &subbit
))
6592 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
6593 if (nelts_per_pattern
== 1)
6594 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
6596 byte
= subbit
/ BITS_PER_UNIT
;
6597 else if (nelts_per_pattern
== 2 && known_gt (first_sequence
, 0U))
6599 /* The subreg drops the first element from each pattern and
6600 only uses the second element. Find the first sequence
6601 that starts on a byte boundary. */
6602 subbit
+= least_common_multiple (sequence_bits
, BITS_PER_UNIT
);
6603 byte
= subbit
/ BITS_PER_UNIT
;
6609 /* Subroutine of simplify_subreg in which:
6611 - X is known to be a CONST_VECTOR
6612 - OUTERMODE is known to be a vector mode
6614 Try to handle the subreg by operating on the CONST_VECTOR encoding
6615 rather than on each individual element of the CONST_VECTOR.
6617 Return the simplified subreg on success, otherwise return NULL_RTX. */
6620 simplify_const_vector_subreg (machine_mode outermode
, rtx x
,
6621 machine_mode innermode
, unsigned int first_byte
)
6623 /* Paradoxical subregs of vectors have dubious semantics. */
6624 if (paradoxical_subreg_p (outermode
, innermode
))
6627 /* We can only preserve the semantics of a stepped pattern if the new
6628 vector element is the same as the original one. */
6629 if (CONST_VECTOR_STEPPED_P (x
)
6630 && GET_MODE_INNER (outermode
) != GET_MODE_INNER (innermode
))
6633 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
6634 unsigned int x_elt_bits
6635 = vector_element_size (GET_MODE_BITSIZE (innermode
),
6636 GET_MODE_NUNITS (innermode
));
6637 unsigned int out_elt_bits
6638 = vector_element_size (GET_MODE_BITSIZE (outermode
),
6639 GET_MODE_NUNITS (outermode
));
6641 /* The number of bits needed to encode one element from every pattern
6642 of the original vector. */
6643 unsigned int x_sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * x_elt_bits
;
6645 /* The number of bits needed to encode one element from every pattern
6647 unsigned int out_sequence_bits
6648 = least_common_multiple (x_sequence_bits
, out_elt_bits
);
6650 /* Work out the number of interleaved patterns in the output vector
6651 and the number of encoded elements per pattern. */
6652 unsigned int out_npatterns
= out_sequence_bits
/ out_elt_bits
;
6653 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
6655 /* The encoding scheme requires the number of elements to be a multiple
6656 of the number of patterns, so that each pattern appears at least once
6657 and so that the same number of elements appear from each pattern. */
6658 bool ok_p
= multiple_p (GET_MODE_NUNITS (outermode
), out_npatterns
);
6659 unsigned int const_nunits
;
6660 if (GET_MODE_NUNITS (outermode
).is_constant (&const_nunits
)
6661 && (!ok_p
|| out_npatterns
* nelts_per_pattern
> const_nunits
))
6663 /* Either the encoding is invalid, or applying it would give us
6664 more elements than we need. Just encode each element directly. */
6665 out_npatterns
= const_nunits
;
6666 nelts_per_pattern
= 1;
6671 /* Get enough bytes of X to form the new encoding. */
6672 unsigned int buffer_bits
= out_npatterns
* nelts_per_pattern
* out_elt_bits
;
6673 unsigned int buffer_bytes
= CEIL (buffer_bits
, BITS_PER_UNIT
);
6674 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
6675 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, buffer_bytes
))
6678 /* Reencode the bytes as OUTERMODE. */
6679 return native_decode_vector_rtx (outermode
, buffer
, 0, out_npatterns
,
6683 /* Try to simplify a subreg of a constant by encoding the subreg region
6684 as a sequence of target bytes and reading them back in the new mode.
6685 Return the new value on success, otherwise return null.
6687 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
6688 and byte offset FIRST_BYTE. */
6691 simplify_immed_subreg (fixed_size_mode outermode
, rtx x
,
6692 machine_mode innermode
, unsigned int first_byte
)
6694 unsigned int buffer_bytes
= GET_MODE_SIZE (outermode
);
6695 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
6697 /* Some ports misuse CCmode. */
6698 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (x
))
6701 /* Paradoxical subregs read undefined values for bytes outside of the
6702 inner value. However, we have traditionally always sign-extended
6703 integer constants and zero-extended others. */
6704 unsigned int inner_bytes
= buffer_bytes
;
6705 if (paradoxical_subreg_p (outermode
, innermode
))
6707 if (!GET_MODE_SIZE (innermode
).is_constant (&inner_bytes
))
6710 target_unit filler
= 0;
6711 if (CONST_SCALAR_INT_P (x
) && wi::neg_p (rtx_mode_t (x
, innermode
)))
6714 /* Add any leading bytes due to big-endian layout. The number of
6715 bytes must be constant because both modes have constant size. */
6716 unsigned int leading_bytes
6717 = -byte_lowpart_offset (outermode
, innermode
).to_constant ();
6718 for (unsigned int i
= 0; i
< leading_bytes
; ++i
)
6719 buffer
.quick_push (filler
);
6721 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
6724 /* Add any trailing bytes due to little-endian layout. */
6725 while (buffer
.length () < buffer_bytes
)
6726 buffer
.quick_push (filler
);
6730 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
6733 return native_decode_rtx (outermode
, buffer
, 0);
6736 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6737 Return 0 if no simplifications are possible. */
6739 simplify_subreg (machine_mode outermode
, rtx op
,
6740 machine_mode innermode
, poly_uint64 byte
)
6742 /* Little bit of sanity checking. */
6743 gcc_assert (innermode
!= VOIDmode
);
6744 gcc_assert (outermode
!= VOIDmode
);
6745 gcc_assert (innermode
!= BLKmode
);
6746 gcc_assert (outermode
!= BLKmode
);
6748 gcc_assert (GET_MODE (op
) == innermode
6749 || GET_MODE (op
) == VOIDmode
);
6751 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
6752 if (!multiple_p (byte
, outersize
))
6755 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
6756 if (maybe_ge (byte
, innersize
))
6759 if (outermode
== innermode
&& known_eq (byte
, 0U))
6762 if (GET_CODE (op
) == CONST_VECTOR
)
6763 byte
= simplify_const_vector_byte_offset (op
, byte
);
6765 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
6769 if (VECTOR_MODE_P (outermode
)
6770 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
6771 && vec_duplicate_p (op
, &elt
))
6772 return gen_vec_duplicate (outermode
, elt
);
6774 if (outermode
== GET_MODE_INNER (innermode
)
6775 && vec_duplicate_p (op
, &elt
))
6779 if (CONST_SCALAR_INT_P (op
)
6780 || CONST_DOUBLE_AS_FLOAT_P (op
)
6781 || CONST_FIXED_P (op
)
6782 || GET_CODE (op
) == CONST_VECTOR
)
6784 unsigned HOST_WIDE_INT cbyte
;
6785 if (byte
.is_constant (&cbyte
))
6787 if (GET_CODE (op
) == CONST_VECTOR
&& VECTOR_MODE_P (outermode
))
6789 rtx tmp
= simplify_const_vector_subreg (outermode
, op
,
6795 fixed_size_mode fs_outermode
;
6796 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
))
6797 return simplify_immed_subreg (fs_outermode
, op
, innermode
, cbyte
);
6801 /* Changing mode twice with SUBREG => just change it once,
6802 or not at all if changing back op starting mode. */
6803 if (GET_CODE (op
) == SUBREG
)
6805 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
6806 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
6809 if (outermode
== innermostmode
6810 && known_eq (byte
, 0U)
6811 && known_eq (SUBREG_BYTE (op
), 0))
6812 return SUBREG_REG (op
);
6814 /* Work out the memory offset of the final OUTERMODE value relative
6815 to the inner value of OP. */
6816 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
6818 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
6819 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
6821 /* See whether resulting subreg will be paradoxical. */
6822 if (!paradoxical_subreg_p (outermode
, innermostmode
))
6824 /* Bail out in case resulting subreg would be incorrect. */
6825 if (maybe_lt (final_offset
, 0)
6826 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
6827 || !multiple_p (final_offset
, outersize
))
6832 poly_int64 required_offset
= subreg_memory_offset (outermode
,
6834 if (maybe_ne (final_offset
, required_offset
))
6836 /* Paradoxical subregs always have byte offset 0. */
6840 /* Recurse for further possible simplifications. */
6841 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
6845 if (validate_subreg (outermode
, innermostmode
,
6846 SUBREG_REG (op
), final_offset
))
6848 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
6849 if (SUBREG_PROMOTED_VAR_P (op
)
6850 && SUBREG_PROMOTED_SIGN (op
) >= 0
6851 && GET_MODE_CLASS (outermode
) == MODE_INT
6852 && known_ge (outersize
, innersize
)
6853 && known_le (outersize
, innermostsize
)
6854 && subreg_lowpart_p (newx
))
6856 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6857 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
6864 /* SUBREG of a hard register => just change the register number
6865 and/or mode. If the hard register is not valid in that mode,
6866 suppress this simplification. If the hard register is the stack,
6867 frame, or argument pointer, leave this as a SUBREG. */
6869 if (REG_P (op
) && HARD_REGISTER_P (op
))
6871 unsigned int regno
, final_regno
;
6874 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
6875 if (HARD_REGISTER_NUM_P (final_regno
))
6877 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
6878 subreg_memory_offset (outermode
,
6881 /* Propagate original regno. We don't have any way to specify
6882 the offset inside original regno, so do so only for lowpart.
6883 The information is used only by alias analysis that cannot
6884 grog partial register anyway. */
6886 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
6887 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
6892 /* If we have a SUBREG of a register that we are replacing and we are
6893 replacing it with a MEM, make a new MEM and try replacing the
6894 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6895 or if we would be widening it. */
6898 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
6899 /* Allow splitting of volatile memory references in case we don't
6900 have instruction to move the whole thing. */
6901 && (! MEM_VOLATILE_P (op
)
6902 || ! have_insn_for (SET
, innermode
))
6903 && known_le (outersize
, innersize
))
6904 return adjust_address_nv (op
, outermode
, byte
);
6906 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6908 if (GET_CODE (op
) == CONCAT
6909 || GET_CODE (op
) == VEC_CONCAT
)
6911 poly_uint64 final_offset
;
6914 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
6915 if (part_mode
== VOIDmode
)
6916 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6917 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
6918 if (known_lt (byte
, part_size
))
6920 part
= XEXP (op
, 0);
6921 final_offset
= byte
;
6923 else if (known_ge (byte
, part_size
))
6925 part
= XEXP (op
, 1);
6926 final_offset
= byte
- part_size
;
6931 if (maybe_gt (final_offset
+ outersize
, part_size
))
6934 part_mode
= GET_MODE (part
);
6935 if (part_mode
== VOIDmode
)
6936 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6937 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
6940 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
6941 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
6946 (subreg (vec_merge (X)
6948 (const_int ((1 << N) | M)))
6949 (N * sizeof (outermode)))
6951 (subreg (X) (N * sizeof (outermode)))
6954 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
6955 && idx
< HOST_BITS_PER_WIDE_INT
6956 && GET_CODE (op
) == VEC_MERGE
6957 && GET_MODE_INNER (innermode
) == outermode
6958 && CONST_INT_P (XEXP (op
, 2))
6959 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
6960 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
6962 /* A SUBREG resulting from a zero extension may fold to zero if
6963 it extracts higher bits that the ZERO_EXTEND's source bits. */
6964 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
6966 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
6967 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
6968 return CONST0_RTX (outermode
);
6971 scalar_int_mode int_outermode
, int_innermode
;
6972 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
6973 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
6974 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
6976 /* Handle polynomial integers. The upper bits of a paradoxical
6977 subreg are undefined, so this is safe regardless of whether
6978 we're truncating or extending. */
6979 if (CONST_POLY_INT_P (op
))
6982 = poly_wide_int::from (const_poly_int_value (op
),
6983 GET_MODE_PRECISION (int_outermode
),
6985 return immed_wide_int_const (val
, int_outermode
);
6988 if (GET_MODE_PRECISION (int_outermode
)
6989 < GET_MODE_PRECISION (int_innermode
))
6991 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
6997 /* If OP is a vector comparison and the subreg is not changing the
6998 number of elements or the size of the elements, change the result
6999 of the comparison to the new mode. */
7000 if (COMPARISON_P (op
)
7001 && VECTOR_MODE_P (outermode
)
7002 && VECTOR_MODE_P (innermode
)
7003 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
7004 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
7005 GET_MODE_UNIT_SIZE (innermode
)))
7006 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
7007 XEXP (op
, 0), XEXP (op
, 1));
7011 /* Make a SUBREG operation or equivalent if it folds. */
7014 simplify_gen_subreg (machine_mode outermode
, rtx op
,
7015 machine_mode innermode
, poly_uint64 byte
)
7019 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
7023 if (GET_CODE (op
) == SUBREG
7024 || GET_CODE (op
) == CONCAT
7025 || GET_MODE (op
) == VOIDmode
)
7028 if (validate_subreg (outermode
, innermode
, op
, byte
))
7029 return gen_rtx_SUBREG (outermode
, op
, byte
);
7034 /* Generates a subreg to get the least significant part of EXPR (in mode
7035 INNER_MODE) to OUTER_MODE. */
7038 lowpart_subreg (machine_mode outer_mode
, rtx expr
,
7039 machine_mode inner_mode
)
7041 return simplify_gen_subreg (outer_mode
, expr
, inner_mode
,
7042 subreg_lowpart_offset (outer_mode
, inner_mode
));
7045 /* Simplify X, an rtx expression.
7047 Return the simplified expression or NULL if no simplifications
7050 This is the preferred entry point into the simplification routines;
7051 however, we still allow passes to call the more specific routines.
7053 Right now GCC has three (yes, three) major bodies of RTL simplification
7054 code that need to be unified.
7056 1. fold_rtx in cse.c. This code uses various CSE specific
7057 information to aid in RTL simplification.
7059 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7060 it uses combine specific information to aid in RTL
7063 3. The routines in this file.
7066 Long term we want to only have one body of simplification code; to
7067 get to that state I recommend the following steps:
7069 1. Pour over fold_rtx & simplify_rtx and move any simplifications
7070 which are not pass dependent state into these routines.
7072 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7073 use this routine whenever possible.
7075 3. Allow for pass dependent state to be provided to these
7076 routines and add simplifications based on the pass dependent
7077 state. Remove code from cse.c & combine.c that becomes
7080 It will take time, but ultimately the compiler will be easier to
7081 maintain and improve. It's totally silly that when we add a
7082 simplification that it needs to be added to 4 places (3 for RTL
7083 simplification and 1 for tree simplification. */
7086 simplify_rtx (const_rtx x
)
7088 const enum rtx_code code
= GET_CODE (x
);
7089 const machine_mode mode
= GET_MODE (x
);
7091 switch (GET_RTX_CLASS (code
))
7094 return simplify_unary_operation (code
, mode
,
7095 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
7096 case RTX_COMM_ARITH
:
7097 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
7098 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
7103 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
7106 case RTX_BITFIELD_OPS
:
7107 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
7108 XEXP (x
, 0), XEXP (x
, 1),
7112 case RTX_COMM_COMPARE
:
7113 return simplify_relational_operation (code
, mode
,
7114 ((GET_MODE (XEXP (x
, 0))
7116 ? GET_MODE (XEXP (x
, 0))
7117 : GET_MODE (XEXP (x
, 1))),
7123 return simplify_subreg (mode
, SUBREG_REG (x
),
7124 GET_MODE (SUBREG_REG (x
)),
7131 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7132 if (GET_CODE (XEXP (x
, 0)) == HIGH
7133 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
7146 namespace selftest
{
7148 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7151 make_test_reg (machine_mode mode
)
7153 static int test_reg_num
= LAST_VIRTUAL_REGISTER
+ 1;
7155 return gen_rtx_REG (mode
, test_reg_num
++);
7158 /* Test vector simplifications involving VEC_DUPLICATE in which the
7159 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7160 register that holds one element of MODE. */
7163 test_vector_ops_duplicate (machine_mode mode
, rtx scalar_reg
)
7165 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7166 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7167 poly_uint64 nunits
= GET_MODE_NUNITS (mode
);
7168 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
7170 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7171 rtx not_scalar_reg
= gen_rtx_NOT (inner_mode
, scalar_reg
);
7172 rtx duplicate_not
= gen_rtx_VEC_DUPLICATE (mode
, not_scalar_reg
);
7173 ASSERT_RTX_EQ (duplicate
,
7174 simplify_unary_operation (NOT
, mode
,
7175 duplicate_not
, mode
));
7177 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7178 rtx duplicate_neg
= gen_rtx_VEC_DUPLICATE (mode
, neg_scalar_reg
);
7179 ASSERT_RTX_EQ (duplicate
,
7180 simplify_unary_operation (NEG
, mode
,
7181 duplicate_neg
, mode
));
7183 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7184 ASSERT_RTX_EQ (duplicate
,
7185 simplify_binary_operation (PLUS
, mode
, duplicate
,
7186 CONST0_RTX (mode
)));
7188 ASSERT_RTX_EQ (duplicate
,
7189 simplify_binary_operation (MINUS
, mode
, duplicate
,
7190 CONST0_RTX (mode
)));
7192 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode
),
7193 simplify_binary_operation (MINUS
, mode
, duplicate
,
7197 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7198 rtx zero_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, const0_rtx
));
7199 ASSERT_RTX_PTR_EQ (scalar_reg
,
7200 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7201 duplicate
, zero_par
));
7203 unsigned HOST_WIDE_INT const_nunits
;
7204 if (nunits
.is_constant (&const_nunits
))
7206 /* And again with the final element. */
7207 rtx last_index
= gen_int_mode (const_nunits
- 1, word_mode
);
7208 rtx last_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, last_index
));
7209 ASSERT_RTX_PTR_EQ (scalar_reg
,
7210 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7211 duplicate
, last_par
));
7213 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7214 rtx vector_reg
= make_test_reg (mode
);
7215 for (unsigned HOST_WIDE_INT i
= 0; i
< const_nunits
; i
++)
7217 if (i
>= HOST_BITS_PER_WIDE_INT
)
7219 rtx mask
= GEN_INT ((HOST_WIDE_INT_1U
<< i
) | (i
+ 1));
7220 rtx vm
= gen_rtx_VEC_MERGE (mode
, duplicate
, vector_reg
, mask
);
7221 poly_uint64 offset
= i
* GET_MODE_SIZE (inner_mode
);
7222 ASSERT_RTX_EQ (scalar_reg
,
7223 simplify_gen_subreg (inner_mode
, vm
,
7228 /* Test a scalar subreg of a VEC_DUPLICATE. */
7229 poly_uint64 offset
= subreg_lowpart_offset (inner_mode
, mode
);
7230 ASSERT_RTX_EQ (scalar_reg
,
7231 simplify_gen_subreg (inner_mode
, duplicate
,
7234 machine_mode narrower_mode
;
7235 if (maybe_ne (nunits
, 2U)
7236 && multiple_p (nunits
, 2)
7237 && mode_for_vector (inner_mode
, 2).exists (&narrower_mode
)
7238 && VECTOR_MODE_P (narrower_mode
))
7240 /* Test VEC_DUPLICATE of a vector. */
7241 rtx_vector_builder
nbuilder (narrower_mode
, 2, 1);
7242 nbuilder
.quick_push (const0_rtx
);
7243 nbuilder
.quick_push (const1_rtx
);
7244 rtx_vector_builder
builder (mode
, 2, 1);
7245 builder
.quick_push (const0_rtx
);
7246 builder
.quick_push (const1_rtx
);
7247 ASSERT_RTX_EQ (builder
.build (),
7248 simplify_unary_operation (VEC_DUPLICATE
, mode
,
7252 /* Test VEC_SELECT of a vector. */
7254 = gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, const1_rtx
, const0_rtx
));
7255 rtx narrower_duplicate
7256 = gen_rtx_VEC_DUPLICATE (narrower_mode
, scalar_reg
);
7257 ASSERT_RTX_EQ (narrower_duplicate
,
7258 simplify_binary_operation (VEC_SELECT
, narrower_mode
,
7259 duplicate
, vec_par
));
7261 /* Test a vector subreg of a VEC_DUPLICATE. */
7262 poly_uint64 offset
= subreg_lowpart_offset (narrower_mode
, mode
);
7263 ASSERT_RTX_EQ (narrower_duplicate
,
7264 simplify_gen_subreg (narrower_mode
, duplicate
,
7269 /* Test vector simplifications involving VEC_SERIES in which the
7270 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7271 register that holds one element of MODE. */
7274 test_vector_ops_series (machine_mode mode
, rtx scalar_reg
)
7276 /* Test unary cases with VEC_SERIES arguments. */
7277 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7278 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7279 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7280 rtx series_0_r
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, scalar_reg
);
7281 rtx series_0_nr
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, neg_scalar_reg
);
7282 rtx series_nr_1
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
, const1_rtx
);
7283 rtx series_r_m1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, constm1_rtx
);
7284 rtx series_r_r
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, scalar_reg
);
7285 rtx series_nr_nr
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
,
7287 ASSERT_RTX_EQ (series_0_r
,
7288 simplify_unary_operation (NEG
, mode
, series_0_nr
, mode
));
7289 ASSERT_RTX_EQ (series_r_m1
,
7290 simplify_unary_operation (NEG
, mode
, series_nr_1
, mode
));
7291 ASSERT_RTX_EQ (series_r_r
,
7292 simplify_unary_operation (NEG
, mode
, series_nr_nr
, mode
));
7294 /* Test that a VEC_SERIES with a zero step is simplified away. */
7295 ASSERT_RTX_EQ (duplicate
,
7296 simplify_binary_operation (VEC_SERIES
, mode
,
7297 scalar_reg
, const0_rtx
));
7299 /* Test PLUS and MINUS with VEC_SERIES. */
7300 rtx series_0_1
= gen_const_vec_series (mode
, const0_rtx
, const1_rtx
);
7301 rtx series_0_m1
= gen_const_vec_series (mode
, const0_rtx
, constm1_rtx
);
7302 rtx series_r_1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, const1_rtx
);
7303 ASSERT_RTX_EQ (series_r_r
,
7304 simplify_binary_operation (PLUS
, mode
, series_0_r
,
7306 ASSERT_RTX_EQ (series_r_1
,
7307 simplify_binary_operation (PLUS
, mode
, duplicate
,
7309 ASSERT_RTX_EQ (series_r_m1
,
7310 simplify_binary_operation (PLUS
, mode
, duplicate
,
7312 ASSERT_RTX_EQ (series_0_r
,
7313 simplify_binary_operation (MINUS
, mode
, series_r_r
,
7315 ASSERT_RTX_EQ (series_r_m1
,
7316 simplify_binary_operation (MINUS
, mode
, duplicate
,
7318 ASSERT_RTX_EQ (series_r_1
,
7319 simplify_binary_operation (MINUS
, mode
, duplicate
,
7321 ASSERT_RTX_EQ (series_0_m1
,
7322 simplify_binary_operation (VEC_SERIES
, mode
, const0_rtx
,
7325 /* Test NEG on constant vector series. */
7326 ASSERT_RTX_EQ (series_0_m1
,
7327 simplify_unary_operation (NEG
, mode
, series_0_1
, mode
));
7328 ASSERT_RTX_EQ (series_0_1
,
7329 simplify_unary_operation (NEG
, mode
, series_0_m1
, mode
));
7331 /* Test PLUS and MINUS on constant vector series. */
7332 rtx scalar2
= gen_int_mode (2, inner_mode
);
7333 rtx scalar3
= gen_int_mode (3, inner_mode
);
7334 rtx series_1_1
= gen_const_vec_series (mode
, const1_rtx
, const1_rtx
);
7335 rtx series_0_2
= gen_const_vec_series (mode
, const0_rtx
, scalar2
);
7336 rtx series_1_3
= gen_const_vec_series (mode
, const1_rtx
, scalar3
);
7337 ASSERT_RTX_EQ (series_1_1
,
7338 simplify_binary_operation (PLUS
, mode
, series_0_1
,
7339 CONST1_RTX (mode
)));
7340 ASSERT_RTX_EQ (series_0_m1
,
7341 simplify_binary_operation (PLUS
, mode
, CONST0_RTX (mode
),
7343 ASSERT_RTX_EQ (series_1_3
,
7344 simplify_binary_operation (PLUS
, mode
, series_1_1
,
7346 ASSERT_RTX_EQ (series_0_1
,
7347 simplify_binary_operation (MINUS
, mode
, series_1_1
,
7348 CONST1_RTX (mode
)));
7349 ASSERT_RTX_EQ (series_1_1
,
7350 simplify_binary_operation (MINUS
, mode
, CONST1_RTX (mode
),
7352 ASSERT_RTX_EQ (series_1_1
,
7353 simplify_binary_operation (MINUS
, mode
, series_1_3
,
7356 /* Test MULT between constant vectors. */
7357 rtx vec2
= gen_const_vec_duplicate (mode
, scalar2
);
7358 rtx vec3
= gen_const_vec_duplicate (mode
, scalar3
);
7359 rtx scalar9
= gen_int_mode (9, inner_mode
);
7360 rtx series_3_9
= gen_const_vec_series (mode
, scalar3
, scalar9
);
7361 ASSERT_RTX_EQ (series_0_2
,
7362 simplify_binary_operation (MULT
, mode
, series_0_1
, vec2
));
7363 ASSERT_RTX_EQ (series_3_9
,
7364 simplify_binary_operation (MULT
, mode
, vec3
, series_1_3
));
7365 if (!GET_MODE_NUNITS (mode
).is_constant ())
7366 ASSERT_FALSE (simplify_binary_operation (MULT
, mode
, series_0_1
,
7369 /* Test ASHIFT between constant vectors. */
7370 ASSERT_RTX_EQ (series_0_2
,
7371 simplify_binary_operation (ASHIFT
, mode
, series_0_1
,
7372 CONST1_RTX (mode
)));
7373 if (!GET_MODE_NUNITS (mode
).is_constant ())
7374 ASSERT_FALSE (simplify_binary_operation (ASHIFT
, mode
, CONST1_RTX (mode
),
7378 /* Verify simplify_merge_mask works correctly. */
7381 test_vec_merge (machine_mode mode
)
7383 rtx op0
= make_test_reg (mode
);
7384 rtx op1
= make_test_reg (mode
);
7385 rtx op2
= make_test_reg (mode
);
7386 rtx op3
= make_test_reg (mode
);
7387 rtx op4
= make_test_reg (mode
);
7388 rtx op5
= make_test_reg (mode
);
7389 rtx mask1
= make_test_reg (SImode
);
7390 rtx mask2
= make_test_reg (SImode
);
7391 rtx vm1
= gen_rtx_VEC_MERGE (mode
, op0
, op1
, mask1
);
7392 rtx vm2
= gen_rtx_VEC_MERGE (mode
, op2
, op3
, mask1
);
7393 rtx vm3
= gen_rtx_VEC_MERGE (mode
, op4
, op5
, mask1
);
7395 /* Simple vec_merge. */
7396 ASSERT_EQ (op0
, simplify_merge_mask (vm1
, mask1
, 0));
7397 ASSERT_EQ (op1
, simplify_merge_mask (vm1
, mask1
, 1));
7398 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 0));
7399 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 1));
7401 /* Nested vec_merge.
7402 It's tempting to make this simplify right down to opN, but we don't
7403 because all the simplify_* functions assume that the operands have
7404 already been simplified. */
7405 rtx nvm
= gen_rtx_VEC_MERGE (mode
, vm1
, vm2
, mask1
);
7406 ASSERT_EQ (vm1
, simplify_merge_mask (nvm
, mask1
, 0));
7407 ASSERT_EQ (vm2
, simplify_merge_mask (nvm
, mask1
, 1));
7409 /* Intermediate unary op. */
7410 rtx unop
= gen_rtx_NOT (mode
, vm1
);
7411 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op0
),
7412 simplify_merge_mask (unop
, mask1
, 0));
7413 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op1
),
7414 simplify_merge_mask (unop
, mask1
, 1));
7416 /* Intermediate binary op. */
7417 rtx binop
= gen_rtx_PLUS (mode
, vm1
, vm2
);
7418 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op0
, op2
),
7419 simplify_merge_mask (binop
, mask1
, 0));
7420 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op1
, op3
),
7421 simplify_merge_mask (binop
, mask1
, 1));
7423 /* Intermediate ternary op. */
7424 rtx tenop
= gen_rtx_FMA (mode
, vm1
, vm2
, vm3
);
7425 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op0
, op2
, op4
),
7426 simplify_merge_mask (tenop
, mask1
, 0));
7427 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op1
, op3
, op5
),
7428 simplify_merge_mask (tenop
, mask1
, 1));
7431 rtx badop0
= gen_rtx_PRE_INC (mode
, op0
);
7432 rtx badvm
= gen_rtx_VEC_MERGE (mode
, badop0
, op1
, mask1
);
7433 ASSERT_EQ (badop0
, simplify_merge_mask (badvm
, mask1
, 0));
7434 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (badvm
, mask1
, 1));
7436 /* Called indirectly. */
7437 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode
, op0
, op3
, mask1
),
7438 simplify_rtx (nvm
));
7441 /* Test subregs of integer vector constant X, trying elements in
7442 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
7443 where NELTS is the number of elements in X. Subregs involving
7444 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
7447 test_vector_subregs_modes (rtx x
, poly_uint64 elt_bias
= 0,
7448 unsigned int first_valid
= 0)
7450 machine_mode inner_mode
= GET_MODE (x
);
7451 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7453 for (unsigned int modei
= 0; modei
< NUM_MACHINE_MODES
; ++modei
)
7455 machine_mode outer_mode
= (machine_mode
) modei
;
7456 if (!VECTOR_MODE_P (outer_mode
))
7459 unsigned int outer_nunits
;
7460 if (GET_MODE_INNER (outer_mode
) == int_mode
7461 && GET_MODE_NUNITS (outer_mode
).is_constant (&outer_nunits
)
7462 && multiple_p (GET_MODE_NUNITS (inner_mode
), outer_nunits
))
7464 /* Test subregs in which the outer mode is a smaller,
7465 constant-sized vector of the same element type. */
7467 = constant_lower_bound (GET_MODE_NUNITS (inner_mode
));
7468 for (unsigned int elt
= 0; elt
< limit
; elt
+= outer_nunits
)
7470 rtx expected
= NULL_RTX
;
7471 if (elt
>= first_valid
)
7473 rtx_vector_builder
builder (outer_mode
, outer_nunits
, 1);
7474 for (unsigned int i
= 0; i
< outer_nunits
; ++i
)
7475 builder
.quick_push (CONST_VECTOR_ELT (x
, elt
+ i
));
7476 expected
= builder
.build ();
7478 poly_uint64 byte
= (elt_bias
+ elt
) * GET_MODE_SIZE (int_mode
);
7479 ASSERT_RTX_EQ (expected
,
7480 simplify_subreg (outer_mode
, x
,
7484 else if (known_eq (GET_MODE_SIZE (outer_mode
),
7485 GET_MODE_SIZE (inner_mode
))
7486 && known_eq (elt_bias
, 0U)
7487 && (GET_MODE_CLASS (outer_mode
) != MODE_VECTOR_BOOL
7488 || known_eq (GET_MODE_BITSIZE (outer_mode
),
7489 GET_MODE_NUNITS (outer_mode
)))
7490 && (!FLOAT_MODE_P (outer_mode
)
7491 || (FLOAT_MODE_FORMAT (outer_mode
)->ieee_bits
7492 == GET_MODE_UNIT_PRECISION (outer_mode
)))
7493 && (GET_MODE_SIZE (inner_mode
).is_constant ()
7494 || !CONST_VECTOR_STEPPED_P (x
)))
7496 /* Try converting to OUTER_MODE and back. */
7497 rtx outer_x
= simplify_subreg (outer_mode
, x
, inner_mode
, 0);
7498 ASSERT_TRUE (outer_x
!= NULL_RTX
);
7499 ASSERT_RTX_EQ (x
, simplify_subreg (inner_mode
, outer_x
,
7504 if (BYTES_BIG_ENDIAN
== WORDS_BIG_ENDIAN
)
7506 /* Test each byte in the element range. */
7508 = constant_lower_bound (GET_MODE_SIZE (inner_mode
));
7509 for (unsigned int i
= 0; i
< limit
; ++i
)
7511 unsigned int elt
= i
/ GET_MODE_SIZE (int_mode
);
7512 rtx expected
= NULL_RTX
;
7513 if (elt
>= first_valid
)
7515 unsigned int byte_shift
= i
% GET_MODE_SIZE (int_mode
);
7516 if (BYTES_BIG_ENDIAN
)
7517 byte_shift
= GET_MODE_SIZE (int_mode
) - byte_shift
- 1;
7518 rtx_mode_t
vec_elt (CONST_VECTOR_ELT (x
, elt
), int_mode
);
7519 wide_int shifted_elt
7520 = wi::lrshift (vec_elt
, byte_shift
* BITS_PER_UNIT
);
7521 expected
= immed_wide_int_const (shifted_elt
, QImode
);
7523 poly_uint64 byte
= elt_bias
* GET_MODE_SIZE (int_mode
) + i
;
7524 ASSERT_RTX_EQ (expected
,
7525 simplify_subreg (QImode
, x
, inner_mode
, byte
));
7530 /* Test constant subregs of integer vector mode INNER_MODE, using 1
7531 element per pattern. */
7534 test_vector_subregs_repeating (machine_mode inner_mode
)
7536 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
7537 unsigned int min_nunits
= constant_lower_bound (nunits
);
7538 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7539 unsigned int count
= gcd (min_nunits
, 8);
7541 rtx_vector_builder
builder (inner_mode
, count
, 1);
7542 for (unsigned int i
= 0; i
< count
; ++i
)
7543 builder
.quick_push (gen_int_mode (8 - i
, int_mode
));
7544 rtx x
= builder
.build ();
7546 test_vector_subregs_modes (x
);
7547 if (!nunits
.is_constant ())
7548 test_vector_subregs_modes (x
, nunits
- min_nunits
);
7551 /* Test constant subregs of integer vector mode INNER_MODE, using 2
7552 elements per pattern. */
7555 test_vector_subregs_fore_back (machine_mode inner_mode
)
7557 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
7558 unsigned int min_nunits
= constant_lower_bound (nunits
);
7559 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7560 unsigned int count
= gcd (min_nunits
, 4);
7562 rtx_vector_builder
builder (inner_mode
, count
, 2);
7563 for (unsigned int i
= 0; i
< count
; ++i
)
7564 builder
.quick_push (gen_int_mode (i
, int_mode
));
7565 for (unsigned int i
= 0; i
< count
; ++i
)
7566 builder
.quick_push (gen_int_mode (-(int) i
, int_mode
));
7567 rtx x
= builder
.build ();
7569 test_vector_subregs_modes (x
);
7570 if (!nunits
.is_constant ())
7571 test_vector_subregs_modes (x
, nunits
- min_nunits
, count
);
7574 /* Test constant subregs of integer vector mode INNER_MODE, using 3
7575 elements per pattern. */
7578 test_vector_subregs_stepped (machine_mode inner_mode
)
7580 /* Build { 0, 1, 2, 3, ... }. */
7581 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
7582 rtx_vector_builder
builder (inner_mode
, 1, 3);
7583 for (unsigned int i
= 0; i
< 3; ++i
)
7584 builder
.quick_push (gen_int_mode (i
, int_mode
));
7585 rtx x
= builder
.build ();
7587 test_vector_subregs_modes (x
);
7590 /* Test constant subregs of integer vector mode INNER_MODE. */
7593 test_vector_subregs (machine_mode inner_mode
)
7595 test_vector_subregs_repeating (inner_mode
);
7596 test_vector_subregs_fore_back (inner_mode
);
7597 test_vector_subregs_stepped (inner_mode
);
7600 /* Verify some simplifications involving vectors. */
7605 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
7607 machine_mode mode
= (machine_mode
) i
;
7608 if (VECTOR_MODE_P (mode
))
7610 rtx scalar_reg
= make_test_reg (GET_MODE_INNER (mode
));
7611 test_vector_ops_duplicate (mode
, scalar_reg
);
7612 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
7613 && maybe_gt (GET_MODE_NUNITS (mode
), 2))
7615 test_vector_ops_series (mode
, scalar_reg
);
7616 test_vector_subregs (mode
);
7618 test_vec_merge (mode
);
/* Dispatcher for the CONST_POLY_INT tests: the generic template runs the
   tests, while the <1> specialization is a no-op, because with a single
   coefficient poly_int values are plain constants and CONST_POLY_INT
   rtxes do not exist.  */
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
7635 /* Test various CONST_POLY_INT properties. */
7637 template<unsigned int N
>
7639 simplify_const_poly_int_tests
<N
>::run ()
7641 rtx x1
= gen_int_mode (poly_int64 (1, 1), QImode
);
7642 rtx x2
= gen_int_mode (poly_int64 (-80, 127), QImode
);
7643 rtx x3
= gen_int_mode (poly_int64 (-79, -128), QImode
);
7644 rtx x4
= gen_int_mode (poly_int64 (5, 4), QImode
);
7645 rtx x5
= gen_int_mode (poly_int64 (30, 24), QImode
);
7646 rtx x6
= gen_int_mode (poly_int64 (20, 16), QImode
);
7647 rtx x7
= gen_int_mode (poly_int64 (7, 4), QImode
);
7648 rtx x8
= gen_int_mode (poly_int64 (30, 24), HImode
);
7649 rtx x9
= gen_int_mode (poly_int64 (-30, -24), HImode
);
7650 rtx x10
= gen_int_mode (poly_int64 (-31, -24), HImode
);
7651 rtx two
= GEN_INT (2);
7652 rtx six
= GEN_INT (6);
7653 poly_uint64 offset
= subreg_lowpart_offset (QImode
, HImode
);
7655 /* These tests only try limited operation combinations. Fuller arithmetic
7656 testing is done directly on poly_ints. */
7657 ASSERT_EQ (simplify_unary_operation (NEG
, HImode
, x8
, HImode
), x9
);
7658 ASSERT_EQ (simplify_unary_operation (NOT
, HImode
, x8
, HImode
), x10
);
7659 ASSERT_EQ (simplify_unary_operation (TRUNCATE
, QImode
, x8
, HImode
), x5
);
7660 ASSERT_EQ (simplify_binary_operation (PLUS
, QImode
, x1
, x2
), x3
);
7661 ASSERT_EQ (simplify_binary_operation (MINUS
, QImode
, x3
, x1
), x2
);
7662 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, x4
, six
), x5
);
7663 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, six
, x4
), x5
);
7664 ASSERT_EQ (simplify_binary_operation (ASHIFT
, QImode
, x4
, two
), x6
);
7665 ASSERT_EQ (simplify_binary_operation (IOR
, QImode
, x4
, two
), x7
);
7666 ASSERT_EQ (simplify_subreg (HImode
, x5
, QImode
, 0), x8
);
7667 ASSERT_EQ (simplify_subreg (QImode
, x8
, HImode
, offset
), x5
);
7670 /* Run all of the selftests within this file. */
7673 simplify_rtx_c_tests ()
7676 simplify_const_poly_int_tests
<NUM_POLY_INT_COEFFS
>::run ();
7679 } // namespace selftest
7681 #endif /* CHECKING_P */