* cse.c (find_comparison_args): Use val_signbit_known_set_p.
* simplify-rtx.c (mode_signbit_p): Use GET_MODE_PRECISION.
(val_signbit_p, val_signbit_known_set_p,
val_signbit_known_clear_p): New functions.
(simplify_const_unary_operation, simplify_binary_operation_1,
simplify_const_binary_operation,
simplify_const_relational_operation): Use them. Use
GET_MODE_MASK for masking and sign-extensions.
* combine.c (set_nonzero_bits_and_sign_copies, simplify_set,
combine_simplify_rtx, force_to_mode, reg_nonzero_bits_for_combine,
simplify_shift_const_1, simplify_comparison): Likewise.
* expr.c (convert_modes): Likewise.
* rtlanal.c (nonzero_bits1, canonicalize_condition): Likewise.
* expmed.c (emit_cstore, emit_store_flag_1, emit_store_flag):
Likewise.
* rtl.h (val_signbit_p, val_signbit_known_set_p,
val_signbit_known_clear_p): Declare.
From-SVN: r175917
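Every hunk below applies the same rewrite: an open-coded sign-bit test such as

    (val & ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))) != 0

together with its GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT guard, collapses into one of the new predicates, and the sign-extension idiom val | ((unsigned HOST_WIDE_INT) (-1) << width) becomes val | ~GET_MODE_MASK (mode). A minimal standalone sketch of the pattern, with a plain bit WIDTH standing in for a machine_mode (mode_mask and signbit_set_p here are illustrative stand-ins, not GCC's functions):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for GET_MODE_MASK: all-ones in the low WIDTH bits.  */
    static uint64_t
    mode_mask (unsigned width)
    {
      return width >= 64 ? ~(uint64_t) 0 : ((uint64_t) 1 << width) - 1;
    }

    /* Analogue of val_signbit_known_set_p: is bit WIDTH-1 set in VAL?  */
    static bool
    signbit_set_p (unsigned width, uint64_t val)
    {
      if (width == 0 || width > 64)
        return false;
      return (val & ((uint64_t) 1 << (width - 1))) != 0;
    }

    int
    main (void)
    {
      /* Sign-extend 0x80 out of an 8-bit "mode": OR-ing in
         ~mode_mask (8) replaces the old (-1) << 8 shift.  */
      uint64_t val = 0x80;
      if (signbit_set_p (8, val))
        val |= ~mode_mask (8);
      assert (val == ~(uint64_t) 0x7f);
      return 0;
    }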
+2011-07-06  Bernd Schmidt  <bernds@codesourcery.com>
+
+ * cse.c (find_comparison_args): Use val_signbit_known_set_p.
+ * simplify-rtx.c (mode_signbit_p): Use GET_MODE_PRECISION.
+ (val_signbit_p, val_signbit_known_set_p,
+ val_signbit_known_clear_p): New functions.
+ (simplify_const_unary_operation, simplify_binary_operation_1,
+ simplify_const_binary_operation,
+ simplify_const_relational_operation): Use them. Use
+ GET_MODE_MASK for masking and sign-extensions.
+ * combine.c (set_nonzero_bits_and_sign_copies, simplify_set,
+ combine_simplify_rtx, force_to_mode, reg_nonzero_bits_for_combine,
+ simplify_shift_const_1, simplify_comparison): Likewise.
+ * expr.c (convert_modes): Likewise.
+ * rtlanal.c (nonzero_bits1, canonicalize_condition): Likewise.
+ * expmed.c (emit_cstore, emit_store_flag_1, emit_store_flag):
+ Likewise.
+ * rtl.h (val_signbit_p, val_signbit_known_set_p,
+ val_signbit_known_clear_p): Declare.
+
2011-07-06  Richard Guenther  <rguenther@suse.de>
PR tree-optimization/49645
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
&& CONST_INT_P (src)
&& INTVAL (src) > 0
- && 0 != (UINTVAL (src)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
- src = GEN_INT (UINTVAL (src)
- | ((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (GET_MODE (x))));
+ && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
+ src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
#endif
/* Don't call nonzero_bits if it cannot change anything. */
going to test the sign bit. */
if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && val_signbit_p (mode, STORE_FLAG_VALUE)
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
enum machine_mode inner_mode = GET_MODE (inner);
/* Here we make sure that we don't have a sign bit on. */
- if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (inner, inner_mode)
- < ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
+ if (val_signbit_known_clear_p (GET_MODE (src),
+ nonzero_bits (inner, inner_mode)))
{
SUBST (SET_SRC (x), inner);
src = SET_SRC (x);
case ASHIFTRT:
/* If we are just looking for the sign bit, we don't need this shift at
all, even if it has a variable count. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
- && (mask == ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ if (val_signbit_p (GET_MODE (x), mask))
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* If this is a shift by a constant, get a mask that contains those bits
??? For 2.5, try to tighten up the MD files in this regard
instead of this kludge. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
+ if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
&& CONST_INT_P (tem)
&& INTVAL (tem) > 0
- && 0 != (UINTVAL (tem)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
- tem = GEN_INT (UINTVAL (tem)
- | ((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (GET_MODE (x))));
+ && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
+ tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
#endif
return tem;
}
ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
`make_compound_operation' will convert it to an ASHIFTRT for
those machines (such as VAX) that don't have an LSHIFTRT. */
- if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
- && code == ASHIFTRT
- && ((nonzero_bits (varop, shift_mode)
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0))
+ if (code == ASHIFTRT
+ && val_signbit_known_clear_p (shift_mode,
+ nonzero_bits (varop, shift_mode)))
code = LSHIFTRT;
if (((code == LSHIFTRT
mode = GET_MODE (XEXP (op0, 0));
if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
&& ! unsigned_comparison_p
- && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- && ((unsigned HOST_WIDE_INT) const_op
- < (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1))))
+ && val_signbit_known_clear_p (mode, const_op)
&& have_insn_for (COMPARE, mode))
{
op0 = XEXP (op0, 0);
/* Check for the cases where we simply want the result of the
earlier test or the opposite of that result. */
if (code == NE || code == EQ
- || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && (STORE_FLAG_VALUE
- & (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
&& (code == LT || code == GE)))
{
enum rtx_code new_code;
for STORE_FLAG_VALUE, also look at LT and GE operations. */
|| ((code == NE
|| (code == LT
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == LT
&& SCALAR_FLOAT_MODE_P (inner_mode)
}
else if ((code == EQ
|| (code == GE
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == GE
&& SCALAR_FLOAT_MODE_P (inner_mode)
if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
{
convert_move (target, subtarget,
- (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
- && 0 == (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (result_mode) -1))));
+ val_signbit_known_clear_p (result_mode,
+ STORE_FLAG_VALUE));
op0 = target;
result_mode = target_mode;
}
/* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
it hard to use a value of just the sign bit due to ANSI integer
constant typing rules. */
- else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
- && (STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
+ else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
GET_MODE_BITSIZE (result_mode) - 1, subtarget,
normalizep == 1);
target = gen_reg_rtx (target_mode);
convert_move (target, tem,
- 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (word_mode) -1))));
+ !val_signbit_known_set_p (word_mode,
+ (normalizep ? normalizep
+ : STORE_FLAG_VALUE)));
return target;
}
}
if (op1 == const0_rtx && (code == LT || code == GE)
&& GET_MODE_CLASS (mode) == MODE_INT
&& (normalizep || STORE_FLAG_VALUE == 1
- || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1))))))
+ || val_signbit_p (mode, STORE_FLAG_VALUE)))
{
subtarget = target;
if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
normalizep = STORE_FLAG_VALUE;
- else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ else if (val_signbit_p (mode, STORE_FLAG_VALUE))
;
else
return 0;
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode))
{
HOST_WIDE_INT val = INTVAL (x);
- int width = GET_MODE_BITSIZE (oldmode);
/* We must sign or zero-extend in this case. Start by
zero-extending, then sign extend if we need to. */
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ val &= GET_MODE_MASK (oldmode);
if (! unsignedp
- && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
- val |= (HOST_WIDE_INT) (-1) << width;
+ && val_signbit_known_set_p (oldmode, val))
+ val |= ~GET_MODE_MASK (oldmode);
return gen_int_mode (val, mode);
}
extern rtx avoid_constant_pool_reference (rtx);
extern rtx delegitimize_mem_from_attrs (rtx);
extern bool mode_signbit_p (enum machine_mode, const_rtx);
+extern bool val_signbit_p (enum machine_mode, unsigned HOST_WIDE_INT);
+extern bool val_signbit_known_set_p (enum machine_mode,
+ unsigned HOST_WIDE_INT);
+extern bool val_signbit_known_clear_p (enum machine_mode,
+ unsigned HOST_WIDE_INT);
/* In reginfo.c */
extern enum machine_mode choose_hard_reg_mode (unsigned int, unsigned int,
unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
unsigned HOST_WIDE_INT inner_nz;
enum rtx_code code;
+ enum machine_mode inner_mode;
unsigned int mode_width = GET_MODE_BITSIZE (mode);
/* For floating-point and vector values, assume all bits are needed. */
if (GET_MODE (XEXP (x, 0)) != VOIDmode)
{
inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
- if (inner_nz
- & (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
inner_nz |= (GET_MODE_MASK (mode)
& ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
}
& cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
known_x, known_mode, known_ret);
+ inner_mode = GET_MODE (SUBREG_REG (x));
/* If the inner mode is a single word for both the host and target
machines, we can compute this from which bits of the inner
object might be nonzero. */
- if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
- && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
- <= HOST_BITS_PER_WIDE_INT))
+ if (GET_MODE_BITSIZE (inner_mode) <= BITS_PER_WORD
+ && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT))
{
nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
/* If this is a typical RISC machine, we only have to worry
about the way loads are extended. */
- if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
- ? (((nonzero
- & (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
- != 0))
- : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
+ if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
+ ? val_signbit_known_set_p (inner_mode, nonzero)
+ : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
|| !MEM_P (SUBREG_REG (x)))
#endif
{
causes the high-order bits to become undefined. So they are
not known to be zero. */
if (GET_MODE_SIZE (GET_MODE (x))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ > GET_MODE_SIZE (inner_mode))
nonzero |= (GET_MODE_MASK (GET_MODE (x))
- & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+ & ~GET_MODE_MASK (inner_mode));
}
}
break;
if ((GET_CODE (SET_SRC (set)) == COMPARE
|| (((code == NE
|| (code == LT
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && (STORE_FLAG_VALUE
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == LT
&& SCALAR_FLOAT_MODE_P (inner_mode)
x = SET_SRC (set);
else if (((code == EQ
|| (code == GE
- && (GET_MODE_BITSIZE (inner_mode)
- <= HOST_BITS_PER_WIDE_INT)
- && GET_MODE_CLASS (inner_mode) == MODE_INT
- && (STORE_FLAG_VALUE
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (inner_mode) - 1))))
+ && val_signbit_known_set_p (inner_mode,
+ STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
|| (code == GE
&& SCALAR_FLOAT_MODE_P (inner_mode)
if (GET_MODE_CLASS (mode) != MODE_INT)
return false;
- width = GET_MODE_BITSIZE (mode);
+ width = GET_MODE_PRECISION (mode);
if (width == 0)
return false;
val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
+
+/* Test whether VAL is equal to the most significant bit of mode MODE
+ (after masking with the mode mask of MODE). Returns false if the
+ precision of MODE is too large to handle. */
+
+bool
+val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= GET_MODE_MASK (mode);
+ return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
+}
+
+/* Test whether the most significant bit of mode MODE is set in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val != 0;
+}
+
+/* Test whether the most significant bit of mode MODE is clear in VAL.
+ Returns false if the precision of MODE is too large to handle. */
+bool
+val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
+{
+ unsigned int width;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return false;
+
+ width = GET_MODE_PRECISION (mode);
+ if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
+ return false;
+
+ val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
+ return val == 0;
+}
\f
/* Make a binary operation by properly ordering the operands and
seeing if the expression folds. */
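A subtlety in the three predicates added above: val_signbit_p asks whether VAL (masked to MODE) is exactly the sign-bit constant, while the known_set/known_clear variants test only the sign bit itself; and since both of the latter return false for a precision wider than HOST_BITS_PER_WIDE_INT, neither is the negation of the other. A standalone sketch of that behavior, assuming a plain WIDTH in place of a machine_mode (sb_p, sb_set_p and sb_clear_p are illustrative names, with 64 standing in for HOST_BITS_PER_WIDE_INT):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool
    sb_p (unsigned width, uint64_t val)
    {
      if (width == 0 || width > 64)
        return false;
      val &= width == 64 ? ~(uint64_t) 0 : ((uint64_t) 1 << width) - 1;
      return val == ((uint64_t) 1 << (width - 1));
    }

    static bool
    sb_set_p (unsigned width, uint64_t val)
    {
      if (width == 0 || width > 64)
        return false;
      return (val & ((uint64_t) 1 << (width - 1))) != 0;
    }

    static bool
    sb_clear_p (unsigned width, uint64_t val)
    {
      if (width == 0 || width > 64)
        return false;
      return (val & ((uint64_t) 1 << (width - 1))) == 0;
    }

    int
    main (void)
    {
      assert (sb_p (8, 0x80) && !sb_p (8, 0x81));  /* exactly the sign bit */
      assert (sb_set_p (8, 0x81));                 /* sign bit plus others */
      assert (sb_clear_p (8, 0x7f));
      /* Too-wide "mode": both predicates answer false, so a caller must
         not treat one as the complement of the other.  */
      assert (!sb_set_p (72, 0x80) && !sb_clear_p (72, 0x80));
      return 0;
    }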
/* If operand is something known to be positive, ignore the ABS. */
if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
- || ((GET_MODE_BITSIZE (GET_MODE (op))
- <= HOST_BITS_PER_WIDE_INT)
- && ((nonzero_bits (op, GET_MODE (op))
- & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
- == 0)))
+ || val_signbit_known_clear_p (GET_MODE (op),
+ nonzero_bits (op, GET_MODE (op))))
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (op_mode));
+ val = arg0 & GET_MODE_MASK (op_mode);
else
return 0;
break;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
{
- val
- = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
- << GET_MODE_BITSIZE (op_mode));
- if (val & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (op_mode) - 1)))
- val
- -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ val = arg0 & GET_MODE_MASK (op_mode);
+ if (val_signbit_known_set_p (op_mode, val))
+ val |= ~GET_MODE_MASK (op_mode);
}
else
return 0;
else
{
lv = l1 & GET_MODE_MASK (op_mode);
- if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
- && (lv & ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
- lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ if (val_signbit_known_set_p (op_mode, lv))
+ lv |= ~GET_MODE_MASK (op_mode);
hv = HWI_SIGN_EXTEND (lv);
}
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ if (val_signbit_p (mode, STORE_FLAG_VALUE)
&& trueop1 == const_true_rtx
&& COMPARISON_P (op0)
&& (reversed = reversed_comparison (op0, mode)))
case SMIN:
if (width <= HOST_BITS_PER_WIDE_INT
- && CONST_INT_P (trueop1)
- && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
+ && mode_signbit_p (mode, trueop1)
&& ! side_effects_p (op0))
return op1;
if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
if (width < HOST_BITS_PER_WIDE_INT)
{
- arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ arg0 &= GET_MODE_MASK (mode);
+ arg1 &= GET_MODE_MASK (mode);
arg0s = arg0;
- if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, arg0s))
+ arg0s |= ~GET_MODE_MASK (mode);
- arg1s = arg1;
- if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
+ arg1s = arg1;
+ if (val_signbit_known_set_p (mode, arg1s))
+ arg1s |= ~GET_MODE_MASK (mode);
}
else
{
we have to sign or zero-extend the values. */
if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
{
- l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
- l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ l0u &= GET_MODE_MASK (mode);
+ l1u &= GET_MODE_MASK (mode);
- if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
- l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l0s))
+ l0s |= ~GET_MODE_MASK (mode);
- if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
- l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
+ if (val_signbit_known_set_p (mode, l1s))
+ l1s |= ~GET_MODE_MASK (mode);
}
if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);