+2012-06-02  Kenneth Zadeck  <zadeck@naturalbridge.com>
+
+ * expmed.c (expand_mult, choose_multiplier): Change "2 *
+ HOST_BITS_PER_WIDE_INT" to "HOST_BITS_PER_DOUBLE_INT".
+ * expr.c (convert_modes): Likewise.
+ * explow.c (plus_constant): Likewise.
+	* fixed-value.c (fixed_saturate1, fixed_saturate2, do_fixed_add)
+	(do_fixed_multiply, do_fixed_divide, do_fixed_shift, fixed_convert)
+	(fixed_convert_from_int, fixed_convert_from_real): Likewise.
+ * fold-const.c (fold_convert_const_int_from_fixed, sign_bit_p)
+ (native_interpret_int, fold_binary_loc, fold_ternary_loc): Likewise.
+ * varasm.c (output_constructor_bitfield): Likewise.
+ * tree-vrp.c (register_edge_assert_for_2): Likewise.
+ * double-int.c (rshift_double, lshift_double): Likewise.
+	* double-int.h (double_int_fits_in_uhwi_p, double_int): Likewise.
+ * simplify-rtx.c (mode_signbit_p)
+ (simplify_const_unary_operation, simplify_binary_operation_1)
+ (simplify_immed_subreg): Likewise.
+ * builtins.c (c_readstr, fold_builtin_bitop): Likewise.
+ * tree-vect-generic.c (build_replicated_const): Likewise.
+ * dbxout.c (stabstr_O): Likewise.
+ * emit-rtl.c (immed_double_int_const, immed_double_const)
+ (gen_lowpart_common, init_emit_once): Likewise.
+ * tree.c (integer_pow2p, tree_log2, tree_floor_log2)
+ (widest_int_cst_value, upper_bound_in_type): Likewise.
+ * stor-layout.c (initialize_sizetypes, fixup_signed_type)
+ (fixup_unsigned_type): Likewise.
+ * real.c (real_to_integer2, real_from_integer): Likewise.
+ * dwarf2out.c (size_of_loc_descr, size_of_die, output_die)
+ (clz_loc_descriptor, mem_loc_descriptor): Likewise.
+
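(For reference: the substitution is purely mechanical. It assumes double-int.h
provides the new macro as a one-line wrapper around the old expression, roughly
as in the sketch below; the exact definition in the tree may differ.)

	/* Sketch of the assumed definition in double-int.h: the new name is
	   expected to expand to the same value as the expression it replaces,
	   so "2 * HOST_BITS_PER_WIDE_INT" (and "HOST_BITS_PER_WIDE_INT * 2")
	   can be rewritten without changing the value of any expression.  */
	#define HOST_BITS_PER_DOUBLE_INT (2 * HOST_BITS_PER_WIDE_INT)
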
2012-06-01 Eric Botcazou <ebotcazou@adacore.com>
PR middle-end/53501
&& GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
j *= BITS_PER_UNIT;
- gcc_assert (j < 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (j < HOST_BITS_PER_DOUBLE_INT);
if (ch)
ch = (unsigned char) str[i];
if (width > HOST_BITS_PER_WIDE_INT)
{
hi = TREE_INT_CST_HIGH (arg);
- if (width < 2 * HOST_BITS_PER_WIDE_INT)
+ if (width < HOST_BITS_PER_DOUBLE_INT)
hi &= ~((unsigned HOST_WIDE_INT) (-1)
<< (width - HOST_BITS_PER_WIDE_INT));
}
present. */
{
const unsigned int width = TYPE_PRECISION (TREE_TYPE (cst));
- if (width == HOST_BITS_PER_WIDE_INT * 2)
+ if (width == HOST_BITS_PER_DOUBLE_INT)
;
else if (width > HOST_BITS_PER_WIDE_INT)
high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1);
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
- if (count >= 2 * HOST_BITS_PER_WIDE_INT)
+ if (count >= HOST_BITS_PER_DOUBLE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
*hv = signmask;
*lv = signmask;
}
- else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
+ else if ((prec - count) >= HOST_BITS_PER_DOUBLE_INT)
;
else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
{
if (SHIFT_COUNT_TRUNCATED)
count %= prec;
- if (count >= 2 * HOST_BITS_PER_WIDE_INT)
+ if (count >= HOST_BITS_PER_DOUBLE_INT)
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
>> (prec - HOST_BITS_PER_WIDE_INT - 1))
: (*lv >> (prec - 1))) & 1);
- if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
+ if (prec >= HOST_BITS_PER_DOUBLE_INT)
;
else if (prec >= HOST_BITS_PER_WIDE_INT)
{
/* The following operations perform arithmetics modulo 2^precision,
so you do not need to call double_int_ext between them, even if
you are representing numbers with precision less than
- 2 * HOST_BITS_PER_WIDE_INT bits. */
+ HOST_BITS_PER_DOUBLE_INT bits. */
double_int double_int_mul (double_int, double_int);
double_int double_int_mul_with_sign (double_int, double_int, bool, int *);
/* You must ensure that double_int_ext is called on the operands
of the following operations, if the precision of the numbers
- is less than 2 * HOST_BITS_PER_WIDE_INT bits. */
+ is less than HOST_BITS_PER_DOUBLE_INT bits. */
double_int double_int_div (double_int, double_int, bool, unsigned);
double_int double_int_sdiv (double_int, double_int, unsigned);
double_int double_int_udiv (double_int, double_int, unsigned);
/* The operands of the following comparison functions must be processed
with double_int_ext, if their precision is less than
- 2 * HOST_BITS_PER_WIDE_INT bits. */
+ HOST_BITS_PER_DOUBLE_INT bits. */
/* Returns true if CST is zero. */
size += HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT;
break;
case dw_val_class_const_double:
- size += 2 * HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT;
+ size += HOST_BITS_PER_DOUBLE_INT / BITS_PER_UNIT;
break;
default:
gcc_unreachable ();
}
break;
case dw_val_class_const_double:
- size += 2 * HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ size += HOST_BITS_PER_DOUBLE_INT / HOST_BITS_PER_CHAR;
if (HOST_BITS_PER_WIDE_INT >= 64)
size++; /* block */
break;
if (HOST_BITS_PER_WIDE_INT >= 64)
dw2_asm_output_data (1,
- 2 * HOST_BITS_PER_WIDE_INT
+ HOST_BITS_PER_DOUBLE_INT
/ HOST_BITS_PER_CHAR,
NULL);
if (GET_MODE_CLASS (mode) != MODE_INT
|| GET_MODE (XEXP (rtl, 0)) != mode
|| (GET_CODE (rtl) == CLZ
- && GET_MODE_BITSIZE (mode) > 2 * HOST_BITS_PER_WIDE_INT))
+ && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_DOUBLE_INT))
return NULL;
op0 = mem_loc_descriptor (XEXP (rtl, 0), mode, mem_mode,
}
if (!dwarf_strict
&& (GET_MODE_BITSIZE (mode) == HOST_BITS_PER_WIDE_INT
- || GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT))
+ || GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT))
{
dw_die_ref type_die = base_type_for_mode (mode, 1);
enum machine_mode amode;
adequately represented. We output CONST_DOUBLEs as blocks. */
if (mode == VOIDmode
|| (GET_MODE (rtl) == VOIDmode
- && GET_MODE_BITSIZE (mode) != 2 * HOST_BITS_PER_WIDE_INT))
+ && GET_MODE_BITSIZE (mode) != HOST_BITS_PER_DOUBLE_INT))
break;
type_die = base_type_for_mode (mode,
GET_MODE_CLASS (mode) == MODE_INT);
/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair
of ints: I0 is the low-order word and I1 is the high-order word.
- For values that are larger than 2*HOST_BITS_PER_WIDE_INT, the
+ For values that are larger than HOST_BITS_PER_DOUBLE_INT, the
implied upper bits are copies of the high bit of i1. The value
itself is neither signed nor unsigned. Do not use this routine for
non-integer modes; convert to REAL_VALUE_TYPE and use
unsigned int i;
/* There are the following cases (note that there are no modes with
- HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode) < 2 * HOST_BITS_PER_WIDE_INT):
+ HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode) < HOST_BITS_PER_DOUBLE_INT):
1) If GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT, then we use
gen_int_mode.
&& msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
else if (innermode == VOIDmode)
- innermode = mode_for_size (HOST_BITS_PER_WIDE_INT * 2, MODE_INT, 0);
+ innermode = mode_for_size (HOST_BITS_PER_DOUBLE_INT, MODE_INT, 0);
xsize = GET_MODE_SIZE (innermode);
FCONST1(mode).data.low = 0;
FCONST1(mode).mode = mode;
lshift_double (1, 0, GET_MODE_FBIT (mode),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&FCONST1(mode).data.low,
&FCONST1(mode).data.high,
SIGNED_FIXED_POINT_MODE_P (mode));
FCONST1(mode).data.low = 0;
FCONST1(mode).mode = mode;
lshift_double (1, 0, GET_MODE_FBIT (mode),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&FCONST1(mode).data.low,
&FCONST1(mode).data.high,
SIGNED_FIXED_POINT_MODE_P (mode));
if (add_double_with_sign (l1, h1, l2, h2, &lv, &hv, false))
/* Sorry, we have no way to represent overflows this wide.
To fix, add constant support wider than CONST_DOUBLE. */
- gcc_assert (GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT);
return immed_double_const (lv, hv, VOIDmode);
}
{
int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
+ HOST_BITS_PER_WIDE_INT;
- if (shift < 2 * HOST_BITS_PER_WIDE_INT - 1
- || GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
+ if (shift < HOST_BITS_PER_DOUBLE_INT - 1
+ || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT)
return expand_shift (LSHIFT_EXPR, mode, op0,
shift, target, unsignedp);
}
/* We could handle this with some effort, but this case is much
better handled directly with a scc insn, so rely on caller using
that. */
- gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT);
/* mlow = 2^(N + lgup)/d */
if (pow >= HOST_BITS_PER_WIDE_INT)
make the high-order word of the constant zero, not all ones. */
if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT
&& CONST_INT_P (x) && INTVAL (x) < 0)
{
double_int val = uhwi_to_double_int (INTVAL (x));
min.high = 0;
min.low = 1;
lshift_double (min.low, min.high, i_f_bits,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&min.low, &min.high, 1);
min = double_int_ext (min, 1 + i_f_bits, 0);
if (double_int_cmp (a, max, 0) == 1)
min_s.high = 0;
min_s.low = 1;
lshift_double (min_s.low, min_s.high, i_f_bits,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&min_s.low, &min_s.high, 1);
min_s = double_int_ext (min_s, 1 + i_f_bits, 0);
if (double_int_cmp (a_high, max_r, 0) == 1
f->data.low = 1;
f->data.high = 0;
lshift_double (f->data.low, f->data.high, i_f_bits,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, 1);
if (get_fixed_sign_bit (a->data, i_f_bits) == 0)
{
f->data = double_int_mul (a->data, b->data);
lshift_double (f->data.low, f->data.high,
(-GET_MODE_FBIT (f->mode)),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, !unsigned_p);
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
}
r = double_int_sub (r, a->data);
/* Shift right the result by FBIT. */
- if (GET_MODE_FBIT (f->mode) == 2 * HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_FBIT (f->mode) == HOST_BITS_PER_DOUBLE_INT)
{
s.low = r.low;
s.high = r.high;
{
lshift_double (s.low, s.high,
(-GET_MODE_FBIT (f->mode)),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&s.low, &s.high, 0);
lshift_double (r.low, r.high,
- (2 * HOST_BITS_PER_WIDE_INT
+ (HOST_BITS_PER_DOUBLE_INT
- GET_MODE_FBIT (f->mode)),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, 0);
f->data.low = f->data.low | s.low;
f->data.high = f->data.high | s.high;
s.high = f->data.high;
lshift_double (r.low, r.high,
(-GET_MODE_FBIT (f->mode)),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&r.low, &r.high, !unsigned_p);
}
{
lshift_double (a->data.low, a->data.high,
GET_MODE_FBIT (f->mode),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, !unsigned_p);
f->data = double_int_div (f->data, b->data, unsigned_p, TRUNC_DIV_EXPR);
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
pos_b = b->data;
/* Left shift pos_a to {r, s} by FBIT. */
- if (GET_MODE_FBIT (f->mode) == 2 * HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_FBIT (f->mode) == HOST_BITS_PER_DOUBLE_INT)
{
r = pos_a;
s.high = 0;
{
lshift_double (pos_a.low, pos_a.high,
GET_MODE_FBIT (f->mode),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&s.low, &s.high, 0);
lshift_double (pos_a.low, pos_a.high,
- - (2 * HOST_BITS_PER_WIDE_INT
+ - (HOST_BITS_PER_DOUBLE_INT
- GET_MODE_FBIT (f->mode)),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&r.low, &r.high, 0);
}
quo_s.high = 0;
quo_s.low = 0;
- for (i = 0; i < 2 * HOST_BITS_PER_WIDE_INT; i++)
+ for (i = 0; i < HOST_BITS_PER_DOUBLE_INT; i++)
{
/* Record the leftmost bit of mod. */
int leftmost_mod = (mod.high < 0);
/* Shift left mod by 1 bit. */
- lshift_double (mod.low, mod.high, 1, 2 * HOST_BITS_PER_WIDE_INT,
+ lshift_double (mod.low, mod.high, 1, HOST_BITS_PER_DOUBLE_INT,
&mod.low, &mod.high, 0);
/* Test the leftmost bit of s to add to mod. */
mod.low += 1;
/* Shift left quo_s by 1 bit. */
- lshift_double (quo_s.low, quo_s.high, 1, 2 * HOST_BITS_PER_WIDE_INT,
+ lshift_double (quo_s.low, quo_s.high, 1, HOST_BITS_PER_DOUBLE_INT,
&quo_s.low, &quo_s.high, 0);
/* Try to calculate (mod - pos_b). */
}
/* Shift left s by 1 bit. */
- lshift_double (s.low, s.high, 1, 2 * HOST_BITS_PER_WIDE_INT,
+ lshift_double (s.low, s.high, 1, HOST_BITS_PER_DOUBLE_INT,
&s.low, &s.high, 0);
}
{
lshift_double (a->data.low, a->data.high,
left_p ? b->data.low : (-b->data.low),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, !unsigned_p);
if (left_p) /* Only left shift saturates. */
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
else /* We need two double_int to store the left-shift result. */
{
double_int temp_high, temp_low;
- if (b->data.low == 2 * HOST_BITS_PER_WIDE_INT)
+ if (b->data.low == HOST_BITS_PER_DOUBLE_INT)
{
temp_high = a->data;
temp_low.high = 0;
{
lshift_double (a->data.low, a->data.high,
b->data.low,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_low.low, &temp_low.high, !unsigned_p);
/* Logical shift right to temp_high. */
lshift_double (a->data.low, a->data.high,
- b->data.low - 2 * HOST_BITS_PER_WIDE_INT,
- 2 * HOST_BITS_PER_WIDE_INT,
+ b->data.low - HOST_BITS_PER_DOUBLE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_high.low, &temp_high.high, 0);
}
if (!unsigned_p && a->data.high < 0) /* Signed-extend temp_high. */
int amount = GET_MODE_FBIT (mode) - GET_MODE_FBIT (a->mode);
lshift_double (a->data.low, a->data.high,
amount,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_low.low, &temp_low.high,
SIGNED_FIXED_POINT_MODE_P (a->mode));
/* Logical shift right to temp_high. */
lshift_double (a->data.low, a->data.high,
- amount - 2 * HOST_BITS_PER_WIDE_INT,
- 2 * HOST_BITS_PER_WIDE_INT,
+ amount - HOST_BITS_PER_DOUBLE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_high.low, &temp_high.high, 0);
if (SIGNED_FIXED_POINT_MODE_P (a->mode)
&& a->data.high < 0) /* Signed-extend temp_high. */
double_int temp;
lshift_double (a->data.low, a->data.high,
GET_MODE_FBIT (mode) - GET_MODE_FBIT (a->mode),
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp.low, &temp.high,
SIGNED_FIXED_POINT_MODE_P (a->mode));
f->mode = mode;
/* Left shift a to temp_high, temp_low. */
double_int temp_high, temp_low;
int amount = GET_MODE_FBIT (mode);
- if (amount == 2 * HOST_BITS_PER_WIDE_INT)
+ if (amount == HOST_BITS_PER_DOUBLE_INT)
{
temp_high = a;
temp_low.low = 0;
{
lshift_double (a.low, a.high,
amount,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_low.low, &temp_low.high, 0);
/* Logical shift right to temp_high. */
lshift_double (a.low, a.high,
- amount - 2 * HOST_BITS_PER_WIDE_INT,
- 2 * HOST_BITS_PER_WIDE_INT,
+ amount - HOST_BITS_PER_DOUBLE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&temp_high.low, &temp_high.high, 0);
}
if (!unsigned_p && a.high < 0) /* Signed-extend temp_high. */
f->data.low = 1;
f->data.high = 0;
lshift_double (f->data.low, f->data.high, i_f_bits,
- 2 * HOST_BITS_PER_WIDE_INT,
+ HOST_BITS_PER_DOUBLE_INT,
&f->data.low, &f->data.high, 1);
f->data = double_int_ext (f->data, 1 + i_f_bits, 0);
}
/* Right shift FIXED_CST to temp by fbit. */
temp = TREE_FIXED_CST (arg1).data;
mode = TREE_FIXED_CST (arg1).mode;
- if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_FBIT (mode) < HOST_BITS_PER_DOUBLE_INT)
{
temp = double_int_rshift (temp, GET_MODE_FBIT (mode),
HOST_BITS_PER_DOUBLE_INT,
lo = 0;
mask_hi = ((unsigned HOST_WIDE_INT) -1
- >> (2 * HOST_BITS_PER_WIDE_INT - width));
+ >> (HOST_BITS_PER_DOUBLE_INT - width));
mask_lo = -1;
}
else
if (total_bytes > len)
return NULL_TREE;
- if (total_bytes * BITS_PER_UNIT > 2 * HOST_BITS_PER_WIDE_INT)
+ if (total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT)
return NULL_TREE;
result = double_int_zero;
unsigned int width = TYPE_PRECISION (arg1_type);
if (TREE_CODE (arg1) == INTEGER_CST
- && width <= 2 * HOST_BITS_PER_WIDE_INT
+ && width <= HOST_BITS_PER_DOUBLE_INT
&& (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type)))
{
HOST_WIDE_INT signed_max_hi;
if (outer_width > HOST_BITS_PER_WIDE_INT)
{
mask_hi = ((unsigned HOST_WIDE_INT) -1
- >> (2 * HOST_BITS_PER_WIDE_INT - outer_width));
+ >> (HOST_BITS_PER_DOUBLE_INT - outer_width));
mask_lo = -1;
}
else
undefined, so it doesn't matter what we return, and some callers
expect to be able to use this routine for both signed and
unsigned conversions. */
- if (exp > 2*HOST_BITS_PER_WIDE_INT)
+ if (exp > HOST_BITS_PER_DOUBLE_INT)
goto overflow;
- rshift_significand (&t, r, 2*HOST_BITS_PER_WIDE_INT - exp);
+ rshift_significand (&t, r, HOST_BITS_PER_DOUBLE_INT - exp);
if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
{
high = t.sig[SIGSZ-1];
memset (r, 0, sizeof (*r));
r->cl = rvc_normal;
r->sign = high < 0 && !unsigned_p;
- SET_REAL_EXP (r, 2 * HOST_BITS_PER_WIDE_INT);
+ SET_REAL_EXP (r, HOST_BITS_PER_DOUBLE_INT);
if (r->sign)
{
if (width <= HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (x))
val = INTVAL (x);
- else if (width <= 2 * HOST_BITS_PER_WIDE_INT
+ else if (width <= HOST_BITS_PER_DOUBLE_INT
&& GET_CODE (x) == CONST_DOUBLE
&& CONST_DOUBLE_LOW (x) == 0)
{
lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
if (op_mode == VOIDmode
- || GET_MODE_PRECISION (op_mode) > 2 * HOST_BITS_PER_WIDE_INT)
+ || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
/* We should never get a negative number. */
gcc_assert (hv >= 0);
else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
/* We can do some operations on integer CONST_DOUBLEs. Also allow
for a DImode operation on a CONST_INT. */
else if (GET_MODE (op) == VOIDmode
- && width <= HOST_BITS_PER_WIDE_INT * 2
+ && width <= HOST_BITS_PER_DOUBLE_INT
&& (GET_CODE (op) == CONST_DOUBLE
|| CONST_INT_P (op)))
{
else if (GET_CODE (op) == CONST_DOUBLE
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& GET_MODE_CLASS (mode) == MODE_INT
- && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
+ && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
{
/* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
operators are intentionally left unspecified (to ease implementation
return const0_rtx;
/* Test against the unsigned upper bound. */
- if (width == 2 * HOST_BITS_PER_WIDE_INT)
+ if (width == HOST_BITS_PER_DOUBLE_INT)
{
th = -1;
tl = -1;
&& GET_MODE (op0) == mode
&& CONST_DOUBLE_LOW (trueop1) == 0
&& (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
- && (val < 2 * HOST_BITS_PER_WIDE_INT - 1
- || GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT))
+ && (val < HOST_BITS_PER_DOUBLE_INT - 1
+ || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
return simplify_gen_binary (ASHIFT, mode, op0,
GEN_INT (val + HOST_BITS_PER_WIDE_INT));
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
- while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
+ while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
{
*vp++
= CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
{
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
- for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = CONST_FIXED_VALUE_HIGH (el)
>> (i - HOST_BITS_PER_WIDE_INT);
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
elems[elem] = gen_int_mode (lo, outer_submode);
- else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
+ else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
elems[elem] = immed_double_const (lo, hi, outer_submode);
else
return NULL_RTX;
= MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
bprecision
= GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
- if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
- bprecision = HOST_BITS_PER_WIDE_INT * 2;
+ if (bprecision > HOST_BITS_PER_DOUBLE_INT)
+ bprecision = HOST_BITS_PER_DOUBLE_INT;
/* Create stubs for sizetype and bitsizetype so we can create constants. */
sizetype = make_node (INTEGER_TYPE);
int precision = TYPE_PRECISION (type);
/* We can not represent properly constants greater then
- 2 * HOST_BITS_PER_WIDE_INT, still we need the types
+ HOST_BITS_PER_DOUBLE_INT, still we need the types
as they are used by i386 vector extensions and friends. */
- if (precision > HOST_BITS_PER_WIDE_INT * 2)
- precision = HOST_BITS_PER_WIDE_INT * 2;
+ if (precision > HOST_BITS_PER_DOUBLE_INT)
+ precision = HOST_BITS_PER_DOUBLE_INT;
set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/false);
int precision = TYPE_PRECISION (type);
/* We can not represent properly constants greater then
- 2 * HOST_BITS_PER_WIDE_INT, still we need the types
+ HOST_BITS_PER_DOUBLE_INT, still we need the types
as they are used by i386 vector extensions and friends. */
- if (precision > HOST_BITS_PER_WIDE_INT * 2)
- precision = HOST_BITS_PER_WIDE_INT * 2;
+ if (precision > HOST_BITS_PER_DOUBLE_INT)
+ precision = HOST_BITS_PER_DOUBLE_INT;
TYPE_UNSIGNED (type) = 1;
low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0;
else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT)
high = 0;
- else if (TYPE_PRECISION (type) == 2 * HOST_BITS_PER_WIDE_INT)
+ else if (TYPE_PRECISION (type) == HOST_BITS_PER_DOUBLE_INT)
high = low;
else
gcc_unreachable ();
&& host_integerp (cst2, 1)
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
- && prec <= 2 * HOST_BITS_PER_WIDE_INT
+ && prec <= HOST_BITS_PER_DOUBLE_INT
&& prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
&& live_on_edge (e, name2)
&& !has_single_use (name2))
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& TREE_CODE (cst2) == INTEGER_CST
&& !integer_zerop (cst2)
- && prec <= 2 * HOST_BITS_PER_WIDE_INT
+ && prec <= HOST_BITS_PER_DOUBLE_INT
&& (prec > 1
|| TYPE_UNSIGNED (TREE_TYPE (val))))
{
/* First clear all bits that are beyond the type's precision in case
we've been sign extended. */
- if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ if (prec == HOST_BITS_PER_DOUBLE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
/* First clear all bits that are beyond the type's precision in case
we've been sign extended. */
- if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ if (prec == HOST_BITS_PER_DOUBLE_INT)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
we've been sign extended. Ignore if type's precision hasn't been set
since what we are doing is setting it. */
- if (prec == 2 * HOST_BITS_PER_WIDE_INT || prec == 0)
+ if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0)
;
else if (prec > HOST_BITS_PER_WIDE_INT)
high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
unsigned HOST_WIDEST_INT val = TREE_INT_CST_LOW (x);
#if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT
- gcc_assert (HOST_BITS_PER_WIDEST_INT >= 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT);
val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x))
<< HOST_BITS_PER_WIDE_INT);
#else
else
{
high.high = ((~(unsigned HOST_WIDE_INT) 0)
- >> (2 * HOST_BITS_PER_WIDE_INT - prec));
+ >> (HOST_BITS_PER_DOUBLE_INT - prec));
high.low = ~(unsigned HOST_WIDE_INT) 0;
}
value = TREE_INT_CST_LOW (local->val);
else
{
- gcc_assert (shift < 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT);
value = TREE_INT_CST_HIGH (local->val);
shift -= HOST_BITS_PER_WIDE_INT;
}
value = TREE_INT_CST_LOW (local->val);
else
{
- gcc_assert (shift < 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT);
value = TREE_INT_CST_HIGH (local->val);
shift -= HOST_BITS_PER_WIDE_INT;
}