+2016-05-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * wide-int.h: Update offset_int and widest_int documentation.
+ (WI_SIGNED_SHIFT_RESULT): New macro.
+ (wi::binary_traits): Define signed_shift_result_type for
+ shifts on offset_int- and widest_int-like types.
+ (generic_wide_int): Support <<= and >>= if << and >> are supported.
+ * tree.h (int_bit_position): Use shift operators instead of wi::
+ shifts.
+ * alias.c (adjust_offset_for_component_ref): Likewise.
+ * expr.c (get_inner_reference): Likewise.
+ * fold-const.c (fold_comparison): Likewise.
+ * gimple-fold.c (fold_nonarray_ctor_reference): Likewise.
+ * gimple-ssa-strength-reduction.c (restructure_reference): Likewise.
+ * tree-dfa.c (get_ref_base_and_extent): Likewise.
+ * tree-ssa-alias.c (indirect_ref_may_alias_decl_p): Likewise.
+ (stmt_kills_ref_p): Likewise.
+ * tree-ssa-ccp.c (bit_value_binop_1): Likewise.
+ * tree-ssa-math-opts.c (find_bswap_or_nop_load): Likewise.
+ * tree-ssa-sccvn.c (copy_reference_ops_from_ref): Likewise.
+ (ao_ref_init_from_vn_reference): Likewise.
+
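For illustration only (not part of the diff), a minimal sketch of the new
interface, assuming GCC's wide-int.h and a hypothetical INTEGER_CST tree
byte_off holding a byte offset:

  /* Illustrative sketch only; byte_off is a hypothetical INTEGER_CST.
     Old spelling, using the explicit wi:: shift functions.  */
  offset_int bits = wi::lshift (wi::to_offset (byte_off), LOG2_BITS_PER_UNIT);
  offset_int bytes = wi::arshift (bits, LOG2_BITS_PER_UNIT);

  /* New spelling: << and >> (an arithmetic right shift), plus the <<= and
     >>= compound forms provided by SHIFT_ASSIGNMENT_OPERATOR.  */
  offset_int bits2 = wi::to_offset (byte_off) << LOG2_BITS_PER_UNIT;
  bits2 >>= LOG2_BITS_PER_UNIT;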
offset_int woffset
= (wi::to_offset (xoffset)
- + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
- LOG2_BITS_PER_UNIT));
+ + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
+ >> LOG2_BITS_PER_UNIT));
if (!wi::fits_uhwi_p (woffset))
{
*known_p = false;
+2016-05-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * init.c (build_new_1): Use shift operators instead of wi:: shifts.
+
2016-05-02 Richard Biener <rguenther@suse.de>
* decl.c (grokdeclarator): Properly insert a DECL_EXPR for
unsigned shift = (max_outer_nelts.get_precision ()) - 7
- wi::clz (max_outer_nelts);
- max_outer_nelts = wi::lshift (wi::lrshift (max_outer_nelts, shift),
- shift);
+ max_outer_nelts = (max_outer_nelts >> shift) << shift;
outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node,
outer_nelts,
if (!integer_zerop (off))
{
offset_int boff, coff = mem_ref_offset (exp);
- boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
+ boff = coff << LOG2_BITS_PER_UNIT;
bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
{
offset_int tem = wi::sext (wi::to_offset (offset),
TYPE_PRECISION (sizetype));
- tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+ tem <<= LOG2_BITS_PER_UNIT;
tem += bit_offset;
if (wi::fits_shwi_p (tem))
{
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
- tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
+ tem >>= LOG2_BITS_PER_UNIT;
offset = size_binop (PLUS_EXPR, offset,
wide_int_to_tree (sizetype, tem));
}
{
offset_int tem = wi::sext (wi::to_offset (offset0),
TYPE_PRECISION (sizetype));
- tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+ tem <<= LOG2_BITS_PER_UNIT;
tem += bitpos0;
if (wi::fits_shwi_p (tem))
{
{
offset_int tem = wi::sext (wi::to_offset (offset1),
TYPE_PRECISION (sizetype));
- tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+ tem <<= LOG2_BITS_PER_UNIT;
tem += bitpos1;
if (wi::fits_shwi_p (tem))
{
/* Compute bit offset of the field. */
bitoffset = (wi::to_offset (field_offset)
- + wi::lshift (wi::to_offset (byte_offset),
- LOG2_BITS_PER_UNIT));
+ + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
/* Compute bit offset where the field ends. */
if (field_size != NULL_TREE)
bitoffset_end = bitoffset + wi::to_offset (field_size);
c2 = 0;
}
- c4 = wi::lrshift (index, LOG2_BITS_PER_UNIT);
+ c4 = index >> LOG2_BITS_PER_UNIT;
c5 = backtrace_base_for_ref (&t2);
*pbase = t1;
if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
{
- offset_int woffset = wi::lshift (wi::to_offset (this_offset),
- LOG2_BITS_PER_UNIT);
+ offset_int woffset = (wi::to_offset (this_offset)
+ << LOG2_BITS_PER_UNIT);
woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
bit_offset += woffset;
{
offset_int tem = (wi::to_offset (ssize)
- wi::to_offset (fsize));
- tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+ tem <<= LOG2_BITS_PER_UNIT;
tem -= woffset;
maxsize += tem;
}
= wi::sext (wi::to_offset (index) - wi::to_offset (low_bound),
TYPE_PRECISION (TREE_TYPE (index)));
woffset *= wi::to_offset (unit_size);
- woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT);
+ woffset <<= LOG2_BITS_PER_UNIT;
bit_offset += woffset;
/* An array ref with a constant index up in the structure
else
{
offset_int off = mem_ref_offset (exp);
- off = wi::lshift (off, LOG2_BITS_PER_UNIT);
+ off <<= LOG2_BITS_PER_UNIT;
off += bit_offset;
if (wi::fits_shwi_p (off))
{
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
offset_int moff = mem_ref_offset (base1);
- moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ moff <<= LOG2_BITS_PER_UNIT;
if (wi::neg_p (moff))
offset2p += (-moff).to_short_addr ();
else
|| TREE_CODE (dbase2) == TARGET_MEM_REF)
{
offset_int moff = mem_ref_offset (dbase2);
- moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ moff <<= LOG2_BITS_PER_UNIT;
if (wi::neg_p (moff))
doffset1 -= (-moff).to_short_addr ();
else
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ moff <<= LOG2_BITS_PER_UNIT;
if (wi::neg_p (moff))
offset2 += (-moff).to_short_addr ();
else
offset1 += moff.to_shwi ();
moff = mem_ref_offset (base2);
- moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ moff <<= LOG2_BITS_PER_UNIT;
if (wi::neg_p (moff))
offset1 += (-moff).to_short_addr ();
else
TREE_OPERAND (ref->base, 1)))
{
offset_int off1 = mem_ref_offset (base);
- off1 = wi::lshift (off1, LOG2_BITS_PER_UNIT);
+ off1 <<= LOG2_BITS_PER_UNIT;
off1 += offset;
offset_int off2 = mem_ref_offset (ref->base);
- off2 = wi::lshift (off2, LOG2_BITS_PER_UNIT);
+ off2 <<= LOG2_BITS_PER_UNIT;
off2 += ref_offset;
if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2))
{
if (TREE_CODE (rbase) != MEM_REF)
return false;
// Compare pointers.
- offset += wi::lshift (mem_ref_offset (base),
- LOG2_BITS_PER_UNIT);
- roffset += wi::lshift (mem_ref_offset (rbase),
- LOG2_BITS_PER_UNIT);
+ offset += mem_ref_offset (base) << LOG2_BITS_PER_UNIT;
+ roffset += mem_ref_offset (rbase) << LOG2_BITS_PER_UNIT;
base = TREE_OPERAND (base, 0);
rbase = TREE_OPERAND (rbase, 0);
}
if (base == rbase
&& offset <= roffset
&& (roffset + ref->max_size
- <= offset + wi::lshift (wi::to_offset (len),
- LOG2_BITS_PER_UNIT)))
+ <= offset + (wi::to_offset (len) << LOG2_BITS_PER_UNIT)))
return true;
break;
}
}
else
{
- *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
- *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
+ *mask = wi::ext (r1mask << shift, width, sgn);
+ *val = wi::ext (r1val << shift, width, sgn);
}
}
}
if (!integer_zerop (off))
{
offset_int boff, coff = mem_ref_offset (base_addr);
- boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
+ boff = coff << LOG2_BITS_PER_UNIT;
bit_offset += boff;
}
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
- tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
+ tem >>= LOG2_BITS_PER_UNIT;
if (offset)
offset = size_binop (PLUS_EXPR, offset,
wide_int_to_tree (sizetype, tem));
{
offset_int off
= (wi::to_offset (this_offset)
- + wi::lrshift (wi::to_offset (bit_offset),
- LOG2_BITS_PER_UNIT));
+ + (wi::to_offset (bit_offset) >> LOG2_BITS_PER_UNIT));
if (wi::fits_shwi_p (off)
/* Prohibit value-numbering zero offset components
of addresses the same before the pass folding
max_size = -1;
else
{
- offset_int woffset = wi::lshift (wi::to_offset (this_offset),
- LOG2_BITS_PER_UNIT);
+ offset_int woffset = (wi::to_offset (this_offset)
+ << LOG2_BITS_PER_UNIT);
woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
offset += woffset;
}
= wi::sext (wi::to_offset (op->op0) - wi::to_offset (op->op1),
TYPE_PRECISION (TREE_TYPE (op->op0)));
woffset *= wi::to_offset (op->op2);
- woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT);
+ woffset <<= LOG2_BITS_PER_UNIT;
offset += woffset;
}
break;
inline HOST_WIDE_INT
int_bit_position (const_tree field)
{
- return (wi::lshift (wi::to_offset (DECL_FIELD_OFFSET (field)), BITS_PER_UNIT_LOG)
+ return ((wi::to_offset (DECL_FIELD_OFFSET (field)) << BITS_PER_UNIT_LOG)
+ wi::to_offset (DECL_FIELD_BIT_OFFSET (field))).to_shwi ();
}
Since the values are logically signed, there is no need to
distinguish between signed and unsigned operations. Sign-sensitive
comparison operators <, <=, > and >= are therefore supported.
+ Shift operators << and >> are also supported, with >> being
+ an _arithmetic_ right shift.
[ Note that, even though offset_int is effectively int128_t,
it can still be useful to use unsigned comparisons like
Like offset_int, widest_int is wider than all the values that
it needs to represent, so the integers are logically signed.
- Sign-sensitive comparison operators <, <=, > and >= are supported.
+ Sign-sensitive comparison operators <, <=, > and >= are supported,
+ as are << and >>.
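As a hedged illustration of the documented behaviour (not part of the diff):
because the values are logically signed, >> maps to wi::arshift and keeps
the sign of negative offsets:

  /* Illustration only, assuming wide-int.h.  */
  offset_int x = -16;
  x >>= 2;                                 /* x == -4, not a huge positive value.  */
  offset_int y = x << LOG2_BITS_PER_UNIT;  /* y == -32 when LOG2_BITS_PER_UNIT == 3.  */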
There are several places in the GCC where this should/must be used:
#define WI_BINARY_RESULT(T1, T2) \
typename wi::binary_traits <T1, T2>::result_type
+/* The type of result produced by T1 << T2. Leads to substitution failure
+ if the operation isn't supported. Defined purely for brevity. */
+#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::signed_shift_result_type
+
/* The type of result produced by a signed binary predicate on types T1 and T2.
This is bool if signed comparisons make sense for T1 and T2 and leads to
substitution failure otherwise. */
so as not to confuse gengtype. */
typedef generic_wide_int < fixed_wide_int_storage
<int_traits <T1>::precision> > result_type;
+ typedef result_type signed_shift_result_type;
typedef bool signed_predicate_result;
};
STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
typedef generic_wide_int < fixed_wide_int_storage
<int_traits <T1>::precision> > result_type;
+ typedef result_type signed_shift_result_type;
typedef bool signed_predicate_result;
};
template <typename T> \
generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
+/* Restrict these to cases where the shift operator is defined. */
+#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }
+
#define INCDEC_OPERATOR(OP, DELTA) \
generic_wide_int &OP () { *this += DELTA; return *this; }
ASSIGNMENT_OPERATOR (operator +=, add)
ASSIGNMENT_OPERATOR (operator -=, sub)
ASSIGNMENT_OPERATOR (operator *=, mul)
+ SHIFT_ASSIGNMENT_OPERATOR (operator <<=, <<)
+ SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
INCDEC_OPERATOR (operator ++, 1)
INCDEC_OPERATOR (operator --, -1)
#undef BINARY_PREDICATE
#undef UNARY_OPERATOR
#undef BINARY_OPERATOR
+#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR
template <typename storage>
template <typename T>
-generic_wide_int <storage> &
+inline generic_wide_int <storage> &
generic_wide_int <storage>::operator = (const T &x)
{
storage::operator = (x);
#undef SIGNED_BINARY_PREDICATE
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator << (const T1 &x, const T2 &y)
+{
+ return wi::lshift (x, y);
+}
+
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator >> (const T1 &x, const T2 &y)
+{
+ return wi::arshift (x, y);
+}
+
template<typename T>
void
gt_ggc_mx (generic_wide_int <T> *)