+2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * expmed.h (store_bit_field): Change bitregion_start and
+ bitregion_end from unsigned HOST_WIDE_INT to poly_uint64.
+ * expmed.c (adjust_bit_field_mem_for_reg, strict_volatile_bitfield_p)
+ (store_bit_field_1, store_integral_bit_field, store_bit_field)
+ (store_fixed_bit_field, store_split_bit_field): Likewise.
+ * expr.c (store_constructor_field, store_field): Likewise.
+ (optimize_bitfield_assignment_op): Likewise. Make the same change
+ to bitsize and bitpos.
+ * machmode.h (bit_field_mode_iterator): Change m_bitregion_start
+ and m_bitregion_end from HOST_WIDE_INT to poly_int64. Make the
+ same change in the constructor arguments.
+ (get_best_mode): Change bitregion_start and bitregion_end from
+ unsigned HOST_WIDE_INT to poly_uint64.
+ * stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
+ Change bitregion_start and bitregion_end from HOST_WIDE_INT to
+ poly_int64.
+ (bit_field_mode_iterator::next_mode): Update for new types
+ of m_bitregion_start and m_bitregion_end.
+ (get_best_mode): Change bitregion_start and bitregion_end from
+ unsigned HOST_WIDE_INT to poly_uint64.
+
2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
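
The mechanical part of this patch is the type change itself; the
interesting part is the comparison rewrites below.  A poly_uint64
represents a value of the form c0 + c1 * x, where x is a nonnegative
runtime parameter (for AArch64 SVE, the number of vector chunks beyond
the minimum), so two values need not be ordered at compile time and
plain <, >, == give way to the maybe_*/known_* predicates from
poly-int.h.  A minimal sketch of the semantics, assuming a target
configured with NUM_POLY_INT_COEFFS == 2 (the two-coefficient
constructor only exists in that configuration):

  /* 16 + 16x bits: 16 bits when x == 0, 32 bits when x == 1, ...  */
  poly_uint64 a (16, 16);
  poly_uint64 b (32, 0);   /* A compile-time constant 32.  */

  maybe_lt (a, b);   /* true: a < b when x == 0.  */
  known_lt (a, b);   /* false: a == b when x == 1, a > b beyond that.  */
  maybe_ne (a, 0U);  /* true: a is nonzero for every x.  */

The guards rewritten below therefore use maybe_ne/maybe_lt/maybe_gt:
the conservative direction is to bail out whenever the dangerous
condition might hold for some runtime value of x.
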
static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
+ poly_uint64, poly_uint64,
machine_mode, rtx, bool, bool);
static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
+ poly_uint64, poly_uint64,
rtx, scalar_int_mode, bool);
static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
unsigned HOST_WIDE_INT,
static void store_split_bit_field (rtx, opt_scalar_int_mode,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
+ poly_uint64, poly_uint64,
rtx, scalar_int_mode, bool);
static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
unsigned HOST_WIDE_INT,
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
rtx op0, HOST_WIDE_INT bitsize,
HOST_WIDE_INT bitnum,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start,
+ poly_uint64 bitregion_end,
machine_mode fieldmode,
unsigned HOST_WIDE_INT *new_bitnum)
{
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
scalar_int_mode fieldmode,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end)
+ poly_uint64 bitregion_start,
+ poly_uint64 bitregion_end)
{
unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
return false;
/* Check for cases where the C++ memory model applies. */
- if (bitregion_end != 0
- && (bitnum - bitnum % modesize < bitregion_start
- || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
+ if (maybe_ne (bitregion_end, 0U)
+ && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
+ || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
+ bitregion_end)))
return false;
return true;
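
strict_volatile_bitfield_p must answer "false" unless the mode-sized
access is certain to stay inside the bit region, so the bail-out above
asks whether the access might fall outside (maybe_lt/maybe_gt), not
whether it certainly does.  A hedged illustration with hypothetical
values:

  poly_uint64 bitregion_start (0, 8);       /* Region starts at bit 8x.  */
  unsigned HOST_WIDE_INT access_start = 0;  /* bitnum - bitnum % modesize.  */

  /* True, because 0 < 8x whenever x >= 1, so the strict-volatile
     path is refused.  known_lt would be false here (the x == 0 case
     breaks it) and would wrongly let the access through.  */
  maybe_lt (access_start, bitregion_start);
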
static bool
store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
machine_mode fieldmode,
rtx value, bool reverse, bool fallback_p)
{
store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start,
+ poly_uint64 bitregion_end,
machine_mode fieldmode,
rtx value, bool reverse, bool fallback_p)
{
void
store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
machine_mode fieldmode,
rtx value, bool reverse)
{
/* Under the C++0x memory model, we must not touch bits outside the
bit region. Adjust the address to start at the beginning of the
bit region. */
- if (MEM_P (str_rtx) && bitregion_start > 0)
+ if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
{
scalar_int_mode best_mode;
machine_mode addr_mode = VOIDmode;
- HOST_WIDE_INT offset;
-
- gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);
- offset = bitregion_start / BITS_PER_UNIT;
+ poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
bitnum -= bitregion_start;
poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
bitregion_end -= bitregion_start;
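
exact_div replaces the assert-then-divide pair: it returns the
quotient and, in checking builds, verifies that the division is exact
for every value of the runtime parameter (each coefficient must be a
multiple of the divisor).  Sketch with hypothetical values:

  /* 64 + 128x bits is byte-aligned for all x, giving 8 + 16x bytes.
     Something like 4 + 8x bits would trip the internal check, just
     as the removed gcc_assert would have fired.  */
  poly_uint64 start_bits (64, 128);
  poly_uint64 start_bytes = exact_div (start_bits, BITS_PER_UNIT);
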
store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
rtx value, scalar_int_mode value_mode, bool reverse)
{
/* There is a case not handled here:
store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
rtx value, scalar_int_mode value_mode, bool reverse)
{
unsigned int unit, total_bits, bitsdone = 0;
UNIT close to the end of the region as needed. If op0 is a REG
or SUBREG of REG, don't do this, as there can't be data races
on a register and we can expand shorter code in some cases. */
- if (bitregion_end
+ if (maybe_ne (bitregion_end, 0U)
&& unit > BITS_PER_UNIT
- && bitpos + bitsdone - thispos + unit > bitregion_end + 1
+ && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
&& !REG_P (op0)
&& (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
{
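
The same "might overrun" logic applies to the split-field case: poly
arithmetic such as bitregion_end + 1 still uses ordinary operators,
and only the comparison needs a predicate.  With hypothetical values:

  poly_uint64 bitregion_end (63, 64);   /* Last bit of region: 63 + 64x.  */
  unsigned HOST_WIDE_INT next = 72;     /* bitpos + bitsdone - thispos + unit.  */

  /* True for x == 0 (72 > 64) even though every larger x would be
     fine, so the access is narrowed towards BITS_PER_UNIT to stay
     safe in all cases.  */
  maybe_gt (next, bitregion_end + 1);
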
#endif
extern void store_bit_field (rtx, poly_uint64, poly_uint64,
- unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT,
+ poly_uint64, poly_uint64,
machine_mode, rtx, bool);
extern rtx extract_bit_field (rtx, poly_uint64, poly_uint64, int, rtx,
machine_mode, machine_mode, bool, rtx *);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
-static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
- HOST_WIDE_INT, unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, machine_mode,
- tree, int, alias_set_type, bool);
+static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
+ HOST_WIDE_INT, poly_uint64, poly_uint64,
+ machine_mode, tree, int, alias_set_type,
+ bool);
static void store_constructor (tree, rtx, int, HOST_WIDE_INT, bool);
static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
+ poly_uint64, poly_uint64,
machine_mode, tree, alias_set_type, bool, bool);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
and there's nothing else to do. */
static bool
-optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+optimize_bitfield_assignment_op (poly_uint64 pbitsize,
+ poly_uint64 pbitpos,
+ poly_uint64 pbitregion_start,
+ poly_uint64 pbitregion_end,
machine_mode mode1, rtx str_rtx,
tree to, tree src, bool reverse)
{
gimple *srcstmt;
enum tree_code code;
+ unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
if (mode1 != VOIDmode
+ || !pbitsize.is_constant (&bitsize)
+ || !pbitpos.is_constant (&bitpos)
+ || !pbitregion_start.is_constant (&bitregion_start)
+ || !pbitregion_end.is_constant (&bitregion_end)
|| bitsize >= BITS_PER_WORD
|| str_bitsize > BITS_PER_WORD
|| TREE_SIDE_EFFECTS (to)
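
Rather than converting all of optimize_bitfield_assignment_op to poly
arithmetic, the new code lowers the poly_uint64 parameters once at the
top and gives up when any of them is not a compile-time constant: the
optimization only makes sense for fixed positions anyway.  This
"is_constant or bail out" idiom is the standard fallback pattern;
sketch assuming a poly_uint64 bitpos parameter:

  unsigned HOST_WIDE_INT cbitpos;
  if (!bitpos.is_constant (&cbitpos))
    /* The position depends on the runtime vector length; skip the
       optimization and let the generic path handle it.  */
    return false;
  /* From here on cbitpos is an ordinary scalar.  */
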
static void
store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize,
HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start,
+ poly_uint64 bitregion_end,
machine_mode mode,
tree exp, int cleared,
alias_set_type alias_set, bool reverse)
static rtx
store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
machine_mode mode, tree exp,
alias_set_type alias_set, bool nontemporal, bool reverse)
{
{
public:
bit_field_mode_iterator (HOST_WIDE_INT, HOST_WIDE_INT,
- HOST_WIDE_INT, HOST_WIDE_INT,
+ poly_int64, poly_int64,
unsigned int, bool);
bool next_mode (scalar_int_mode *);
bool prefer_smaller_modes ();
for invalid input such as gcc.dg/pr48335-8.c. */
HOST_WIDE_INT m_bitsize;
HOST_WIDE_INT m_bitpos;
- HOST_WIDE_INT m_bitregion_start;
- HOST_WIDE_INT m_bitregion_end;
+ poly_int64 m_bitregion_start;
+ poly_int64 m_bitregion_end;
unsigned int m_align;
bool m_volatilep;
int m_count;
/* Find the best mode to use to access a bit field. */
-extern bool get_best_mode (int, int, unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, unsigned int,
+extern bool get_best_mode (int, int, poly_uint64, poly_uint64, unsigned int,
unsigned HOST_WIDE_INT, bool, scalar_int_mode *);
/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */
bit_field_mode_iterator
::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
- HOST_WIDE_INT bitregion_start,
- HOST_WIDE_INT bitregion_end,
+ poly_int64 bitregion_start,
+ poly_int64 bitregion_end,
unsigned int align, bool volatilep)
: m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
m_bitpos (bitpos), m_bitregion_start (bitregion_start),
m_bitregion_end (bitregion_end), m_align (align),
m_volatilep (volatilep), m_count (0)
{
- if (!m_bitregion_end)
+ if (known_eq (m_bitregion_end, 0))
{
/* We can assume that any aligned chunk of ALIGN bits that overlaps
the bitfield is mapped and won't trap, provided that ALIGN isn't
= MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
if (bitsize <= 0)
bitsize = 1;
- m_bitregion_end = bitpos + bitsize + units - 1;
- m_bitregion_end -= m_bitregion_end % units + 1;
+ HOST_WIDE_INT end = bitpos + bitsize + units - 1;
+ m_bitregion_end = end - end % units - 1;
}
}
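
The rounding is now computed in a HOST_WIDE_INT temporary (bitpos,
bitsize and units are all still scalars here) and assigned to the poly
member once, since poly_int types deliberately have no % operator and
the old read-modify-write of m_bitregion_end would no longer compile.
The value is unchanged; for example with bitpos == 3, bitsize == 5 and
units == 8:

  end             = 3 + 5 + 8 - 1   = 15
  m_bitregion_end = 15 - 15 % 8 - 1 = 7

which matches the old two-statement form (15 - (15 % 8 + 1)), i.e. the
region ends at the last bit of the first byte.
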
/* Stop if the mode goes outside the bitregion. */
HOST_WIDE_INT start = m_bitpos - substart;
- if (m_bitregion_start && start < m_bitregion_start)
+ if (maybe_ne (m_bitregion_start, 0)
+ && maybe_lt (start, m_bitregion_start))
break;
HOST_WIDE_INT end = start + unit;
- if (end > m_bitregion_end + 1)
+ if (maybe_gt (end, m_bitregion_end + 1))
break;
/* Stop if the mode requires too much alignment. */
bool
get_best_mode (int bitsize, int bitpos,
- unsigned HOST_WIDE_INT bitregion_start,
- unsigned HOST_WIDE_INT bitregion_end,
+ poly_uint64 bitregion_start, poly_uint64 bitregion_end,
unsigned int align,
unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
scalar_int_mode *best_mode)
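
Existing callers that pass scalar bounds need no adjustment, because
HOST_WIDE_INT converts implicitly to poly_uint64 (only the reverse
direction requires an explicit is_constant/to_constant).  A sketch of
a typical call site, with hypothetical surrounding variables:

  scalar_int_mode best_mode;
  if (get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
                     MEM_ALIGN (op0), BITS_PER_WORD,
                     MEM_VOLATILE_P (op0), &best_mode))
    /* best_mode is an integer mode that covers the bit field without
       touching bits outside the region.  */
    ;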