+2017-12-21 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * function.h (frame_space): Change start and length from HOST_WIDE_INT
+ to poly_int64.
+ (get_frame_size): Return the size as a poly_int64 rather than a
+ HOST_WIDE_INT.
+ (frame_offset_overflow): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
+ (assign_stack_temp): Likewise for the size.
+ * function.c (get_frame_size): Return a poly_int64 rather than
+ a HOST_WIDE_INT.
+ (frame_offset_overflow): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (try_fit_stack_local): Take the start, length and size as poly_int64s
+ rather than HOST_WIDE_INTs. Return the offset as a poly_int64_pod
+ rather than a HOST_WIDE_INT.
+ (add_frame_space): Take the start and end as poly_int64s rather than
+ HOST_WIDE_INTs.
+ (assign_stack_local_1, assign_stack_local, assign_stack_temp_for_type)
+ (assign_stack_temp): Likewise for the size.
+ (temp_slot): Change size, base_offset and full_size from HOST_WIDE_INT
+ to poly_int64.
+ (find_temp_slot_from_address): Handle polynomial offsets.
+ (combine_temp_slots): Likewise.
+ * emit-rtl.h (rtl_data::x_frame_offset): Change from HOST_WIDE_INT
+ to poly_int64.
+ * cfgexpand.c (alloc_stack_frame_space): Return the offset as a
+ poly_int64 rather than a HOST_WIDE_INT.
+ (expand_one_stack_var_at): Take the offset as a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (expand_stack_vars, expand_one_stack_var_1, expand_used_vars): Handle
+ polynomial frame offsets.
+ * config/m32r/m32r-protos.h (m32r_compute_frame_size): Take the size
+ as a poly_int64 rather than an int.
+ * config/m32r/m32r.c (m32r_compute_frame_size): Likewise.
+ * config/v850/v850-protos.h (compute_frame_size): Likewise.
+ * config/v850/v850.c (compute_frame_size): Likewise.
+ * config/xtensa/xtensa-protos.h (compute_frame_size): Likewise.
+ * config/xtensa/xtensa.c (compute_frame_size): Likewise.
+ * config/pa/pa-protos.h (pa_compute_frame_size): Take the size as a
+ poly_int64 rather than a HOST_WIDE_INT.
+ * config/pa/pa.c (pa_compute_frame_size): Likewise.
+ * explow.h (get_dynamic_stack_base): Take the offset as a poly_int64
+ rather than a HOST_WIDE_INT.
+ * explow.c (get_dynamic_stack_base): Likewise.
+ * final.c (final_start_function): Use the constant lower bound
+ of the frame size for -Wframe-larger-than.
+ * ira.c (do_reload): Adjust for new get_frame_size return type.
+ * lra.c (lra): Likewise.
+ * reload1.c (reload): Likewise.
+ * config/avr/avr.c (avr_asm_function_end_prologue): Likewise.
+ * config/pa/pa.h (EXIT_IGNORE_STACK): Likewise.
+ * rtlanal.c (get_initial_register_offset): Return the offset as
+ a poly_int64 rather than a HOST_WIDE_INT.
+
2017-12-21 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
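Everything below follows one conversion pattern: a HOST_WIDE_INT size or offset becomes a poly_int64, and direct comparisons become explicit predicates (known_eq, maybe_ne, maybe_lt, ...), because two polynomial values need not compare the same way for every runtime vector length. As a rough standalone model of that idiom (this is not GCC's poly-int.h, which is a template over the coefficient count with careful overflow handling; the poly64/model_* names are invented for illustration, and the indeterminate X is assumed nonnegative, as in the poly_int documentation):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative model: value = coeffs[0] + coeffs[1] * X for an
   unknown runtime quantity X >= 0 (e.g. extra vector quanta).  */
struct poly64 { int64_t coeffs[2]; };

/* Equal for every possible X.  */
static bool
model_known_eq (struct poly64 a, struct poly64 b)
{
  return a.coeffs[0] == b.coeffs[0] && a.coeffs[1] == b.coeffs[1];
}

/* Unequal for at least one X.  */
static bool
model_maybe_ne (struct poly64 a, struct poly64 b)
{
  return !model_known_eq (a, b);
}

/* Less than B for at least one X: either at X = 0 or as X grows.  */
static bool
model_maybe_lt (struct poly64 a, struct poly64 b)
{
  return a.coeffs[0] < b.coeffs[0] || a.coeffs[1] < b.coeffs[1];
}

/* True if V has no runtime-variable part; *OUT gets the value.  */
static bool
model_is_constant (struct poly64 v, int64_t *out)
{
  if (v.coeffs[1] != 0)
    return false;
  *out = v.coeffs[0];
  return true;
}

The sketches after some of the hunks below extend this model to the other helpers the patch uses.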
/* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
Return the frame offset. */
-static HOST_WIDE_INT
+static poly_int64
alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align)
{
- HOST_WIDE_INT offset, new_frame_offset;
+ poly_int64 offset, new_frame_offset;
if (FRAME_GROWS_DOWNWARD)
{
new_frame_offset
- = align_base (frame_offset - frame_phase - size,
- align, false) + frame_phase;
+ = aligned_lower_bound (frame_offset - frame_phase - size,
+ align) + frame_phase;
offset = new_frame_offset;
}
else
{
new_frame_offset
- = align_base (frame_offset - frame_phase, align, true) + frame_phase;
+ = aligned_upper_bound (frame_offset - frame_phase,
+ align) + frame_phase;
offset = new_frame_offset;
new_frame_offset += size;
}
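align_base rounded a HOST_WIDE_INT up or down with bit operations; aligned_lower_bound and aligned_upper_bound express the same two roundings for polynomial values and make the direction part of the name. For constant operands they reduce to the usual power-of-two rounding, as in this sketch (model_* names invented; the real helpers extend this to polynomial operands):

/* ALIGN must be a power of two.  */
static int64_t
model_aligned_lower_bound (int64_t value, uint64_t align)
{
  /* Round towards negative infinity, e.g. (-19, 16) -> -32.  */
  return value & -(int64_t) align;
}

static int64_t
model_aligned_upper_bound (int64_t value, uint64_t align)
{
  /* Round towards positive infinity, e.g. (9, 8) -> 16.  */
  return (value + (int64_t) align - 1) & -(int64_t) align;
}

The well-defined floor behaviour is also why the old comment in try_fit_stack_local about division with a negative dividend can simply be deleted further down.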
static void
expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
- HOST_WIDE_INT offset)
+ poly_int64 offset)
{
unsigned align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
- gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
+ gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode)));
x = plus_constant (Pmode, base, offset);
x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME
important, we'll simply use the alignment that is already set. */
if (base == virtual_stack_vars_rtx)
offset -= frame_phase;
- align = least_bit_hwi (offset);
+ align = known_alignment (offset);
align *= BITS_PER_UNIT;
if (align == 0 || align > base_align)
align = base_align;
{
rtx base;
unsigned base_align, alignb;
- HOST_WIDE_INT offset;
+ poly_int64 offset;
i = stack_vars_sorted[si];
if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
{
base = virtual_stack_vars_rtx;
- if ((asan_sanitize_stack_p ())
- && pred)
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (asan_sanitize_stack_p ()
+ && pred
+ && frame_offset.is_constant (&prev_offset))
{
- HOST_WIDE_INT prev_offset
- = align_base (frame_offset,
- MAX (alignb, ASAN_RED_ZONE_SIZE),
- !FRAME_GROWS_DOWNWARD);
+ prev_offset = align_base (prev_offset,
+ MAX (alignb, ASAN_RED_ZONE_SIZE),
+ !FRAME_GROWS_DOWNWARD);
tree repr_decl = NULL_TREE;
offset
= alloc_stack_frame_space (stack_vars[i].size + ASAN_RED_ZONE_SIZE,
MAX (alignb, ASAN_RED_ZONE_SIZE));
data->asan_vec.safe_push (prev_offset);
- data->asan_vec.safe_push (offset + stack_vars[i].size);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ data->asan_vec.safe_push ((offset + stack_vars[i].size)
+ .to_constant ());
/* Find best representative of the partition.
Prefer those with DECL_NAME, even better
satisfying asan_protect_stack_decl predicate. */
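The two new comments above encode the key invariant: ASAN's frame description strings can only record constant byte offsets, so the instrumented path is entered only when frame_offset.is_constant (&prev_offset) succeeds, and once the starting offset is constant, allocating a constant amount keeps it constant, which is what licenses .to_constant (). In the model above, to_constant is just an asserted extraction (invented name):

#include <assert.h>

/* Extract a value already known to be constant.  */
static int64_t
model_to_constant (struct poly64 v)
{
  assert (v.coeffs[1] == 0);
  return v.coeffs[0];
}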
space. */
if (large_size > 0 && ! large_allocation_done)
{
- HOST_WIDE_INT loffset;
+ poly_int64 loffset;
rtx large_allocsize;
large_allocsize = GEN_INT (large_size);
static void
expand_one_stack_var_1 (tree var)
{
- HOST_WIDE_INT size, offset;
+ HOST_WIDE_INT size;
+ poly_int64 offset;
unsigned byte_align;
if (TREE_CODE (var) == SSA_NAME)
in addition to phase 1 and 2. */
expand_stack_vars (asan_decl_phase_3, &data);
- if (!data.asan_vec.is_empty ())
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (!data.asan_vec.is_empty ()
+ && frame_offset.is_constant (&prev_offset))
{
- HOST_WIDE_INT prev_offset = frame_offset;
HOST_WIDE_INT offset, sz, redzonesz;
redzonesz = ASAN_RED_ZONE_SIZE;
sz = data.asan_vec[0] - prev_offset;
&& sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb)
redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1)
& ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz;
- offset
- = alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE)
+ .to_constant ());
data.asan_vec.safe_push (prev_offset);
data.asan_vec.safe_push (offset);
/* Leave space for alignment if STRICT_ALIGNMENT. */
if (STACK_ALIGNMENT_NEEDED)
{
HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
- if (!FRAME_GROWS_DOWNWARD)
- frame_offset += align - 1;
- frame_offset &= -align;
+ if (FRAME_GROWS_DOWNWARD)
+ frame_offset = aligned_lower_bound (frame_offset, align);
+ else
+ frame_offset = aligned_upper_bound (frame_offset, align);
}
return var_end_seq;
avr_outgoing_args_size());
fprintf (file, "/* frame size = " HOST_WIDE_INT_PRINT_DEC " */\n",
- get_frame_size());
+ (HOST_WIDE_INT) get_frame_size());
if (!cfun->machine->gasisr.yes)
{
extern void m32r_init (void);
extern void m32r_init_expanders (void);
-extern unsigned m32r_compute_frame_size (int);
+extern unsigned m32r_compute_frame_size (poly_int64);
extern void m32r_expand_prologue (void);
extern void m32r_expand_epilogue (void);
extern int direct_return (void);
SIZE is the size needed for local variables. */
unsigned int
-m32r_compute_frame_size (int size) /* # of var. bytes allocated. */
+m32r_compute_frame_size (poly_int64 size) /* # of var. bytes allocated. */
{
unsigned int regno;
unsigned int total_size, var_size, args_size, pretend_size, extra_size;
extern int pa_zdepi_cint_p (unsigned HOST_WIDE_INT);
extern void pa_output_ascii (FILE *, const char *, int);
-extern HOST_WIDE_INT pa_compute_frame_size (HOST_WIDE_INT, int *);
+extern HOST_WIDE_INT pa_compute_frame_size (poly_int64, int *);
extern void pa_expand_prologue (void);
extern void pa_expand_epilogue (void);
extern bool pa_can_use_return_insn (void);
}
HOST_WIDE_INT
-pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
+pa_compute_frame_size (poly_int64 size, int *fregs_live)
{
int freg_saved = 0;
int i, j;
extern int may_call_alloca;
#define EXIT_IGNORE_STACK \
- (get_frame_size () != 0 \
+ (maybe_ne (get_frame_size (), 0) \
|| cfun->calls_alloca || crtl->outgoing_args_size)
/* Length in units of the trampoline for entering a nested function. */
extern void expand_epilogue (void);
extern int v850_handle_pragma (int (*)(void), void (*)(int), char *);
extern int compute_register_save_size (long *);
-extern int compute_frame_size (int, long *);
+extern int compute_frame_size (poly_int64, long *);
extern void v850_init_expanders (void);
#ifdef RTX_CODE
-------------------------- ---- ------------------ V */
int
-compute_frame_size (int size, long * p_reg_saved)
+compute_frame_size (poly_int64 size, long * p_reg_saved)
{
return (size
+ compute_register_save_size (p_reg_saved)
extern void xtensa_setup_frame_addresses (void);
extern int xtensa_dbx_register_number (int);
-extern long compute_frame_size (int);
+extern long compute_frame_size (poly_int64);
extern bool xtensa_use_return_instruction_p (void);
extern void xtensa_expand_prologue (void);
extern void xtensa_expand_epilogue (void);
#define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
long
-compute_frame_size (int size)
+compute_frame_size (poly_int64 size)
{
int regno;
/* Offset to end of allocated area of stack frame.
If stack grows down, this is the address of the last stack slot allocated.
If stack grows up, this is the address for the next slot. */
- HOST_WIDE_INT x_frame_offset;
+ poly_int64_pod x_frame_offset;
/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
rtx_insn *x_parm_birth_insn;
of memory. */
rtx
-get_dynamic_stack_base (HOST_WIDE_INT offset, unsigned required_align)
+get_dynamic_stack_base (poly_int64 offset, unsigned required_align)
{
rtx target;
extern void get_dynamic_stack_size (rtx *, unsigned, unsigned, HOST_WIDE_INT *);
/* Returns the address of the dynamic stack space without allocating it. */
-extern rtx get_dynamic_stack_base (HOST_WIDE_INT offset,
- unsigned required_align);
+extern rtx get_dynamic_stack_base (poly_int64, unsigned);
/* Emit one stack probe at ADDRESS, an address within the stack. */
extern void emit_stack_probe (rtx);
TREE_ASM_WRITTEN (DECL_INITIAL (current_function_decl)) = 1;
}
+ HOST_WIDE_INT min_frame_size = constant_lower_bound (get_frame_size ());
if (warn_frame_larger_than
- && get_frame_size () > frame_larger_than_size)
+ && min_frame_size > frame_larger_than_size)
{
/* Issue a warning */
warning (OPT_Wframe_larger_than_,
"the frame size of %wd bytes is larger than %wd bytes",
- get_frame_size (), frame_larger_than_size);
+ min_frame_size, frame_larger_than_size);
}
/* First output the function prologue: code to set up the stack frame. */
targetm.asm_out.function_prologue (file);
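constant_lower_bound gives the smallest frame size over all X, so -Wframe-larger-than now fires only when even the minimum possible frame exceeds the limit; a frame whose size grows with the runtime vector length is not warned about speculatively. A model (invented name; this sketch assumes the size is known nonnegative):

/* Smallest possible value of V over all X >= 0.  */
static int64_t
model_constant_lower_bound (struct poly64 v)
{
  assert (v.coeffs[0] >= 0 && v.coeffs[1] >= 0);
  return v.coeffs[0];
}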
This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY;
the caller may have to do that. */
-HOST_WIDE_INT
+poly_int64
get_frame_size (void)
{
if (FRAME_GROWS_DOWNWARD)
return FALSE. */
bool
-frame_offset_overflow (HOST_WIDE_INT offset, tree func)
+frame_offset_overflow (poly_int64 offset, tree func)
{
- unsigned HOST_WIDE_INT size = FRAME_GROWS_DOWNWARD ? -offset : offset;
+ poly_uint64 size = FRAME_GROWS_DOWNWARD ? -offset : offset;
+ unsigned HOST_WIDE_INT limit
+ = ((HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1))
+ /* Leave room for the fixed part of the frame. */
+ - 64 * UNITS_PER_WORD);
- if (size > (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (Pmode) - 1))
- /* Leave room for the fixed part of the frame. */
- - 64 * UNITS_PER_WORD)
+ if (!coeffs_in_range_p (size, 0U, limit))
{
error_at (DECL_SOURCE_LOCATION (func),
"total size of local objects too large");
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
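coeffs_in_range_p replaces the plain unsigned comparison: the new overflow test accepts the frame size only if every coefficient, the constant part and the per-X part alike, lies within the addressable limit. In the model (invented name):

/* True if every coefficient of V lies in [LO, HI].  */
static bool
model_coeffs_in_range_p (struct poly64 v, int64_t lo, int64_t hi)
{
  return v.coeffs[0] >= lo && v.coeffs[0] <= hi
         && v.coeffs[1] >= lo && v.coeffs[1] <= hi;
}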
/* Return the minimum spill slot alignment for a register of mode MODE. */
given a start/length pair that lies at the end of the frame. */
static bool
-try_fit_stack_local (HOST_WIDE_INT start, HOST_WIDE_INT length,
- HOST_WIDE_INT size, unsigned int alignment,
- HOST_WIDE_INT *poffset)
+try_fit_stack_local (poly_int64 start, poly_int64 length,
+ poly_int64 size, unsigned int alignment,
+ poly_int64_pod *poffset)
{
- HOST_WIDE_INT this_frame_offset;
+ poly_int64 this_frame_offset;
int frame_off, frame_alignment, frame_phase;
/* Calculate how many bytes the start of local variables is off from
/* Round the frame offset to the specified alignment. */
- /* We must be careful here, since FRAME_OFFSET might be negative and
- division with a negative dividend isn't as well defined as we might
- like. So we instead assume that ALIGNMENT is a power of two and
- use logical operations which are unambiguous. */
if (FRAME_GROWS_DOWNWARD)
this_frame_offset
- = (FLOOR_ROUND (start + length - size - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
+ = (aligned_lower_bound (start + length - size - frame_phase, alignment)
+ frame_phase);
else
this_frame_offset
- = (CEIL_ROUND (start - frame_phase,
- (unsigned HOST_WIDE_INT) alignment)
- + frame_phase);
+ = aligned_upper_bound (start - frame_phase, alignment) + frame_phase;
/* See if it fits. If this space is at the edge of the frame,
consider extending the frame to make it fit. Our caller relies on
this when allocating a new slot. */
- if (frame_offset == start && this_frame_offset < frame_offset)
- frame_offset = this_frame_offset;
- else if (this_frame_offset < start)
- return false;
- else if (start + length == frame_offset
- && this_frame_offset + size > start + length)
- frame_offset = this_frame_offset + size;
- else if (this_frame_offset + size > start + length)
- return false;
+ if (maybe_lt (this_frame_offset, start))
+ {
+ if (known_eq (frame_offset, start))
+ frame_offset = this_frame_offset;
+ else
+ return false;
+ }
+ else if (maybe_gt (this_frame_offset + size, start + length))
+ {
+ if (known_eq (frame_offset, start + length))
+ frame_offset = this_frame_offset + size;
+ else
+ return false;
+ }
*poffset = this_frame_offset;
return true;
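The restructuring above makes the conservative cases explicit. The old code could compare offsets directly; with poly_int64 a comparison may be undecidable, so each bound is now tested with a "maybe" predicate, and the frame is grown only when the slot provably abuts the current frame edge (the known_eq tests); any fit that cannot be proved for every X fails safely with false. Two more model predicates used here (invented names; X >= 0 as before):

static bool
model_maybe_gt (struct poly64 a, struct poly64 b)
{
  return model_maybe_lt (b, a);
}

/* A <= B for every X: must hold at X = 0 and as X grows.  */
static bool
model_known_le (struct poly64 a, struct poly64 b)
{
  return a.coeffs[0] <= b.coeffs[0] && a.coeffs[1] <= b.coeffs[1];
}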
function's frame_space_list. */
static void
-add_frame_space (HOST_WIDE_INT start, HOST_WIDE_INT end)
+add_frame_space (poly_int64 start, poly_int64 end)
{
struct frame_space *space = ggc_alloc<frame_space> ();
space->next = crtl->frame_space_list;
We do not round to stack_boundary here. */
rtx
-assign_stack_local_1 (machine_mode mode, HOST_WIDE_INT size,
+assign_stack_local_1 (machine_mode mode, poly_int64 size,
int align, int kind)
{
rtx x, addr;
- int bigend_correction = 0;
- HOST_WIDE_INT slot_offset = 0, old_frame_offset;
+ poly_int64 bigend_correction = 0;
+ poly_int64 slot_offset = 0, old_frame_offset;
unsigned int alignment, alignment_in_bits;
if (align == 0)
else if (align == -1)
{
alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
- size = CEIL_ROUND (size, alignment);
+ size = aligned_upper_bound (size, alignment);
}
else if (align == -2)
alignment = 1; /* BITS_PER_UNIT / BITS_PER_UNIT */
requested size is 0 or the estimated stack
alignment >= mode alignment. */
gcc_assert ((kind & ASLK_REDUCE_ALIGN)
- || size == 0
+ || known_eq (size, 0)
|| (crtl->stack_alignment_estimated
>= GET_MODE_ALIGNMENT (mode)));
alignment_in_bits = crtl->stack_alignment_estimated;
if (crtl->max_used_stack_slot_alignment < alignment_in_bits)
crtl->max_used_stack_slot_alignment = alignment_in_bits;
- if (mode != BLKmode || size != 0)
+ if (mode != BLKmode || maybe_ne (size, 0))
{
if (kind & ASLK_RECORD_PAD)
{
alignment, &slot_offset))
continue;
*psp = space->next;
- if (slot_offset > space->start)
+ if (known_gt (slot_offset, space->start))
add_frame_space (space->start, slot_offset);
- if (slot_offset + size < space->start + space->length)
+ if (known_lt (slot_offset + size, space->start + space->length))
add_frame_space (slot_offset + size,
space->start + space->length);
goto found_space;
if (kind & ASLK_RECORD_PAD)
{
- if (slot_offset > frame_offset)
+ if (known_gt (slot_offset, frame_offset))
add_frame_space (frame_offset, slot_offset);
- if (slot_offset + size < old_frame_offset)
+ if (known_lt (slot_offset + size, old_frame_offset))
add_frame_space (slot_offset + size, old_frame_offset);
}
}
if (kind & ASLK_RECORD_PAD)
{
- if (slot_offset > old_frame_offset)
+ if (known_gt (slot_offset, old_frame_offset))
add_frame_space (old_frame_offset, slot_offset);
- if (slot_offset + size < frame_offset)
+ if (known_lt (slot_offset + size, frame_offset))
add_frame_space (slot_offset + size, frame_offset);
}
}
found_space:
/* On a big-endian machine, if we are allocating more space than we will use,
use the least significant bytes of those that are allocated. */
- if (BYTES_BIG_ENDIAN && mode != BLKmode && GET_MODE_SIZE (mode) < size)
- bigend_correction = size - GET_MODE_SIZE (mode);
+ if (mode != BLKmode)
+ {
+ /* The slot size can sometimes be smaller than the mode size;
+ e.g. the rs6000 port allocates slots with a vector mode
+ that have the size of only one element. However, the slot
+ size must always be ordered wrt the mode size, in the
+ same way as for a subreg. */
+ gcc_checking_assert (ordered_p (GET_MODE_SIZE (mode), size));
+ if (BYTES_BIG_ENDIAN && maybe_lt (GET_MODE_SIZE (mode), size))
+ bigend_correction = size - GET_MODE_SIZE (mode);
+ }
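ordered_p captures the new comment's requirement directly: the big-endian correction can only be computed when the slot size and mode size compare the same way for every X. In the model:

/* True if A and B are ordered: one is known <= the other.  */
static bool
model_ordered_p (struct poly64 a, struct poly64 b)
{
  return model_known_le (a, b) || model_known_le (b, a);
}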
/* If we have already instantiated virtual registers, return the actual
address relative to the frame pointer. */
/* Wrap up assign_stack_local_1 with last parameter as false. */
rtx
-assign_stack_local (machine_mode mode, HOST_WIDE_INT size, int align)
+assign_stack_local (machine_mode mode, poly_int64 size, int align)
{
return assign_stack_local_1 (mode, size, align, ASLK_RECORD_PAD);
}
/* The rtx to used to reference the slot. */
rtx slot;
/* The size, in units, of the slot. */
- HOST_WIDE_INT size;
+ poly_int64 size;
/* The type of the object in the slot, or zero if it doesn't correspond
to a type. We use this to determine whether a slot can be reused.
It can be reused if objects of the type of the new slot will always
int level;
/* The offset of the slot from the frame_pointer, including extra space
for alignment. This info is for combine_temp_slots. */
- HOST_WIDE_INT base_offset;
+ poly_int64 base_offset;
/* The size of the slot, including extra space for alignment. This
info is for combine_temp_slots. */
- HOST_WIDE_INT full_size;
+ poly_int64 full_size;
};
/* Entry for the below hash table. */
return p;
/* Last resort: Address is a virtual stack var address. */
- if (GET_CODE (x) == PLUS
- && XEXP (x, 0) == virtual_stack_vars_rtx
- && CONST_INT_P (XEXP (x, 1)))
+ poly_int64 offset;
+ if (strip_offset (x, &offset) == virtual_stack_vars_rtx)
{
int i;
for (i = max_slot_level (); i >= 0; i--)
for (p = *temp_slots_at_level (i); p; p = p->next)
- {
- if (INTVAL (XEXP (x, 1)) >= p->base_offset
- && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size)
- return p;
- }
+ if (known_in_range_p (offset, p->base_offset, p->full_size))
+ return p;
}
return NULL;
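strip_offset generalizes the old PLUS/CONST_INT pattern match: it splits any constant displacement, now possibly polynomial, off the address and returns the inner rtx, and known_in_range_p then demands that base_offset <= offset < base_offset + full_size hold for every X before the slot is considered a match. Completing the model (invented names):

static struct poly64
model_add (struct poly64 a, struct poly64 b)
{
  struct poly64 r = { { a.coeffs[0] + b.coeffs[0],
                        a.coeffs[1] + b.coeffs[1] } };
  return r;
}

/* A < B for every X >= 0.  */
static bool
model_known_lt (struct poly64 a, struct poly64 b)
{
  return a.coeffs[0] < b.coeffs[0] && a.coeffs[1] <= b.coeffs[1];
}

/* VAL is known to lie in [POS, POS + SIZE) for every X.  */
static bool
model_known_in_range_p (struct poly64 val, struct poly64 pos,
                        struct poly64 size)
{
  return model_known_le (pos, val)
         && model_known_lt (val, model_add (pos, size));
}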
TYPE is the type that will be used for the stack slot. */
rtx
-assign_stack_temp_for_type (machine_mode mode, HOST_WIDE_INT size,
- tree type)
+assign_stack_temp_for_type (machine_mode mode, poly_int64 size, tree type)
{
unsigned int align;
struct temp_slot *p, *best_p = 0, *selected = NULL, **pp;
rtx slot;
- /* If SIZE is -1 it means that somebody tried to allocate a temporary
- of a variable size. */
- gcc_assert (size != -1);
+ gcc_assert (known_size_p (size));
align = get_stack_local_alignment (type, mode);
{
for (p = avail_temp_slots; p; p = p->next)
{
- if (p->align >= align && p->size >= size
+ if (p->align >= align
+ && known_ge (p->size, size)
&& GET_MODE (p->slot) == mode
&& objects_must_conflict_p (p->type, type)
- && (best_p == 0 || best_p->size > p->size
- || (best_p->size == p->size && best_p->align > p->align)))
+ && (best_p == 0
+ || (known_eq (best_p->size, p->size)
+ ? best_p->align > p->align
+ : known_ge (best_p->size, p->size))))
{
- if (p->align == align && p->size == size)
+ if (p->align == align && known_eq (p->size, size))
{
selected = p;
cut_slot_from_list (selected, &avail_temp_slots);
if (GET_MODE (best_p->slot) == BLKmode)
{
int alignment = best_p->align / BITS_PER_UNIT;
- HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment);
+ poly_int64 rounded_size = aligned_upper_bound (size, alignment);
- if (best_p->size - rounded_size >= alignment)
+ if (known_ge (best_p->size - rounded_size, alignment))
{
p = ggc_alloc<temp_slot> ();
p->in_use = 0;
/* If we still didn't find one, make a new temporary. */
if (selected == 0)
{
- HOST_WIDE_INT frame_offset_old = frame_offset;
+ poly_int64 frame_offset_old = frame_offset;
p = ggc_alloc<temp_slot> ();
gcc_assert (mode != BLKmode || align == BIGGEST_ALIGNMENT);
p->slot = assign_stack_local_1 (mode,
(mode == BLKmode
- ? CEIL_ROUND (size,
- (int) align
- / BITS_PER_UNIT)
+ ? aligned_upper_bound (size,
+ (int) align
+ / BITS_PER_UNIT)
: size),
align, 0);
reuse. First two arguments are same as in preceding function. */
rtx
-assign_stack_temp (machine_mode mode, HOST_WIDE_INT size)
+assign_stack_temp (machine_mode mode, poly_int64 size)
{
return assign_stack_temp_for_type (mode, size, NULL_TREE);
}
if (GET_MODE (q->slot) != BLKmode)
continue;
- if (p->base_offset + p->full_size == q->base_offset)
+ if (known_eq (p->base_offset + p->full_size, q->base_offset))
{
/* Q comes after P; combine Q into P. */
p->size += q->size;
p->full_size += q->full_size;
delete_q = 1;
}
- else if (q->base_offset + q->full_size == p->base_offset)
+ else if (known_eq (q->base_offset + q->full_size, p->base_offset))
{
/* P comes after Q; combine P into Q. */
q->size += p->size;
{
struct frame_space *next;
- HOST_WIDE_INT start;
- HOST_WIDE_INT length;
+ poly_int64 start;
+ poly_int64 length;
};
struct GTY(()) stack_usage
/* Return size needed for stack frame based on slots so far allocated.
This size counts from zero. It is not rounded to STACK_BOUNDARY;
the caller may have to do that. */
-extern HOST_WIDE_INT get_frame_size (void);
+extern poly_int64 get_frame_size (void);
/* Issue an error message and return TRUE if frame OFFSET overflows in
the signed target pointer arithmetics for function FUNC. Otherwise
return FALSE. */
-extern bool frame_offset_overflow (HOST_WIDE_INT, tree);
+extern bool frame_offset_overflow (poly_int64, tree);
extern unsigned int spill_slot_alignment (machine_mode);
-extern rtx assign_stack_local_1 (machine_mode, HOST_WIDE_INT, int, int);
-extern rtx assign_stack_local (machine_mode, HOST_WIDE_INT, int);
-extern rtx assign_stack_temp_for_type (machine_mode, HOST_WIDE_INT, tree);
-extern rtx assign_stack_temp (machine_mode, HOST_WIDE_INT);
+extern rtx assign_stack_local_1 (machine_mode, poly_int64, int, int);
+extern rtx assign_stack_local (machine_mode, poly_int64, int);
+extern rtx assign_stack_temp_for_type (machine_mode, poly_int64, tree);
+extern rtx assign_stack_temp (machine_mode, poly_int64);
extern rtx assign_temp (tree, int, int);
extern void update_temp_slot_address (rtx, rtx);
extern void preserve_temp_slots (rtx);
function's frame size is larger than we expect. */
if (flag_stack_check == GENERIC_STACK_CHECK)
{
- HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
+ poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (df_regs_ever_live_p (i) && !fixed_regs[i] && call_used_regs[i])
size += UNITS_PER_WORD;
- if (size > STACK_CHECK_MAX_FRAME_SIZE)
+ if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
warning (0, "frame size too large for reliable stack checking");
}
bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack);
bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack);
live_p = false;
- if (get_frame_size () != 0 && crtl->stack_alignment_needed)
+ if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
/* If we have a stack frame, we must align it now. The stack size
may be a part of the offset computation for register
elimination. */
for (;;)
{
int something_changed;
- HOST_WIDE_INT starting_frame_size;
+ poly_int64 starting_frame_size;
starting_frame_size = get_frame_size ();
something_was_spilled = false;
if (caller_save_needed)
setup_save_areas ();
- if (starting_frame_size && crtl->stack_alignment_needed)
+ if (maybe_ne (starting_frame_size, 0) && crtl->stack_alignment_needed)
{
/* If we have a stack frame, we must align it now. The
stack size may be a part of the offset computation for
assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
}
/* If we allocated another stack slot, redo elimination bookkeeping. */
- if (something_was_spilled || starting_frame_size != get_frame_size ())
+ if (something_was_spilled
+ || maybe_ne (starting_frame_size, get_frame_size ()))
{
if (update_eliminables_and_spill ())
finish_spills (0);
/* If we allocated any new memory locations, make another pass
since it might have changed elimination offsets. */
- if (something_was_spilled || starting_frame_size != get_frame_size ())
+ if (something_was_spilled
+ || maybe_ne (starting_frame_size, get_frame_size ()))
something_changed = 1;
/* Even if the frame size remained the same, we might still have
if (insns_need_reload != 0 || something_needs_elimination
|| something_needs_operands_changed)
{
- HOST_WIDE_INT old_frame_size = get_frame_size ();
+ poly_int64 old_frame_size = get_frame_size ();
reload_as_needed (global);
- gcc_assert (old_frame_size == get_frame_size ());
+ gcc_assert (known_eq (old_frame_size, get_frame_size ()));
gcc_assert (verify_initial_elim_offsets ());
}
FROM and TO for the current function, as it was at the start
of the routine. */
-static HOST_WIDE_INT
+static poly_int64
get_initial_register_offset (int from, int to)
{
static const struct elim_table_t
const int from;
const int to;
} table[] = ELIMINABLE_REGS;
- HOST_WIDE_INT offset1, offset2;
+ poly_int64 offset1, offset2;
unsigned int i, j;
if (to == from)