+2018-06-12 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * poly-int.h (can_div_trunc_p): Add new overload in which all values
+ are poly_ints.
+ * alias.c (get_addr): Extend CONST_INT handling to poly_int_rtx_p.
+ (memrefs_conflict_p): Likewise.
+ (init_alias_analysis): Likewise.
+ * cfgexpand.c (expand_debug_expr): Likewise.
+ * combine.c (combine_simplify_rtx, force_int_to_mode): Likewise.
+ * cse.c (fold_rtx): Likewise.
+ * explow.c (adjust_stack, anti_adjust_stack): Likewise.
+ * expr.c (emit_block_move_hints): Likewise.
+ (clear_storage_hints, push_block, emit_push_insn): Likewise.
+ (store_expr_with_bounds, reduce_to_bit_field_precision): Likewise.
+ (emit_group_load_1): Use rtx_to_poly_int64 for group offsets.
+ (emit_group_store): Likewise.
+ (find_args_size_adjust): Use strip_offset. Use rtx_to_poly_int64
+ to read the PRE/POST_MODIFY increment.
+ * calls.c (store_one_arg): Use strip_offset.
+ * rtlanal.c (rtx_addr_can_trap_p_1): Extend CONST_INT handling to
+ poly_int_rtx_p.
+ (set_noop_p): Use rtx_to_poly_int64 for the elements selected
+ by a VEC_SELECT.
+ * simplify-rtx.c (avoid_constant_pool_reference): Use strip_offset.
+ (simplify_binary_operation_1): Extend CONST_INT handling to
+ poly_int_rtx_p.
+ * var-tracking.c (compute_cfa_pointer): Take a poly_int64 rather
+ than a HOST_WIDE_INT.
+ (hard_frame_pointer_adjustment): Change from HOST_WIDE_INT to
+ poly_int64.
+ (adjust_mems, add_stores): Update accordingly.
+ (vt_canonicalize_addr): Track polynomial offsets.
+ (emit_note_insn_var_location): Likewise.
+ (vt_add_function_parameter): Likewise.
+ (vt_initialize): Likewise.
+
2018-06-12 Jeff Law <law@redhat.com>
* config.gcc (alpha*-*-freebsd*): Remove.
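[Not part of the patch: the recurring change in the hunks below replaces CONST_INT_P/INTVAL tests with poly_int_rtx_p, which also matches CONST_POLY_INT and reports the value as a poly_int64.  A minimal sketch of the idiom, using a hypothetical helper and assuming GCC's internal rtl.h declarations (poly_int_rtx_p, plus_constant, simplify_gen_binary):

    static rtx
    add_constant_term (machine_mode mode, rtx base, rtx term)
    {
      /* Hypothetical helper, for illustration only.  */
      poly_int64 value;
      if (poly_int_rtx_p (term, &value))      /* formerly CONST_INT_P (term) */
        return plus_constant (mode, base, value);   /* formerly INTVAL (term) */
      return simplify_gen_binary (PLUS, mode, base, term);
    }
]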
rtx op0 = get_addr (XEXP (x, 0));
if (op0 != XEXP (x, 0))
{
+ poly_int64 c;
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
+ && poly_int_rtx_p (XEXP (x, 1), &c))
+ return plus_constant (GET_MODE (x), op0, c);
return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
op0, XEXP (x, 1));
}
return offset_overlap_p (c, xsize, ysize);
/* Can't properly adjust our sizes. */
- if (!CONST_INT_P (x1)
- || !can_div_trunc_p (xsize, INTVAL (x1), &xsize)
- || !can_div_trunc_p (ysize, INTVAL (x1), &ysize)
- || !can_div_trunc_p (c, INTVAL (x1), &c))
+ poly_int64 c1;
+ if (!poly_int_rtx_p (x1, &c1)
+ || !can_div_trunc_p (xsize, c1, &xsize)
+ || !can_div_trunc_p (ysize, c1, &ysize)
+ || !can_div_trunc_p (c, c1, &c))
return -1;
return memrefs_conflict_p (xsize, x0, ysize, y0, c);
}
&& DF_REG_DEF_COUNT (regno) != 1)
note = NULL_RTX;
+ poly_int64 offset;
if (note != NULL_RTX
&& GET_CODE (XEXP (note, 0)) != EXPR_LIST
&& ! rtx_varies_p (XEXP (note, 0), 1)
&& GET_CODE (src) == PLUS
&& REG_P (XEXP (src, 0))
&& (t = get_reg_known_value (REGNO (XEXP (src, 0))))
- && CONST_INT_P (XEXP (src, 1)))
+ && poly_int_rtx_p (XEXP (src, 1), &offset))
{
- t = plus_constant (GET_MODE (src), t,
- INTVAL (XEXP (src, 1)));
+ t = plus_constant (GET_MODE (src), t, offset);
set_reg_known_value (regno, t);
set_reg_known_equiv_p (regno, false);
}
rtx x = arg->value;
poly_int64 i = 0;
- if (XEXP (x, 0) == crtl->args.internal_arg_pointer
- || (GET_CODE (XEXP (x, 0)) == PLUS
- && XEXP (XEXP (x, 0), 0) ==
- crtl->args.internal_arg_pointer
- && CONST_INT_P (XEXP (XEXP (x, 0), 1))))
+ if (strip_offset (XEXP (x, 0), &i)
+ == crtl->args.internal_arg_pointer)
{
- if (XEXP (x, 0) != crtl->args.internal_arg_pointer)
- i = rtx_to_poly_int64 (XEXP (XEXP (x, 0), 1));
-
/* arg.locate doesn't contain the pretend_args_size offset,
it's part of argblock. Ensure we don't count it in I. */
if (STACK_GROWS_DOWNWARD)
goto component_ref;
op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
- if (!op1 || !CONST_INT_P (op1))
+ poly_int64 offset;
+ if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
- op0 = plus_constant (inner_mode, op0, INTVAL (op1));
+ op0 = plus_constant (inner_mode, op0, offset);
}
as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
{
op1 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
1));
- if (!op1 || !CONST_INT_P (op1))
+ poly_int64 offset;
+ if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
- return plus_constant (mode, op0, INTVAL (op1));
+ return plus_constant (mode, op0, offset);
}
}
GET_MODE_MASK (mode), 0));
/* We can truncate a constant value and return it. */
- if (CONST_INT_P (XEXP (x, 0)))
- return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
+ {
+ poly_int64 c;
+ if (poly_int_rtx_p (XEXP (x, 0), &c))
+ return gen_int_mode (c, mode);
+ }
/* Similarly to what we do in simplify-rtx.c, a truncate of a register
whose value is a comparison can be replaced with a subreg if
int next_select = just_select || code == XOR || code == NOT || code == NEG;
unsigned HOST_WIDE_INT fuller_mask;
rtx op0, op1, temp;
+ poly_int64 const_op0;
/* When we have an arithmetic operation, or a shift whose count we
do not know, we need to assume that all bits up to the highest-order
case MINUS:
/* If X is (minus C Y) where C's least set bit is larger than any bit
in the mask, then we may replace with (neg Y). */
- if (CONST_INT_P (XEXP (x, 0))
- && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
+ if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
+ && (unsigned HOST_WIDE_INT) known_alignment (const_op0) > mask)
{
x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
return force_to_mode (x, mode, mask, next_select);
int i;
rtx new_rtx = 0;
int changed = 0;
+ poly_int64 xval;
/* Operands of X. */
/* Workaround -Wmaybe-uninitialized false positive during
case MINUS:
/* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
If so, produce (PLUS Z C2-C). */
- if (const_arg1 != 0 && CONST_INT_P (const_arg1))
+ if (const_arg1 != 0 && poly_int_rtx_p (const_arg1, &xval))
{
rtx y = lookup_as_function (XEXP (x, 0), PLUS);
- if (y && CONST_INT_P (XEXP (y, 1)))
- return fold_rtx (plus_constant (mode, copy_rtx (y),
- -INTVAL (const_arg1)),
+ if (y && poly_int_rtx_p (XEXP (y, 1)))
+ return fold_rtx (plus_constant (mode, copy_rtx (y), -xval),
NULL);
}
/* We expect all variable sized adjustments to be multiple of
PREFERRED_STACK_BOUNDARY. */
- if (CONST_INT_P (adjust))
- stack_pointer_delta -= INTVAL (adjust);
+ poly_int64 const_adjust;
+ if (poly_int_rtx_p (adjust, &const_adjust))
+ stack_pointer_delta -= const_adjust;
adjust_stack_1 (adjust, false);
}
/* We expect all variable sized adjustments to be multiple of
PREFERRED_STACK_BOUNDARY. */
- if (CONST_INT_P (adjust))
- stack_pointer_delta += INTVAL (adjust);
+ poly_int64 const_adjust;
+ if (poly_int_rtx_p (adjust, &const_adjust))
+ stack_pointer_delta += const_adjust;
adjust_stack_1 (adjust, true);
}
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
- if (CONST_INT_P (size))
+ poly_int64 const_size;
+ if (poly_int_rtx_p (size, &const_size))
{
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
- set_mem_size (x, INTVAL (size));
- set_mem_size (y, INTVAL (size));
+ set_mem_size (x, const_size);
+ set_mem_size (y, const_size);
}
if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
for (i = start; i < XVECLEN (dst, 0); i++)
{
machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
- poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+ poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
poly_int64 bytelen = GET_MODE_SIZE (mode);
poly_int64 shift = 0;
{
inner = GET_MODE (tmps[start]);
bytepos = subreg_lowpart_offset (inner, outer);
- if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, start), 1)), bytepos))
+ if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
+ bytepos))
{
temp = simplify_gen_subreg (outer, tmps[start],
inner, 0);
{
inner = GET_MODE (tmps[finish - 1]);
bytepos = subreg_lowpart_offset (inner, outer);
- if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)),
+ if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
+ finish - 1), 1)),
bytepos))
{
temp = simplify_gen_subreg (outer, tmps[finish - 1],
/* Process the pieces. */
for (i = start; i < finish; i++)
{
- poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+ poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
machine_mode mode = GET_MODE (tmps[i]);
poly_int64 bytelen = GET_MODE_SIZE (mode);
poly_uint64 adj_bytelen;
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
+ poly_int64 size_val;
if (mode != BLKmode
- && CONST_INT_P (size)
- && known_eq (INTVAL (size), GET_MODE_SIZE (mode)))
+ && poly_int_rtx_p (size, &size_val)
+ && known_eq (size_val, GET_MODE_SIZE (mode)))
{
rtx zero = CONST0_RTX (mode);
if (zero != NULL)
}
else
{
- if (CONST_INT_P (size))
+ poly_int64 csize;
+ if (poly_int_rtx_p (size, &csize))
temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
- -INTVAL (size) - (below ? 0 : extra));
+ -csize - (below ? 0 : extra));
else if (maybe_ne (extra, 0) && !below)
temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
negate_rtx (Pmode, plus_constant (Pmode, size,
/* Look for a trivial adjustment, otherwise assume nothing. */
/* Note that the SPU restore_stack_block pattern refers to
the stack pointer in V4SImode. Consider that non-trivial. */
+ poly_int64 offset;
if (SCALAR_INT_MODE_P (GET_MODE (dest))
- && GET_CODE (SET_SRC (set)) == PLUS
- && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
- && CONST_INT_P (XEXP (SET_SRC (set), 1)))
- return INTVAL (XEXP (SET_SRC (set), 1));
+ && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
+ return offset;
/* ??? Reload can generate no-op moves, which will be cleaned
up later. Recognize it and continue searching. */
else if (rtx_equal_p (dest, SET_SRC (set)))
addr = XEXP (addr, 1);
gcc_assert (GET_CODE (addr) == PLUS);
gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
- gcc_assert (CONST_INT_P (XEXP (addr, 1)));
- return INTVAL (XEXP (addr, 1));
+ return rtx_to_poly_int64 (XEXP (addr, 1));
default:
gcc_unreachable ();
}
/* Get the address of the stack space.
In this case, we do not deal with EXTRA separately.
A single stack adjust will do. */
+ poly_int64 offset;
if (! args_addr)
{
temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
extra = 0;
}
- else if (CONST_INT_P (args_so_far))
+ else if (poly_int_rtx_p (args_so_far, &offset))
temp = memory_address (BLKmode,
plus_constant (Pmode, args_addr,
- skip + INTVAL (args_so_far)));
+ skip + offset));
else
temp = memory_address (BLKmode,
plus_constant (Pmode,
/* Figure out how much is left in TARGET that we have to clear.
Do all calculations in pointer_mode. */
- if (CONST_INT_P (copy_size_rtx))
+ poly_int64 const_copy_size;
+ if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
{
- size = plus_constant (address_mode, size,
- -INTVAL (copy_size_rtx));
- target = adjust_address (target, BLKmode,
- INTVAL (copy_size_rtx));
+ size = plus_constant (address_mode, size, -const_copy_size);
+ target = adjust_address (target, BLKmode, const_copy_size);
}
else
{
if (target && GET_MODE (target) != GET_MODE (exp))
target = 0;
/* For constant values, reduce using build_int_cst_type. */
- if (CONST_INT_P (exp))
+ poly_int64 const_exp;
+ if (poly_int_rtx_p (exp, &const_exp))
{
- HOST_WIDE_INT value = INTVAL (exp);
- tree t = build_int_cst_type (type, value);
+ tree t = build_int_cst_type (type, const_exp);
return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
}
else if (TYPE_UNSIGNED (type))
return true;
}
+/* Return true if we can compute A / B at compile time, rounding towards zero.
+ Store the result in QUOTIENT if so.
+
+ This handles cases in which either B is constant or the result is
+ constant. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline bool
+can_div_trunc_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ poly_int_pod<N, Cq> *quotient)
+{
+ if (b.is_constant ())
+ return can_div_trunc_p (a, b.coeffs[0], quotient);
+ if (!can_div_trunc_p (a, b, &quotient->coeffs[0]))
+ return false;
+ for (unsigned int i = 1; i < N; ++i)
+ quotient->coeffs[i] = 0;
+ return true;
+}
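[Not part of the patch: a usage sketch of the new overload, mirroring the memrefs_conflict_p change above.  It assumes GCC's poly-int.h; the helper name and values are hypothetical.  Each division only succeeds when the divisor is a compile-time constant or the truncated quotient is itself constant.

    static bool
    scale_down_sizes (poly_int64 mult, poly_int64 *xsize, poly_int64 *ysize)
    {
      /* Illustration only: divide both sizes by a (possibly polynomial)
         multiplier, giving up if either quotient cannot be computed at
         compile time.  */
      return (can_div_trunc_p (*xsize, mult, xsize)
              && can_div_trunc_p (*ysize, mult, ysize));
    }
]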
+
/* Return true if there is some constant Q and polynomial r such that:
(1) a = b * Q + r
{
enum rtx_code code = GET_CODE (x);
gcc_checking_assert (mode == BLKmode || known_size_p (size));
+ poly_int64 const_x1;
/* The offset must be a multiple of the mode size if we are considering
unaligned memory references on strict alignment machines. */
return 0;
/* - or it is an address that can't trap plus a constant integer. */
- if (CONST_INT_P (XEXP (x, 1))
- && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
+ if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
+ && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
size, mode, unaligned_mems))
return 0;
int i;
rtx par = XEXP (src, 1);
rtx src0 = XEXP (src, 0);
- int c0 = INTVAL (XVECEXP (par, 0, 0));
- HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
+ poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
+ poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
for (i = 1; i < XVECLEN (par, 0); i++)
- if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
+ if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
return 0;
return
simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
{
rtx c, tmp, addr;
machine_mode cmode;
- HOST_WIDE_INT offset = 0;
+ poly_int64 offset = 0;
switch (GET_CODE (x))
{
addr = targetm.delegitimize_address (addr);
/* Split the address into a base and integer offset. */
- if (GET_CODE (addr) == CONST
- && GET_CODE (XEXP (addr, 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
- {
- offset = INTVAL (XEXP (XEXP (addr, 0), 1));
- addr = XEXP (XEXP (addr, 0), 0);
- }
+ addr = strip_offset (addr, &offset);
if (GET_CODE (addr) == LO_SUM)
addr = XEXP (addr, 1);
/* If we're accessing the constant in a different mode than it was
originally stored, attempt to fix that up via subreg simplifications.
If that fails we have no choice but to return the original memory. */
- if (offset == 0 && cmode == GET_MODE (x))
+ if (known_eq (offset, 0) && cmode == GET_MODE (x))
return c;
else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
{
if ((GET_CODE (op0) == CONST
|| GET_CODE (op0) == SYMBOL_REF
|| GET_CODE (op0) == LABEL_REF)
- && CONST_INT_P (op1))
- return plus_constant (mode, op0, INTVAL (op1));
+ && poly_int_rtx_p (op1, &offset))
+ return plus_constant (mode, op0, offset);
else if ((GET_CODE (op1) == CONST
|| GET_CODE (op1) == SYMBOL_REF
|| GET_CODE (op1) == LABEL_REF)
- && CONST_INT_P (op0))
- return plus_constant (mode, op1, INTVAL (op0));
+ && poly_int_rtx_p (op0, &offset))
+ return plus_constant (mode, op1, offset);
/* See if this is something like X * C - X or vice versa or
if the multiplication is written as a shift. If so, we can
or hard_frame_pointer_rtx. */
static inline rtx
-compute_cfa_pointer (HOST_WIDE_INT adjustment)
+compute_cfa_pointer (poly_int64 adjustment)
{
return plus_constant (Pmode, cfa_base_rtx, adjustment + cfa_base_offset);
}
/* Adjustment for hard_frame_pointer_rtx to cfa base reg,
or -1 if the replacement shouldn't be done. */
-static HOST_WIDE_INT hard_frame_pointer_adjustment = -1;
+static poly_int64 hard_frame_pointer_adjustment = -1;
/* Data for adjust_mems callback. */
return compute_cfa_pointer (amd->stack_adjust);
else if (loc == hard_frame_pointer_rtx
&& frame_pointer_needed
- && hard_frame_pointer_adjustment != -1
+ && maybe_ne (hard_frame_pointer_adjustment, -1)
&& cfa_base_rtx)
return compute_cfa_pointer (hard_frame_pointer_adjustment);
gcc_checking_assert (loc != virtual_incoming_args_rtx);
static rtx
vt_canonicalize_addr (dataflow_set *set, rtx oloc)
{
- HOST_WIDE_INT ofst = 0;
+ poly_int64 ofst = 0, term;
machine_mode mode = GET_MODE (oloc);
rtx loc = oloc;
rtx x;
while (retry)
{
while (GET_CODE (loc) == PLUS
- && GET_CODE (XEXP (loc, 1)) == CONST_INT)
+ && poly_int_rtx_p (XEXP (loc, 1), &term))
{
- ofst += INTVAL (XEXP (loc, 1));
+ ofst += term;
loc = XEXP (loc, 0);
}
loc = get_addr_from_global_cache (loc);
/* Consolidate plus_constants. */
- while (ofst && GET_CODE (loc) == PLUS
- && GET_CODE (XEXP (loc, 1)) == CONST_INT)
+ while (maybe_ne (ofst, 0)
+ && GET_CODE (loc) == PLUS
+ && poly_int_rtx_p (XEXP (loc, 1), &term))
{
- ofst += INTVAL (XEXP (loc, 1));
+ ofst += term;
loc = XEXP (loc, 0);
}
}
/* Add OFST back in. */
- if (ofst)
+ if (maybe_ne (ofst, 0))
{
/* Don't build new RTL if we can help it. */
- if (GET_CODE (oloc) == PLUS
- && XEXP (oloc, 0) == loc
- && INTVAL (XEXP (oloc, 1)) == ofst)
+ if (strip_offset (oloc, &term) == loc && known_eq (term, ofst))
return oloc;
loc = plus_constant (mode, loc, ofst);
}
if (loc == stack_pointer_rtx
- && hard_frame_pointer_adjustment != -1
+ && maybe_ne (hard_frame_pointer_adjustment, -1)
&& preserve)
cselib_set_value_sp_based (v);
&& GET_CODE (loc[n_var_parts]) == GET_CODE (loc2))
{
rtx new_loc = NULL;
+ poly_int64 offset2;
if (REG_P (loc[n_var_parts])
&& hard_regno_nregs (REGNO (loc[n_var_parts]), mode) * 2
else if (MEM_P (loc[n_var_parts])
&& GET_CODE (XEXP (loc2, 0)) == PLUS
&& REG_P (XEXP (XEXP (loc2, 0), 0))
- && CONST_INT_P (XEXP (XEXP (loc2, 0), 1)))
+ && poly_int_rtx_p (XEXP (XEXP (loc2, 0), 1), &offset2))
{
- if ((REG_P (XEXP (loc[n_var_parts], 0))
- && rtx_equal_p (XEXP (loc[n_var_parts], 0),
- XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc2, 0), 1)) == size)
- || (GET_CODE (XEXP (loc[n_var_parts], 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (loc[n_var_parts], 0), 1))
- && rtx_equal_p (XEXP (XEXP (loc[n_var_parts], 0), 0),
- XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1)) + size
- == INTVAL (XEXP (XEXP (loc2, 0), 1))))
+ poly_int64 end1 = size;
+ rtx base1 = strip_offset_and_add (XEXP (loc[n_var_parts], 0),
+ &end1);
+ if (rtx_equal_p (base1, XEXP (XEXP (loc2, 0), 0))
+ && known_eq (end1, offset2))
new_loc = adjust_address_nv (loc[n_var_parts],
wider_mode, 0);
}
rewrite the incoming location of parameters passed on the stack
into MEMs based on the argument pointer, so that incoming doesn't
depend on a pseudo. */
+ poly_int64 incoming_offset = 0;
if (MEM_P (incoming)
- && (XEXP (incoming, 0) == crtl->args.internal_arg_pointer
- || (GET_CODE (XEXP (incoming, 0)) == PLUS
- && XEXP (XEXP (incoming, 0), 0)
- == crtl->args.internal_arg_pointer
- && CONST_INT_P (XEXP (XEXP (incoming, 0), 1)))))
+ && (strip_offset (XEXP (incoming, 0), &incoming_offset)
+ == crtl->args.internal_arg_pointer))
{
HOST_WIDE_INT off = -FIRST_PARM_OFFSET (current_function_decl);
- if (GET_CODE (XEXP (incoming, 0)) == PLUS)
- off += INTVAL (XEXP (XEXP (incoming, 0), 1));
incoming
= replace_equiv_address_nv (incoming,
plus_constant (Pmode,
- arg_pointer_rtx, off));
+ arg_pointer_rtx,
+ off + incoming_offset));
}
#ifdef HAVE_window_save
vt_initialize (void)
{
basic_block bb;
- HOST_WIDE_INT fp_cfa_offset = -1;
+ poly_int64 fp_cfa_offset = -1;
alloc_aux_for_blocks (sizeof (variable_tracking_info));
{
if (GET_CODE (elim) == PLUS)
{
- fp_cfa_offset -= INTVAL (XEXP (elim, 1));
+ fp_cfa_offset -= rtx_to_poly_int64 (XEXP (elim, 1));
elim = XEXP (elim, 0);
}
if (elim != hard_frame_pointer_rtx)
VTI (bb)->out.stack_adjust += post;
}
- if (fp_cfa_offset != -1
- && hard_frame_pointer_adjustment == -1
+ if (maybe_ne (fp_cfa_offset, -1)
+ && known_eq (hard_frame_pointer_adjustment, -1)
&& fp_setter_insn (insn))
{
vt_init_cfa_base ();