/* Emit RTL for the GCC expander.
- Copyright (C) 1987-2017 Free Software Foundation, Inc.
+ Copyright (C) 1987-2019 Free Software Foundation, Inc.
This file is part of GCC.
validate_subreg (machine_mode omode, machine_mode imode,
const_rtx reg, poly_uint64 offset)
{
- unsigned int isize = GET_MODE_SIZE (imode);
- unsigned int osize = GET_MODE_SIZE (omode);
+ poly_uint64 isize = GET_MODE_SIZE (imode);
+ poly_uint64 osize = GET_MODE_SIZE (omode);
+
+ /* The sizes must be ordered, so that we know whether the subreg
+ is partial, paradoxical or complete. */
+ if (!ordered_p (isize, osize))
+ return false;
/* All subregs must be aligned. */
if (!multiple_p (offset, osize))
if (maybe_ge (offset, isize))
return false;
- unsigned int regsize = REGMODE_NATURAL_SIZE (imode);
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);
/* ??? This should not be here. Temporarily continue to allow word_mode
subregs of anything. The most common offender is (subreg:SI (reg:DF)).
;
/* ??? Similarly, e.g. with (subreg:DF (reg:TI)). Though store_bit_field
is the culprit here, and not the backends. */
- else if (osize >= regsize && isize >= osize)
+ else if (known_ge (osize, regsize) && known_ge (isize, osize))
;
/* Allow component subregs of complex and vector. Though given the below
extraction rules, it's not always clear what that means. */
(subreg:SI (reg:DF) 0) isn't. */
else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
{
- if (! (isize == osize
+ if (! (known_eq (isize, osize)
/* LRA can use subreg to store a floating point value in
an integer mode. Although the floating point and the
integer modes need the same number of hard registers,
}
/* Paradoxical subregs must have offset zero. */
- if (osize > isize)
+ if (maybe_gt (osize, isize))
return known_eq (offset, 0U);
/* This is a normal subreg. Verify that the offset is representable. */
return subreg_offset_representable_p (regno, imode, offset, omode);
}
+ /* The outer size must be ordered wrt the register size, otherwise
+ we wouldn't know at compile time how many registers the outer
+ mode occupies. */
+ if (!ordered_p (osize, regsize))
+ return false;
+
/* For pseudo registers, we want most of the same checks. Namely:
Assume that the pseudo register will be allocated to hard registers
Given that we've already checked the mode and offset alignment,
we only have to check subblock subregs here. */
- if (osize < regsize
+ if (maybe_lt (osize, regsize)
&& ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
{
- poly_uint64 block_size = MIN (isize, regsize);
+ /* It is invalid for the target to pick a register size for a mode
+ that isn't ordered wrt the size of that mode. */
+ poly_uint64 block_size = ordered_min (isize, regsize);
unsigned int start_reg;
poly_uint64 offset_within_reg;
if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
rtx
gen_lowpart_common (machine_mode mode, rtx x)
{
- int msize = GET_MODE_SIZE (mode);
- int xsize;
+ poly_uint64 msize = GET_MODE_SIZE (mode);
machine_mode innermode;
/* Unfortunately, this routine doesn't take a parameter for the mode of X,
so we have to make one up. Yuk. */
innermode = GET_MODE (x);
if (CONST_INT_P (x)
- && msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
+ && known_le (msize * BITS_PER_UNIT,
+ (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT))
innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
else if (innermode == VOIDmode)
innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();
- xsize = GET_MODE_SIZE (innermode);
-
gcc_assert (innermode != VOIDmode && innermode != BLKmode);
if (innermode == mode)
return x;
+ /* The size of the outer and inner modes must be ordered. */
+ poly_uint64 xsize = GET_MODE_SIZE (innermode);
+ if (!ordered_p (msize, xsize))
+ return 0;
+
if (SCALAR_FLOAT_MODE_P (mode))
{
/* Don't allow paradoxical FLOAT_MODE subregs. */
- if (msize > xsize)
+ if (maybe_gt (msize, xsize))
return 0;
}
else
{
/* MODE must occupy no more of the underlying registers than X. */
- unsigned int regsize = REGMODE_NATURAL_SIZE (innermode);
- unsigned int mregs = CEIL (msize, regsize);
- unsigned int xregs = CEIL (xsize, regsize);
- if (mregs > xregs)
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode);
+ unsigned int mregs, xregs;
+ if (!can_div_away_from_zero_p (msize, regsize, &mregs)
+ || !can_div_away_from_zero_p (xsize, regsize, &xregs)
+ || mregs > xregs)
return 0;
}
return gen_rtx_fmt_e (GET_CODE (x), int_mode, XEXP (x, 0));
}
else if (GET_CODE (x) == SUBREG || REG_P (x)
- || GET_CODE (x) == CONCAT || const_vec_p (x)
+ || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
|| CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
|| CONST_POLY_INT_P (x))
return lowpart_subreg (mode, x, innermode);
rtx
gen_highpart (machine_mode mode, rtx x)
{
- unsigned int msize = GET_MODE_SIZE (mode);
+ poly_uint64 msize = GET_MODE_SIZE (mode);
rtx result;
/* This case loses if X is a subreg. To catch bugs early,
complain if an invalid MODE is used even in other cases. */
- gcc_assert (msize <= UNITS_PER_WORD
- || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x)));
+ gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
+ || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));
result = simplify_gen_subreg (mode, x, GET_MODE (x),
subreg_highpart_offset (mode, GET_MODE (x)));
if (mode != BLKmode && mode != VOIDmode)
{
- /* If this is a register which can not be accessed by words, copy it
+ /* If this is a register which cannot be accessed by words, copy it
to a pseudo register. */
if (REG_P (op))
op = copy_to_reg (op);
widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
{
rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
- unsigned int size = GET_MODE_SIZE (mode);
+ poly_uint64 size = GET_MODE_SIZE (mode);
/* If there are no changes, just return the original memory reference. */
if (new_rtx == memref)
/* SCRATCH must be shared because they represent distinct values. */
return;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
/* SCRATCH must be shared because they represent distinct values. */
return;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
return insn;
}
+/* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx_insn *
+next_real_insn (rtx_insn *insn)
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ /* Stop at the end of the chain or at the first real insn;
+ INSN_P accepts debug insns, so notes, barriers and code
+ labels are the only things skipped here. */
+ if (insn == 0 || INSN_P (insn))
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx_insn *
+prev_real_insn (rtx_insn *insn)
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ /* Mirror of next_real_insn: walk backwards until the start of
+ the chain or the first real (possibly debug) insn. */
+ if (insn == 0 || INSN_P (insn))
+ break;
+ }
+
+ return insn;
+}
+
/* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
or 0, if there is none. This routine does not look inside
SEQUENCEs. */
rtx_insn *
-next_real_insn (rtx uncast_insn)
+next_real_nondebug_insn (rtx uncast_insn)
{
rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
SEQUENCEs. */
rtx_insn *
-prev_real_insn (rtx_insn *insn)
+prev_real_nondebug_insn (rtx_insn *insn)
{
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
for (insn = insn_last; insn ; insn = PREV_INSN (insn))
if (CALL_P (insn))
{
- rtx_insn *next;
- rtx *p;
-
gcc_assert (call_insn == NULL_RTX);
call_insn = insn;
/* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
target may have explicitly specified. */
- p = &CALL_INSN_FUNCTION_USAGE (insn);
+ rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
while (*p)
p = &XEXP (*p, 1);
*p = CALL_INSN_FUNCTION_USAGE (trial);
/* If the old call was a sibling call, the new one must
be too. */
SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
-
- /* If the new call is the last instruction in the sequence,
- it will effectively replace the old call in-situ. Otherwise
- we must move any following NOTE_INSN_CALL_ARG_LOCATION note
- so that it comes immediately after the new call. */
- if (NEXT_INSN (insn))
- for (next = NEXT_INSN (trial);
- next && NOTE_P (next);
- next = NEXT_INSN (next))
- if (NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
- {
- remove_insn (next);
- add_insn_after (next, insn, NULL);
- break;
- }
}
}
case REG_SETJMP:
case REG_TM:
case REG_CALL_NOCF_CHECK:
+ case REG_CALL_ARG_LOCATION:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (CALL_P (insn))
they know how to update a SEQUENCE. */
void
-add_insn_after (rtx uncast_insn, rtx uncast_after, basic_block bb)
+add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *after = as_a <rtx_insn *> (uncast_after);
add_insn_after_nobb (insn, after);
if (!BARRIER_P (after)
&& !BARRIER_P (insn)
they know how to update a SEQUENCE. */
void
-add_insn_before (rtx uncast_insn, rtx uncast_before, basic_block bb)
+add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
add_insn_before_nobb (insn, before);
if (!bb
/* Replace insn with an deleted instruction note. */
void
-set_insn_deleted (rtx insn)
+set_insn_deleted (rtx_insn *insn)
{
if (INSN_P (insn))
- df_insn_delete (as_a <rtx_insn *> (insn));
+ df_insn_delete (insn);
PUT_CODE (insn, NOTE);
NOTE_KIND (insn) = NOTE_INSN_DELETED;
}
To really delete an insn and related DF information, use delete_insn. */
void
-remove_insn (rtx uncast_insn)
+remove_insn (rtx_insn *insn)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
rtx_insn *next = NEXT_INSN (insn);
rtx_insn *prev = PREV_INSN (insn);
basic_block bb;
generated would almost certainly die right after it was created. */
static rtx_insn *
-emit_pattern_before_noloc (rtx x, rtx before, rtx last, basic_block bb,
+emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
+ basic_block bb,
rtx_insn *(*make_raw) (rtx))
{
rtx_insn *insn;
gcc_assert (before);
if (x == NULL_RTX)
- return safe_as_a <rtx_insn *> (last);
+ return last;
switch (GET_CODE (x))
{
break;
}
- return safe_as_a <rtx_insn *> (last);
+ return last;
}
/* Make X be output before the instruction BEFORE. */
emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
- emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ emit_pattern_before_noloc (x, before, NULL, NULL,
make_jump_insn_raw));
}
rtx_insn *
emit_call_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_call_insn_raw);
}
and output it before the instruction BEFORE. */
rtx_insn *
-emit_debug_insn_before_noloc (rtx x, rtx before)
+emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_debug_insn_raw);
}
and output it before the insn BEFORE. */
rtx_barrier *
-emit_barrier_before (rtx before)
+emit_barrier_before (rtx_insn *before)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL before the insn BEFORE. */
rtx_code_label *
-emit_label_before (rtx label, rtx_insn *before)
+emit_label_before (rtx_code_label *label, rtx_insn *before)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_before (label, before, NULL);
- return as_a <rtx_code_label *> (label);
+ return label;
}
\f
/* Helper for emit_insn_after, handles lists of instructions
efficiently. */
static rtx_insn *
-emit_insn_after_1 (rtx_insn *first, rtx uncast_after, basic_block bb)
+emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last;
rtx_insn *after_after;
if (!bb && !BARRIER_P (after))
}
static rtx_insn *
-emit_pattern_after_noloc (rtx x, rtx uncast_after, basic_block bb,
+emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
rtx_insn *(*make_raw)(rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = after;
gcc_assert (after);
BB is NULL, an attempt is made to infer the BB from AFTER. */
rtx_insn *
-emit_insn_after_noloc (rtx x, rtx after, basic_block bb)
+emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
{
return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
}
and output it after the insn AFTER. */
rtx_jump_insn *
-emit_jump_insn_after_noloc (rtx x, rtx after)
+emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
and output it after the instruction AFTER. */
rtx_insn *
-emit_call_insn_after_noloc (rtx x, rtx after)
+emit_call_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
}
and output it after the instruction AFTER. */
rtx_insn *
-emit_debug_insn_after_noloc (rtx x, rtx after)
+emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
}
and output it after the insn AFTER. */
rtx_barrier *
-emit_barrier_after (rtx after)
+emit_barrier_after (rtx_insn *after)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL after the insn AFTER. */
rtx_insn *
-emit_label_after (rtx label, rtx_insn *after)
+emit_label_after (rtx_insn *label, rtx_insn *after)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_after (label, after, NULL);
- return as_a <rtx_insn *> (label);
+ return label;
}
\f
/* Notes require a bit of special handling: Some notes need to have their
inside basic blocks. If the caller is emitting on the basic block
boundary, do not set BLOCK_FOR_INSN on the new note. */
case NOTE_INSN_VAR_LOCATION:
- case NOTE_INSN_CALL_ARG_LOCATION:
case NOTE_INSN_EH_REGION_BEG:
case NOTE_INSN_EH_REGION_END:
return on_bb_boundary_p;
MAKE_RAW indicates how to turn PATTERN into a real insn. */
static rtx_insn *
-emit_pattern_after_setloc (rtx pattern, rtx uncast_after, int loc,
+emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
if (pattern == NULL_RTX || !loc)
any DEBUG_INSNs. */
static rtx_insn *
-emit_pattern_after (rtx pattern, rtx uncast_after, bool skip_debug_insns,
+emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *prev = after;
if (skip_debug_insns)
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_insn_raw);
}
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_insn_after (rtx pattern, rtx after)
+emit_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_insn_raw);
}
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_setloc (pattern, after, loc, make_jump_insn_raw));
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_jump_insn *
-emit_jump_insn_after (rtx pattern, rtx after)
+emit_jump_insn_after (rtx pattern, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after (pattern, after, true, make_jump_insn_raw));
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
}
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_call_insn_after (rtx pattern, rtx after)
+emit_call_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_call_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_debug_insn_after (rtx pattern, rtx after)
+emit_debug_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
}
CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before_setloc (rtx pattern, rtx uncast_before, int loc, bool insnp,
- rtx_insn *(*make_raw) (rtx))
+emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc,
+ bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
rtx_insn *first = PREV_INSN (before);
rtx_insn *last = emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
if (pattern == NULL_RTX || !loc)
INSN as opposed to a JUMP_INSN, CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before (rtx pattern, rtx uncast_before, bool skip_debug_insns,
+emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns,
bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = safe_as_a <rtx_insn *> (uncast_before);
rtx_insn *next = before;
if (skip_debug_insns)
insnp, make_raw);
else
return emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
}
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, true,
make_insn_raw);
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_insn *
-emit_insn_before (rtx pattern, rtx before)
+emit_insn_before (rtx pattern, rtx_insn *before)
{
return emit_pattern_before (pattern, before, true, true, make_insn_raw);
}
/* like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before_setloc (pattern, before, loc, false,
/* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_jump_insn *
-emit_jump_insn_before (rtx pattern, rtx before)
+emit_jump_insn_before (rtx pattern, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before (pattern, before, true, false,
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_call_insn_raw);
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_before_setloc (rtx pattern, rtx before, int loc)
+emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_debug_insn_raw);
case SIMPLE_RETURN:
return orig;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
/* A non-duplicated vector with two elements can always be seen as a
series with a nonzero step. Longer vectors must have a stepped
encoding. */
- if (CONST_VECTOR_NUNITS (x) != 2
+ if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
&& !CONST_VECTOR_STEPPED_P (x))
return false;
rtx
gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
{
- gcc_assert (GET_MODE_NUNITS (mode) == GET_NUM_ELEM (v));
+ gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v)));
/* If the values are all the same, check to see if we can use one of the
standard constant vectors. */
attrs = ggc_cleared_alloc<mem_attrs> ();
attrs->align = BITS_PER_UNIT;
attrs->addrspace = ADDR_SPACE_GENERIC;
- if (mode != BLKmode)
+ if (mode != BLKmode && mode != VOIDmode)
{
attrs->size_known_p = true;
attrs->size = GET_MODE_SIZE (mode);
FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[3][(int) mode] = constm1_rtx;
+ /* For BImode, 1 and -1 are unsigned and signed interpretations
+ of the same value. */
+ const_tiny_rtx[0][(int) BImode] = const0_rtx;
+ const_tiny_rtx[1][(int) BImode] = const_true_rtx;
+ const_tiny_rtx[3][(int) BImode] = const_true_rtx;
+
for (mode = MIN_MODE_PARTIAL_INT;
mode <= MAX_MODE_PARTIAL_INT;
mode = (machine_mode)((int)(mode) + 1))
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
+ /* As for BImode, "all 1" and "all -1" are unsigned and signed
+ interpretations of the same value. */
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
+ {
+ const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
+ const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
+ const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode];
+ }
+
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
const_tiny_rtx[0][i] = const0_rtx;
- const_tiny_rtx[0][(int) BImode] = const0_rtx;
- if (STORE_FLAG_VALUE == 1)
- const_tiny_rtx[1][(int) BImode] = const1_rtx;
-
- FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
- {
- scalar_mode smode = smode_iter.require ();
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
- const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
- }
-
pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno)));
}
+/* Cache of CLOBBER_HIGH expressions, indexed by machine mode and hard
+ register number. Marked GTY((deletable)) so the garbage collector may
+ discard the cache; entries are simply regenerated on demand. */
+static GTY((deletable)) rtx
+hard_reg_clobbers_high[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Return a CLOBBER_HIGH expression for register REGNO that clobbers MODE,
+ caching into HARD_REG_CLOBBERS_HIGH. */
+rtx
+gen_hard_reg_clobber_high (machine_mode mode, unsigned int regno)
+{
+ if (hard_reg_clobbers_high[mode][regno])
+ return hard_reg_clobbers_high[mode][regno];
+ else
+ return (hard_reg_clobbers_high[mode][regno]
+ = gen_rtx_CLOBBER_HIGH (VOIDmode, gen_rtx_REG (mode, regno)));
+}
+
location_t prologue_location;
location_t epilogue_location;