/* Emit RTL for the GCC expander.
- Copyright (C) 1987-2015 Free Software Foundation, Inc.
+ Copyright (C) 1987-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "memmodel.h"
#include "backend.h"
-#include "tree.h"
+#include "target.h"
#include "rtl.h"
+#include "tree.h"
#include "df.h"
+#include "tm_p.h"
+#include "stringpool.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "emit-rtl.h"
+#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "cfgrtl.h"
#include "tree-eh.h"
-#include "tm_p.h"
-#include "flags.h"
-#include "stringpool.h"
-#include "insn-config.h"
-#include "expmed.h"
-#include "dojump.h"
#include "explow.h"
-#include "calls.h"
-#include "emit-rtl.h"
-#include "stmt.h"
#include "expr.h"
-#include "regs.h"
-#include "recog.h"
-#include "debug.h"
-#include "langhooks.h"
#include "params.h"
-#include "target.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "stor-layout.h"
+#include "opts.h"
+#include "predict.h"
+#include "rtx-vector-builder.h"
struct target_rtl default_target_rtl;
#if SWITCHABLE_TARGET
/* Commonly used modes. */
-machine_mode byte_mode; /* Mode whose width is BITS_PER_UNIT. */
-machine_mode word_mode; /* Mode whose width is BITS_PER_WORD. */
-machine_mode double_mode; /* Mode whose width is DOUBLE_TYPE_SIZE. */
-machine_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */
+scalar_int_mode byte_mode; /* Mode whose width is BITS_PER_UNIT. */
+scalar_int_mode word_mode; /* Mode whose width is BITS_PER_WORD. */
+scalar_int_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */
/* Data structures maintained for the currently processed function in RTL form. */
static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;
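+
+/* Hasher for const_poly_int_htab below, a table of unique CONST_POLY_INTs
+   keyed on mode and coefficient values. */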
+struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
+{
+ typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;
+
+ static hashval_t hash (rtx x);
+ static bool equal (rtx x, const compare_type &y);
+};
+
+static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;
+
/* A hash table storing register attribute structures. */
struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
{
#endif
static rtx lookup_const_double (rtx);
static rtx lookup_const_fixed (rtx);
-static reg_attrs *get_reg_attrs (tree, int);
static rtx gen_const_vector (machine_mode, int);
static void copy_rtx_if_shared_1 (rtx *orig);
-/* Probability of the conditional branch currently proceeded by try_split.
- Set to -1 otherwise. */
-int split_branch_probability = -1;
+/* Probability of the conditional branch currently processed by try_split. */
+profile_probability split_branch_probability;
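+
+/* Note: profile_probability::uninitialized () plays the role of the old
+   -1 sentinel; see the initialized_p () checks in try_split below. */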
\f
/* Returns a hash code for X (which is really a CONST_INT). */
}
#endif
+/* Returns a hash code for CONST_POLY_INT X. */
+
+hashval_t
+const_poly_int_hasher::hash (rtx x)
+{
+ inchash::hash h;
+ h.add_int (GET_MODE (x));
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
+ return h.end ();
+}
+
+/* Returns nonzero if CONST_POLY_INT X is an rtx representation of Y. */
+
+bool
+const_poly_int_hasher::equal (rtx x, const compare_type &y)
+{
+ if (GET_MODE (x) != y.first)
+ return false;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
+ return false;
+ return true;
+}
+
/* Returns a hash code for X (which is really a CONST_DOUBLE). */
hashval_t
const_double_hasher::hash (rtx x)
return false;
return (p->alias == q->alias
&& p->offset_known_p == q->offset_known_p
- && (!p->offset_known_p || p->offset == q->offset)
+ && (!p->offset_known_p || known_eq (p->offset, q->offset))
&& p->size_known_p == q->size_known_p
- && (!p->size_known_p || p->size == q->size)
+ && (!p->size_known_p || known_eq (p->size, q->size))
&& p->align == q->align
&& p->addrspace == q->addrspace
&& (p->expr == q->expr
{
const reg_attrs *const p = x;
- return ((p->offset * 1000) ^ (intptr_t) p->decl);
+ inchash::hash h;
+ h.add_ptr (p->decl);
+ h.add_poly_hwi (p->offset);
+ return h.end ();
}
/* Returns nonzero if the value represented by X is the same as that given by
const reg_attrs *const p = x;
const reg_attrs *const q = y;
- return (p->decl == q->decl && p->offset == q->offset);
+ return (p->decl == q->decl && known_eq (p->offset, q->offset));
}
/* Allocate a new reg_attrs structure and insert it into the hash table if
one identical to it is not already in the table. We are doing this for
MEM of mode MODE. */
static reg_attrs *
-get_reg_attrs (tree decl, int offset)
+get_reg_attrs (tree decl, poly_int64 offset)
{
reg_attrs attrs;
/* If everything is the default, we can just return zero. */
- if (decl == 0 && offset == 0)
+ if (decl == 0 && known_eq (offset, 0))
return 0;
attrs.decl = decl;
set_mode_and_regno (rtx x, machine_mode mode, unsigned int regno)
{
unsigned int nregs = (HARD_REGISTER_NUM_P (regno)
- ? hard_regno_nregs[regno][mode]
+ ? hard_regno_nregs (regno, mode)
: 1);
PUT_MODE_RAW (x, mode);
set_regno_raw (x, regno, nregs);
rtx
gen_raw_REG (machine_mode mode, unsigned int regno)
{
- rtx x = rtx_alloc_stat (REG MEM_STAT_INFO);
+ rtx x = rtx_alloc (REG MEM_STAT_INFO);
set_mode_and_regno (x, mode, regno);
REG_ATTRS (x) = NULL;
ORIGINAL_REGNO (x) = regno;
}
rtx
-gen_int_mode (HOST_WIDE_INT c, machine_mode mode)
+gen_int_mode (poly_int64 c, machine_mode mode)
{
- return GEN_INT (trunc_int_for_mode (c, mode));
+ c = trunc_int_for_mode (c, mode);
+ if (c.is_constant ())
+ return GEN_INT (c.coeffs[0]);
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+ return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
}
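+
+/* Usage sketch (not part of the patch): on targets where
+   NUM_POLY_INT_COEFFS is 1, every poly_int64 satisfies is_constant (),
+   so the immed_wide_int_const path above is only reachable on targets
+   with runtime-sized modes (e.g. AArch64 with SVE, where
+   NUM_POLY_INT_COEFFS is 2):
+
+     gen_int_mode (poly_int64 (4, 8), DImode);  // 4 + 8x: CONST_POLY_INT
+     gen_int_mode (poly_int64 (4), DImode);     // constant: CONST_INT */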
/* CONST_DOUBLEs might be created from pairs of integers, or from
a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
(if TARGET_SUPPORTS_WIDE_INT). */
-rtx
-immed_wide_int_const (const wide_int_ref &v, machine_mode mode)
+static rtx
+immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
{
unsigned int len = v.get_len ();
- unsigned int prec = GET_MODE_PRECISION (mode);
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
/* Allow truncation but not extension since we do not know if the
number is signed or unsigned. */
implied upper bits are copies of the high bit of i1. The value
itself is neither signed nor unsigned. Do not use this routine for
non-integer modes; convert to REAL_VALUE_TYPE and use
- CONST_DOUBLE_FROM_REAL_VALUE. */
+ const_double_from_real_value. */
rtx
immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, machine_mode mode)
(i.e., i1 consists only from copies of the sign bit, and sign
of i0 and i1 are the same), then we return a CONST_INT for i0.
3) Otherwise, we create a CONST_DOUBLE for i0 and i1. */
- if (mode != VOIDmode)
- {
- gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
- || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
- /* We can get a 0 for an error mark. */
- || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
- || GET_MODE_CLASS (mode) == MODE_POINTER_BOUNDS);
-
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- return gen_int_mode (i0, mode);
- }
+ scalar_mode smode;
+ if (is_a <scalar_mode> (mode, &smode)
+ && GET_MODE_BITSIZE (smode) <= HOST_BITS_PER_WIDE_INT)
+ return gen_int_mode (i0, mode);
/* If this integer fits in one word, return a CONST_INT. */
if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0))
}
#endif
+/* Return an rtx representation of C in mode MODE. */
+
+rtx
+immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
+{
+ if (c.is_constant ())
+ return immed_wide_int_const_1 (c.coeffs[0], mode);
+
+ /* Not scalar_int_mode because we also allow pointer bound modes. */
+ unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+
+ /* Allow truncation but not extension since we do not know if the
+ number is signed or unsigned. */
+ gcc_assert (prec <= c.coeffs[0].get_precision ());
+ poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);
+
+ /* See whether we already have an rtx for this constant. */
+ inchash::hash h;
+ h.add_int (mode);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ h.add_wide_int (newc.coeffs[i]);
+ const_poly_int_hasher::compare_type typed_value (mode, newc);
+ rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
+ h.end (), INSERT);
+ rtx x = *slot;
+ if (x)
+ return x;
+
+ /* Create a new rtx. There's a choice to be made here between installing
+ the actual mode of the rtx or leaving it as VOIDmode (for consistency
+ with CONST_INT). In practice the handling of the codes is different
+ enough that we get no benefit from using VOIDmode, and various places
+ assume that VOIDmode implies CONST_INT. Using the real mode seems like
+ the right long-term direction anyway. */
+ typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
+ size_t extra_size = twi::extra_size (prec);
+ x = rtx_alloc_v (CONST_POLY_INT,
+ sizeof (struct const_poly_int_def) + extra_size);
+ PUT_MODE (x, mode);
+ CONST_POLY_INT_COEFFS (x).set_precision (prec);
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];
+
+ *slot = x;
+ return x;
+}
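+
+/* Like CONST_INT, CONST_POLY_INTs are shared through the hash table
+   above, so two CONST_POLY_INTs with the same mode and coefficients are
+   pointer-equal. */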
+
rtx
gen_rtx_REG (machine_mode mode, unsigned int regno)
{
bool
validate_subreg (machine_mode omode, machine_mode imode,
- const_rtx reg, unsigned int offset)
+ const_rtx reg, poly_uint64 offset)
{
- unsigned int isize = GET_MODE_SIZE (imode);
- unsigned int osize = GET_MODE_SIZE (omode);
+ poly_uint64 isize = GET_MODE_SIZE (imode);
+ poly_uint64 osize = GET_MODE_SIZE (omode);
+
+ /* The sizes must be ordered, so that we know whether the subreg
+ is partial, paradoxical or complete. */
+ if (!ordered_p (isize, osize))
+ return false;
/* All subregs must be aligned. */
- if (offset % osize != 0)
+ if (!multiple_p (offset, osize))
return false;
/* The subreg offset cannot be outside the inner object. */
- if (offset >= isize)
+ if (maybe_ge (offset, isize))
return false;
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);
+
/* ??? This should not be here. Temporarily continue to allow word_mode
subregs of anything. The most common offender is (subreg:SI (reg:DF)).
Generally, backends are doing something sketchy but it'll take time to
;
/* ??? Similarly, e.g. with (subreg:DF (reg:TI)). Though store_bit_field
is the culprit here, and not the backends. */
- else if (osize >= UNITS_PER_WORD && isize >= osize)
+ else if (known_ge (osize, regsize) && known_ge (isize, osize))
;
/* Allow component subregs of complex and vector. Though given the below
extraction rules, it's not always clear what that means. */
(subreg:SI (reg:DF) 0) isn't. */
else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
{
- if (! (isize == osize
+ if (! (known_eq (isize, osize)
/* LRA can use subreg to store a floating point value in
an integer mode. Although the floating point and the
integer modes need the same number of hard registers,
}
/* Paradoxical subregs must have offset zero. */
- if (osize > isize)
- return offset == 0;
+ if (maybe_gt (osize, isize))
+ return known_eq (offset, 0U);
/* This is a normal subreg. Verify that the offset is representable. */
{
unsigned int regno = REGNO (reg);
-#ifdef CANNOT_CHANGE_MODE_CLASS
if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode))
&& GET_MODE_INNER (imode) == omode)
;
- else if (REG_CANNOT_CHANGE_MODE_P (regno, imode, omode))
+ else if (!REG_CAN_CHANGE_MODE_P (regno, imode, omode))
return false;
-#endif
return subreg_offset_representable_p (regno, imode, offset, omode);
}
+ /* The outer size must be ordered wrt the register size, otherwise
+ we wouldn't know at compile time how many registers the outer
+ mode occupies. */
+ if (!ordered_p (osize, regsize))
+ return false;
+
/* For pseudo registers, we want most of the same checks. Namely:
- If the register no larger than a word, the subreg must be lowpart.
- If the register is larger than a word, the subreg must be the lowpart
- of a subword. A subreg does *not* perform arbitrary bit extraction.
- Given that we've already checked mode/offset alignment, we only have
- to check subword subregs here. */
- if (osize < UNITS_PER_WORD
+
+ Assume that the pseudo register will be allocated to hard registers
+ that can hold REGSIZE bytes each. If OSIZE is not a multiple of REGSIZE,
+ the remainder must correspond to the lowpart of the containing hard
+ register. If BYTES_BIG_ENDIAN, the lowpart is at the highest offset,
+ otherwise it is at the lowest offset.
+
+ Given that we've already checked the mode and offset alignment,
+ we only have to check subblock subregs here. */
+ if (maybe_lt (osize, regsize)
&& ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
{
- machine_mode wmode = isize > UNITS_PER_WORD ? word_mode : imode;
- unsigned int low_off = subreg_lowpart_offset (omode, wmode);
- if (offset % UNITS_PER_WORD != low_off)
+ /* It is invalid for the target to pick a register size for a mode
+ that isn't ordered wrt the size of that mode. */
+ poly_uint64 block_size = ordered_min (isize, regsize);
+ unsigned int start_reg;
+ poly_uint64 offset_within_reg;
+ if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
+ || (BYTES_BIG_ENDIAN
+ ? maybe_ne (offset_within_reg, block_size - osize)
+ : maybe_ne (offset_within_reg, 0U)))
return false;
}
return true;
}
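+
+/* Example (a sketch, assuming a 64-bit target): (subreg:SI (reg:DI) 0)
+   passes the checks above on a little-endian target, whereas a fully
+   big-endian target requires offset 4 for the SImode lowpart of a
+   DImode register. */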
rtx
-gen_rtx_SUBREG (machine_mode mode, rtx reg, int offset)
+gen_rtx_SUBREG (machine_mode mode, rtx reg, poly_uint64 offset)
{
gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset));
return gen_rtx_raw_SUBREG (mode, reg, offset);
paradoxical lowpart, in which case the offset will be negative
on big-endian targets. */
-int
+poly_int64
byte_lowpart_offset (machine_mode outer_mode,
machine_mode inner_mode)
{
- if (GET_MODE_SIZE (outer_mode) < GET_MODE_SIZE (inner_mode))
- return subreg_lowpart_offset (outer_mode, inner_mode);
- else
+ if (paradoxical_subreg_p (outer_mode, inner_mode))
return -subreg_lowpart_offset (inner_mode, outer_mode);
+ else
+ return subreg_lowpart_offset (outer_mode, inner_mode);
+}
+
+/* Return the offset of (subreg:OUTER_MODE (mem:INNER_MODE X) OFFSET)
+ from address X. For paradoxical big-endian subregs this is a
+ negative value, otherwise it's the same as OFFSET. */
+
+poly_int64
+subreg_memory_offset (machine_mode outer_mode, machine_mode inner_mode,
+ poly_uint64 offset)
+{
+ if (paradoxical_subreg_p (outer_mode, inner_mode))
+ {
+ gcc_assert (known_eq (offset, 0U));
+ return -subreg_lowpart_offset (inner_mode, outer_mode);
+ }
+ return offset;
+}
+
+/* As above, but return the offset that existing subreg X would have
+ if SUBREG_REG (X) were stored in memory. The only significant thing
+ about the current SUBREG_REG is its mode. */
+
+poly_int64
+subreg_memory_offset (const_rtx x)
+{
+ return subreg_memory_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
+ SUBREG_BYTE (x));
+}
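+
+/* Example (sketch): for (subreg:DI (reg:SI) 0) on a fully big-endian
+   target, subreg_memory_offset returns -4, since the SImode value
+   occupies the high-addressed half of the DImode slot. */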
\f
/* Generate a REG rtx for a new pseudo register of mode MODE.
/* Do not call gen_reg_rtx with uninitialized crtl. */
gcc_assert (crtl->emit.regno_pointer_align_length);
- /* Make sure regno_pointer_align, and regno_reg_rtx are large
- enough to have an element for this pseudo reg number. */
+ crtl->emit.ensure_regno_capacity ();
+ gcc_assert (reg_rtx_no < crtl->emit.regno_pointer_align_length);
- if (reg_rtx_no == crtl->emit.regno_pointer_align_length)
- {
- int old_size = crtl->emit.regno_pointer_align_length;
- char *tmp;
- rtx *new1;
+ val = gen_raw_REG (mode, reg_rtx_no);
+ regno_reg_rtx[reg_rtx_no++] = val;
+ return val;
+}
- tmp = XRESIZEVEC (char, crtl->emit.regno_pointer_align, old_size * 2);
- memset (tmp + old_size, 0, old_size);
- crtl->emit.regno_pointer_align = (unsigned char *) tmp;
+/* Make sure regno_pointer_align and regno_reg_rtx are large
+ enough to have elements in the range 0 <= idx <= reg_rtx_no. */
- new1 = GGC_RESIZEVEC (rtx, regno_reg_rtx, old_size * 2);
- memset (new1 + old_size, 0, old_size * sizeof (rtx));
- regno_reg_rtx = new1;
+void
+emit_status::ensure_regno_capacity ()
+{
+ int old_size = regno_pointer_align_length;
- crtl->emit.regno_pointer_align_length = old_size * 2;
- }
+ if (reg_rtx_no < old_size)
+ return;
- val = gen_raw_REG (mode, reg_rtx_no);
- regno_reg_rtx[reg_rtx_no++] = val;
- return val;
+ int new_size = old_size * 2;
+ while (reg_rtx_no >= new_size)
+ new_size *= 2;
+
+ char *tmp = XRESIZEVEC (char, regno_pointer_align, new_size);
+ memset (tmp + old_size, 0, new_size - old_size);
+ regno_pointer_align = (unsigned char *) tmp;
+
+ rtx *new1 = GGC_RESIZEVEC (rtx, regno_reg_rtx, new_size);
+ memset (new1 + old_size, 0, (new_size - old_size) * sizeof (rtx));
+ regno_reg_rtx = new1;
+
+ crtl->emit.regno_pointer_align_length = new_size;
}
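+
+/* gen_reg_rtx above relies on this doubling growth: after the call,
+   index reg_rtx_no is valid in both regno_pointer_align and
+   regno_reg_rtx. */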
/* Return TRUE if REG is a PARM_DECL, FALSE otherwise. */
to the REG_OFFSET. */
static void
-update_reg_offset (rtx new_rtx, rtx reg, int offset)
+update_reg_offset (rtx new_rtx, rtx reg, poly_int64 offset)
{
REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg),
- REG_OFFSET (reg) + offset);
+ REG_OFFSET (reg) + offset);
}
/* Generate a register with same attributes as REG, but with OFFSET
rtx
gen_rtx_REG_offset (rtx reg, machine_mode mode, unsigned int regno,
- int offset)
+ poly_int64 offset)
{
rtx new_rtx = gen_rtx_REG (mode, regno);
void
set_reg_attrs_from_value (rtx reg, rtx x)
{
- int offset;
+ poly_int64 offset;
bool can_be_reg_pointer = true;
/* Don't call mark_reg_pointer for incompatible pointer sign
{
#if defined(POINTERS_EXTEND_UNSIGNED)
if (((GET_CODE (x) == SIGN_EXTEND && POINTERS_EXTEND_UNSIGNED)
- || (GET_CODE (x) != SIGN_EXTEND && ! POINTERS_EXTEND_UNSIGNED))
+ || (GET_CODE (x) == ZERO_EXTEND && ! POINTERS_EXTEND_UNSIGNED)
+ || (paradoxical_subreg_p (x)
+ && ! (SUBREG_PROMOTED_VAR_P (x)
+ && SUBREG_CHECK_PROMOTED_SIGN (x,
+ POINTERS_EXTEND_UNSIGNED))))
&& !targetm.have_ptr_extend ())
can_be_reg_pointer = false;
#endif
Fix this now so that array indices work later. */
void
-maybe_set_first_label_num (rtx x)
+maybe_set_first_label_num (rtx_code_label *x)
{
if (CODE_LABEL_NUMBER (x) < first_label_num)
first_label_num = CODE_LABEL_NUMBER (x);
}
+
+/* For use by the RTL function loader, when mingling with normal
+ functions.
+ Ensure that label_num is greater than the label num of X, to avoid
+ duplicate labels in the generated assembler. */
+
+void
+maybe_set_max_label_num (rtx_code_label *x)
+{
+ if (CODE_LABEL_NUMBER (x) >= label_num)
+ label_num = CODE_LABEL_NUMBER (x) + 1;
+}
+
\f
/* Return a value representing some low-order bits of X, where the number
of low-order bits is given by MODE. Note that no conversion is done
rtx
gen_lowpart_common (machine_mode mode, rtx x)
{
- int msize = GET_MODE_SIZE (mode);
- int xsize;
+ poly_uint64 msize = GET_MODE_SIZE (mode);
machine_mode innermode;
/* Unfortunately, this routine doesn't take a parameter for the mode of X,
so we have to make one up. Yuk. */
innermode = GET_MODE (x);
if (CONST_INT_P (x)
- && msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
- innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
+ && known_le (msize * BITS_PER_UNIT,
+ (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT))
+ innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
else if (innermode == VOIDmode)
- innermode = mode_for_size (HOST_BITS_PER_DOUBLE_INT, MODE_INT, 0);
-
- xsize = GET_MODE_SIZE (innermode);
+ innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();
gcc_assert (innermode != VOIDmode && innermode != BLKmode);
if (innermode == mode)
return x;
- /* MODE must occupy no more words than the mode of X. */
- if ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
- > ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
+ /* The size of the outer and inner modes must be ordered. */
+ poly_uint64 xsize = GET_MODE_SIZE (innermode);
+ if (!ordered_p (msize, xsize))
return 0;
- /* Don't allow generating paradoxical FLOAT_MODE subregs. */
- if (SCALAR_FLOAT_MODE_P (mode) && msize > xsize)
- return 0;
+ if (SCALAR_FLOAT_MODE_P (mode))
+ {
+ /* Don't allow paradoxical FLOAT_MODE subregs. */
+ if (maybe_gt (msize, xsize))
+ return 0;
+ }
+ else
+ {
+ /* MODE must occupy no more of the underlying registers than X. */
+ poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode);
+ unsigned int mregs, xregs;
+ if (!can_div_away_from_zero_p (msize, regsize, &mregs)
+ || !can_div_away_from_zero_p (xsize, regsize, &xregs)
+ || mregs > xregs)
+ return 0;
+ }
+ scalar_int_mode int_mode, int_innermode, from_mode;
if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
- && (GET_MODE_CLASS (mode) == MODE_INT
- || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT))
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (innermode, &int_innermode)
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &from_mode))
{
/* If we are getting the low-order part of something that has been
sign- or zero-extended, we can either just use the object being
This case is used mostly by combine and cse. */
- if (GET_MODE (XEXP (x, 0)) == mode)
+ if (from_mode == int_mode)
return XEXP (x, 0);
- else if (msize < GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
- return gen_lowpart_common (mode, XEXP (x, 0));
- else if (msize < xsize)
- return gen_rtx_fmt_e (GET_CODE (x), mode, XEXP (x, 0));
+ else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (from_mode))
+ return gen_lowpart_common (int_mode, XEXP (x, 0));
+ else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (int_innermode))
+ return gen_rtx_fmt_e (GET_CODE (x), int_mode, XEXP (x, 0));
}
else if (GET_CODE (x) == SUBREG || REG_P (x)
|| GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
- || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x))
+ || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
+ || CONST_POLY_INT_P (x))
return lowpart_subreg (mode, x, innermode);
/* Otherwise, we can't do this. */
rtx
gen_highpart (machine_mode mode, rtx x)
{
- unsigned int msize = GET_MODE_SIZE (mode);
+ poly_uint64 msize = GET_MODE_SIZE (mode);
rtx result;
/* This case loses if X is a subreg. To catch bugs early,
complain if an invalid MODE is used even in other cases. */
- gcc_assert (msize <= UNITS_PER_WORD
- || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x)));
+ gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
+ || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));
result = simplify_gen_subreg (mode, x, GET_MODE (x),
subreg_highpart_offset (mode, GET_MODE (x)));
subreg_highpart_offset (outermode, innermode));
}
-/* Return the SUBREG_BYTE for an OUTERMODE lowpart of an INNERMODE value. */
+/* Return the SUBREG_BYTE for a lowpart subreg whose outer mode has
+ OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */
-unsigned int
-subreg_lowpart_offset (machine_mode outermode, machine_mode innermode)
+poly_uint64
+subreg_size_lowpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
{
- unsigned int offset = 0;
- int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
-
- if (difference > 0)
- {
- if (WORDS_BIG_ENDIAN)
- offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- offset += difference % UNITS_PER_WORD;
- }
+ gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
+ if (maybe_gt (outer_bytes, inner_bytes))
+ /* Paradoxical subregs always have a SUBREG_BYTE of 0. */
+ return 0;
- return offset;
+ if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
+ return inner_bytes - outer_bytes;
+ else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
+ return 0;
+ else
+ return subreg_size_offset_from_lsb (outer_bytes, inner_bytes, 0);
}
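+
+/* For example: outer_bytes == 4 and inner_bytes == 8 yields 0 on a fully
+   little-endian target and 4 on a fully big-endian one; mixed endianness
+   is delegated to subreg_size_offset_from_lsb. */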
-/* Return offset in bytes to get OUTERMODE high part
- of the value in mode INNERMODE stored in memory in target format. */
-unsigned int
-subreg_highpart_offset (machine_mode outermode, machine_mode innermode)
-{
- unsigned int offset = 0;
- int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
-
- gcc_assert (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode));
+/* Return the SUBREG_BYTE for a highpart subreg whose outer mode has
+ OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */
- if (difference > 0)
- {
- if (! WORDS_BIG_ENDIAN)
- offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (! BYTES_BIG_ENDIAN)
- offset += difference % UNITS_PER_WORD;
- }
+poly_uint64
+subreg_size_highpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
+{
+ gcc_assert (known_ge (inner_bytes, outer_bytes));
- return offset;
+ if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
+ return 0;
+ else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
+ return inner_bytes - outer_bytes;
+ else
+ return subreg_size_offset_from_lsb (outer_bytes, inner_bytes,
+ (inner_bytes - outer_bytes)
+ * BITS_PER_UNIT);
}
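+
+/* Mirror image of the lowpart case: outer_bytes == 4 and inner_bytes == 8
+   yields 0 on a fully big-endian target and 4 on a fully little-endian
+   one. */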
/* Return 1 iff X, assumed to be a SUBREG,
else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
return 0;
- return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)))
- == SUBREG_BYTE (x));
-}
-
-/* Return true if X is a paradoxical subreg, false otherwise. */
-bool
-paradoxical_subreg_p (const_rtx x)
-{
- if (GET_CODE (x) != SUBREG)
- return false;
- return (GET_MODE_PRECISION (GET_MODE (x))
- > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))));
+ return known_eq (subreg_lowpart_offset (GET_MODE (x),
+ GET_MODE (SUBREG_REG (x))),
+ SUBREG_BYTE (x));
}
\f
/* Return subword OFFSET of operand OP.
*/
rtx
-operand_subword (rtx op, unsigned int offset, int validate_address, machine_mode mode)
+operand_subword (rtx op, poly_uint64 offset, int validate_address,
+ machine_mode mode)
{
if (mode == VOIDmode)
mode = GET_MODE (op);
/* If OP is narrower than a word, fail. */
if (mode != BLKmode
- && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
+ && maybe_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
return 0;
/* If we want a word outside OP, return zero. */
if (mode != BLKmode
- && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
+ && maybe_gt ((offset + 1) * UNITS_PER_WORD, GET_MODE_SIZE (mode)))
return const0_rtx;
/* Form a new MEM at the requested address. */
MODE is the mode of OP, in case it is CONST_INT. */
rtx
-operand_subword_force (rtx op, unsigned int offset, machine_mode mode)
+operand_subword_force (rtx op, poly_uint64 offset, machine_mode mode)
{
rtx result = operand_subword (op, offset, 1, mode);
if (mode != BLKmode && mode != VOIDmode)
{
- /* If this is a register which can not be accessed by words, copy it
+ /* If this is a register which cannot be accessed by words, copy it
to a pseudo register. */
if (REG_P (op))
op = copy_to_reg (op);
return result;
}
\f
+mem_attrs::mem_attrs ()
+ : expr (NULL_TREE),
+ offset (0),
+ size (0),
+ alias (0),
+ align (0),
+ addrspace (ADDR_SPACE_GENERIC),
+ offset_known_p (false),
+ size_known_p (false)
+{}
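+
+/* The default constructor zero-initializes every field, replacing the
+   "memset (&attrs, 0, sizeof (attrs))" idiom callers previously needed
+   before copying in an existing mem_attrs. */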
+
/* Returns 1 if both MEM_EXPR can be considered equal
and 0 otherwise. */
get_mem_align_offset (rtx mem, unsigned int align)
{
tree expr;
- unsigned HOST_WIDE_INT offset;
+ poly_uint64 offset;
/* This function can't use
if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem)
tree byte_offset = component_ref_field_offset (expr);
tree bit_offset = DECL_FIELD_BIT_OFFSET (field);
+ poly_uint64 suboffset;
if (!byte_offset
- || !tree_fits_uhwi_p (byte_offset)
+ || !poly_int_tree_p (byte_offset, &suboffset)
|| !tree_fits_uhwi_p (bit_offset))
return -1;
- offset += tree_to_uhwi (byte_offset);
+ offset += suboffset;
offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT;
if (inner == NULL_TREE)
else
return -1;
- return offset & ((align / BITS_PER_UNIT) - 1);
+ HOST_WIDE_INT misalign;
+ if (!known_misalignment (offset, align / BITS_PER_UNIT, &misalign))
+ return -1;
+ return misalign;
}
/* Given REF (a MEM) and T, either the type of X or the expression
void
set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
- HOST_WIDE_INT bitpos)
+ poly_int64 bitpos)
{
- HOST_WIDE_INT apply_bitpos = 0;
+ poly_int64 apply_bitpos = 0;
tree type;
struct mem_attrs attrs, *defattrs, *refattrs;
addr_space_t as;
set_mem_attributes. */
gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t));
- memset (&attrs, 0, sizeof (attrs));
-
/* Get the alias set from the expression or type (perhaps using a
front-end routine) and use it. */
attrs.alias = get_alias_set (t);
able to simply always use TYPE_ALIGN? */
}
- /* We can set the alignment from the type if we are making an object,
- this is an INDIRECT_REF, or if TYPE_ALIGN_OK. */
- if (objectp || TREE_CODE (t) == INDIRECT_REF || TYPE_ALIGN_OK (type))
+ /* We can set the alignment from the type if we are making an object or if
+ this is an INDIRECT_REF. */
+ if (objectp || TREE_CODE (t) == INDIRECT_REF)
attrs.align = MAX (attrs.align, TYPE_ALIGN (type));
/* If the size is known, we can set that. */
while (TREE_CODE (t2) == ARRAY_REF);
if (DECL_P (t2)
- || TREE_CODE (t2) == COMPONENT_REF)
+ || (TREE_CODE (t2) == COMPONENT_REF
+ /* For trailing arrays t2 doesn't have a size that
+ covers all valid accesses. */
+ && ! array_at_struct_end_p (t)))
{
attrs.expr = t2;
attrs.offset_known_p = false;
- if (tree_fits_uhwi_p (off_tree))
+ if (poly_int_tree_p (off_tree, &attrs.offset))
{
attrs.offset_known_p = true;
- attrs.offset = tree_to_uhwi (off_tree);
apply_bitpos = bitpos;
}
}
unsigned int obj_align;
unsigned HOST_WIDE_INT obj_bitpos;
get_object_alignment_1 (t, &obj_align, &obj_bitpos);
- obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
- if (obj_bitpos != 0)
- obj_align = (obj_bitpos & -obj_bitpos);
+ unsigned int diff_align = known_alignment (obj_bitpos - bitpos);
+ if (diff_align != 0)
+ obj_align = MIN (obj_align, diff_align);
attrs.align = MAX (attrs.align, obj_align);
}
- if (tree_fits_uhwi_p (new_size))
+ poly_uint64 const_size;
+ if (poly_int_tree_p (new_size, &const_size))
{
attrs.size_known_p = true;
- attrs.size = tree_to_uhwi (new_size);
+ attrs.size = const_size;
}
/* If we modified OFFSET based on T, then subtract the outstanding
bit position offset. Similarly, increase the size of the accessed
object to contain the negative offset. */
- if (apply_bitpos)
+ if (maybe_ne (apply_bitpos, 0))
{
gcc_assert (attrs.offset_known_p);
- attrs.offset -= apply_bitpos / BITS_PER_UNIT;
+ poly_int64 bytepos = bits_to_bytes_round_down (apply_bitpos);
+ attrs.offset -= bytepos;
if (attrs.size_known_p)
- attrs.size += apply_bitpos / BITS_PER_UNIT;
+ attrs.size += bytepos;
}
/* Now set the attributes we computed above. */
void
set_mem_alias_set (rtx mem, alias_set_type set)
{
- struct mem_attrs attrs;
-
/* If the new and old alias sets don't conflict, something is wrong. */
gcc_checking_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem)));
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.alias = set;
set_mem_attrs (mem, &attrs);
}
void
set_mem_addr_space (rtx mem, addr_space_t addrspace)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.addrspace = addrspace;
set_mem_attrs (mem, &attrs);
}
void
set_mem_align (rtx mem, unsigned int align)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.align = align;
set_mem_attrs (mem, &attrs);
}
void
set_mem_expr (rtx mem, tree expr)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.expr = expr;
set_mem_attrs (mem, &attrs);
}
/* Set the offset of MEM to OFFSET. */
void
-set_mem_offset (rtx mem, HOST_WIDE_INT offset)
+set_mem_offset (rtx mem, poly_int64 offset)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.offset_known_p = true;
attrs.offset = offset;
set_mem_attrs (mem, &attrs);
void
clear_mem_offset (rtx mem)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.offset_known_p = false;
set_mem_attrs (mem, &attrs);
}
/* Set the size of MEM to SIZE. */
void
-set_mem_size (rtx mem, HOST_WIDE_INT size)
+set_mem_size (rtx mem, poly_int64 size)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.size_known_p = true;
attrs.size = size;
set_mem_attrs (mem, &attrs);
void
clear_mem_size (rtx mem)
{
- struct mem_attrs attrs;
-
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.size_known_p = false;
set_mem_attrs (mem, &attrs);
}
{
rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
machine_mode mmode = GET_MODE (new_rtx);
- struct mem_attrs attrs, *defattrs;
+ struct mem_attrs *defattrs;
- attrs = *get_mem_attrs (memref);
+ mem_attrs attrs (*get_mem_attrs (memref));
defattrs = mode_mem_attrs[(int) mmode];
attrs.expr = NULL_TREE;
attrs.offset_known_p = false;
has no inherent size. */
rtx
-adjust_address_1 (rtx memref, machine_mode mode, HOST_WIDE_INT offset,
+adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset,
int validate, int adjust_address, int adjust_object,
- HOST_WIDE_INT size)
+ poly_int64 size)
{
rtx addr = XEXP (memref, 0);
rtx new_rtx;
- machine_mode address_mode;
- int pbits;
- struct mem_attrs attrs = *get_mem_attrs (memref), *defattrs;
+ scalar_int_mode address_mode;
+ struct mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
unsigned HOST_WIDE_INT max_align;
#ifdef POINTERS_EXTEND_UNSIGNED
- machine_mode pointer_mode
+ scalar_int_mode pointer_mode
= targetm.addr_space.pointer_mode (attrs.addrspace);
#endif
size = defattrs->size;
/* If there are no changes, just return the original memory reference. */
- if (mode == GET_MODE (memref) && !offset
- && (size == 0 || (attrs.size_known_p && attrs.size == size))
+ if (mode == GET_MODE (memref)
+ && known_eq (offset, 0)
+ && (known_eq (size, 0)
+ || (attrs.size_known_p && known_eq (attrs.size, size)))
&& (!validate || memory_address_addr_space_p (mode, addr,
attrs.addrspace)))
return memref;
/* Convert a possibly large offset to a signed value within the
range of the target address space. */
address_mode = get_address_mode (memref);
- pbits = GET_MODE_BITSIZE (address_mode);
- if (HOST_BITS_PER_WIDE_INT > pbits)
- {
- int shift = HOST_BITS_PER_WIDE_INT - pbits;
- offset = (((HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) offset << shift))
- >> shift);
- }
+ offset = trunc_int_for_mode (offset, address_mode);
if (adjust_address)
{
/* If MEMREF is a LO_SUM and the offset is within the alignment of the
object, we can merge it into the LO_SUM. */
- if (GET_MODE (memref) != BLKmode && GET_CODE (addr) == LO_SUM
- && offset >= 0
- && (unsigned HOST_WIDE_INT) offset
- < GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT)
+ if (GET_MODE (memref) != BLKmode
+ && GET_CODE (addr) == LO_SUM
+ && known_in_range_p (offset,
+ 0, (GET_MODE_ALIGNMENT (GET_MODE (memref))
+ / BITS_PER_UNIT)))
addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
plus_constant (address_mode,
XEXP (addr, 1), offset));
else if (POINTERS_EXTEND_UNSIGNED > 0
&& GET_CODE (addr) == ZERO_EXTEND
&& GET_MODE (XEXP (addr, 0)) == pointer_mode
- && trunc_int_for_mode (offset, pointer_mode) == offset)
+ && known_eq (trunc_int_for_mode (offset, pointer_mode), offset))
addr = gen_rtx_ZERO_EXTEND (address_mode,
plus_constant (pointer_mode,
XEXP (addr, 0), offset));
/* If the address is a REG, change_address_1 rightfully returns memref,
but this would destroy memref's MEM_ATTRS. */
- if (new_rtx == memref && offset != 0)
+ if (new_rtx == memref && maybe_ne (offset, 0))
new_rtx = copy_rtx (new_rtx);
/* Conservatively drop the object if we don't know where we start from. */
attrs.offset += offset;
/* Drop the object if the new left end is not within its bounds. */
- if (adjust_object && attrs.offset < 0)
+ if (adjust_object && maybe_lt (attrs.offset, 0))
{
attrs.expr = NULL_TREE;
attrs.alias = 0;
/* Compute the new alignment by taking the MIN of the alignment and the
lowest-order set bit in OFFSET, but don't change the alignment if OFFSET
is zero. */
- if (offset != 0)
+ if (maybe_ne (offset, 0))
{
- max_align = (offset & -offset) * BITS_PER_UNIT;
+ max_align = known_alignment (offset) * BITS_PER_UNIT;
attrs.align = MIN (attrs.align, max_align);
}
- if (size)
+ if (maybe_ne (size, 0))
{
/* Drop the object if the new right end is not within its bounds. */
- if (adjust_object && (offset + size) > attrs.size)
+ if (adjust_object && maybe_gt (offset + size, attrs.size))
{
attrs.expr = NULL_TREE;
attrs.alias = 0;
rtx
adjust_automodify_address_1 (rtx memref, machine_mode mode, rtx addr,
- HOST_WIDE_INT offset, int validate)
+ poly_int64 offset, int validate)
{
memref = change_address_1 (memref, VOIDmode, addr, validate, false);
return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
{
rtx new_rtx, addr = XEXP (memref, 0);
machine_mode address_mode;
- struct mem_attrs attrs, *defattrs;
+ struct mem_attrs *defattrs;
- attrs = *get_mem_attrs (memref);
+ mem_attrs attrs (*get_mem_attrs (memref));
address_mode = get_address_mode (memref);
new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
operations plus masking logic. */
rtx
-widen_memory_access (rtx memref, machine_mode mode, HOST_WIDE_INT offset)
+widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
{
rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
- struct mem_attrs attrs;
- unsigned int size = GET_MODE_SIZE (mode);
+ poly_uint64 size = GET_MODE_SIZE (mode);
/* If there are no changes, just return the original memory reference. */
if (new_rtx == memref)
return new_rtx;
- attrs = *get_mem_attrs (new_rtx);
+ mem_attrs attrs (*get_mem_attrs (new_rtx));
/* If we don't know what offset we were at within the expression, then
we can't know if we've overstepped the bounds. */
/* Is the field at least as large as the access? If so, ok,
otherwise strip back to the containing structure. */
- if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST
- && compare_tree_int (DECL_SIZE_UNIT (field), size) >= 0
- && attrs.offset >= 0)
+ if (poly_int_tree_p (DECL_SIZE_UNIT (field))
+ && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (field)), size)
+ && known_ge (attrs.offset, 0))
break;
- if (! tree_fits_uhwi_p (offset))
+ poly_uint64 suboffset;
+ if (!poly_int_tree_p (offset, &suboffset))
{
attrs.expr = NULL_TREE;
break;
}
attrs.expr = TREE_OPERAND (attrs.expr, 0);
- attrs.offset += tree_to_uhwi (offset);
+ attrs.offset += suboffset;
attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
/ BITS_PER_UNIT);
}
/* Similarly for the decl. */
else if (DECL_P (attrs.expr)
&& DECL_SIZE_UNIT (attrs.expr)
- && TREE_CODE (DECL_SIZE_UNIT (attrs.expr)) == INTEGER_CST
- && compare_tree_int (DECL_SIZE_UNIT (attrs.expr), size) >= 0
- && (! attrs.offset_known_p || attrs.offset >= 0))
+ && poly_int_tree_p (DECL_SIZE_UNIT (attrs.expr))
+ && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (attrs.expr)),
+ size)
+ && known_ge (attrs.offset, 0))
break;
else
{
{
tree d = spill_slot_decl;
rtx rd;
- struct mem_attrs attrs;
if (d || !force_build_p)
return d;
rd = gen_rtx_MEM (BLKmode, frame_pointer_rtx);
MEM_NOTRAP_P (rd) = 1;
- attrs = *mode_mem_attrs[(int) BLKmode];
+ mem_attrs attrs (*mode_mem_attrs[(int) BLKmode]);
attrs.alias = new_alias_set ();
attrs.expr = d;
set_mem_attrs (rd, &attrs);
void
set_mem_attrs_for_spill (rtx mem)
{
- struct mem_attrs attrs;
rtx addr;
- attrs = *get_mem_attrs (mem);
+ mem_attrs attrs (*get_mem_attrs (mem));
attrs.expr = get_spill_slot_decl (true);
attrs.alias = MEM_ALIAS_SET (DECL_RTL (attrs.expr));
attrs.addrspace = ADDR_SPACE_GENERIC;
with perhaps the plus missing for offset = 0. */
addr = XEXP (mem, 0);
attrs.offset_known_p = true;
- attrs.offset = 0;
- if (GET_CODE (addr) == PLUS
- && CONST_INT_P (XEXP (addr, 1)))
- attrs.offset = INTVAL (XEXP (addr, 1));
+ strip_offset (addr, &attrs.offset);
set_mem_attrs (mem, &attrs);
MEM_NOTRAP_P (mem) = 1;
This special care is necessary when the stack slot MEM does not
actually appear in the insn chain. If it does appear, its address
is unshared from all else at that point. */
- stack_slot_list = safe_as_a <rtx_expr_list *> (
- copy_rtx_if_shared (stack_slot_list));
+ unsigned int i;
+ rtx temp;
+ FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp)
+ (*stack_slot_list)[i] = copy_rtx_if_shared (temp);
}
/* Go through all the RTL insn bodies and copy any invalid shared
for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl))
set_used_flags (DECL_RTL (decl));
- reset_used_flags (stack_slot_list);
+ rtx temp;
+ unsigned int i;
+ FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp)
+ reset_used_flags (temp);
unshare_all_rtl_1 (insn);
}
unshare_all_rtl (void)
{
unshare_all_rtl_1 (get_insns ());
+
+ for (tree decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl))
+ {
+ if (DECL_RTL_SET_P (decl))
+ SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl)));
+ DECL_INCOMING_RTL (decl) = copy_rtx_if_shared (DECL_INCOMING_RTL (decl));
+ }
+
return 0;
}
/* A SCRATCH must be shared because each one represents a distinct value. */
return;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
- if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
- && ORIGINAL_REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 0)))
+ if (REG_P (XEXP (x, 0))
+ && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0)))
+ && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0))))
return;
break;
/* This rtx may not be shared. If it has already been seen,
replace it with a copy of itself. */
-#ifdef ENABLE_CHECKING
- if (RTX_FLAG (x, used))
+ if (flag_checking && RTX_FLAG (x, used))
{
error ("invalid rtl sharing found in the insn");
debug_rtx (insn);
debug_rtx (x);
internal_error ("internal consistency failure");
}
-#endif
gcc_assert (!RTX_FLAG (x, used));
RTX_FLAG (x, used) = 1;
verify_insn_sharing (rtx insn)
{
gcc_assert (INSN_P (insn));
- reset_used_flags (PATTERN (insn));
- reset_used_flags (REG_NOTES (insn));
+ verify_rtx_sharing (PATTERN (insn), insn);
+ verify_rtx_sharing (REG_NOTES (insn), insn);
if (CALL_P (insn))
- reset_used_flags (CALL_INSN_FUNCTION_USAGE (insn));
+ verify_rtx_sharing (CALL_INSN_FUNCTION_USAGE (insn), insn);
}
/* Go through all the RTL insn bodies and check that there is no unexpected
/* A SCRATCH must be shared because each one represents a distinct value. */
return;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
- if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
- && ORIGINAL_REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 0)))
+ if (REG_P (XEXP (x, 0))
+ && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0)))
+ && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0))))
return;
break;
look inside SEQUENCEs. */
rtx_insn *
-next_nonnote_insn (rtx uncast_insn)
+next_nonnote_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
while (insn)
{
insn = NEXT_INSN (insn);
return insn;
}
-/* Return the next insn after INSN that is not a NOTE, but stop the
- search before we enter another basic block. This routine does not
- look inside SEQUENCEs. */
+/* Return the next insn after INSN that is not a DEBUG_INSN. This
+ routine does not look inside SEQUENCEs. */
rtx_insn *
-next_nonnote_insn_bb (rtx_insn *insn)
+next_nondebug_insn (rtx_insn *insn)
{
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || !NOTE_P (insn))
+ if (insn == 0 || !DEBUG_INSN_P (insn))
break;
- if (NOTE_INSN_BASIC_BLOCK_P (insn))
- return NULL;
}
return insn;
not look inside SEQUENCEs. */
rtx_insn *
-prev_nonnote_insn (rtx uncast_insn)
+prev_nonnote_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
return insn;
}
-/* Return the previous insn before INSN that is not a NOTE, but stop
- the search before we enter another basic block. This routine does
- not look inside SEQUENCEs. */
+/* Return the previous insn before INSN that is not a DEBUG_INSN.
+ This routine does not look inside SEQUENCEs. */
rtx_insn *
-prev_nonnote_insn_bb (rtx uncast_insn)
+prev_nondebug_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || !NOTE_P (insn))
+ if (insn == 0 || !DEBUG_INSN_P (insn))
break;
- if (NOTE_INSN_BASIC_BLOCK_P (insn))
- return NULL;
}
return insn;
}
-/* Return the next insn after INSN that is not a DEBUG_INSN. This
- routine does not look inside SEQUENCEs. */
+/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN.
+ This routine does not look inside SEQUENCEs. */
rtx_insn *
-next_nondebug_insn (rtx uncast_insn)
+next_nonnote_nondebug_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN,
+ but stop the search before we enter another basic block. This
+ routine does not look inside SEQUENCEs. */
+rtx_insn *
+next_nonnote_nondebug_insn_bb (rtx_insn *insn)
+{
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || !DEBUG_INSN_P (insn))
+ if (insn == 0)
+ break;
+ if (DEBUG_INSN_P (insn))
+ continue;
+ if (!NOTE_P (insn))
break;
+ if (NOTE_INSN_BASIC_BLOCK_P (insn))
+ return NULL;
}
return insn;
}
-/* Return the previous insn before INSN that is not a DEBUG_INSN.
+/* Return the previous insn before INSN that is not a NOTE nor DEBUG_INSN.
This routine does not look inside SEQUENCEs. */
rtx_insn *
-prev_nondebug_insn (rtx uncast_insn)
+prev_nonnote_nondebug_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || !DEBUG_INSN_P (insn))
+ if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
break;
}
return insn;
}
-/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN.
- This routine does not look inside SEQUENCEs. */
+/* Return the previous insn before INSN that is not a NOTE nor
+ DEBUG_INSN, but stop the search before we enter another basic
+ block. This routine does not look inside SEQUENCEs. */
rtx_insn *
-next_nonnote_nondebug_insn (rtx uncast_insn)
+prev_nonnote_nondebug_insn_bb (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0)
+ break;
+ if (DEBUG_INSN_P (insn))
+ continue;
+ if (!NOTE_P (insn))
+ break;
+ if (NOTE_INSN_BASIC_BLOCK_P (insn))
+ return NULL;
+ }
+
+ return insn;
+}
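+
+/* Naming scheme for the walkers above: the "_bb" variants stop at basic
+   block boundaries, returning NULL at a NOTE_INSN_BASIC_BLOCK, while the
+   plain variants walk the whole insn chain. */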
+/* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx_insn *
+next_real_insn (rtx_insn *insn)
+{
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
+ if (insn == 0 || INSN_P (insn))
break;
}
return insn;
}
-/* Return the previous insn before INSN that is not a NOTE nor DEBUG_INSN.
- This routine does not look inside SEQUENCEs. */
+/* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
rtx_insn *
-prev_nonnote_nondebug_insn (rtx uncast_insn)
+prev_real_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
+ if (insn == 0 || INSN_P (insn))
break;
}
SEQUENCEs. */
rtx_insn *
-next_real_insn (rtx uncast_insn)
+next_real_nondebug_insn (rtx uncast_insn)
{
rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
while (insn)
{
insn = NEXT_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
SEQUENCEs. */
rtx_insn *
-prev_real_insn (rtx uncast_insn)
+prev_real_nondebug_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
- if (insn == 0 || INSN_P (insn))
+ if (insn == 0 || NONDEBUG_INSN_P (insn))
break;
}
standalone USE and CLOBBER insn. */
int
-active_insn_p (const_rtx insn)
+active_insn_p (const rtx_insn *insn)
{
return (CALL_P (insn) || JUMP_P (insn)
|| JUMP_TABLE_DATA_P (insn) /* FIXME */
}
rtx_insn *
-next_active_insn (rtx uncast_insn)
+next_active_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = NEXT_INSN (insn);
standalone USE and CLOBBER insn. */
rtx_insn *
-prev_active_insn (rtx uncast_insn)
+prev_active_insn (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
while (insn)
{
insn = PREV_INSN (insn);
Return 0 if we can't find the insn. */
rtx_insn *
-next_cc0_user (rtx uncast_insn)
+next_cc0_user (rtx_insn *insn)
{
- rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
-
rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX);
if (note)
const char *fmt;
code = GET_CODE (x);
- if (code == LABEL_REF && LABEL_P (LABEL_REF_LABEL (x)))
- LABEL_NUSES (LABEL_REF_LABEL (x))++;
+ if (code == LABEL_REF && LABEL_P (label_ref_label (x)))
+ LABEL_NUSES (label_ref_label (x))++;
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
rtx_insn *
try_split (rtx pat, rtx_insn *trial, int last)
{
- rtx_insn *before = PREV_INSN (trial);
- rtx_insn *after = NEXT_INSN (trial);
+ rtx_insn *before, *after;
rtx note;
rtx_insn *seq, *tem;
- int probability;
+ profile_probability probability;
rtx_insn *insn_last, *insn;
int njumps = 0;
rtx_insn *call_insn = NULL;
if (any_condjump_p (trial)
&& (note = find_reg_note (trial, REG_BR_PROB, 0)))
- split_branch_probability = XINT (note, 0);
+ split_branch_probability
+ = profile_probability::from_reg_br_prob_note (XINT (note, 0));
+ else
+ split_branch_probability = profile_probability::uninitialized ();
+
probability = split_branch_probability;
seq = split_insns (pat, trial);
- split_branch_probability = -1;
+ split_branch_probability = profile_probability::uninitialized ();
if (!seq)
return trial;
CROSSING_JUMP_P (insn) = CROSSING_JUMP_P (trial);
mark_jump_label (PATTERN (insn), insn, 0);
njumps++;
- if (probability != -1
+ if (probability.initialized_p ()
&& any_condjump_p (insn)
&& !find_reg_note (insn, REG_BR_PROB, 0))
{
is responsible for this step using
the split_branch_probability variable. */
gcc_assert (njumps == 1);
- add_int_reg_note (insn, REG_BR_PROB, probability);
+ add_reg_br_prob_note (insn, probability);
}
}
}
for (insn = insn_last; insn ; insn = PREV_INSN (insn))
if (CALL_P (insn))
{
- rtx_insn *next;
- rtx *p;
-
gcc_assert (call_insn == NULL_RTX);
call_insn = insn;
/* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
target may have explicitly specified. */
- p = &CALL_INSN_FUNCTION_USAGE (insn);
+ rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
while (*p)
p = &XEXP (*p, 1);
*p = CALL_INSN_FUNCTION_USAGE (trial);
/* If the old call was a sibling call, the new one must
be too. */
SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
-
- /* If the new call is the last instruction in the sequence,
- it will effectively replace the old call in-situ. Otherwise
- we must move any following NOTE_INSN_CALL_ARG_LOCATION note
- so that it comes immediately after the new call. */
- if (NEXT_INSN (insn))
- for (next = NEXT_INSN (trial);
- next && NOTE_P (next);
- next = NEXT_INSN (next))
- if (NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
- {
- remove_insn (next);
- add_insn_after (next, insn, NULL);
- break;
- }
}
}
case REG_NORETURN:
case REG_SETJMP:
case REG_TM:
+ case REG_CALL_NOCF_CHECK:
+ case REG_CALL_ARG_LOCATION:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (CALL_P (insn))
break;
case REG_ARGS_SIZE:
- fixup_args_size_notes (NULL, insn_last, INTVAL (XEXP (note, 0)));
+ fixup_args_size_notes (NULL, insn_last, get_args_size (note));
break;
case REG_CALL_DECL:
}
}
+ before = PREV_INSN (trial);
+ after = NEXT_INSN (trial);
+
tem = emit_insn_after_setloc (seq, trial, INSN_LOCATION (trial));
delete_insn (trial);
{
rtx_insn *prev = get_last_insn ();
link_insn_into_chain (insn, prev, NULL);
- if (NULL == get_insns ())
+ if (get_insns () == NULL)
set_first_insn (insn);
set_last_insn (insn);
}
they know how to update a SEQUENCE. */
void
-add_insn_after (rtx uncast_insn, rtx uncast_after, basic_block bb)
+add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *after = as_a <rtx_insn *> (uncast_after);
add_insn_after_nobb (insn, after);
if (!BARRIER_P (after)
&& !BARRIER_P (insn)
they know how to update a SEQUENCE. */
void
-add_insn_before (rtx uncast_insn, rtx uncast_before, basic_block bb)
+add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
add_insn_before_nobb (insn, before);
if (!bb
/* Replace insn with a deleted instruction note. */
void
-set_insn_deleted (rtx insn)
+set_insn_deleted (rtx_insn *insn)
{
if (INSN_P (insn))
- df_insn_delete (as_a <rtx_insn *> (insn));
+ df_insn_delete (insn);
PUT_CODE (insn, NOTE);
NOTE_KIND (insn) = NOTE_INSN_DELETED;
}
To really delete an insn and related DF information, use delete_insn. */
void
-remove_insn (rtx uncast_insn)
+remove_insn (rtx_insn *insn)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
rtx_insn *next = NEXT_INSN (insn);
rtx_insn *prev = PREV_INSN (insn);
basic_block bb;
void
reorder_insns_nobb (rtx_insn *from, rtx_insn *to, rtx_insn *after)
{
-#ifdef ENABLE_CHECKING
- rtx_insn *x;
- for (x = from; x != to; x = NEXT_INSN (x))
- gcc_assert (after != x);
- gcc_assert (after != to);
-#endif
+ if (flag_checking)
+ {
+ for (rtx_insn *x = from; x != to; x = NEXT_INSN (x))
+ gcc_assert (after != x);
+ gcc_assert (after != to);
+ }
/* Splice this bunch out of where it is now. */
if (PREV_INSN (from))
generated would almost certainly die right after it was created. */
static rtx_insn *
-emit_pattern_before_noloc (rtx x, rtx before, rtx last, basic_block bb,
+emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
+ basic_block bb,
rtx_insn *(*make_raw) (rtx))
{
rtx_insn *insn;
gcc_assert (before);
if (x == NULL_RTX)
- return safe_as_a <rtx_insn *> (last);
+ return last;
switch (GET_CODE (x))
{
break;
}
- return safe_as_a <rtx_insn *> (last);
+ return last;
}
/* Make X be output before the instruction BEFORE. */
emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
- emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ emit_pattern_before_noloc (x, before, NULL, NULL,
make_jump_insn_raw));
}
rtx_insn *
emit_call_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_call_insn_raw);
}
and output it before the instruction BEFORE. */
rtx_insn *
-emit_debug_insn_before_noloc (rtx x, rtx before)
+emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
{
- return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+ return emit_pattern_before_noloc (x, before, NULL, NULL,
make_debug_insn_raw);
}
and output it before the insn BEFORE. */
rtx_barrier *
-emit_barrier_before (rtx before)
+emit_barrier_before (rtx_insn *before)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL before the insn BEFORE. */
rtx_code_label *
-emit_label_before (rtx label, rtx_insn *before)
+emit_label_before (rtx_code_label *label, rtx_insn *before)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_before (label, before, NULL);
- return as_a <rtx_code_label *> (label);
+ return label;
}
\f
/* Helper for emit_insn_after, handles lists of instructions
efficiently. */
static rtx_insn *
-emit_insn_after_1 (rtx_insn *first, rtx uncast_after, basic_block bb)
+emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last;
rtx_insn *after_after;
if (!bb && !BARRIER_P (after))
}
static rtx_insn *
-emit_pattern_after_noloc (rtx x, rtx uncast_after, basic_block bb,
+emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
rtx_insn *(*make_raw)(rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = after;
gcc_assert (after);
BB is NULL, an attempt is made to infer the BB from AFTER. */
rtx_insn *
-emit_insn_after_noloc (rtx x, rtx after, basic_block bb)
+emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
{
return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
}
and output it after the insn AFTER. */
rtx_jump_insn *
-emit_jump_insn_after_noloc (rtx x, rtx after)
+emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
and output it after the instruction AFTER. */
rtx_insn *
-emit_call_insn_after_noloc (rtx x, rtx after)
+emit_call_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
}
and output it after the instruction AFTER. */
rtx_insn *
-emit_debug_insn_after_noloc (rtx x, rtx after)
+emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
{
return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
}
and output it after the insn AFTER. */
rtx_barrier *
-emit_barrier_after (rtx after)
+emit_barrier_after (rtx_insn *after)
{
rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
/* Emit the label LABEL after the insn AFTER. */
rtx_insn *
-emit_label_after (rtx label, rtx_insn *after)
+emit_label_after (rtx_insn *label, rtx_insn *after)
{
gcc_checking_assert (INSN_UID (label) == 0);
INSN_UID (label) = cur_insn_uid++;
add_insn_after (label, after, NULL);
- return as_a <rtx_insn *> (label);
+ return label;
}
\f
/* Notes require a bit of special handling: Some notes need to have their
inside basic blocks. If the caller is emitting on the basic block
boundary, do not set BLOCK_FOR_INSN on the new note. */
case NOTE_INSN_VAR_LOCATION:
- case NOTE_INSN_CALL_ARG_LOCATION:
case NOTE_INSN_EH_REGION_BEG:
case NOTE_INSN_EH_REGION_END:
return on_bb_boundary_p;
MAKE_RAW indicates how to turn PATTERN into a real insn. */
static rtx_insn *
-emit_pattern_after_setloc (rtx pattern, rtx uncast_after, int loc,
+emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
if (pattern == NULL_RTX || !loc)
any DEBUG_INSNs. */
static rtx_insn *
-emit_pattern_after (rtx pattern, rtx uncast_after, bool skip_debug_insns,
+emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns,
rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
rtx_insn *prev = after;
if (skip_debug_insns)
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_insn_raw);
}
/* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_insn_after (rtx pattern, rtx after)
+emit_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_insn_raw);
}
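+
+/* A hypothetical caller, where DST and SRC are pre-existing rtxes:
+
+ rtx_insn *insn = emit_insn_after (gen_rtx_SET (DST, SRC), after);
+
+ AFTER must now be a genuine rtx_insn *, so passing a plain rtx is a
+ compile-time error instead of a run-time safe_as_a check. */
+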
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after_setloc (pattern, after, loc, make_jump_insn_raw));
/* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_jump_insn *
-emit_jump_insn_after (rtx pattern, rtx after)
+emit_jump_insn_after (rtx pattern, rtx_insn *after)
{
return as_a <rtx_jump_insn *> (
emit_pattern_after (pattern, after, true, make_jump_insn_raw));
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
}
/* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_call_insn_after (rtx pattern, rtx after)
+emit_call_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, true, make_call_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
{
return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
}
/* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER. */
rtx_insn *
-emit_debug_insn_after (rtx pattern, rtx after)
+emit_debug_insn_after (rtx pattern, rtx_insn *after)
{
return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
}
CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before_setloc (rtx pattern, rtx uncast_before, int loc, bool insnp,
- rtx_insn *(*make_raw) (rtx))
+emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc,
+ bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = as_a <rtx_insn *> (uncast_before);
rtx_insn *first = PREV_INSN (before);
rtx_insn *last = emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
if (pattern == NULL_RTX || !loc)
INSN as opposed to a JUMP_INSN, CALL_INSN, etc. */
static rtx_insn *
-emit_pattern_before (rtx pattern, rtx uncast_before, bool skip_debug_insns,
+emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns,
bool insnp, rtx_insn *(*make_raw) (rtx))
{
- rtx_insn *before = safe_as_a <rtx_insn *> (uncast_before);
rtx_insn *next = before;
if (skip_debug_insns)
insnp, make_raw);
else
return emit_pattern_before_noloc (pattern, before,
- insnp ? before : NULL_RTX,
+ insnp ? before : NULL,
NULL, make_raw);
}
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, true,
make_insn_raw);
/* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_insn *
-emit_insn_before (rtx pattern, rtx before)
+emit_insn_before (rtx pattern, rtx_insn *before)
{
return emit_pattern_before (pattern, before, true, true, make_insn_raw);
}
/* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_jump_insn *
-emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before_setloc (pattern, before, loc, false,
/* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */
rtx_jump_insn *
-emit_jump_insn_before (rtx pattern, rtx before)
+emit_jump_insn_before (rtx pattern, rtx_insn *before)
{
return as_a <rtx_jump_insn *> (
emit_pattern_before (pattern, before, true, false,
/* Like emit_call_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_call_insn_raw);
/* Like emit_debug_insn_before_noloc, but set INSN_LOCATION according to LOC. */
rtx_insn *
-emit_debug_insn_before_setloc (rtx pattern, rtx before, int loc)
+emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
{
return emit_pattern_before_setloc (pattern, before, loc, false,
make_debug_insn_raw);
{
case REG_EQUAL:
case REG_EQUIV:
- if (!set_for_reg_notes (insn))
+ /* We need to support the REG_EQUAL on USE trick of find_reloads. */
+ if (!set_for_reg_notes (insn) && GET_CODE (PATTERN (insn)) != USE)
return NULL_RTX;
/* Don't add ASM_OPERAND REG_EQUAL/REG_EQUIV notes.
case SIMPLE_RETURN:
return orig;
case CLOBBER:
+ case CLOBBER_HIGH:
/* Share clobbers of hard registers (like cc0), but do not share pseudo reg
clobbers or clobbers of hard registers that originated as pseudos.
This is needed to allow safe register renaming. */
- if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER
- && ORIGINAL_REGNO (XEXP (orig, 0)) == REGNO (XEXP (orig, 0)))
+ if (REG_P (XEXP (orig, 0))
+ && HARD_REGISTER_NUM_P (REGNO (XEXP (orig, 0)))
+ && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (orig, 0))))
return orig;
break;
us to explicitly document why we are *not* copying a flag. */
copy = shallow_copy_rtx (orig);
- /* We do not copy the USED flag, which is used as a mark bit during
- walks over the RTL. */
- RTX_FLAG (copy, used) = 0;
-
/* We do not copy JUMP, CALL, or FRAME_RELATED for INSNs. */
if (INSN_P (orig))
{
case 't':
case 'w':
case 'i':
+ case 'p':
case 's':
case 'S':
case 'u':
crtl->emit.regno_pointer_align
= XCNEWVEC (unsigned char, crtl->emit.regno_pointer_align_length);
- regno_reg_rtx = ggc_vec_alloc<rtx> (crtl->emit.regno_pointer_align_length);
+ regno_reg_rtx
+ = ggc_cleared_vec_alloc<rtx> (crtl->emit.regno_pointer_align_length);
/* Put copies of all the hard registers into regno_reg_rtx. */
memcpy (regno_reg_rtx,
REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY;
REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY;
REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM) = STACK_BOUNDARY;
+
REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = BITS_PER_WORD;
#endif
#endif
}
-/* Generate a vector constant for mode MODE and constant value CONSTANT. */
+/* Return the value of element I of CONST_VECTOR X as a wide_int. */
-static rtx
-gen_const_vector (machine_mode mode, int constant)
+wide_int
+const_vector_int_elt (const_rtx x, unsigned int i)
{
- rtx tem;
- rtvec v;
- int units, i;
- machine_mode inner;
+ /* First handle elements that are directly encoded. */
+ machine_mode elt_mode = GET_MODE_INNER (GET_MODE (x));
+ if (i < (unsigned int) XVECLEN (x, 0))
+ return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, i), elt_mode);
- units = GET_MODE_NUNITS (mode);
- inner = GET_MODE_INNER (mode);
+ /* Identify the pattern that contains element I and work out the index of
+ the last encoded element for that pattern. */
+ unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+ unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+ unsigned int count = i / npatterns;
+ unsigned int pattern = i % npatterns;
+ unsigned int final_i = encoded_nelts - npatterns + pattern;
- gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));
+ /* If there are no steps, the final encoded value is the right one. */
+ if (!CONST_VECTOR_STEPPED_P (x))
+ return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, final_i), elt_mode);
+
+ /* Otherwise work out the value from the last two encoded elements. */
+ rtx v1 = CONST_VECTOR_ENCODED_ELT (x, final_i - npatterns);
+ rtx v2 = CONST_VECTOR_ENCODED_ELT (x, final_i);
+ wide_int diff = wi::sub (rtx_mode_t (v2, elt_mode),
+ rtx_mode_t (v1, elt_mode));
+ return wi::add (rtx_mode_t (v2, elt_mode), (count - 2) * diff);
+}
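+
+/* For example, with a single-pattern stepped encoding whose encoded
+ elements are {1, 4, 7}, representing {1, 4, 7, 10, 13, ...}, element
+ I = 4 gives NPATTERNS = 1, COUNT = 4 and FINAL_I = 2, so V1 = 4,
+ V2 = 7 and DIFF = 3, and the result is 7 + (4 - 2) * 3 = 13. */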
+
+/* Return the value of element I of CONST_VECTOR X. */
+
+rtx
+const_vector_elt (const_rtx x, unsigned int i)
+{
+ /* First handle elements that are directly encoded. */
+ if (i < (unsigned int) XVECLEN (x, 0))
+ return CONST_VECTOR_ENCODED_ELT (x, i);
- v = rtvec_alloc (units);
+ /* If there are no steps, the final encoded value is the right one. */
+ if (!CONST_VECTOR_STEPPED_P (x))
+ {
+ /* Identify the pattern that contains element I and work out the index of
+ the last encoded element for that pattern. */
+ unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+ unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+ unsigned int pattern = i % npatterns;
+ unsigned int final_i = encoded_nelts - npatterns + pattern;
+ return CONST_VECTOR_ENCODED_ELT (x, final_i);
+ }
- /* We need to call this function after we set the scalar const_tiny_rtx
- entries. */
- gcc_assert (const_tiny_rtx[constant][(int) inner]);
+ /* Otherwise work out the value from the last two encoded elements. */
+ return immed_wide_int_const (const_vector_int_elt (x, i),
+ GET_MODE_INNER (GET_MODE (x)));
+}
- for (i = 0; i < units; ++i)
- RTVEC_ELT (v, i) = const_tiny_rtx[constant][(int) inner];
+/* Return true if X is a valid element for a CONST_VECTOR of the given
+ mode. */
- tem = gen_rtx_raw_CONST_VECTOR (mode, v);
- return tem;
+bool
+valid_for_const_vector_p (machine_mode, rtx x)
+{
+ return (CONST_SCALAR_INT_P (x)
+ || CONST_DOUBLE_AS_FLOAT_P (x)
+ || CONST_FIXED_P (x));
+}
+
+/* Generate a vector constant of mode MODE in which every element has
+ value ELT. */
+
+rtx
+gen_const_vec_duplicate (machine_mode mode, rtx elt)
+{
+ rtx_vector_builder builder (mode, 1, 1);
+ builder.quick_push (elt);
+ return builder.build ();
+}
+
+/* Return a vector rtx of mode MODE in which every element has value X.
+ The result will be a constant if X is constant. */
+
+rtx
+gen_vec_duplicate (machine_mode mode, rtx x)
+{
+ if (valid_for_const_vector_p (mode, x))
+ return gen_const_vec_duplicate (mode, x);
+ return gen_rtx_VEC_DUPLICATE (mode, x);
+}
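+
+/* For example, assuming the target has V4SImode,
+ gen_vec_duplicate (V4SImode, const1_rtx) returns the CONST_VECTOR
+ {1, 1, 1, 1} via gen_const_vec_duplicate, while duplicating a
+ non-constant value such as a REG instead yields a
+ (vec_duplicate:V4SI ...) expression. */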
+
+/* A subroutine of const_vec_series_p that handles the case in which:
+
+ (GET_CODE (X) == CONST_VECTOR
+ && CONST_VECTOR_NPATTERNS (X) == 1
+ && !CONST_VECTOR_DUPLICATE_P (X))
+
+ is known to hold. */
+
+bool
+const_vec_series_p_1 (const_rtx x, rtx *base_out, rtx *step_out)
+{
+ /* Stepped sequences are only defined for integers, to avoid specifying
+ rounding behavior. */
+ if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
+ return false;
+
+ /* A non-duplicated vector with two elements can always be seen as a
+ series with a nonzero step. Longer vectors must have a stepped
+ encoding. */
+ if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
+ && !CONST_VECTOR_STEPPED_P (x))
+ return false;
+
+ /* Calculate the step between the first and second elements. */
+ scalar_mode inner = GET_MODE_INNER (GET_MODE (x));
+ rtx base = CONST_VECTOR_ELT (x, 0);
+ rtx step = simplify_binary_operation (MINUS, inner,
+ CONST_VECTOR_ENCODED_ELT (x, 1), base);
+ if (rtx_equal_p (step, CONST0_RTX (inner)))
+ return false;
+
+ /* If we have a stepped encoding, check that the step between the
+ second and third elements is the same as STEP. */
+ if (CONST_VECTOR_STEPPED_P (x))
+ {
+ rtx diff = simplify_binary_operation (MINUS, inner,
+ CONST_VECTOR_ENCODED_ELT (x, 2),
+ CONST_VECTOR_ENCODED_ELT (x, 1));
+ if (!rtx_equal_p (step, diff))
+ return false;
+ }
+
+ *base_out = base;
+ *step_out = step;
+ return true;
+}
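+
+/* For example, a V4SImode constant {0, 1, 2, 3} is encoded with one
+ pattern and encoded elements {0, 1, 2}. The step between the first
+ two elements is 1 and the step between the second and third is also
+ 1, so the function succeeds with BASE = 0 and STEP = 1. */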
+
+/* Generate a vector constant of mode MODE in which element I has
+ the value BASE + I * STEP. */
+
+rtx
+gen_const_vec_series (machine_mode mode, rtx base, rtx step)
+{
+ gcc_assert (valid_for_const_vector_p (mode, base)
+ && valid_for_const_vector_p (mode, step));
+
+ rtx_vector_builder builder (mode, 1, 3);
+ builder.quick_push (base);
+ for (int i = 1; i < 3; ++i)
+ builder.quick_push (simplify_gen_binary (PLUS, GET_MODE_INNER (mode),
+ builder[i - 1], step));
+ return builder.build ();
+}
+
+/* Generate a vector of mode MODE in which element I has the value
+ BASE + I * STEP. The result will be a constant if BASE and STEP
+ are both constants. */
+
+rtx
+gen_vec_series (machine_mode mode, rtx base, rtx step)
+{
+ if (step == const0_rtx)
+ return gen_vec_duplicate (mode, base);
+ if (valid_for_const_vector_p (mode, base)
+ && valid_for_const_vector_p (mode, step))
+ return gen_const_vec_series (mode, base, step);
+ return gen_rtx_VEC_SERIES (mode, base, step);
+}
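+
+/* A sketch of typical usage, again assuming V4SImode exists:
+ gen_vec_series (V4SImode, const0_rtx, const1_rtx) produces the
+ CONST_VECTOR {0, 1, 2, 3}; a zero STEP reduces to gen_vec_duplicate,
+ and non-constant arguments fall through to a (vec_series:V4SI ...)
+ expression. */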
+
+/* Generate a new vector constant for mode MODE and constant value
+ CONSTANT. */
+
+static rtx
+gen_const_vector (machine_mode mode, int constant)
+{
+ machine_mode inner = GET_MODE_INNER (mode);
+
+ gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));
+
+ rtx el = const_tiny_rtx[constant][(int) inner];
+ gcc_assert (el);
+
+ return gen_const_vec_duplicate (mode, el);
}
/* Generate a vector like gen_rtx_raw_CONST_VEC, but use the zero vector when
rtx
gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
{
- machine_mode inner = GET_MODE_INNER (mode);
- int nunits = GET_MODE_NUNITS (mode);
- rtx x;
- int i;
-
- /* Check to see if all of the elements have the same value. */
- x = RTVEC_ELT (v, nunits - 1);
- for (i = nunits - 2; i >= 0; i--)
- if (RTVEC_ELT (v, i) != x)
- break;
+ gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v)));
/* If the values are all the same, check to see if we can use one of the
standard constant vectors. */
- if (i == -1)
- {
- if (x == CONST0_RTX (inner))
- return CONST0_RTX (mode);
- else if (x == CONST1_RTX (inner))
- return CONST1_RTX (mode);
- else if (x == CONSTM1_RTX (inner))
- return CONSTM1_RTX (mode);
- }
+ if (rtvec_all_equal_p (v))
+ return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0));
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ unsigned int nunits = GET_NUM_ELEM (v);
+ rtx_vector_builder builder (mode, nunits, 1);
+ for (unsigned int i = 0; i < nunits; ++i)
+ builder.quick_push (RTVEC_ELT (v, i));
+ return builder.build (v);
}
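+
+/* For example, an rtvec holding four copies of (const_int 5) now goes
+ through gen_const_vec_duplicate, and {0, 1, 2, 3} goes through
+ rtx_vector_builder, which chooses a compact encoding for the
+ elements of V. */
+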
/* Initialise global register information required by all functions. */
attrs = ggc_cleared_alloc<mem_attrs> ();
attrs->align = BITS_PER_UNIT;
attrs->addrspace = ADDR_SPACE_GENERIC;
- if (mode != BLKmode)
+ if (mode != BLKmode && mode != VOIDmode)
{
attrs->size_known_p = true;
attrs->size = GET_MODE_SIZE (mode);
}
mode_mem_attrs[i] = attrs;
}
+
+ split_branch_probability = profile_probability::uninitialized ();
}
/* Initialize global machine_mode variables. */
void
init_derived_machine_modes (void)
{
- byte_mode = VOIDmode;
- word_mode = VOIDmode;
-
- for (machine_mode mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ opt_scalar_int_mode mode_iter, opt_byte_mode, opt_word_mode;
+ FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
{
+ scalar_int_mode mode = mode_iter.require ();
+
if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT
- && byte_mode == VOIDmode)
- byte_mode = mode;
+ && !opt_byte_mode.exists ())
+ opt_byte_mode = mode;
if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD
- && word_mode == VOIDmode)
- word_mode = mode;
+ && !opt_word_mode.exists ())
+ opt_word_mode = mode;
}
- ptr_mode = mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0);
+ byte_mode = opt_byte_mode.require ();
+ word_mode = opt_word_mode.require ();
+ ptr_mode = as_a <scalar_int_mode>
+ (mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0).require ());
}
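+
+/* For example, on a typical 32-bit target with 8-bit units this
+ search yields byte_mode == QImode and word_mode == SImode, and with
+ a 32-bit POINTER_SIZE, ptr_mode == SImode as well. */
+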
/* Create some permanent unique rtl objects shared between all functions. */
{
int i;
machine_mode mode;
- machine_mode double_mode;
+ scalar_float_mode double_mode;
+ opt_scalar_mode smode_iter;
/* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE,
CONST_FIXED, and memory attribute hash tables. */
#endif
const_double_htab = hash_table<const_double_hasher>::create_ggc (37);
+ if (NUM_POLY_INT_COEFFS > 1)
+ const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);
+
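+/* NUM_POLY_INT_COEFFS is 1 on most targets, in which case every
+ poly_int is a compile-time constant and no CONST_POLY_INT is ever
+ created; only targets whose modes can have runtime-variable sizes
+ define more coefficients and need this table. */
+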
const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);
reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);
/* Create the unique rtx's for certain rtx codes and operand values. */
+ /* Process stack-limiting command-line options. */
+ if (opt_fstack_limit_symbol_arg != NULL)
+ stack_limit_rtx
+ = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (opt_fstack_limit_symbol_arg));
+ if (opt_fstack_limit_register_no >= 0)
+ stack_limit_rtx = gen_rtx_REG (Pmode, opt_fstack_limit_register_no);
+
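+/* For example, -fstack-limit-symbol=__stack_limit arrives here as
+ opt_fstack_limit_symbol_arg and becomes a Pmode SYMBOL_REF, while
+ -fstack-limit-register=REG becomes a hard REG; stack-checking code
+ later compares the stack pointer against stack_limit_rtx. */
+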
/* Don't use gen_rtx_CONST_INT here since gen_rtx_CONST_INT in this case
tries to use these variables. */
for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++)
else
const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE);
- double_mode = mode_for_size (DOUBLE_TYPE_SIZE, MODE_FLOAT, 0);
+ double_mode = float_mode_for_size (DOUBLE_TYPE_SIZE).require ();
real_from_integer (&dconst0, double_mode, 0, SIGNED);
real_from_integer (&dconst1, double_mode, 1, SIGNED);
const REAL_VALUE_TYPE *const r =
(i == 0 ? &dconst0 : i == 1 ? &dconst1 : &dconst2);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
const_tiny_rtx[i][(int) mode] =
- CONST_DOUBLE_FROM_REAL_VALUE (*r, mode);
+ const_double_from_real_value (*r, mode);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_DECIMAL_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_DECIMAL_FLOAT)
const_tiny_rtx[i][(int) mode] =
- CONST_DOUBLE_FROM_REAL_VALUE (*r, mode);
+ const_double_from_real_value (*r, mode);
const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[i][(int) mode] = GEN_INT (i);
for (mode = MIN_MODE_PARTIAL_INT;
const_tiny_rtx[3][(int) VOIDmode] = constm1_rtx;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[3][(int) mode] = constm1_rtx;
+ /* For BImode, 1 and -1 are unsigned and signed interpretations
+ of the same value. */
+ const_tiny_rtx[0][(int) BImode] = const0_rtx;
+ const_tiny_rtx[1][(int) BImode] = const_true_rtx;
+ const_tiny_rtx[3][(int) BImode] = const_true_rtx;
+
for (mode = MIN_MODE_PARTIAL_INT;
mode <= MAX_MODE_PARTIAL_INT;
mode = (machine_mode)((int)(mode) + 1))
const_tiny_rtx[3][(int) mode] = constm1_rtx;
-
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_INT)
{
rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_FLOAT)
{
rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ /* As for BImode, "all 1" and "all -1" are unsigned and signed
+ interpretations of the same value. */
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
+ {
+ const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
+ const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
+ const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode];
+ }
+
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_FRACT)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = smode_iter.require ();
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_UFRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UFRACT)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = smode_iter.require ();
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_ACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_ACCUM)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = smode_iter.require ();
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_UACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UACCUM)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = smode_iter.require ();
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_UFRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UFRACT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_ACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_ACCUM)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_UACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UACCUM)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
const_tiny_rtx[0][i] = const0_rtx;
- const_tiny_rtx[0][(int) BImode] = const0_rtx;
- if (STORE_FLAG_VALUE == 1)
- const_tiny_rtx[1][(int) BImode] = const1_rtx;
-
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_POINTER_BOUNDS);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
- {
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (mode));
- const_tiny_rtx[0][mode] = immed_wide_int_const (wi_zero, mode);
- }
-
pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
which may be duplicated by the basic block reordering code. */
RTX_FRAME_RELATED_P (new_rtx) = RTX_FRAME_RELATED_P (insn);
+ /* Locate the end of existing REG_NOTES in NEW_RTX. */
+ rtx *ptail = ®_NOTES (new_rtx);
+ while (*ptail != NULL_RTX)
+ ptail = &XEXP (*ptail, 1);
+
/* Copy all REG_NOTES except REG_LABEL_OPERAND since mark_jump_label
will make them. REG_LABEL_TARGETs are created there too, but are
supposed to be sticky, so we copy them. */
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND)
{
- if (GET_CODE (link) == EXPR_LIST)
- add_reg_note (new_rtx, REG_NOTE_KIND (link),
- copy_insn_1 (XEXP (link, 0)));
- else
- add_shallow_copy_of_reg_note (new_rtx, link);
+ *ptail = duplicate_reg_note (link);
+ ptail = &XEXP (*ptail, 1);
}
INSN_CODE (new_rtx) = INSN_CODE (insn);
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno)));
}
+static GTY((deletable)) rtx
+hard_reg_clobbers_high[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Return a CLOBBER_HIGH expression for register REGNO that clobbers MODE,
+ caching into HARD_REG_CLOBBERS_HIGH. */
+rtx
+gen_hard_reg_clobber_high (machine_mode mode, unsigned int regno)
+{
+ if (hard_reg_clobbers_high[mode][regno])
+ return hard_reg_clobbers_high[mode][regno];
+ else
+ return (hard_reg_clobbers_high[mode][regno]
+ = gen_rtx_CLOBBER_HIGH (VOIDmode, gen_rtx_REG (mode, regno)));
+}
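+
+/* As with gen_hard_reg_clobber above, repeated requests for the same
+ (MODE, REGNO) pair return one cached rtx; GTY((deletable)) lets the
+ garbage collector drop the cache, which is rebuilt on demand. */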
+
location_t prologue_location;
location_t epilogue_location;
gcc_unreachable ();
}
}
+
+/* Return a constant shift amount for shifting a value of mode MODE
+ by VALUE bits. */
+
+rtx
+gen_int_shift_amount (machine_mode, poly_int64 value)
+{
+ /* Use a 64-bit mode, to avoid any truncation.
+
+ ??? Perhaps this should be automatically derived from the .md files
+ instead, or perhaps have a target hook. */
+ scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
+ ? DImode
+ : int_mode_for_size (64, 0).require ());
+ return gen_int_mode (value, shift_mode);
+}
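+
+/* For example, gen_int_shift_amount (SImode, 3) returns (const_int 3)
+ generated in DImode on the usual BITS_PER_UNIT == 8 targets, wide
+ enough that no plausible shift count is truncated. */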
+
+/* Initialize fields of rtl_data related to stack alignment. */
+
+void
+rtl_data::init_stack_alignment ()
+{
+ stack_alignment_needed = STACK_BOUNDARY;
+ max_used_stack_slot_alignment = STACK_BOUNDARY;
+ stack_alignment_estimated = 0;
+ preferred_stack_boundary = STACK_BOUNDARY;
+}
+
\f
#include "gt-emit-rtl.h"