/* Emit RTL for the GCC expander.
- Copyright (C) 1987, 1988, 1992-2012 Free Software Foundation, Inc.
+ Copyright (C) 1987-2014 Free Software Foundation, Inc.
This file is part of GCC.
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
+#include "varasm.h"
+#include "basic-block.h"
+#include "tree-eh.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
+#include "stringpool.h"
#include "expr.h"
-#include "vecprim.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "hashtab.h"
#include "insn-config.h"
#include "recog.h"
#include "bitmap.h"
-#include "basic-block.h"
-#include "ggc.h"
#include "debug.h"
#include "langhooks.h"
#include "df.h"
static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def)))
htab_t const_int_htab;
-/* A hash table storing memory attribute structures. */
-static GTY ((if_marked ("ggc_marked_p"), param_is (struct mem_attrs)))
- htab_t mem_attrs_htab;
-
/* A hash table storing register attribute structures. */
static GTY ((if_marked ("ggc_marked_p"), param_is (struct reg_attrs)))
htab_t reg_attrs_htab;
static hashval_t const_fixed_htab_hash (const void *);
static int const_fixed_htab_eq (const void *, const void *);
static rtx lookup_const_fixed (rtx);
-static hashval_t mem_attrs_htab_hash (const void *);
-static int mem_attrs_htab_eq (const void *, const void *);
static hashval_t reg_attrs_htab_hash (const void *);
static int reg_attrs_htab_eq (const void *, const void *);
static reg_attrs *get_reg_attrs (tree, int);
return fixed_identical (CONST_FIXED_VALUE (a), CONST_FIXED_VALUE (b));
}
-/* Returns a hash code for X (which is a really a mem_attrs *). */
-
-static hashval_t
-mem_attrs_htab_hash (const void *x)
-{
- const mem_attrs *const p = (const mem_attrs *) x;
-
- return (p->alias ^ (p->align * 1000)
- ^ (p->addrspace * 4000)
- ^ ((p->offset_known_p ? p->offset : 0) * 50000)
- ^ ((p->size_known_p ? p->size : 0) * 2500000)
- ^ (size_t) iterative_hash_expr (p->expr, 0));
-}
-
/* Return true if the given memory attributes are equal. */
static bool
&& operand_equal_p (p->expr, q->expr, 0))));
}
-/* Returns nonzero if the value represented by X (which is really a
- mem_attrs *) is the same as that given by Y (which is also really a
- mem_attrs *). */
-
-static int
-mem_attrs_htab_eq (const void *x, const void *y)
-{
- return mem_attrs_eq_p ((const mem_attrs *) x, (const mem_attrs *) y);
-}
-
/* Set MEM's memory attributes so that they are the same as ATTRS. */
static void
set_mem_attrs (rtx mem, mem_attrs *attrs)
{
- void **slot;
-
/* If everything is the default, we can just clear the attributes. */
if (mem_attrs_eq_p (attrs, mode_mem_attrs[(int) GET_MODE (mem)]))
{
return;
}
- slot = htab_find_slot (mem_attrs_htab, attrs, INSERT);
- if (*slot == 0)
+ if (!MEM_ATTRS (mem)
+ || !mem_attrs_eq_p (attrs, MEM_ATTRS (mem)))
{
- *slot = ggc_alloc_mem_attrs ();
- memcpy (*slot, attrs, sizeof (mem_attrs));
+ MEM_ATTRS (mem) = ggc_alloc_mem_attrs ();
+ memcpy (MEM_ATTRS (mem), attrs, sizeof (mem_attrs));
}
-
- MEM_ATTRS (mem) = (mem_attrs *) *slot;
}
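
/* A minimal sketch (not part of this change) of how a caller drives
   set_mem_attrs: copy the MEM's current attributes, adjust one field,
   and store them back.  This mirrors helpers such as set_mem_align;
   "set_mem_align_sketch" is a hypothetical name.  */

static void
set_mem_align_sketch (rtx mem, unsigned int align)
{
  struct mem_attrs attrs;

  attrs = *get_mem_attrs (mem);
  attrs.align = align;
  set_mem_attrs (mem, &attrs);
}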
/* Returns a hash code for X (which is really a reg_attrs *). */
#if !HAVE_blockage
-/* Generate an empty ASM_INPUT, which is used to block attempts to schedule
- across this insn. */
+/* Generate an empty ASM_INPUT, which is used to block attempts to schedule,
+ and to prevent register equivalences from being seen across this insn. */
rtx
gen_blockage (void)
Also don't do this when we are making new REGs in reload, since
we don't want to get confused with the real pointers. */
- if (mode == Pmode && !reload_in_progress)
+ if (mode == Pmode && !reload_in_progress && !lra_in_progress)
{
if (regno == FRAME_POINTER_REGNUM
&& (!reload_completed || frame_pointer_needed))
(subreg:SI (reg:DF) 0) isn't. */
else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
{
- if (isize != osize)
+ if (! (isize == osize
+ /* LRA can use subreg to store a floating point value in
+ an integer mode. Although the floating point and the
+ integer modes need the same number of hard registers,
+ the size of the floating point mode can be less than the
+ integer mode. LRA also uses subregs when a register must
+ be used in different modes in one insn. */
+ || lra_in_progress))
return false;
}
of a subword. A subreg does *not* perform arbitrary bit extraction.
Given that we've already checked mode/offset alignment, we only have
to check subword subregs here. */
- if (osize < UNITS_PER_WORD)
+ if (osize < UNITS_PER_WORD
+ && ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
{
enum machine_mode wmode = isize > UNITS_PER_WORD ? word_mode : imode;
unsigned int low_off = subreg_lowpart_offset (omode, wmode);
return gen_rtx_CONCAT (mode, realpart, imagpart);
}
+ /* Do not call gen_reg_rtx with uninitialized crtl. */
+ gcc_assert (crtl->emit.regno_pointer_align_length);
+
/* Make sure regno_pointer_align and regno_reg_rtx are large
enough to have an element for this pseudo reg number. */
return val;
}
+/* Return TRUE if REG's REG_EXPR is a PARM_DECL, FALSE otherwise. */
+
+bool
+reg_is_parm_p (rtx reg)
+{
+ tree decl;
+
+ gcc_assert (REG_P (reg));
+ decl = REG_EXPR (reg);
+ return (decl && TREE_CODE (decl) == PARM_DECL);
+}
+
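/* An illustrative use of reg_is_parm_p (not part of this change): walk
   the pseudo registers and count those whose REG_EXPR is a PARM_DECL.
   "count_parm_pseudos" is a hypothetical helper name.  */

static int
count_parm_pseudos (void)
{
  int count = 0;
  int i;

  for (i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    if (regno_reg_rtx[i] != NULL_RTX
        && REG_P (regno_reg_rtx[i])
        && reg_is_parm_p (regno_reg_rtx[i]))
      count++;
  return count;
}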
/* Update NEW with the same attributes as REG, but with OFFSET added
to the REG_OFFSET. */
}
else if (GET_CODE (x) == SUBREG || REG_P (x)
|| GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
- || CONST_DOUBLE_P (x) || CONST_INT_P (x))
+ || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x))
return simplify_gen_subreg (mode, x, innermode, offset);
/* Otherwise, we can't do this. */
tree bit_offset = DECL_FIELD_BIT_OFFSET (field);
if (!byte_offset
- || !host_integerp (byte_offset, 1)
- || !host_integerp (bit_offset, 1))
+ || !tree_fits_uhwi_p (byte_offset)
+ || !tree_fits_uhwi_p (bit_offset))
return -1;
- offset += tree_low_cst (byte_offset, 1);
- offset += tree_low_cst (bit_offset, 1) / BITS_PER_UNIT;
+ offset += tree_to_uhwi (byte_offset);
+ offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT;
if (inner == NULL_TREE)
{
if (objectp || TREE_CODE (t) == INDIRECT_REF || TYPE_ALIGN_OK (type))
attrs.align = MAX (attrs.align, TYPE_ALIGN (type));
- else if (TREE_CODE (t) == MEM_REF)
- {
- tree op0 = TREE_OPERAND (t, 0);
- if (TREE_CODE (op0) == ADDR_EXPR
- && (DECL_P (TREE_OPERAND (op0, 0))
- || CONSTANT_CLASS_P (TREE_OPERAND (op0, 0))))
- {
- if (DECL_P (TREE_OPERAND (op0, 0)))
- attrs.align = DECL_ALIGN (TREE_OPERAND (op0, 0));
- else if (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)))
- {
- attrs.align = TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (op0, 0)));
-#ifdef CONSTANT_ALIGNMENT
- attrs.align = CONSTANT_ALIGNMENT (TREE_OPERAND (op0, 0),
- attrs.align);
-#endif
- }
- if (TREE_INT_CST_LOW (TREE_OPERAND (t, 1)) != 0)
- {
- unsigned HOST_WIDE_INT ioff
- = TREE_INT_CST_LOW (TREE_OPERAND (t, 1));
- unsigned HOST_WIDE_INT aoff = (ioff & -ioff) * BITS_PER_UNIT;
- attrs.align = MIN (aoff, attrs.align);
- }
- }
- else
- /* ??? This isn't fully correct, we can't set the alignment from the
- type in all cases. */
- attrs.align = MAX (attrs.align, TYPE_ALIGN (type));
- }
-
- else if (TREE_CODE (t) == TARGET_MEM_REF)
- /* ??? This isn't fully correct, we can't set the alignment from the
- type in all cases. */
- attrs.align = MAX (attrs.align, TYPE_ALIGN (type));
-
/* If the size is known, we can set that. */
- if (TYPE_SIZE_UNIT (type) && host_integerp (TYPE_SIZE_UNIT (type), 1))
- {
- attrs.size_known_p = true;
- attrs.size = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
- }
+ tree new_size = TYPE_SIZE_UNIT (type);
+
+ /* The address-space is that of the type. */
+ as = TYPE_ADDR_SPACE (type);
/* If T is not a type, we may be able to deduce some more information about
the expression. */
if (! TYPE_P (t))
{
tree base;
- bool align_computed = false;
if (TREE_THIS_VOLATILE (t))
MEM_VOLATILE_P (ref) = 1;
&& TREE_STATIC (base))
MEM_READONLY_P (ref) = 1;
+ /* Address-space information is on the base object. */
if (TREE_CODE (base) == MEM_REF
|| TREE_CODE (base) == TARGET_MEM_REF)
as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (base,
else
as = TYPE_ADDR_SPACE (TREE_TYPE (base));
}
- else
- as = TYPE_ADDR_SPACE (type);
/* If this expression uses its parent's alias set, mark it such
that we won't change it. */
- if (component_uses_parent_alias_set (t))
+ if (component_uses_parent_alias_set_from (t) != NULL_TREE)
MEM_KEEP_ALIAS_SET_P (ref) = 1;
/* If this is a decl, set the attributes of the MEM from it. */
attrs.offset_known_p = true;
attrs.offset = 0;
apply_bitpos = bitpos;
- if (DECL_SIZE_UNIT (t) && host_integerp (DECL_SIZE_UNIT (t), 1))
- {
- attrs.size_known_p = true;
- attrs.size = tree_low_cst (DECL_SIZE_UNIT (t), 1);
- }
- else
- attrs.size_known_p = false;
- attrs.align = DECL_ALIGN (t);
- align_computed = true;
+ new_size = DECL_SIZE_UNIT (t);
}
- /* If this is a constant, we know the alignment. */
+ /* ??? If we end up with a constant here, do record a MEM_EXPR. */
else if (CONSTANT_CLASS_P (t))
- {
- attrs.align = TYPE_ALIGN (type);
-#ifdef CONSTANT_ALIGNMENT
- attrs.align = CONSTANT_ALIGNMENT (t, attrs.align);
-#endif
- align_computed = true;
- }
+ ;
- /* If this is a field reference and not a bit-field, record it. */
- /* ??? There is some information that can be gleaned from bit-fields,
- such as the word offset in the structure that might be modified.
- But skip it for now. */
- else if (TREE_CODE (t) == COMPONENT_REF
- && ! DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
+ /* If this is a field reference, record it. */
+ else if (TREE_CODE (t) == COMPONENT_REF)
{
attrs.expr = t;
attrs.offset_known_p = true;
attrs.offset = 0;
apply_bitpos = bitpos;
- /* ??? Any reason the field size would be different than
- the size we got from the type? */
+ if (DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
+ new_size = DECL_SIZE_UNIT (TREE_OPERAND (t, 1));
}
/* If this is an array reference, look for an outer field reference. */
}
while (TREE_CODE (t2) == ARRAY_REF);
- if (DECL_P (t2))
+ if (DECL_P (t2)
+ || TREE_CODE (t2) == COMPONENT_REF)
{
attrs.expr = t2;
attrs.offset_known_p = false;
- if (host_integerp (off_tree, 1))
+ if (tree_fits_uhwi_p (off_tree))
{
- HOST_WIDE_INT ioff = tree_low_cst (off_tree, 1);
- HOST_WIDE_INT aoff = (ioff & -ioff) * BITS_PER_UNIT;
- attrs.align = DECL_ALIGN (t2);
- if (aoff && (unsigned HOST_WIDE_INT) aoff < attrs.align)
- attrs.align = aoff;
- align_computed = true;
attrs.offset_known_p = true;
- attrs.offset = ioff;
+ attrs.offset = tree_to_uhwi (off_tree);
apply_bitpos = bitpos;
}
}
- else if (TREE_CODE (t2) == COMPONENT_REF)
- {
- attrs.expr = t2;
- attrs.offset_known_p = false;
- if (host_integerp (off_tree, 1))
- {
- attrs.offset_known_p = true;
- attrs.offset = tree_low_cst (off_tree, 1);
- apply_bitpos = bitpos;
- }
- /* ??? Any reason the field size would be different than
- the size we got from the type? */
- }
+ /* Else do not record a MEM_EXPR. */
}
/* If this is an indirect reference, record it. */
apply_bitpos = bitpos;
}
- if (!align_computed)
- {
- unsigned int obj_align = get_object_alignment (t);
- attrs.align = MAX (attrs.align, obj_align);
- }
+ /* Compute the alignment. */
+ unsigned int obj_align;
+ unsigned HOST_WIDE_INT obj_bitpos;
+ get_object_alignment_1 (t, &obj_align, &obj_bitpos);
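+ /* The MEM starts BITPOS bits before T; if the adjusted offset is
+ nonzero modulo OBJ_ALIGN, its lowest set bit (obj_bitpos & -obj_bitpos)
+ is the best alignment that can be guaranteed. */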
+ obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
+ if (obj_bitpos != 0)
+ obj_align = (obj_bitpos & -obj_bitpos);
+ attrs.align = MAX (attrs.align, obj_align);
+ }
+
+ if (tree_fits_uhwi_p (new_size))
+ {
+ attrs.size_known_p = true;
+ attrs.size = tree_to_uhwi (new_size);
}
- else
- as = TYPE_ADDR_SPACE (type);
/* If we modified OFFSET based on T, then subtract the outstanding
bit position offset. Similarly, increase the size of the accessed
&& (!validate || memory_address_addr_space_p (mode, addr, as)))
return memref;
- if (validate)
+ /* Don't validate the address for LRA. LRA can make the address valid
+ by itself in the most efficient way. */
+ if (validate && !lra_in_progress)
{
if (reload_in_progress || reload_completed)
gcc_assert (memory_address_addr_space_p (mode, addr, as));
If ADJUST_OBJECT is zero, the underlying object associated with the
memory reference is left unchanged and the caller is responsible for
dealing with it. Otherwise, if the new memory reference is outside
- the underlying object, even partially, then the object is dropped. */
+ the underlying object, even partially, then the object is dropped.
+ SIZE, if nonzero, is the size of an access in cases where MODE
+ has no inherent size. */
rtx
adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
- int validate, int adjust_address, int adjust_object)
+ int validate, int adjust_address, int adjust_object,
+ HOST_WIDE_INT size)
{
rtx addr = XEXP (memref, 0);
rtx new_rtx;
enum machine_mode address_mode;
int pbits;
- struct mem_attrs attrs, *defattrs;
+ struct mem_attrs attrs = *get_mem_attrs (memref), *defattrs;
unsigned HOST_WIDE_INT max_align;
+#ifdef POINTERS_EXTEND_UNSIGNED
+ enum machine_mode pointer_mode
+ = targetm.addr_space.pointer_mode (attrs.addrspace);
+#endif
- attrs = *get_mem_attrs (memref);
+ /* VOIDmode means no mode change for change_address_1. */
+ if (mode == VOIDmode)
+ mode = GET_MODE (memref);
+
+ /* Take the size of non-BLKmode accesses from the mode. */
+ defattrs = mode_mem_attrs[(int) mode];
+ if (defattrs->size_known_p)
+ size = defattrs->size;
/* If there are no changes, just return the original memory reference. */
if (mode == GET_MODE (memref) && !offset
+ && (size == 0 || (attrs.size_known_p && attrs.size == size))
&& (!validate || memory_address_addr_space_p (mode, addr,
attrs.addrspace)))
return memref;
addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
plus_constant (address_mode,
XEXP (addr, 1), offset));
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If MEMREF is a ZERO_EXTEND from pointer_mode and the offset is valid
+ in that mode, we merge it into the ZERO_EXTEND. We take advantage of
+ the fact that pointers are not allowed to overflow. */
+ else if (POINTERS_EXTEND_UNSIGNED > 0
+ && GET_CODE (addr) == ZERO_EXTEND
+ && GET_MODE (XEXP (addr, 0)) == pointer_mode
+ && trunc_int_for_mode (offset, pointer_mode) == offset)
+ addr = gen_rtx_ZERO_EXTEND (address_mode,
+ plus_constant (pointer_mode,
+ XEXP (addr, 0), offset));
+#endif
else
addr = plus_constant (address_mode, addr, offset);
}
attrs.align = MIN (attrs.align, max_align);
}
- /* We can compute the size in a number of ways. */
- defattrs = mode_mem_attrs[(int) GET_MODE (new_rtx)];
- if (defattrs->size_known_p)
+ if (size)
{
/* Drop the object if the new right end is not within its bounds. */
- if (adjust_object && (offset + defattrs->size) > attrs.size)
+ if (adjust_object && (offset + size) > attrs.size)
{
attrs.expr = NULL_TREE;
attrs.alias = 0;
}
attrs.size_known_p = true;
- attrs.size = defattrs->size;
+ attrs.size = size;
}
else if (attrs.size_known_p)
{
+ gcc_assert (!adjust_object);
attrs.size -= offset;
- /* ??? The store_by_pieces machinery generates negative sizes. */
- gcc_assert (!(adjust_object && attrs.size < 0));
+ /* ??? The store_by_pieces machinery generates negative sizes,
+ so don't assert for that here. */
}
set_mem_attrs (new_rtx, &attrs);
HOST_WIDE_INT offset, int validate)
{
memref = change_address_1 (memref, VOIDmode, addr, validate);
- return adjust_address_1 (memref, mode, offset, validate, 0, 0);
+ return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
}
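
/* A minimal sketch (not part of this change) of the new SIZE argument to
   adjust_address_1: for a BLKmode access, mode_mem_attrs carries no size,
   so a caller can pass the access size explicitly.  "get_blk_piece" is a
   hypothetical helper, and 16 is an assumed access size in bytes.  */

static rtx
get_blk_piece (rtx memref, HOST_WIDE_INT offset)
{
  return adjust_address_1 (memref, BLKmode, offset, 1, 1, 0, 16);
}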
/* Return a memory reference like MEMREF, but whose address is changed by
rtx
widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset)
{
- rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0);
+ rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
struct mem_attrs attrs;
unsigned int size = GET_MODE_SIZE (mode);
&& attrs.offset >= 0)
break;
- if (! host_integerp (offset, 1))
+ if (! tree_fits_uhwi_p (offset))
{
attrs.expr = NULL_TREE;
break;
}
attrs.expr = TREE_OPERAND (attrs.expr, 0);
- attrs.offset += tree_low_cst (offset, 1);
- attrs.offset += (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
+ attrs.offset += tree_to_uhwi (offset);
+ attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
/ BITS_PER_UNIT);
}
/* Similarly for the decl. */
case RETURN:
case SIMPLE_RETURN:
case SCRATCH:
- return;
/* A SCRATCH must be shared because each represents a distinct value. */
+ return;
case CLOBBER:
- if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
+ /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ clobbers or clobbers of hard registers that originated as pseudos.
+ This is needed to allow safe register renaming. */
+ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && ORIGINAL_REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 0)))
return;
break;
return;
}
-/* Go through all the RTL insn bodies and check that there is no unexpected
- sharing in between the subexpressions. */
+/* Reset used-flags for INSN. */
-DEBUG_FUNCTION void
-verify_rtl_sharing (void)
+static void
+reset_insn_used_flags (rtx insn)
{
- rtx p;
+ gcc_assert (INSN_P (insn));
+ reset_used_flags (PATTERN (insn));
+ reset_used_flags (REG_NOTES (insn));
+ if (CALL_P (insn))
+ reset_used_flags (CALL_INSN_FUNCTION_USAGE (insn));
+}
- timevar_push (TV_VERIFY_RTL_SHARING);
+/* Go through all the RTL insn bodies and clear all the USED bits. */
+
+static void
+reset_all_used_flags (void)
+{
+ rtx p;
for (p = get_insns (); p; p = NEXT_INSN (p))
if (INSN_P (p))
{
- reset_used_flags (PATTERN (p));
- reset_used_flags (REG_NOTES (p));
- if (CALL_P (p))
- reset_used_flags (CALL_INSN_FUNCTION_USAGE (p));
- if (GET_CODE (PATTERN (p)) == SEQUENCE)
+ rtx pat = PATTERN (p);
+ if (GET_CODE (pat) != SEQUENCE)
+ reset_insn_used_flags (p);
+ else
{
- int i;
- rtx q, sequence = PATTERN (p);
-
- for (i = 0; i < XVECLEN (sequence, 0); i++)
- {
- q = XVECEXP (sequence, 0, i);
- gcc_assert (INSN_P (q));
- reset_used_flags (PATTERN (q));
- reset_used_flags (REG_NOTES (q));
- if (CALL_P (q))
- reset_used_flags (CALL_INSN_FUNCTION_USAGE (q));
- }
+ gcc_assert (REG_NOTES (p) == NULL);
+ for (int i = 0; i < XVECLEN (pat, 0); i++)
+ reset_insn_used_flags (XVECEXP (pat, 0, i));
}
}
+}
+
+/* Verify sharing in INSN. */
+
+static void
+verify_insn_sharing (rtx insn)
+{
+ gcc_assert (INSN_P (insn));
+ reset_used_flags (PATTERN (insn));
+ reset_used_flags (REG_NOTES (insn));
+ if (CALL_P (insn))
+ reset_used_flags (CALL_INSN_FUNCTION_USAGE (insn));
+}
+
+/* Go through all the RTL insn bodies and check that there is no unexpected
+ sharing in between the subexpressions. */
+
+DEBUG_FUNCTION void
+verify_rtl_sharing (void)
+{
+ rtx p;
+
+ timevar_push (TV_VERIFY_RTL_SHARING);
+
+ reset_all_used_flags ();
for (p = get_insns (); p; p = NEXT_INSN (p))
if (INSN_P (p))
{
- verify_rtx_sharing (PATTERN (p), p);
- verify_rtx_sharing (REG_NOTES (p), p);
- if (CALL_P (p))
- verify_rtx_sharing (CALL_INSN_FUNCTION_USAGE (p), p);
+ rtx pat = PATTERN (p);
+ if (GET_CODE (pat) != SEQUENCE)
+ verify_insn_sharing (p);
+ else
+ for (int i = 0; i < XVECLEN (pat, 0); i++)
+ verify_insn_sharing (XVECEXP (pat, 0, i));
}
+ reset_all_used_flags ();
+
timevar_pop (TV_VERIFY_RTL_SHARING);
}
/* A SCRATCH must be shared because each represents a distinct value. */
return;
case CLOBBER:
- if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
+ /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ clobbers or clobbers of hard registers that originated as pseudos.
+ This is needed to allow safe register renaming. */
+ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && ORIGINAL_REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 0)))
return;
break;
active_insn_p (const_rtx insn)
{
return (CALL_P (insn) || JUMP_P (insn)
+ || JUMP_TABLE_DATA_P (insn) /* FIXME */
|| (NONJUMP_INSN_P (insn)
&& (! reload_completed
|| (GET_CODE (PATTERN (insn)) != USE
return insn;
}
-
-/* Return the next CODE_LABEL after the insn INSN, or 0 if there is none. */
-
-rtx
-next_label (rtx insn)
-{
- while (insn)
- {
- insn = NEXT_INSN (insn);
- if (insn == 0 || LABEL_P (insn))
- break;
- }
-
- return insn;
-}
-
-/* Return the last label to mark the same position as LABEL. Return LABEL
- itself if it is null or any return rtx. */
-
-rtx
-skip_consecutive_labels (rtx label)
-{
- rtx insn;
-
- if (label && ANY_RETURN_P (label))
- return label;
-
- for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn))
- if (LABEL_P (insn))
- label = insn;
-
- return label;
-}
\f
#ifdef HAVE_cc0
-/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
- and REG_CC_USER notes so we can find it. */
-
-void
-link_cc0_insns (rtx insn)
-{
- rtx user = next_nonnote_insn (insn);
-
- if (NONJUMP_INSN_P (user) && GET_CODE (PATTERN (user)) == SEQUENCE)
- user = XVECEXP (PATTERN (user), 0, 0);
-
- add_reg_note (user, REG_CC_SETTER, insn);
- add_reg_note (insn, REG_CC_USER, user);
-}
-
/* Return the next insn that uses CC0 after INSN, which is assumed to
set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter
applied to the result of this function should yield INSN).
if (any_condjump_p (trial)
&& (note = find_reg_note (trial, REG_BR_PROB, 0)))
- split_branch_probability = INTVAL (XEXP (note, 0));
+ split_branch_probability = XINT (note, 0);
probability = split_branch_probability;
seq = split_insns (pat, trial);
is responsible for this step using
the split_branch_probability variable. */
gcc_assert (njumps == 1);
- add_reg_note (insn, REG_BR_PROB, GEN_INT (probability));
+ add_int_reg_note (insn, REG_BR_PROB, probability);
}
}
}
break;
case REG_NON_LOCAL_GOTO:
+ case REG_CROSSING_JUMP:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (JUMP_P (insn))
return insn;
}
+
+/* Like `make_insn_raw' but make a NOTE instead of an insn. */
+
+static rtx
+make_note_raw (enum insn_note subtype)
+{
+ /* Some notes are never created this way at all. These notes are
+ only created by patching out insns. */
+ gcc_assert (subtype != NOTE_INSN_DELETED_LABEL
+ && subtype != NOTE_INSN_DELETED_DEBUG_LABEL);
+
+ rtx note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_KIND (note) = subtype;
+ BLOCK_FOR_INSN (note) = NULL;
+ memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
+ return note;
+}
\f
+/* Add INSN to the end of the doubly-linked list, between PREV and NEXT.
+ INSN may be any object that can appear in the chain: INSN_P and NOTE_P objects,
+ but also BARRIERs and JUMP_TABLE_DATAs. PREV and NEXT may be NULL. */
+
+static inline void
+link_insn_into_chain (rtx insn, rtx prev, rtx next)
+{
+ PREV_INSN (insn) = prev;
+ NEXT_INSN (insn) = next;
+ if (prev != NULL)
+ {
+ NEXT_INSN (prev) = insn;
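+ /* If PREV ends a delay-slot SEQUENCE, the last insn inside the
+ sequence must also name INSN as its NEXT_INSN. */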
+ if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (prev);
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
+ }
+ }
+ if (next != NULL)
+ {
+ PREV_INSN (next) = insn;
+ if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = insn;
+ }
+
+ if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (insn);
+ PREV_INSN (XVECEXP (sequence, 0, 0)) = prev;
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = next;
+ }
+}
+
/* Add INSN to the end of the doubly-linked list.
INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE. */
void
add_insn (rtx insn)
{
- PREV_INSN (insn) = get_last_insn();
- NEXT_INSN (insn) = 0;
-
- if (NULL != get_last_insn())
- NEXT_INSN (get_last_insn ()) = insn;
-
+ rtx prev = get_last_insn ();
+ link_insn_into_chain (insn, prev, NULL);
if (NULL == get_insns ())
set_first_insn (insn);
-
set_last_insn (insn);
}
-/* Add INSN into the doubly-linked list after insn AFTER. This and
- the next should be the only functions called to insert an insn once
- delay slots have been filled since only they know how to update a
- SEQUENCE. */
+/* Add INSN into the doubly-linked list after insn AFTER. */
-void
-add_insn_after (rtx insn, rtx after, basic_block bb)
+static void
+add_insn_after_nobb (rtx insn, rtx after)
{
rtx next = NEXT_INSN (after);
gcc_assert (!optimize || !INSN_DELETED_P (after));
- NEXT_INSN (insn) = next;
- PREV_INSN (insn) = after;
+ link_insn_into_chain (insn, after, next);
- if (next)
+ if (next == NULL)
{
- PREV_INSN (next) = insn;
- if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
- PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = insn;
+ if (get_last_insn () == after)
+ set_last_insn (insn);
+ else
+ {
+ struct sequence_stack *stack = seq_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (after == stack->last)
+ {
+ stack->last = insn;
+ break;
+ }
+ }
}
- else if (get_last_insn () == after)
- set_last_insn (insn);
- else
+}
+
+/* Add INSN into the doubly-linked list before insn BEFORE. */
+
+static void
+add_insn_before_nobb (rtx insn, rtx before)
+{
+ rtx prev = PREV_INSN (before);
+
+ gcc_assert (!optimize || !INSN_DELETED_P (before));
+
+ link_insn_into_chain (insn, prev, before);
+
+ if (prev == NULL)
{
- struct sequence_stack *stack = seq_stack;
- /* Scan all pending sequences too. */
- for (; stack; stack = stack->next)
- if (after == stack->last)
- {
- stack->last = insn;
- break;
- }
+ if (get_insns () == before)
+ set_first_insn (insn);
+ else
+ {
+ struct sequence_stack *stack = seq_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (before == stack->first)
+ {
+ stack->first = insn;
+ break;
+ }
- gcc_assert (stack);
+ gcc_assert (stack);
+ }
}
+}
+
+/* Like add_insn_after_nobb, but try to set BLOCK_FOR_INSN.
+ If BB is NULL, an attempt is made to infer the bb from AFTER.
+ This and the next function should be the only functions called
+ to insert an insn once delay slots have been filled since only
+ they know how to update a SEQUENCE. */
+
+void
+add_insn_after (rtx insn, rtx after, basic_block bb)
+{
+ add_insn_after_nobb (insn, after);
if (!BARRIER_P (after)
&& !BARRIER_P (insn)
&& (bb = BLOCK_FOR_INSN (after)))
&& !NOTE_INSN_BASIC_BLOCK_P (insn))
BB_END (bb) = insn;
}
-
- NEXT_INSN (after) = insn;
- if (NONJUMP_INSN_P (after) && GET_CODE (PATTERN (after)) == SEQUENCE)
- {
- rtx sequence = PATTERN (after);
- NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
- }
}
-/* Add INSN into the doubly-linked list before insn BEFORE. This and
- the previous should be the only functions called to insert an insn
- once delay slots have been filled since only they know how to
- update a SEQUENCE. If BB is NULL, an attempt is made to infer the
- bb from before. */
+/* Like add_insn_before_nobb, but try to set BLOCK_FOR_INSN.
+ If BB is NULL, an attempt is made to infer the bb from BEFORE.
+
+ This and the previous function should be the only functions called
+ to insert an insn once delay slots have been filled since only
+ they know how to update a SEQUENCE. */
void
add_insn_before (rtx insn, rtx before, basic_block bb)
{
- rtx prev = PREV_INSN (before);
-
- gcc_assert (!optimize || !INSN_DELETED_P (before));
-
- PREV_INSN (insn) = prev;
- NEXT_INSN (insn) = before;
-
- if (prev)
- {
- NEXT_INSN (prev) = insn;
- if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
- {
- rtx sequence = PATTERN (prev);
- NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
- }
- }
- else if (get_insns () == before)
- set_first_insn (insn);
- else
- {
- struct sequence_stack *stack = seq_stack;
- /* Scan all pending sequences too. */
- for (; stack; stack = stack->next)
- if (before == stack->first)
- {
- stack->first = insn;
- break;
- }
-
- gcc_assert (stack);
- }
+ add_insn_before_nobb (insn, before);
if (!bb
&& !BARRIER_P (before)
|| BARRIER_P (insn)
|| NOTE_INSN_BASIC_BLOCK_P (insn));
}
-
- PREV_INSN (before) = insn;
- if (NONJUMP_INSN_P (before) && GET_CODE (PATTERN (before)) == SEQUENCE)
- PREV_INSN (XVECEXP (PATTERN (before), 0, 0)) = insn;
}
-
/* Replace INSN with a deleted instruction note. */
void
set_insn_deleted (rtx insn)
{
- df_insn_delete (BLOCK_FOR_INSN (insn), INSN_UID (insn));
+ if (INSN_P (insn))
+ df_insn_delete (insn);
PUT_CODE (insn, NOTE);
NOTE_KIND (insn) = NOTE_INSN_DELETED;
}
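
/* An illustrative sketch (not part of this change) of the contract
   documented below for remove_insn: to move an insn, unlink it, clear
   the chain pointers, and re-insert it.  "move_insn_after_sketch" is a
   hypothetical name.  */

static void
move_insn_after_sketch (rtx insn, rtx after)
{
  remove_insn (insn);           /* Unlink; DF info is kept.  */
  PREV_INSN (insn) = NULL_RTX;  /* Required before re-insertion.  */
  NEXT_INSN (insn) = NULL_RTX;
  add_insn_after (insn, after, NULL);
}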
-/* Remove an insn from its doubly-linked list. This function knows how
- to handle sequences. */
+/* Unlink INSN from the insn chain.
+
+ This function knows how to handle sequences.
+
+ This function does not invalidate data flow information associated with
+ INSN (i.e. does not call df_insn_delete). That makes this function
+ usable for only disconnecting an insn from the chain, and re-emitting
+ it elsewhere later.
+
+ To later insert INSN elsewhere in the insn chain via add_insn and
+ similar functions, PREV_INSN and NEXT_INSN must be nullified by
+ the caller. Nullifying them here breaks many insn chain walks.
+
+ To really delete an insn and related DF information, use delete_insn. */
+
void
remove_insn (rtx insn)
{
rtx prev = PREV_INSN (insn);
basic_block bb;
- /* Later in the code, the block will be marked dirty. */
- df_insn_delete (NULL, INSN_UID (insn));
-
if (prev)
{
NEXT_INSN (prev) = next;
gcc_assert (stack);
}
+
+ /* Fix up basic block boundaries, if necessary. */
if (!BARRIER_P (insn)
&& (bb = BLOCK_FOR_INSN (insn)))
{
- if (NONDEBUG_INSN_P (insn))
- df_set_bb_dirty (bb);
if (BB_HEAD (bb) == insn)
{
/* Never ever delete the basic block note without deleting whole
NEXT_INSN (to) = NEXT_INSN (after);
PREV_INSN (from) = after;
NEXT_INSN (after) = from;
- if (after == get_last_insn())
+ if (after == get_last_insn ())
set_last_insn (to);
}
add_insn_before (label, before, NULL);
return label;
}
-
-/* Emit a note of subtype SUBTYPE before the insn BEFORE. */
-
-rtx
-emit_note_before (enum insn_note subtype, rtx before)
-{
- rtx note = rtx_alloc (NOTE);
- INSN_UID (note) = cur_insn_uid++;
- NOTE_KIND (note) = subtype;
- BLOCK_FOR_INSN (note) = NULL;
- memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
-
- add_insn_before (note, before, NULL);
- return note;
-}
\f
/* Helper for emit_insn_after, handles lists of instructions
efficiently. */
if (after_after)
PREV_INSN (after_after) = last;
- if (after == get_last_insn())
+ if (after == get_last_insn ())
set_last_insn (last);
return last;
add_insn_after (label, after, NULL);
return label;
}
+\f
+/* Notes require a bit of special handling: Some notes need to have their
+ BLOCK_FOR_INSN set, others should never have it set, and some should
+ have it set or cleared depending on the context. */
+
+/* Return true iff a note of kind SUBTYPE should be emitted with routines
+ that never set BLOCK_FOR_INSN on NOTE. BB_BOUNDARY is true if the
+ caller is asked to emit a note before BB_HEAD, or after BB_END. */
+
+static bool
+note_outside_basic_block_p (enum insn_note subtype, bool on_bb_boundary_p)
+{
+ switch (subtype)
+ {
+ /* NOTE_INSN_SWITCH_TEXT_SECTIONS only appears between basic blocks. */
+ case NOTE_INSN_SWITCH_TEXT_SECTIONS:
+ return true;
+
+ /* Notes for var tracking and EH region markers can appear between or
+ inside basic blocks. If the caller is emitting on the basic block
+ boundary, do not set BLOCK_FOR_INSN on the new note. */
+ case NOTE_INSN_VAR_LOCATION:
+ case NOTE_INSN_CALL_ARG_LOCATION:
+ case NOTE_INSN_EH_REGION_BEG:
+ case NOTE_INSN_EH_REGION_END:
+ return on_bb_boundary_p;
+
+ /* Otherwise, BLOCK_FOR_INSN must be set. */
+ default:
+ return false;
+ }
+}
/* Emit a note of subtype SUBTYPE after the insn AFTER. */
rtx
emit_note_after (enum insn_note subtype, rtx after)
{
- rtx note = rtx_alloc (NOTE);
- INSN_UID (note) = cur_insn_uid++;
- NOTE_KIND (note) = subtype;
- BLOCK_FOR_INSN (note) = NULL;
- memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
- add_insn_after (note, after, NULL);
+ rtx note = make_note_raw (subtype);
+ basic_block bb = BARRIER_P (after) ? NULL : BLOCK_FOR_INSN (after);
+ bool on_bb_boundary_p = (bb != NULL && BB_END (bb) == after);
+
+ if (note_outside_basic_block_p (subtype, on_bb_boundary_p))
+ add_insn_after_nobb (note, after);
+ else
+ add_insn_after (note, after, bb);
+ return note;
+}
+
+/* Emit a note of subtype SUBTYPE before the insn BEFORE. */
+
+rtx
+emit_note_before (enum insn_note subtype, rtx before)
+{
+ rtx note = make_note_raw (subtype);
+ basic_block bb = BARRIER_P (before) ? NULL : BLOCK_FOR_INSN (before);
+ bool on_bb_boundary_p = (bb != NULL && BB_HEAD (bb) == before);
+
+ if (note_outside_basic_block_p (subtype, on_bb_boundary_p))
+ add_insn_before_nobb (note, before);
+ else
+ add_insn_before (note, before, bb);
return note;
}
\f
rtx
emit_insn (rtx x)
{
- rtx last = get_last_insn();
+ rtx last = get_last_insn ();
rtx insn;
if (x == NULL_RTX)
break;
#ifdef ENABLE_RTL_CHECKING
+ case JUMP_TABLE_DATA:
case SEQUENCE:
gcc_unreachable ();
break;
rtx
emit_debug_insn (rtx x)
{
- rtx last = get_last_insn();
+ rtx last = get_last_insn ();
rtx insn;
if (x == NULL_RTX)
break;
#ifdef ENABLE_RTL_CHECKING
+ case JUMP_TABLE_DATA:
case SEQUENCE:
gcc_unreachable ();
break;
break;
#ifdef ENABLE_RTL_CHECKING
+ case JUMP_TABLE_DATA:
case SEQUENCE:
gcc_unreachable ();
break;
#ifdef ENABLE_RTL_CHECKING
case SEQUENCE:
+ case JUMP_TABLE_DATA:
gcc_unreachable ();
break;
#endif
return label;
}
+/* Make an insn of code JUMP_TABLE_DATA
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_jump_table_data (rtx table)
+{
+ rtx jump_table_data = rtx_alloc (JUMP_TABLE_DATA);
+ INSN_UID (jump_table_data) = cur_insn_uid++;
+ PATTERN (jump_table_data) = table;
+ BLOCK_FOR_INSN (jump_table_data) = NULL;
+ add_insn (jump_table_data);
+ return jump_table_data;
+}
+
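/* An illustrative use of emit_jump_table_data (not part of this change):
   build a trivial two-entry dispatch table.  "emit_two_entry_table" is a
   hypothetical helper; LAB1 and LAB2 are assumed to be existing
   CODE_LABELs.  */

static rtx
emit_two_entry_table (rtx lab1, rtx lab2)
{
  rtvec labels = gen_rtvec (2,
                            gen_rtx_LABEL_REF (Pmode, lab1),
                            gen_rtx_LABEL_REF (Pmode, lab2));
  return emit_jump_table_data (gen_rtx_ADDR_VEC (CASE_VECTOR_MODE, labels));
}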
/* Make an insn of code BARRIER
and add it to the end of the doubly-linked list. */
rtx
emit_note_copy (rtx orig)
{
- rtx note;
-
- note = rtx_alloc (NOTE);
-
- INSN_UID (note) = cur_insn_uid++;
+ enum insn_note kind = (enum insn_note) NOTE_KIND (orig);
+ rtx note = make_note_raw (kind);
NOTE_DATA (note) = NOTE_DATA (orig);
- NOTE_KIND (note) = NOTE_KIND (orig);
- BLOCK_FOR_INSN (note) = NULL;
add_insn (note);
-
return note;
}
rtx
emit_note (enum insn_note kind)
{
- rtx note;
-
- note = rtx_alloc (NOTE);
- INSN_UID (note) = cur_insn_uid++;
- NOTE_KIND (note) = kind;
- memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
- BLOCK_FOR_INSN (note) = NULL;
+ rtx note = make_note_raw (kind);
add_insn (note);
return note;
}
case SIMPLE_RETURN:
return orig;
case CLOBBER:
- if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER)
+ /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
+ clobbers or clobbers of hard registers that originated as pseudos.
+ This is needed to allow safe register renaming. */
+ if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER
+ && ORIGINAL_REGNO (XEXP (orig, 0)) == REGNO (XEXP (orig, 0)))
return orig;
break;
const_fixed_htab = htab_create_ggc (37, const_fixed_htab_hash,
const_fixed_htab_eq, NULL);
- mem_attrs_htab = htab_create_ggc (37, mem_attrs_htab_hash,
- mem_attrs_htab_eq, NULL);
reg_attrs_htab = htab_create_ggc (37, reg_attrs_htab_hash,
reg_attrs_htab_eq, NULL);
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- FCONST0(mode).data.high = 0;
- FCONST0(mode).data.low = 0;
- FCONST0(mode).mode = mode;
+ FCONST0 (mode).data.high = 0;
+ FCONST0 (mode).data.low = 0;
+ FCONST0 (mode).mode = mode;
const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
FCONST0 (mode), mode);
}
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- FCONST0(mode).data.high = 0;
- FCONST0(mode).data.low = 0;
- FCONST0(mode).mode = mode;
+ FCONST0 (mode).data.high = 0;
+ FCONST0 (mode).data.low = 0;
+ FCONST0 (mode).mode = mode;
const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
FCONST0 (mode), mode);
}
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- FCONST0(mode).data.high = 0;
- FCONST0(mode).data.low = 0;
- FCONST0(mode).mode = mode;
+ FCONST0 (mode).data.high = 0;
+ FCONST0 (mode).data.low = 0;
+ FCONST0 (mode).mode = mode;
const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
FCONST0 (mode), mode);
/* We store the value 1. */
- FCONST1(mode).data.high = 0;
- FCONST1(mode).data.low = 0;
- FCONST1(mode).mode = mode;
- FCONST1(mode).data
+ FCONST1 (mode).data.high = 0;
+ FCONST1 (mode).data.low = 0;
+ FCONST1 (mode).mode = mode;
+ FCONST1 (mode).data
= double_int_one.lshift (GET_MODE_FBIT (mode),
HOST_BITS_PER_DOUBLE_INT,
SIGNED_FIXED_POINT_MODE_P (mode));
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- FCONST0(mode).data.high = 0;
- FCONST0(mode).data.low = 0;
- FCONST0(mode).mode = mode;
+ FCONST0 (mode).data.high = 0;
+ FCONST0 (mode).data.low = 0;
+ FCONST0 (mode).mode = mode;
const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
FCONST0 (mode), mode);
/* We store the value 1. */
- FCONST1(mode).data.high = 0;
- FCONST1(mode).data.low = 0;
- FCONST1(mode).mode = mode;
- FCONST1(mode).data
+ FCONST1 (mode).data.high = 0;
+ FCONST1 (mode).data.low = 0;
+ FCONST1 (mode).mode = mode;
+ FCONST1 (mode).data
= double_int_one.lshift (GET_MODE_FBIT (mode),
HOST_BITS_PER_DOUBLE_INT,
SIGNED_FIXED_POINT_MODE_P (mode));
add_reg_note (new_rtx, REG_NOTE_KIND (link),
copy_insn_1 (XEXP (link, 0)));
else
- add_reg_note (new_rtx, REG_NOTE_KIND (link), XEXP (link, 0));
+ add_shallow_copy_of_reg_note (new_rtx, link);
}
INSN_CODE (new_rtx) = INSN_CODE (insn);
-/* Hold current location information and last location information, so the
- datastructures are built lazily only when some instructions in given
- place are needed. */
+/* Hold current location information, so the datastructures are built
+ lazily only when some instructions in a given place are needed. */
-static location_t curr_location, last_location;
+static location_t curr_location;
/* Allocate insn location datastructure. */
void
{
prologue_location = epilogue_location = 0;
curr_location = UNKNOWN_LOCATION;
- last_location = UNKNOWN_LOCATION;
}
/* At the end of emit stage, clear current location. */
bool
need_atomic_barrier_p (enum memmodel model, bool pre)
{
- switch (model)
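+ /* The model operand may carry target-specific flag bits (e.g. x86 HLE
+ hints) above MEMMODEL_MASK; strip them to compare the base model. */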
+ switch (model & MEMMODEL_MASK)
{
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME: