/* Scanning of rtl for dataflow analysis.
- Copyright (C) 1999-2014 Free Software Foundation, Inc.
+ Copyright (C) 1999-2016 Free Software Foundation, Inc.
Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
-#include "tm_p.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "function.h"
-#include "regs.h"
-#include "alloc-pool.h"
-#include "flags.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "sbitmap.h"
-#include "bitmap.h"
-#include "dumpfile.h"
#include "tree.h"
-#include "target.h"
-#include "target-def.h"
#include "df.h"
+#include "tm_p.h"
+#include "regs.h"
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
+#include "dumpfile.h"
-typedef struct df_mw_hardreg *df_mw_hardreg_ptr;
-
-
-#ifndef HAVE_epilogue
-#define HAVE_epilogue 0
-#endif
-#ifndef HAVE_prologue
-#define HAVE_prologue 0
-#endif
-#ifndef HAVE_sibcall_epilogue
-#define HAVE_sibcall_epilogue 0
-#endif
-
-#ifndef EPILOGUE_USES
-#define EPILOGUE_USES(REGNO) 0
-#endif
-
/* The set of hard registers in eliminables[i].from. */
static HARD_REG_SET elim_reg_set;
auto_vec<df_ref, 128> def_vec;
auto_vec<df_ref, 32> use_vec;
auto_vec<df_ref, 32> eq_use_vec;
- auto_vec<df_mw_hardreg_ptr, 32> mw_vec;
+ auto_vec<df_mw_hardreg *, 32> mw_vec;
};
static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
static void df_ref_chain_delete (df_ref);
static void df_refs_add_to_chains (struct df_collection_rec *,
- basic_block, rtx, unsigned int);
+ basic_block, rtx_insn *, unsigned int);
-static bool df_insn_refs_verify (struct df_collection_rec *, basic_block, rtx, bool);
+static bool df_insn_refs_verify (struct df_collection_rec *, basic_block,
+ rtx_insn *, bool);
static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
static void df_install_ref (df_ref, struct df_reg_info *,
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
- alloc_pool ref_base_pool;
- alloc_pool ref_artificial_pool;
- alloc_pool ref_regular_pool;
- alloc_pool insn_pool;
- alloc_pool reg_pool;
- alloc_pool mw_reg_pool;
+ object_allocator<df_base_ref> *ref_base_pool;
+ object_allocator<df_artificial_ref> *ref_artificial_pool;
+ object_allocator<df_regular_ref> *ref_regular_pool;
+ object_allocator<df_insn_info> *insn_pool;
+ object_allocator<df_reg_info> *reg_pool;
+ object_allocator<df_mw_hardreg> *mw_reg_pool;
+
bitmap_obstack reg_bitmaps;
bitmap_obstack insn_bitmaps;
};
-typedef struct df_scan_bb_info *df_scan_bb_info_t;
-
-
/* Internal function to shut down the scanning problem. */
static void
df_scan_free_internal (void)
bitmap_clear (&df->insns_to_rescan);
bitmap_clear (&df->insns_to_notes_rescan);
- free_alloc_pool (problem_data->ref_base_pool);
- free_alloc_pool (problem_data->ref_artificial_pool);
- free_alloc_pool (problem_data->ref_regular_pool);
- free_alloc_pool (problem_data->insn_pool);
- free_alloc_pool (problem_data->reg_pool);
- free_alloc_pool (problem_data->mw_reg_pool);
+ delete problem_data->ref_base_pool;
+ delete problem_data->ref_artificial_pool;
+ delete problem_data->ref_regular_pool;
+ delete problem_data->insn_pool;
+ delete problem_data->reg_pool;
+ delete problem_data->mw_reg_pool;
bitmap_obstack_release (&problem_data->reg_bitmaps);
bitmap_obstack_release (&problem_data->insn_bitmaps);
free (df_scan->problem_data);
{
struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
unsigned int bb_index = bb->index;
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
struct df_scan_problem_data *problem_data;
unsigned int insn_num = get_max_uid () + 1;
- unsigned int block_size = 512;
basic_block bb;
/* Given the number of pools, this is really faster than tearing
df_scan->problem_data = problem_data;
df_scan->computed = true;
- problem_data->ref_base_pool
- = create_alloc_pool ("df_scan ref base",
- sizeof (struct df_base_ref), block_size);
- problem_data->ref_artificial_pool
- = create_alloc_pool ("df_scan ref artificial",
- sizeof (struct df_artificial_ref), block_size);
- problem_data->ref_regular_pool
- = create_alloc_pool ("df_scan ref regular",
- sizeof (struct df_regular_ref), block_size);
- problem_data->insn_pool
- = create_alloc_pool ("df_scan insn",
- sizeof (struct df_insn_info), block_size);
- problem_data->reg_pool
- = create_alloc_pool ("df_scan reg",
- sizeof (struct df_reg_info), block_size);
- problem_data->mw_reg_pool
- = create_alloc_pool ("df_scan mw_reg",
- sizeof (struct df_mw_hardreg), block_size / 16);
+ problem_data->ref_base_pool = new object_allocator<df_base_ref>
+ ("df_scan ref base");
+ problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
+ ("df_scan ref artificial");
+ problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
+ ("df_scan ref regular");
+ problem_data->insn_pool = new object_allocator<df_insn_info>
+ ("df_scan insn");
+ problem_data->reg_pool = new object_allocator<df_reg_info>
+ ("df_scan reg");
+ problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
+ ("df_scan mw_reg");
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
bitmap_obstack_initialize (&problem_data->insn_bitmaps);
int icount = 0;
int ccount = 0;
basic_block bb;
- rtx insn;
+ rtx_insn *insn;
fprintf (file, ";; invalidated by call \t");
df_print_regset (file, regs_invalidated_by_call_regset);
fprintf (file, ";; regs ever live \t");
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (df_regs_ever_live_p (i))
- fprintf (file, " %d[%s]", i, reg_names[i]);
+ fprintf (file, " %d [%s]", i, reg_names[i]);
fprintf (file, "\n;; ref usage \t");
for (i = 0; i < (int)df->regs_inited; i++)
}
#if 0
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
df_insn_debug (insn, false, file);
{
struct df_reg_info *reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->def_regs[i] = reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->use_regs[i] = reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->eq_use_regs[i] = reg_info;
df->def_info.begin[i] = 0;
depending on whether LOC is inside PATTERN (INSN) or a note. */
void
-df_uses_create (rtx *loc, rtx insn, int ref_flags)
+df_uses_create (rtx *loc, rtx_insn *insn, int ref_flags)
{
gcc_assert (!(ref_flags & ~DF_REF_IN_NOTE));
df_uses_record (NULL, loc, DF_REF_REG_USE,
df_ref *ref_ptr;
bool add_to_table;
- rtx insn = DF_REF_INSN (ref);
+ rtx_insn *insn = DF_REF_INSN (ref);
basic_block bb = BLOCK_FOR_INSN (insn);
if (DF_REF_REG_DEF_P (ref))
switch (DF_REF_CLASS (ref))
{
case DF_REF_BASE:
- pool_free (problem_data->ref_base_pool, ref);
+ problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
break;
case DF_REF_ARTIFICIAL:
- pool_free (problem_data->ref_artificial_pool, ref);
+ problem_data->ref_artificial_pool->remove
+ ((df_artificial_ref *) (ref));
break;
case DF_REF_REGULAR:
- pool_free (problem_data->ref_regular_pool, ref);
+ problem_data->ref_regular_pool->remove
+ ((df_regular_ref *) (ref));
break;
}
}
df_free_ref (ref);
}
+/* Initialize INSN_INFO to describe INSN.  The whole record is zeroed
+   first, so INSN_INFO may be either freshly allocated or recycled
+   from a previous insn (see df_insn_rescan). */
+
+static void
+df_insn_info_init_fields (df_insn_info *insn_info, rtx_insn *insn)
+{
+ memset (insn_info, 0, sizeof (struct df_insn_info));
+ insn_info->insn = insn;
+}
/* Create the insn record for INSN. If there was one there, zero it
out. */
struct df_insn_info *
-df_insn_create_insn_record (rtx insn)
+df_insn_create_insn_record (rtx_insn *insn)
{
struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
insn_rec = DF_INSN_INFO_GET (insn);
if (!insn_rec)
{
- insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+ insn_rec = problem_data->insn_pool->allocate ();
DF_INSN_INFO_SET (insn, insn_rec);
}
- memset (insn_rec, 0, sizeof (struct df_insn_info));
- insn_rec->insn = insn;
+ df_insn_info_init_fields (insn_rec, insn);
return insn_rec;
}
for (; hardregs; hardregs = next)
{
next = DF_MWS_NEXT (hardregs);
- pool_free (problem_data->mw_reg_pool, hardregs);
+ problem_data->mw_reg_pool->remove (hardregs);
}
}
+/* Remove the contents of INSN_INFO (but don't free INSN_INFO itself). */
+
+static void
+df_insn_info_free_fields (df_insn_info *insn_info)
+{
+ /* In general, notes do not have the insn_info fields
+ initialized. However, combine deletes insns by changing them
+ to notes. How clever. So we cannot just check if it is a
+ valid insn before short circuiting this code, we need to see
+ if we actually initialized it. */
+ df_mw_hardreg_chain_delete (insn_info->mw_hardregs);
+
+ if (df_chain)
+ {
+ df_ref_chain_delete_du_chain (insn_info->defs);
+ df_ref_chain_delete_du_chain (insn_info->uses);
+ df_ref_chain_delete_du_chain (insn_info->eq_uses);
+ }
+
+ /* Free the refs themselves only after any DU/UD chain links into
+ them have been released above. */
+ df_ref_chain_delete (insn_info->defs);
+ df_ref_chain_delete (insn_info->uses);
+ df_ref_chain_delete (insn_info->eq_uses);
+}
/* Delete all of the refs information from the insn with UID.
Internal helper for df_insn_delete, df_insn_rescan, and other
struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
- /* In general, notes do not have the insn_info fields
- initialized. However, combine deletes insns by changing them
- to notes. How clever. So we cannot just check if it is a
- valid insn before short circuiting this code, we need to see
- if we actually initialized it. */
- df_mw_hardreg_chain_delete (insn_info->mw_hardregs);
-
- if (df_chain)
- {
- df_ref_chain_delete_du_chain (insn_info->defs);
- df_ref_chain_delete_du_chain (insn_info->uses);
- df_ref_chain_delete_du_chain (insn_info->eq_uses);
- }
-
- df_ref_chain_delete (insn_info->defs);
- df_ref_chain_delete (insn_info->uses);
- df_ref_chain_delete (insn_info->eq_uses);
-
- pool_free (problem_data->insn_pool, insn_info);
+ df_insn_info_free_fields (insn_info);
+ problem_data->insn_pool->remove (insn_info);
DF_INSN_UID_SET (uid, NULL);
}
}
or marked for later in deferred mode. */
void
-df_insn_delete (rtx insn)
+df_insn_delete (rtx_insn *insn)
{
unsigned int uid;
basic_block bb;
FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
df_free_ref (ref);
FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
- pool_free (problem_data->mw_reg_pool, mw);
+ problem_data->mw_reg_pool->remove (mw);
collection_rec->def_vec.release ();
collection_rec->use_vec.release ();
/* Rescan INSN. Return TRUE if the rescanning produced any changes. */
bool
-df_insn_rescan (rtx insn)
+df_insn_rescan (rtx_insn *insn)
{
unsigned int uid = INSN_UID (insn);
struct df_insn_info *insn_info = NULL;
/* There's change - we need to delete the existing info.
Since the insn isn't moved, we can salvage its LUID. */
luid = DF_INSN_LUID (insn);
- df_insn_info_delete (uid);
- df_insn_create_insn_record (insn);
+ df_insn_info_free_fields (insn_info);
+ df_insn_info_init_fields (insn_info, insn);
DF_INSN_LUID (insn) = luid;
}
else
dirty. */
bool
-df_insn_rescan_debug_internal (rtx insn)
+df_insn_rescan_debug_internal (rtx_insn *insn)
{
unsigned int uid = INSN_UID (insn);
struct df_insn_info *insn_info;
FOR_EACH_BB_FN (bb, cfun)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
{
df_insn_rescan (insn);
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- rtx insn;
+ rtx_insn *insn;
df_ref def, use;
if (include_defs)
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- rtx insn;
+ rtx_insn *insn;
df_ref def, use;
if (include_defs)
bool include_defs, bool include_uses,
bool include_eq_uses)
{
- rtx insn;
+ rtx_insn *insn;
if (include_defs)
offset = df_add_refs_to_table (offset, ref_info,
instructions from one block to another. */
void
-df_insn_change_bb (rtx insn, basic_block new_bb)
+df_insn_change_bb (rtx_insn *insn, basic_block new_bb)
{
basic_block old_bb = BLOCK_FOR_INSN (insn);
struct df_insn_info *insn_info;
static void
df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
struct df_reg_info *new_df,
- int new_regno, rtx loc)
+ unsigned int new_regno, rtx loc)
{
df_ref the_ref = old_df->reg_chain;
}
-/* Change the regno of all refs that contained LOC from OLD_REGNO to
- NEW_REGNO. Refs that do not match LOC are not changed which means
- that artificial refs are not changed since they have no loc. This
- call is to support the SET_REGNO macro. */
+/* Change the regno of register LOC to NEW_REGNO and update the df
+ information accordingly. Refs that do not match LOC are not changed
+ which means that artificial refs are not changed since they have no loc.
+ This call is to support the SET_REGNO macro. */
void
-df_ref_change_reg_with_loc (int old_regno, int new_regno, rtx loc)
+df_ref_change_reg_with_loc (rtx loc, unsigned int new_regno)
{
- if ((!df) || (old_regno == -1) || (old_regno == new_regno))
+ unsigned int old_regno = REGNO (loc);
+ if (old_regno == new_regno)
return;
- df_grow_reg_info ();
-
- df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
- DF_REG_DEF_GET (new_regno), new_regno, loc);
- df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
- DF_REG_USE_GET (new_regno), new_regno, loc);
- df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
- DF_REG_EQ_USE_GET (new_regno), new_regno, loc);
+ if (df)
+ {
+ df_grow_reg_info ();
+
+ df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
+ DF_REG_DEF_GET (new_regno),
+ new_regno, loc);
+ df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
+ DF_REG_USE_GET (new_regno),
+ new_regno, loc);
+ df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
+ DF_REG_EQ_USE_GET (new_regno),
+ new_regno, loc);
+ }
+ set_mode_and_regno (loc, GET_MODE (loc), new_regno);
}
if (mw->flags & DF_REF_IN_NOTE)
{
*mw_ptr = DF_MWS_NEXT (mw);
- pool_free (problem_data->mw_reg_pool, mw);
+ problem_data->mw_reg_pool->remove (mw);
}
else
mw_ptr = &DF_MWS_NEXT (mw);
/* Rescan only the REG_EQUIV/REG_EQUAL notes part of INSN. */
void
-df_notes_rescan (rtx insn)
+df_notes_rescan (rtx_insn *insn)
{
struct df_insn_info *insn_info;
unsigned int uid = INSN_UID (insn);
return df_ref_compare (*(const df_ref *) r1, *(const df_ref *) r2);
}
-static void
-df_swap_refs (vec<df_ref, va_heap> *ref_vec, int i, int j)
-{
- df_ref tmp = (*ref_vec)[i];
- (*ref_vec)[i] = (*ref_vec)[j];
- (*ref_vec)[j] = tmp;
-}
-
/* Sort and compress a set of refs. */
static void
df_ref r0 = (*ref_vec)[0];
df_ref r1 = (*ref_vec)[1];
if (df_ref_compare (r0, r1) > 0)
- df_swap_refs (ref_vec, 0, 1);
+ std::swap ((*ref_vec)[0], (*ref_vec)[1]);
}
else
{
/* Sort and compress a set of refs. */
static void
-df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
+df_sort_and_compress_mws (vec<df_mw_hardreg *, va_heap> *mw_vec)
{
unsigned int count;
struct df_scan_problem_data *problem_data
while (i + dist + 1 < count
&& df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
{
- pool_free (problem_data->mw_reg_pool,
- (*mw_vec)[i + dist + 1]);
+ problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
dist++;
}
/* Copy it down to the next position. */
insn. */
static struct df_mw_hardreg *
-df_install_mws (const vec<df_mw_hardreg_ptr, va_heap> *old_vec)
+df_install_mws (const vec<df_mw_hardreg *, va_heap> *old_vec)
{
unsigned int count = old_vec->length ();
if (count)
static void
df_refs_add_to_chains (struct df_collection_rec *collection_rec,
- basic_block bb, rtx insn, unsigned int flags)
+ basic_block bb, rtx_insn *insn, unsigned int flags)
{
if (insn)
{
switch (cl)
{
case DF_REF_BASE:
- this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+ this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
gcc_checking_assert (loc == NULL);
break;
case DF_REF_ARTIFICIAL:
- this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+ this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
this_ref->artificial_ref.bb = bb;
gcc_checking_assert (loc == NULL);
break;
case DF_REF_REGULAR:
- this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+ this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
this_ref->regular_ref.loc = loc;
gcc_checking_assert (loc);
break;
endregno = regno + subreg_nregs (reg);
}
else
- endregno = END_HARD_REGNO (reg);
+ endregno = END_REGNO (reg);
/* If this is a multiword hardreg, we create some extra
datastructures that will enable us to easily build REG_DEAD
ref_flags |= DF_REF_PARTIAL;
ref_flags |= DF_REF_MW_HARDREG;
- hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+ hardreg = problem_data->mw_reg_pool->allocate ();
hardreg->type = ref_type;
hardreg->flags = ref_flags;
hardreg->mw_reg = reg;
&& !TEST_HARD_REG_BIT (defs_generated, i)
&& (!is_sibling_call
|| !bitmap_bit_p (df->exit_block_uses, i)
- || refers_to_regno_p (i, i+1,
- crtl->return_rtx, NULL)))
+ || refers_to_regno_p (i, crtl->return_rtx)))
df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i],
NULL, bb, insn_info, DF_REF_REG_DEF,
DF_REF_MAY_CLOBBER | flags);
regno_reg_rtx[FRAME_POINTER_REGNUM],
NULL, bb, insn_info,
DF_REF_REG_USE, 0);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
- df_ref_record (DF_REF_BASE, collection_rec,
- regno_reg_rtx[HARD_FRAME_POINTER_REGNUM],
- NULL, bb, insn_info,
- DF_REF_REG_USE, 0);
-#endif
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
+ df_ref_record (DF_REF_BASE, collection_rec,
+ regno_reg_rtx[HARD_FRAME_POINTER_REGNUM],
+ NULL, bb, insn_info,
+ DF_REF_REG_USE, 0);
break;
default:
break;
void
df_recompute_luids (basic_block bb)
{
- rtx insn;
+ rtx_insn *insn;
int luid = 0;
df_grow_insn_info ();
return;
}
-#ifdef EH_RETURN_DATA_REGNO
if (bb_has_eh_pred (bb))
{
unsigned int i;
bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP);
}
}
-#endif
/* Add the hard_frame_pointer if this block is the target of a
non-local goto. */
df_bb_refs_record (int bb_index, bool scan_insns)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- rtx insn;
+ rtx_insn *insn;
int luid = 0;
if (!df)
reference of the frame pointer. */
bitmap_set_bit (regular_block_artificial_uses, FRAME_POINTER_REGNUM);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
- bitmap_set_bit (regular_block_artificial_uses, HARD_FRAME_POINTER_REGNUM);
-#endif
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
+ bitmap_set_bit (regular_block_artificial_uses,
+ HARD_FRAME_POINTER_REGNUM);
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* Pseudos with argument area equivalences may require
reloading via the argument pointer. */
- if (fixed_regs[ARG_POINTER_REGNUM])
+ if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (regular_block_artificial_uses, ARG_POINTER_REGNUM);
-#endif
/* Any constant, or pseudo with constant equivalences, may
require reloading from memory using the pic register. */
if (frame_pointer_needed)
{
bitmap_set_bit (eh_block_artificial_uses, FRAME_POINTER_REGNUM);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
- bitmap_set_bit (eh_block_artificial_uses, HARD_FRAME_POINTER_REGNUM);
-#endif
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
+ bitmap_set_bit (eh_block_artificial_uses,
+ HARD_FRAME_POINTER_REGNUM);
}
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
- if (fixed_regs[ARG_POINTER_REGNUM])
+ if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (eh_block_artificial_uses, ARG_POINTER_REGNUM);
-#endif
}
}
static void
df_mark_reg (rtx reg, void *vset)
{
- bitmap set = (bitmap) vset;
- int regno = REGNO (reg);
-
- gcc_assert (GET_MODE (reg) != BLKmode);
-
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = hard_regno_nregs[regno][GET_MODE (reg)];
- bitmap_set_range (set, regno, n);
- }
- else
- bitmap_set_bit (set, regno);
+ /* NOTE(review): REG_NREGS is expected to subsume the old two-way
+ branch -- hard_regno_nregs for hard registers, a single register
+ for pseudos -- so the whole span can be set in one call. */
+ bitmap_set_range ((bitmap) vset, REGNO (reg), REG_NREGS (reg));
}
/* Once the prologue has been generated, all of these registers
should just show up in the first regular block. */
- if (HAVE_prologue && epilogue_completed)
+ if (targetm.have_prologue () && epilogue_completed)
{
/* Defs for the callee saved registers are inserted so that the
pushes have some defining location. */
/* Any reference to any pseudo before reload is a potential
reference of the frame pointer. */
bitmap_set_bit (entry_block_defs, FRAME_POINTER_REGNUM);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
+
/* If they are different, also mark the hard frame pointer as live. */
- if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
+ && !LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
bitmap_set_bit (entry_block_defs, HARD_FRAME_POINTER_REGNUM);
-#endif
}
/* These registers are live everywhere. */
if (!reload_completed)
{
-#ifdef PIC_OFFSET_TABLE_REGNUM
- unsigned int picreg = PIC_OFFSET_TABLE_REGNUM;
-#endif
-
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* Pseudos with argument area equivalences may require
reloading via the argument pointer. */
- if (fixed_regs[ARG_POINTER_REGNUM])
+ if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (entry_block_defs, ARG_POINTER_REGNUM);
-#endif
-#ifdef PIC_OFFSET_TABLE_REGNUM
/* Any constant, or pseudo with constant equivalences, may
require reloading from memory using the pic register. */
+ unsigned int picreg = PIC_OFFSET_TABLE_REGNUM;
if (picreg != INVALID_REGNUM
&& fixed_regs[picreg])
bitmap_set_bit (entry_block_defs, picreg);
-#endif
}
#ifdef INCOMING_RETURN_ADDR_RTX
if ((!reload_completed) || frame_pointer_needed)
{
bitmap_set_bit (exit_block_uses, FRAME_POINTER_REGNUM);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
+
/* If they are different, also mark the hard frame pointer as live. */
- if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
+ && !LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
bitmap_set_bit (exit_block_uses, HARD_FRAME_POINTER_REGNUM);
-#endif
}
/* Many architectures have a GP register even without flag_pic.
if (global_regs[i] || EPILOGUE_USES (i))
bitmap_set_bit (exit_block_uses, i);
- if (HAVE_epilogue && epilogue_completed)
+ if (targetm.have_epilogue () && epilogue_completed)
{
/* Mark all call-saved registers that we actually used. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
bitmap_set_bit (exit_block_uses, i);
}
-#ifdef EH_RETURN_DATA_REGNO
/* Mark the registers that will contain data for the handler. */
if (reload_completed && crtl->calls_eh_return)
for (i = 0; ; ++i)
break;
bitmap_set_bit (exit_block_uses, regno);
}
-#endif
#ifdef EH_RETURN_STACKADJ_RTX
- if ((!HAVE_epilogue || ! epilogue_completed)
+ if ((!targetm.have_epilogue () || ! epilogue_completed)
&& crtl->calls_eh_return)
{
rtx tmp = EH_RETURN_STACKADJ_RTX;
}
#endif
-#ifdef EH_RETURN_HANDLER_RTX
- if ((!HAVE_epilogue || ! epilogue_completed)
+ if ((!targetm.have_epilogue () || ! epilogue_completed)
&& crtl->calls_eh_return)
{
rtx tmp = EH_RETURN_HANDLER_RTX;
if (tmp && REG_P (tmp))
df_mark_reg (tmp, exit_block_uses);
}
-#endif
/* Mark function return value. */
diddle_return_value (df_mark_reg, (void*) exit_block_uses);
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
-#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* It is deliberate that this is not put in the exit block uses but
I do not know why. */
- if (reload_completed
+ if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && reload_completed
&& !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM)
&& bb_has_eh_pred (EXIT_BLOCK_PTR_FOR_FN (cfun))
&& fixed_regs[ARG_POINTER_REGNUM])
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[ARG_POINTER_REGNUM], NULL,
EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
-#endif
df_canonize_collection_rec (collection_rec);
}
in the set of registers clobbered across the call. */
FOR_EACH_BB_FN (bb, cfun)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn) && CALL_P (insn))
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_mws_verify (const vec<df_mw_hardreg_ptr, va_heap> *new_rec,
+df_mws_verify (const vec<df_mw_hardreg *, va_heap> *new_rec,
struct df_mw_hardreg *old_rec,
bool abort_if_fail)
{
static bool
df_insn_refs_verify (struct df_collection_rec *collection_rec,
basic_block bb,
- rtx insn,
+ rtx_insn *insn,
bool abort_if_fail)
{
bool ret1, ret2, ret3, ret4;
static bool
df_bb_verify (basic_block bb)
{
- rtx insn;
+ rtx_insn *insn;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
struct df_collection_rec collection_rec;