/* Control flow optimization code for GNU compiler.
- Copyright (C) 1987-2013 Free Software Foundation, Inc.
+ Copyright (C) 1987-2016 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
#include "tree.h"
-#include "hard-reg-set.h"
-#include "regs.h"
+#include "cfghooks.h"
+#include "df.h"
+#include "tm_p.h"
#include "insn-config.h"
-#include "flags.h"
-#include "recog.h"
-#include "diagnostic-core.h"
+#include "emit-rtl.h"
#include "cselib.h"
#include "params.h"
-#include "tm_p.h"
-#include "target.h"
-#include "function.h" /* For inline functions in emit-rtl.h they need crtl. */
-#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
-#include "expr.h"
-#include "df.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
#include "dce.h"
#include "dbgcnt.h"
+#include "rtl-iter.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
-static enum replace_direction old_insns_match_p (int, rtx, rtx);
+static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
-static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for newly created block. */
{
basic_block jump_block, jump_dest_block, cbranch_dest_block;
edge cbranch_jump_edge, cbranch_fallthru_edge;
- rtx cbranch_insn;
+ rtx_insn *cbranch_insn;
/* Verify that there are exactly two successors. */
if (EDGE_COUNT (cbranch_block->succs) != 2)
return false;
/* Invert the conditional branch. */
- if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
+ if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
+ block_label (jump_dest_block), 0))
return false;
if (dump_file)
static bool
mark_effect (rtx exp, regset nonequal)
{
- int regno;
rtx dest;
switch (GET_CODE (exp))
{
/* In case we do clobber the register, mark it as equal, as we know the
value is dead so it don't have to match. */
case CLOBBER:
- if (REG_P (XEXP (exp, 0)))
- {
- dest = XEXP (exp, 0);
- regno = REGNO (dest);
- if (HARD_REGISTER_NUM_P (regno))
- bitmap_clear_range (nonequal, regno,
- hard_regno_nregs[regno][GET_MODE (dest)]);
- else
- bitmap_clear_bit (nonequal, regno);
- }
+ dest = XEXP (exp, 0);
+ if (REG_P (dest))
+ bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
return false;
case SET:
return false;
if (!REG_P (dest))
return true;
- regno = REGNO (dest);
- if (HARD_REGISTER_NUM_P (regno))
- bitmap_set_range (nonequal, regno,
- hard_regno_nregs[regno][GET_MODE (dest)]);
- else
- bitmap_set_bit (nonequal, regno);
+ bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
return false;
default:
}
}
-/* Return nonzero if X is a register set in regset DATA.
- Called via for_each_rtx. */
-static int
-mentions_nonequal_regs (rtx *x, void *data)
+/* Return true if X contains a register in NONEQUAL. */
+static bool
+mentions_nonequal_regs (const_rtx x, regset nonequal)
{
- regset nonequal = (regset) data;
- if (REG_P (*x))
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, x, NONCONST)
{
- int regno;
-
- regno = REGNO (*x);
- if (REGNO_REG_SET_P (nonequal, regno))
- return 1;
- if (regno < FIRST_PSEUDO_REGISTER)
+ /* This inner X deliberately shadows the parameter: it is the
+ current sub-rtx being walked. */
+ const_rtx x = *iter;
+ if (REG_P (x))
{
- int n = hard_regno_nregs[regno][GET_MODE (*x)];
- while (--n > 0)
- if (REGNO_REG_SET_P (nonequal, regno + n))
- return 1;
+ /* A multi-word register occupies REGNO (x) .. END_REGNO (x)-1;
+ a hit on any of those register numbers counts as a mention. */
+ unsigned int end_regno = END_REGNO (x);
+ for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
+ if (REGNO_REG_SET_P (nonequal, regno))
+ return true;
}
}
- return 0;
+ return false;
}
+
/* Attempt to prove that the basic block B will have no side effects and
always continues in the same edge if reached via E. Return the edge
if exist, NULL otherwise. */
static edge
thread_jump (edge e, basic_block b)
{
- rtx set1, set2, cond1, cond2, insn;
+ rtx set1, set2, cond1, cond2;
+ rtx_insn *insn;
enum rtx_code code1, code2, reversed_code2;
bool reverse1 = false;
unsigned i;
/* cond2 must not mention any register that is not equal to the
former block. */
- if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
+ if (mentions_nonequal_regs (cond2, nonequal))
goto failed_exit;
EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
- if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
+ if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
return false;
for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
{
basic_block target, first;
- int counter, goto_locus;
+ location_t goto_locus;
+ int counter;
bool threaded = false;
int nthreaded_edges = 0;
bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
details. */
if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
+ && JUMP_P (BB_END (first))
+ && CROSSING_JUMP_P (BB_END (first)))
return changed;
while (counter < n_basic_blocks_for_fn (cfun))
{
/* When not optimizing, ensure that edges or forwarder
blocks with different locus are not optimized out. */
- int new_locus = single_succ_edge (target)->goto_locus;
- int locus = goto_locus;
+ location_t new_locus = single_succ_edge (target)->goto_locus;
+ location_t locus = goto_locus;
- if (new_locus != UNKNOWN_LOCATION
- && locus != UNKNOWN_LOCATION
+ if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
+ && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
&& new_locus != locus)
new_target = NULL;
else
{
- rtx last;
-
- if (new_locus != UNKNOWN_LOCATION)
+ if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
locus = new_locus;
- last = BB_END (target);
+ rtx_insn *last = BB_END (target);
if (DEBUG_INSN_P (last))
last = prev_nondebug_insn (last);
+ if (last && INSN_P (last))
+ new_locus = INSN_LOCATION (last);
+ else
+ new_locus = UNKNOWN_LOCATION;
- new_locus = last && INSN_P (last)
- ? INSN_LOCATION (last) : 0;
-
- if (new_locus != UNKNOWN_LOCATION
- && locus != UNKNOWN_LOCATION
+ if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
+ && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
&& new_locus != locus)
new_target = NULL;
else
{
- if (new_locus != UNKNOWN_LOCATION)
+ if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
locus = new_locus;
goto_locus = locus;
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
- rtx barrier;
+ rtx_insn *barrier;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
- rtx barrier, real_b_end;
- rtx label, table;
+ rtx_insn *barrier, *real_b_end;
+ rtx label;
+ rtx_jump_table_data *table;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
/* Removes the memory attributes of MEM expression
if they are not equal. */
-void
+static void
merge_memattrs (rtx x, rtx y)
{
int i;
if (GET_MODE (x) != GET_MODE (y))
return;
- if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
+ if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
{
if (! MEM_ATTRS (x))
MEM_ATTRS (y) = 0;
? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
continue;
- return false;
+ return false;
}
return true;
}
+
+/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
+ that is a single_set with a SET_SRC of SRC1. Similarly
+ for NOTE2/SRC2.
+
+ So effectively NOTE1/NOTE2 are an alternate form of
+ SRC1/SRC2 respectively.
+
+ Return nonzero if SRC1 or NOTE1 has the same constant
+ integer value as SRC2 or NOTE2. Else return zero. */
+static int
+values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
+{
+ /* Case 1: both insns carry REG_EQUAL notes holding the same
+ constant integer. */
+ if (note1
+ && note2
+ && CONST_INT_P (XEXP (note1, 0))
+ && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
+ return 1;
+
+ /* Case 2: neither insn has a note; compare the SET_SRCs directly. */
+ if (!note1
+ && !note2
+ && CONST_INT_P (src1)
+ && CONST_INT_P (src2)
+ && rtx_equal_p (src1, src2))
+ return 1;
+
+ /* Mixed cases: one side's note against the other side's source. */
+ if (note1
+ && CONST_INT_P (src2)
+ && rtx_equal_p (XEXP (note1, 0), src2))
+ return 1;
+
+ if (note2
+ && CONST_INT_P (src1)
+ && rtx_equal_p (XEXP (note2, 0), src1))
+ return 1;
+
+ return 0;
+}
+
/* Examine register notes on I1 and I2 and return:
- dir_forward if I1 can be replaced by I2, or
- dir_backward if I2 can be replaced by I1, or
- dir_both if both are the case. */
static enum replace_direction
-can_replace_by (rtx i1, rtx i2)
+can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
rtx s1, s2, d1, d2, src1, src2, note1, note2;
bool c1, c2;
set dest to the same value. */
note1 = find_reg_equal_equiv_note (i1);
note2 = find_reg_equal_equiv_note (i2);
- if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
- || !CONST_INT_P (XEXP (note1, 0)))
+
+ src1 = SET_SRC (s1);
+ src2 = SET_SRC (s2);
+
+ if (!values_equal_p (note1, note2, src1, src2))
return dir_none;
if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
(set (dest) (reg))
because we don't know if the reg is live and has the same value at the
location of replacement. */
- src1 = SET_SRC (s1);
- src2 = SET_SRC (s2);
c1 = CONST_INT_P (src1);
c2 = CONST_INT_P (src2);
if (c1 && c2)
- dir_both if both are the case. */
static enum replace_direction
-old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
+old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
rtx p1, p2;
&& DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
>= BUILT_IN_ASAN_REPORT_LOAD1
&& DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
- <= BUILT_IN_ASAN_REPORT_STORE16)
+ <= BUILT_IN_ASAN_STOREN)
return dir_none;
}
}
flow_find_head_matching_sequence, ensure the notes match. */
static void
-merge_notes (rtx i1, rtx i2)
+merge_notes (rtx_insn *i1, rtx_insn *i2)
{
/* If the merged insns have different REG_EQUAL notes, then
remove them. */
DID_FALLTHRU. Otherwise, stops at the head of the bb. */
static void
-walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
+walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
bool *did_fallthru)
{
edge fallthru;
store the head of the blocks in *F1 and *F2. */
int
-flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
- enum replace_direction *dir_p)
+flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
+ rtx_insn **f2, enum replace_direction *dir_p)
{
- rtx i1, i2, last1, last2, afterlast1, afterlast2;
+ rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
int ninsns = 0;
- rtx p1;
enum replace_direction dir, last_dir, afterlast_dir;
bool follow_fallthru, did_fallthru;
need to be compared for equivalence, which we'll do below. */
i1 = BB_END (bb1);
- last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
+ last1 = afterlast1 = last2 = afterlast2 = NULL;
if (onlyjump_p (i1)
|| (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
{
|| (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
{
last2 = i2;
- /* Count everything except for unconditional jump as insn. */
- if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
+ /* Count everything except for unconditional jump as insn.
+ Don't count any jumps if dir_p is NULL. */
+ if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
ninsns++;
i2 = PREV_INSN (i2);
}
last1 = i1, last2 = i2;
afterlast_dir = last_dir;
last_dir = dir;
- p1 = PATTERN (i1);
- if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
+ if (active_insn_p (i1))
ninsns++;
}
i2 = PREV_INSN (i2);
}
-#ifdef HAVE_cc0
/* Don't allow the insn after a compare to be shared by
cross-jumping unless the compare is also shared. */
- if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
+ if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
+ && ! sets_cc0_p (last1))
last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
-#endif
/* Include preceding notes and labels in the cross-jump. One,
this may bring us to the head of the blocks as requested above.
/* Like flow_find_cross_jump, except start looking for a matching sequence from
the head of the two blocks. Do not include jumps at the end.
If STOP_AFTER is nonzero, stop after finding that many matching
- instructions. */
+ instructions. If STOP_AFTER is zero, count all INSN_P insns; if it is
+ nonzero, count only active insns. */
int
-flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
- rtx *f2, int stop_after)
+flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
+ rtx_insn **f2, int stop_after)
{
- rtx i1, i2, last1, last2, beforelast1, beforelast2;
+ rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
int ninsns = 0;
edge e;
edge_iterator ei;
i1 = BB_HEAD (bb1);
i2 = BB_HEAD (bb2);
- last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
+ last1 = beforelast1 = last2 = beforelast2 = NULL;
while (true)
{
beforelast1 = last1, beforelast2 = last2;
last1 = i1, last2 = i2;
- ninsns++;
+ if (!stop_after || active_insn_p (i1))
+ ninsns++;
}
if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
i2 = NEXT_INSN (i2);
}
-#ifdef HAVE_cc0
/* Don't allow a compare to be shared by cross-jumping unless the insn
after the compare is also shared. */
- if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
+ if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
+ && sets_cc0_p (last1))
last1 = beforelast1, last2 = beforelast2, ninsns--;
-#endif
if (ninsns)
{
Return true if they are identical. */
{
rtx label1, label2;
- rtx table1, table2;
+ rtx_jump_table_data *table1, *table2;
if (tablejump_p (BB_END (bb1), &label1, &table1)
&& tablejump_p (BB_END (bb2), &label2, &table2)
if (identical)
{
- replace_label_data rr;
bool match;
/* Temporarily replace references to LABEL1 with LABEL2
in BB1->END so that we could compare the instructions. */
- rr.r1 = label1;
- rr.r2 = label2;
- rr.update_label_nuses = false;
- for_each_rtx (&BB_END (bb1), replace_label, &rr);
+ replace_label_in_insn (BB_END (bb1), label1, label2, false);
match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
== dir_both);
/* Set the original label in BB1->END because when deleting
a block whose end is a tablejump, the tablejump referenced
from the instruction is deleted too. */
- rr.r1 = label2;
- rr.r2 = label1;
- for_each_rtx (&BB_END (bb1), replace_label, &rr);
+ replace_label_in_insn (BB_END (bb1), label2, label1, false);
return match;
}
stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
handles that case specially. old_insns_match_p does not handle
other types of instruction notes. */
- rtx last1 = BB_END (bb1);
- rtx last2 = BB_END (bb2);
+ rtx_insn *last1 = BB_END (bb1);
+ rtx_insn *last2 = BB_END (bb2);
while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
(DEBUG_INSN_P (last1) || NOTE_P (last1)))
last1 = PREV_INSN (last1);
basic_block src1 = e1->src, src2 = e2->src;
basic_block redirect_to, redirect_from, to_remove;
basic_block osrc1, osrc2, redirect_edges_to, tmp;
- rtx newpos1, newpos2;
+ rtx_insn *newpos1, *newpos2;
edge s;
edge_iterator ei;
- newpos1 = newpos2 = NULL_RTX;
+ newpos1 = newpos2 = NULL;
/* If we have partitioned hot/cold basic blocks, it is a bad idea
to try this optimization.
SWAP (basic_block, osrc1, osrc2);
SWAP (basic_block, src1, src2);
SWAP (edge, e1, e2);
- SWAP (rtx, newpos1, newpos2);
+ SWAP (rtx_insn *, newpos1, newpos2);
#undef SWAP
}
If we have tablejumps in the end of SRC1 and SRC2
they have been already compared for equivalence in outgoing_edges_match ()
so replace the references to TABLE1 by references to TABLE2. */
- {
+ {
rtx label1, label2;
- rtx table1, table2;
+ rtx_jump_table_data *table1, *table2;
if (tablejump_p (BB_END (osrc1), &label1, &table1)
&& tablejump_p (BB_END (osrc2), &label2, &table2)
&& label1 != label2)
{
- replace_label_data rr;
- rtx insn;
+ rtx_insn *insn;
/* Replace references to LABEL1 with LABEL2. */
- rr.r1 = label1;
- rr.r2 = label2;
- rr.update_label_nuses = true;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
/* Do not replace the label in SRC1->END because when deleting
a block whose end is a tablejump, the tablejump referenced
from the instruction is deleted too. */
if (insn != BB_END (osrc1))
- for_each_rtx (&insn, replace_label, &rr);
+ replace_label_in_insn (insn, label1, label2, true);
}
}
- }
+ }
/* Avoid splitting if possible. We must always split when SRC2 has
EH predecessor edges, or we may end up with basic blocks with both
basic_block final_dest_bb = NULL;
int max_match = INT_MAX;
edge e0;
- rtx *headptr, *currptr, *nextptr;
+ rtx_insn **headptr, **currptr, **nextptr;
bool changed, moveall;
unsigned ix;
- rtx e0_last_head, cond, move_before;
+ rtx_insn *e0_last_head;
+ rtx cond;
+ rtx_insn *move_before;
unsigned nedges = EDGE_COUNT (bb->succs);
- rtx jump = BB_END (bb);
+ rtx_insn *jump = BB_END (bb);
regset live, live_union;
/* Nothing to do if there is not at least two outgoing edges. */
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
-#endif
move_before = jump;
}
}
e0 = EDGE_SUCC (bb, 0);
- e0_last_head = NULL_RTX;
+ e0_last_head = NULL;
changed = false;
for (ix = 1; ix < nedges; ix++)
{
edge e = EDGE_SUCC (bb, ix);
- rtx e0_last, e_last;
+ rtx_insn *e0_last, *e_last;
int nmatch;
nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
live = BITMAP_ALLOC (NULL);
live_union = BITMAP_ALLOC (NULL);
- currptr = XNEWVEC (rtx, nedges);
- headptr = XNEWVEC (rtx, nedges);
- nextptr = XNEWVEC (rtx, nedges);
+ currptr = XNEWVEC (rtx_insn *, nedges);
+ headptr = XNEWVEC (rtx_insn *, nedges);
+ nextptr = XNEWVEC (rtx_insn *, nedges);
for (ix = 0; ix < nedges; ix++)
{
int j;
basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
- rtx head = BB_HEAD (merge_bb);
+ rtx_insn *head = BB_HEAD (merge_bb);
while (!NONDEBUG_INSN_P (head))
head = NEXT_INSN (head);
with the final move. */
if (final_dest_bb != NULL)
{
- rtx move_upto;
+ rtx_insn *move_upto;
moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
jump, e0->dest, live_union,
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
-#endif
move_before = jump;
}
}
do
{
- rtx move_upto;
+ rtx_insn *move_upto;
moveall = can_move_insns_across (currptr[0], e0_last_head,
move_before, jump, e0->dest, live_union,
NULL, &move_upto);
/* Try again, using a different insertion point. */
move_before = jump;
-#ifdef HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
break;
-#endif
continue;
}
break;
for (ix = 0; ix < nedges; ix++)
{
- rtx curr = currptr[ix];
+ rtx_insn *curr = currptr[ix];
do
curr = NEXT_INSN (curr);
while (!NONDEBUG_INSN_P (curr));
if (!moveall)
for (ix = 0; ix < nedges; ix++)
{
- rtx curr = currptr[ix];
+ rtx_insn *curr = currptr[ix];
do
curr = NEXT_INSN (curr);
while (!NONDEBUG_INSN_P (curr));
/* For the unmerged insns, try a different insertion point. */
move_before = jump;
-#ifdef HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
break;
-#endif
for (ix = 0; ix < nedges; ix++)
currptr[ix] = headptr[ix] = nextptr[ix];
static bool
trivially_empty_bb_p (basic_block bb)
{
- rtx insn = BB_END (bb);
+ rtx_insn *insn = BB_END (bb);
while (1)
{
crossjumps_occured = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
update_forwarder_flag (bb);
if (! targetm.cannot_modify_jumps_p ())
}
else
{
- rtx last = get_last_bb_insn (b);
+ rtx_insn *last = get_last_bb_insn (b);
if (last && BARRIER_P (last))
FOR_EACH_EDGE (e, ei, b->preds)
if ((e->flags & EDGE_FALLTHRU))
&& (single_pred_edge (b)->flags & EDGE_FALLTHRU)
&& !(single_pred_edge (b)->flags & EDGE_COMPLEX)
&& LABEL_P (BB_HEAD (b))
+ && !LABEL_PRESERVE_P (BB_HEAD (b))
/* If the previous block ends with a branch to this
block, we can't delete the label. Normally this
is a condjump that is yet to be simplified, but
if (single_succ_p (b)
&& single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& onlyjump_p (BB_END (b))
- && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
+ && !CROSSING_JUMP_P (BB_END (b))
&& try_redirect_by_replacing_jump (single_succ_edge (b),
single_succ (b),
(mode & CLEANUP_CFGLAYOUT) != 0))
to detect and fix during edge forwarding, and in some cases
is only visible after newly unreachable blocks are deleted,
which will be done in fixup_partitions. */
- fixup_partitions ();
-
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ fixup_partitions ();
+ checking_verify_flow_info ();
}
changed_overall |= changed;
while (changed);
}
- FOR_ALL_BB (b)
+ FOR_ALL_BB_FN (b, cfun)
b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
return changed_overall;
return changed;
}
-\f
-/* Look for, and delete, any dead jumptables between START and END. */
-
-static void
-delete_dead_jump_tables_between (rtx start, rtx end)
-{
- rtx insn, next;
-
- for (insn = start; insn != end; insn = next)
- {
- next = NEXT_INSN (insn);
- if (next != NULL_RTX
- && LABEL_P (insn)
- && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
- && JUMP_TABLE_DATA_P (next))
- {
- rtx label = insn, jump = next;
-
- if (dump_file)
- fprintf (dump_file, "Dead jumptable %i removed\n",
- INSN_UID (insn));
-
- next = NEXT_INSN (next);
- delete_insn (jump);
- delete_insn (label);
- }
- }
-}
-
-
/* Delete any jump tables never referenced. We can't delete them at the
- time of removing tablejump insn as the label preceding the jump table
- data may be referenced by the preceding insns computing the destination.
- So we delay deleting and garbage-collect them from time to time, after
- a CFG cleanup. */
-
+ time of removing the tablejump insn, as they are referenced by the
+ preceding insns computing the destination, so we delay deleting them
+ and garbage-collect them once life information is computed. */
void
delete_dead_jumptables (void)
{
basic_block bb;
- /* Label reference count must up-to-date to detect dead jump tables. */
- rebuild_jump_labels (get_insns ());
-
- FOR_EACH_BB (bb)
+ /* A dead jump table does not belong to any basic block. Scan insns
+ between two adjacent basic blocks. */
+ FOR_EACH_BB_FN (bb, cfun)
{
- if (current_ir_type () == IR_RTL_CFGLAYOUT)
- {
- /* Jump tables only appear in the header or footer of BB. */
- delete_dead_jump_tables_between (BB_HEADER (bb), NULL_RTX);
- delete_dead_jump_tables_between (BB_FOOTER (bb), NULL_RTX);
- }
- else
+ rtx_insn *insn, *next;
+
+ for (insn = NEXT_INSN (BB_END (bb));
+ insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
+ insn = next)
{
- /* Jump tables are in the insns chain between basic blocks. */
- rtx start = NEXT_INSN (BB_END (bb));
- rtx end = (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
- ? NULL_RTX : BB_HEAD (bb->next_bb);
- delete_dead_jump_tables_between (start, end);
+ next = NEXT_INSN (insn);
+ /* The label is dead if its only remaining "uses" come from its
+ preserve flag (LABEL_NUSES == LABEL_PRESERVE_P) and it is
+ immediately followed by jump table data. NOTE(review): this
+ assumes label reference counts are up to date -- the removed
+ code called rebuild_jump_labels first; confirm callers now
+ guarantee that. */
+ if (LABEL_P (insn)
+ && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
+ && JUMP_TABLE_DATA_P (next))
+ {
+ rtx_insn *label = insn, *jump = next;
+
+ if (dump_file)
+ fprintf (dump_file, "Dead jumptable %i removed\n",
+ INSN_UID (insn));
+
+ next = NEXT_INSN (next);
+ delete_insn (jump);
+ delete_insn (label);
+ }
}
}
}
if (mode & CLEANUP_CROSSJUMP)
remove_fake_exit_edges ();
- /* Don't always call delete_dead_jumptables in cfglayout mode, because
- jump tables can only appear in the headers and footers of basic blocks
- and we usually are not interested in anything hiding there.
- But if an expensive cleanup is called for, garbage-collect the dead
- jump tables to get label reference counts right. This sometimes
- allows some labels to be removed and more basic blocks to be merged. */
- if (!(mode & CLEANUP_CFGLAYOUT) || (mode & CLEANUP_EXPENSIVE))
+ /* Don't call delete_dead_jumptables in cfglayout mode, because
+ that function assumes that jump tables are in the insns stream.
+ But we also don't _have_ to delete dead jumptables in cfglayout
+ mode because we shouldn't even be looking at things that are
+ not in a basic block. Dead jumptables are cleaned up when
+ going out of cfglayout mode. */
+ if (!(mode & CLEANUP_CFGLAYOUT))
delete_dead_jumptables ();
/* ??? We probably do this way too often. */
return changed;
}
\f
-static unsigned int
-execute_jump (void)
-{
- delete_trivially_dead_insns (get_insns (), max_reg_num ());
- if (dump_file)
- dump_flow_info (dump_file, dump_flags);
- cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
- | (flag_thread_jumps ? CLEANUP_THREADING : 0));
- return 0;
-}
-
namespace {
const pass_data pass_data_jump =
RTL_PASS, /* type */
"jump", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- true, /* has_execute */
TV_JUMP, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_rtl_sharing, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_jump : public rtl_opt_pass
{}
/* opt_pass methods: */
- unsigned int execute () { return execute_jump (); }
+ virtual unsigned int execute (function *);
}; // class pass_jump
+unsigned int
+pass_jump::execute (function *)
+{
+ delete_trivially_dead_insns (get_insns (), max_reg_num ());
+ if (dump_file)
+ dump_flow_info (dump_file, dump_flags);
+ cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
+ | (flag_thread_jumps ? CLEANUP_THREADING : 0));
+ return 0;
+}
+
} // anon namespace
rtl_opt_pass *
return new pass_jump (ctxt);
}
\f
-static unsigned int
-execute_jump2 (void)
-{
- cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
- return 0;
-}
-
namespace {
const pass_data pass_data_jump2 =
RTL_PASS, /* type */
"jump2", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- true, /* has_execute */
TV_JUMP, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_rtl_sharing, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_jump2 : public rtl_opt_pass
{}
/* opt_pass methods: */
- unsigned int execute () { return execute_jump2 (); }
+ virtual unsigned int execute (function *)
+ {
+ cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
+ return 0;
+ }
}; // class pass_jump2