/* Perform instruction reorganizations for delay slot filling.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1992-2014 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
Hacked by Michael Tiemann (tiemann@cygnus.com).
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* Instruction reorganization pass.
delay slot. In that case, we point each insn at the other with REG_CC_USER
and REG_CC_SETTER notes. Note that these restrictions affect very few
machines because most RISC machines with delay slots will not use CC0
- (the RT is the only known exception at this point).
-
- Not yet implemented:
-
- The Acorn Risc Machine can conditionally execute most insns, so
- it is profitable to move single insns into a position to execute
- based on the condition code of the previous insn.
-
- The HP-PA can conditionally nullify insns, providing a similar
- effect to the ARM, differing mostly in which insn is "in charge". */
+ (the RT is the only known exception at this point). */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "expr.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
-#include "output.h"
#include "obstack.h"
#include "insn-attr.h"
#include "resource.h"
#include "except.h"
#include "params.h"
-#include "timevar.h"
#include "target.h"
#include "tree-pass.h"
+#include "emit-rtl.h"
#ifdef DELAY_SLOTS
#ifndef ANNUL_IFFALSE_SLOTS
#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
#endif
+\f
+/* First, some functions that were used before GCC got a control flow graph.
+ These functions are now only used here in reorg.c, and have therefore
+ been moved here to avoid inadvertent misuse elsewhere in the compiler. */
+
+/* Return the last label to mark the same position as LABEL. Return LABEL
+ itself if it is null or any return rtx. */
+
+static rtx_insn *
+skip_consecutive_labels (rtx_insn *label)
+{
+ rtx_insn *insn;
+
+ if (label && ANY_RETURN_P (label))
+ return label;
+
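+ /* Walk forward over notes and any other non-insns; every CODE_LABEL
+ seen marks the same position, so remember the last one. */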
+ for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn))
+ if (LABEL_P (insn))
+ label = insn;
+
+ return label;
+}
+
+#ifdef HAVE_cc0
+/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
+ and REG_CC_USER notes so we can find it. */
+
+static void
+link_cc0_insns (rtx insn)
+{
+ rtx user = next_nonnote_insn (insn);
+
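+ /* If the user has been packed into a delay-slot SEQUENCE, the real
+ user of CC0 is the insn at the head of that sequence. */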
+ if (NONJUMP_INSN_P (user) && GET_CODE (PATTERN (user)) == SEQUENCE)
+ user = XVECEXP (PATTERN (user), 0, 0);
+
+ add_reg_note (user, REG_CC_SETTER, insn);
+ add_reg_note (insn, REG_CC_USER, user);
+}
+#endif
+\f
/* Insns which have delay slots that have not yet been filled. */
static struct obstack unfilled_slots_obstack;
should be recomputed at each use. */
#define unfilled_slots_base \
- ((rtx *) obstack_base (&unfilled_slots_obstack))
+ ((rtx_insn **) obstack_base (&unfilled_slots_obstack))
#define unfilled_slots_next \
- ((rtx *) obstack_next_free (&unfilled_slots_obstack))
+ ((rtx_insn **) obstack_next_free (&unfilled_slots_obstack))
-/* Points to the label before the end of the function. */
-static rtx end_of_function_label;
+/* Points to the label before the end of the function, or before a
+ return insn. */
+static rtx_code_label *function_return_label;
+/* Likewise for a simple_return. */
+static rtx_code_label *function_simple_return_label;
/* Mapping between INSN_UID's and position in the code since INSN_UID's do
not always monotonically increase. */
static int stop_search_p (rtx, int);
static int resource_conflicts_p (struct resources *, struct resources *);
-static int insn_references_resource_p (rtx, struct resources *, int);
-static int insn_sets_resource_p (rtx, struct resources *, int);
-static rtx find_end_label (void);
-static rtx emit_delay_sequence (rtx, rtx, int);
-static rtx add_to_delay_list (rtx, rtx);
-static rtx delete_from_delay_slot (rtx);
+static int insn_references_resource_p (rtx, struct resources *, bool);
+static int insn_sets_resource_p (rtx, struct resources *, bool);
+static rtx_code_label *find_end_label (rtx);
+static rtx_insn *emit_delay_sequence (rtx_insn *, rtx_insn_list *, int);
+static rtx_insn_list *add_to_delay_list (rtx_insn *, rtx_insn_list *);
+static rtx_insn *delete_from_delay_slot (rtx_insn *);
static void delete_scheduled_jump (rtx);
static void note_delay_statistics (int, int);
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
-static rtx optimize_skip (rtx);
+static rtx_insn_list *optimize_skip (rtx_insn *);
#endif
static int get_jump_flags (rtx, rtx);
-static int rare_destination (rtx);
-static int mostly_true_jump (rtx, rtx);
+static int mostly_true_jump (rtx);
static rtx get_branch_condition (rtx, rtx);
static int condition_dominates_p (rtx, rtx);
static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
static int check_annul_list_true_false (int, rtx);
-static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *, rtx *);
-static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *);
+static rtx_insn_list *steal_delay_list_from_target (rtx, rtx,
+ rtx_sequence *,
+ rtx_insn_list *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *,
+ rtx_insn **);
+static rtx_insn_list *steal_delay_list_from_fallthrough (rtx, rtx,
+ rtx_sequence *,
+ rtx_insn_list *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *);
static void try_merge_delay_insns (rtx, rtx);
static rtx redundant_insn (rtx, rtx, rtx);
static int own_thread_p (rtx, rtx, int);
static void fix_reg_dead_note (rtx, rtx);
static void update_reg_unused_notes (rtx, rtx);
static void fill_simple_delay_slots (int);
-static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx, int, int, int, int,
- int *, rtx);
+static rtx_insn_list *fill_slots_from_thread (rtx_insn *, rtx,
+ rtx_insn *, rtx_insn *,
+ int, int, int, int,
+ int *, rtx_insn_list *);
static void fill_eager_delay_slots (void);
-static void relax_delay_slots (rtx);
-#ifdef HAVE_return
+static void relax_delay_slots (rtx_insn *);
static void make_return_insns (rtx);
-#endif
+\f
+/* A wrapper around next_active_insn which takes care to return ret_rtx
+ unchanged. */
+
+static rtx_insn *
+first_active_target_insn (rtx_insn *insn)
+{
+ if (ANY_RETURN_P (insn))
+ return insn;
+ return next_active_insn (insn);
+}
+\f
+/* Return true iff INSN is a simplejump, or any kind of return insn. */
+
+static bool
+simplejump_or_return_p (rtx insn)
+{
+ return (JUMP_P (insn)
+ && (simplejump_p (insn) || ANY_RETURN_P (PATTERN (insn))));
+}
\f
/* Return TRUE if this insn should stop the search for insns to fill delay
slots. LABELS_P indicates that labels should terminate the search.
resource_conflicts_p (struct resources *res1, struct resources *res2)
{
if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
- || (res1->unch_memory && res2->unch_memory)
|| res1->volatil || res2->volatil)
return 1;
-#ifdef HARD_REG_SET
- return (res1->regs & res2->regs) != HARD_CONST (0);
-#else
- {
- int i;
-
- for (i = 0; i < HARD_REG_SET_LONGS; i++)
- if ((res1->regs[i] & res2->regs[i]) != 0)
- return 1;
- return 0;
- }
-#endif
+ return hard_reg_set_intersect_p (res1->regs, res2->regs);
}
/* Return TRUE if any resource marked in RES, a `struct resources', is
static int
insn_references_resource_p (rtx insn, struct resources *res,
- int include_delayed_effects)
+ bool include_delayed_effects)
{
struct resources insn_res;
static int
insn_sets_resource_p (rtx insn, struct resources *res,
- int include_delayed_effects)
+ bool include_delayed_effects)
{
struct resources insn_sets;
CLEAR_RESOURCE (&insn_sets);
- mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
+ mark_set_resources (insn, &insn_sets, 0,
+ (include_delayed_effects
+ ? MARK_SRC_DEST_CALL
+ : MARK_SRC_DEST));
return resource_conflicts_p (&insn_sets, res);
}
\f
??? There may be a problem with the current implementation. Suppose
we start with a bare RETURN insn and call find_end_label. It may set
- end_of_function_label just before the RETURN. Suppose the machinery
+ function_return_label just before the RETURN. Suppose the machinery
is able to fill the delay slot of the RETURN insn afterwards. Then
- end_of_function_label is no longer valid according to the property
+ function_return_label is no longer valid according to the property
described above and find_end_label will still return it unmodified.
Note that this is probably mitigated by the following observation:
- once end_of_function_label is made, it is very likely the target of
+ once function_return_label is made, it is very likely the target of
a jump, so filling the delay slot of the RETURN will be much more
- difficult. */
+ difficult.
+ KIND is either simple_return_rtx or ret_rtx, indicating which type of
+ return we're looking for. */
-static rtx
-find_end_label (void)
+static rtx_code_label *
+find_end_label (rtx kind)
{
- rtx insn;
+ rtx_insn *insn;
+ rtx_code_label **plabel;
+
+ if (kind == ret_rtx)
+ plabel = &function_return_label;
+ else
+ {
+ gcc_assert (kind == simple_return_rtx);
+ plabel = &function_simple_return_label;
+ }
/* If we found one previously, return it. */
- if (end_of_function_label)
- return end_of_function_label;
+ if (*plabel)
+ return *plabel;
/* Otherwise, see if there is a label at the end of the function. If there
is, it must be that RETURN insns aren't needed, so that is our return
/* When a target threads its epilogue we might already have a
suitable return insn. If so put a label before it for the
- end_of_function_label. */
+ function_return_label. */
if (BARRIER_P (insn)
&& JUMP_P (PREV_INSN (insn))
- && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
+ && PATTERN (PREV_INSN (insn)) == kind)
{
- rtx temp = PREV_INSN (PREV_INSN (insn));
- end_of_function_label = gen_label_rtx ();
- LABEL_NUSES (end_of_function_label) = 0;
+ rtx_insn *temp = PREV_INSN (PREV_INSN (insn));
+ rtx_code_label *label = gen_label_rtx ();
+ LABEL_NUSES (label) = 0;
- /* Put the label before an USE insns that may precede the RETURN insn. */
+ /* Put the label before any USE insns that may precede the RETURN
+ insn. */
while (GET_CODE (temp) == USE)
temp = PREV_INSN (temp);
- emit_label_after (end_of_function_label, temp);
+ emit_label_after (label, temp);
+ *plabel = label;
}
else if (LABEL_P (insn))
- end_of_function_label = insn;
+ *plabel = as_a <rtx_code_label *> (insn);
else
{
- end_of_function_label = gen_label_rtx ();
- LABEL_NUSES (end_of_function_label) = 0;
+ rtx_code_label *label = gen_label_rtx ();
+ LABEL_NUSES (label) = 0;
/* If the basic block reorder pass moves the return insn to
some other place try to locate it again and put our
- end_of_function_label there. */
- while (insn && ! (JUMP_P (insn)
- && (GET_CODE (PATTERN (insn)) == RETURN)))
+ function_return_label there. */
+ while (insn && ! (JUMP_P (insn) && (PATTERN (insn) == kind)))
insn = PREV_INSN (insn);
if (insn)
{
insn = PREV_INSN (insn);
- /* Put the label before an USE insns that may proceed the
+ /* Put the label before any USE insns that may precede the
RETURN insn. */
while (GET_CODE (insn) == USE)
insn = PREV_INSN (insn);
- emit_label_after (end_of_function_label, insn);
+ emit_label_after (label, insn);
}
else
{
&& ! HAVE_return
#endif
)
- {
- /* The RETURN insn has its delay slot filled so we cannot
- emit the label just before it. Since we already have
- an epilogue and cannot emit a new RETURN, we cannot
- emit the label at all. */
- end_of_function_label = NULL_RTX;
- return end_of_function_label;
- }
+ /* The RETURN insn has its delay slot filled so we cannot
+ emit the label just before it. Since we already have
+ an epilogue and cannot emit a new RETURN, we cannot
+ emit the label at all. */
+ return NULL;
#endif /* HAVE_epilogue */
/* Otherwise, make a new label and emit a RETURN and BARRIER,
if needed. */
- emit_label (end_of_function_label);
+ emit_label (label);
#ifdef HAVE_return
- /* We don't bother trying to create a return insn if the
- epilogue has filled delay-slots; we would have to try and
- move the delay-slot fillers to the delay-slots for the new
- return insn or in front of the new return insn. */
- if (current_function_epilogue_delay_list == NULL
- && HAVE_return)
+ if (HAVE_return)
{
/* The return we make may have delay slots too. */
rtx insn = gen_return ();
insn = emit_jump_insn (insn);
+ set_return_jump_label (insn);
emit_barrier ();
if (num_delay_slots (insn) > 0)
obstack_ptr_grow (&unfilled_slots_obstack, insn);
}
#endif
}
+ *plabel = label;
}
/* Show one additional use for this label so it won't go away until
we are done. */
- ++LABEL_NUSES (end_of_function_label);
+ ++LABEL_NUSES (*plabel);
- return end_of_function_label;
+ return *plabel;
}
\f
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
the pattern of INSN with the SEQUENCE.
- Chain the insns so that NEXT_INSN of each insn in the sequence points to
- the next and NEXT_INSN of the last insn in the sequence points to
- the first insn after the sequence. Similarly for PREV_INSN. This makes
- it easier to scan all insns.
+ Returns the insn containing the SEQUENCE that replaces INSN. */
- Returns the SEQUENCE that replaces INSN. */
-
-static rtx
-emit_delay_sequence (rtx insn, rtx list, int length)
+static rtx_insn *
+emit_delay_sequence (rtx_insn *insn, rtx_insn_list *list, int length)
{
- int i = 1;
- rtx li;
- int had_barrier = 0;
-
/* Allocate the rtvec to hold the insns and the SEQUENCE. */
rtvec seqv = rtvec_alloc (length + 1);
rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
- rtx seq_insn = make_insn_raw (seq);
- rtx first = get_insns ();
- rtx last = get_last_insn ();
-
- /* Make a copy of the insn having delay slots. */
- rtx delay_insn = copy_rtx (insn);
-
- /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
- confuse further processing. Update LAST in case it was the last insn.
- We will put the BARRIER back in later. */
- if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn)))
- {
- delete_related_insns (NEXT_INSN (insn));
- last = get_last_insn ();
- had_barrier = 1;
- }
-
- /* Splice our SEQUENCE into the insn stream where INSN used to be. */
- NEXT_INSN (seq_insn) = NEXT_INSN (insn);
- PREV_INSN (seq_insn) = PREV_INSN (insn);
-
- if (insn != last)
- PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
+ rtx_insn *seq_insn = make_insn_raw (seq);
- if (insn != first)
- NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
+ /* If INSN has a location, use it for SEQ_INSN. If INSN does not
+ have a location, but one of the delayed insns does, we pick up a
+ location from there later. */
+ INSN_LOCATION (seq_insn) = INSN_LOCATION (insn);
- /* Note the calls to set_new_first_and_last_insn must occur after
- SEQ_INSN has been completely spliced into the insn stream.
-
- Otherwise CUR_INSN_UID will get set to an incorrect value because
- set_new_first_and_last_insn will not find SEQ_INSN in the chain. */
- if (insn == last)
- set_new_first_and_last_insn (first, seq_insn);
-
- if (insn == first)
- set_new_first_and_last_insn (seq_insn, last);
+ /* Unlink INSN from the insn chain, so that we can put it into
+ the SEQUENCE. Remember where we want to emit SEQUENCE in AFTER. */
+ rtx after = PREV_INSN (insn);
+ remove_insn (insn);
+ SET_NEXT_INSN (insn) = SET_PREV_INSN (insn) = NULL;
/* Build our SEQUENCE and rebuild the insn chain. */
- XVECEXP (seq, 0, 0) = delay_insn;
- INSN_DELETED_P (delay_insn) = 0;
- PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
-
- for (li = list; li; li = XEXP (li, 1), i++)
+ int i = 1;
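+ /* Emit INSN and the delay insns into a temporary sequence; emit_insn
+ links the PREV/NEXT pointers inside the SEQUENCE body for us. */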
+ start_sequence ();
+ XVECEXP (seq, 0, 0) = emit_insn (insn);
+ for (rtx_insn_list *li = list; li; li = li->next (), i++)
{
- rtx tem = XEXP (li, 0);
+ rtx_insn *tem = li->insn ();
rtx note, next;
/* Show that this copy of the insn isn't deleted. */
INSN_DELETED_P (tem) = 0;
- XVECEXP (seq, 0, i) = tem;
- PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
- NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
+ /* Unlink insn from its original place, and re-emit it into
+ the sequence. */
+ SET_NEXT_INSN (tem) = SET_PREV_INSN (tem) = NULL;
+ XVECEXP (seq, 0, i) = emit_insn (tem);
/* The SPARC assembler, for instance, emits a warning when debug info is
output into the delay slot. */
- if (INSN_LOCATOR (tem) && !INSN_LOCATOR (seq_insn))
- INSN_LOCATOR (seq_insn) = INSN_LOCATOR (tem);
- INSN_LOCATOR (tem) = 0;
+ if (INSN_LOCATION (tem) && !INSN_LOCATION (seq_insn))
+ INSN_LOCATION (seq_insn) = INSN_LOCATION (tem);
+ INSN_LOCATION (tem) = 0;
for (note = REG_NOTES (tem); note; note = next)
{
remove_note (tem, note);
break;
- case REG_LABEL:
+ case REG_LABEL_OPERAND:
+ case REG_LABEL_TARGET:
/* Keep the label reference count up to date. */
if (LABEL_P (XEXP (note, 0)))
LABEL_NUSES (XEXP (note, 0)) ++;
}
}
}
-
- NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
-
- /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
- last insn in that SEQUENCE to point to us. Similarly for the first
- insn in the following insn if it is a SEQUENCE. */
-
- if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn))
- && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
- NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
- XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
- = seq_insn;
-
- if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn))
- && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
- PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
-
- /* If there used to be a BARRIER, put it back. */
- if (had_barrier)
- emit_barrier_after (seq_insn);
-
+ end_sequence ();
gcc_assert (i == length + 1);
+ /* Splice our SEQUENCE into the insn stream where INSN used to be. */
+ add_insn_after (seq_insn, after, NULL);
+
return seq_insn;
}
/* Add INSN to DELAY_LIST and return the head of the new list. The list must
be in the order in which the insns are to be executed. */
-static rtx
-add_to_delay_list (rtx insn, rtx delay_list)
+static rtx_insn_list *
+add_to_delay_list (rtx_insn *insn, rtx_insn_list *delay_list)
{
/* If we have an empty list, just make a new list element. If
INSN has its block number recorded, clear it since we may
/* Otherwise this must be an INSN_LIST. Add INSN to the end of the
list. */
- XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
+ XEXP (delay_list, 1) = add_to_delay_list (insn, delay_list->next ());
return delay_list;
}
/* Delete INSN from the delay slot of the insn that it is in, which may
produce an insn with no delay slots. Return the new insn. */
-static rtx
-delete_from_delay_slot (rtx insn)
+static rtx_insn *
+delete_from_delay_slot (rtx_insn *insn)
{
- rtx trial, seq_insn, seq, prev;
- rtx delay_list = 0;
+ rtx_insn *trial, *seq_insn, *prev;
+ rtx_sequence *seq;
+ rtx_insn_list *delay_list = 0;
int i;
int had_barrier = 0;
;
seq_insn = PREV_INSN (NEXT_INSN (trial));
- seq = PATTERN (seq_insn);
+ seq = as_a <rtx_sequence *> (PATTERN (seq_insn));
if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
had_barrier = 1;
/* Create a delay list consisting of all the insns other than the one
we are deleting (unless we were the only one). */
- if (XVECLEN (seq, 0) > 2)
- for (i = 1; i < XVECLEN (seq, 0); i++)
- if (XVECEXP (seq, 0, i) != insn)
- delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
+ if (seq->len () > 2)
+ for (i = 1; i < seq->len (); i++)
+ if (seq->insn (i) != insn)
+ delay_list = add_to_delay_list (seq->insn (i), delay_list);
/* Delete the old SEQUENCE, re-emit the insn that used to have the delay
list, and rebuild the delay list if non-empty. */
prev = PREV_INSN (seq_insn);
- trial = XVECEXP (seq, 0, 0);
+ trial = seq->insn (0);
delete_related_insns (seq_insn);
- add_insn_after (trial, prev);
+ add_insn_after (trial, prev, NULL);
/* If there was a barrier after the old SEQUENCE, re-emit it. */
if (had_barrier)
annul flag. */
if (delay_list)
trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
- else if (INSN_P (trial))
+ else if (JUMP_P (trial))
INSN_ANNULLED_BRANCH_P (trial) = 0;
INSN_FROM_TARGET_P (insn) = 0;
{
if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
&& sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
- delete_from_delay_slot (XEXP (note, 0));
+ delete_from_delay_slot (as_a <rtx_insn *> (XEXP (note, 0)));
}
else
{
/* The insn setting CC0 is our previous insn, but it may be in
a delay slot. It will be the last insn in the delay slot, if
it is. */
- rtx trial = previous_insn (insn);
+ rtx_insn *trial = previous_insn (insn);
if (NOTE_P (trial))
trial = prev_nonnote_insn (trial);
if (sets_cc0_p (PATTERN (trial)) != 1
This should be expanded to skip over N insns, where N is the number
of delay slots required. */
-static rtx
-optimize_skip (rtx insn)
+static rtx_insn_list *
+optimize_skip (rtx_insn *insn)
{
- rtx trial = next_nonnote_insn (insn);
- rtx next_trial = next_active_insn (trial);
- rtx delay_list = 0;
+ rtx_insn *trial = next_nonnote_insn (insn);
+ rtx_insn *next_trial = next_active_insn (trial);
+ rtx_insn_list *delay_list = 0;
int flags;
flags = get_jump_flags (insn, JUMP_LABEL (insn));
we have one insn followed by a branch to the same label we branch to.
In both of these cases, inverting the jump and annulling the delay
slot give the same effect in fewer insns. */
- if ((next_trial == next_active_insn (JUMP_LABEL (insn))
- && ! (next_trial == 0 && current_function_epilogue_delay_list != 0))
+ if (next_trial == next_active_insn (JUMP_LABEL (insn))
|| (next_trial != 0
- && JUMP_P (next_trial)
- && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
- && (simplejump_p (next_trial)
- || GET_CODE (PATTERN (next_trial)) == RETURN)))
+ && simplejump_or_return_p (next_trial)
+ && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)))
{
if (eligible_for_annul_false (insn, 0, trial, flags))
{
return 0;
}
- delay_list = add_to_delay_list (trial, NULL_RTX);
+ delay_list = add_to_delay_list (trial, NULL);
next_trial = next_active_insn (trial);
update_block (trial, trial);
delete_related_insns (trial);
branch, thread our jump to the target of that branch. Don't
change this into a RETURN here, because it may not accept what
we have in the delay slot. We'll fix this up later. */
- if (next_trial && JUMP_P (next_trial)
- && (simplejump_p (next_trial)
- || GET_CODE (PATTERN (next_trial)) == RETURN))
+ if (next_trial && simplejump_or_return_p (next_trial))
{
rtx target_label = JUMP_LABEL (next_trial);
- if (target_label == 0)
- target_label = find_end_label ();
+ if (ANY_RETURN_P (target_label))
+ target_label = find_end_label (target_label);
if (target_label)
{
be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch
direction information, and only if they are conditional jumps.
- If LABEL is zero, then there is no way to determine the branch
+ If LABEL is a return, then there is no way to determine the branch
direction. */
if (JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && !ANY_RETURN_P (label)
&& INSN_UID (insn) <= max_uid
- && label != 0
&& INSN_UID (label) <= max_uid)
flags
= (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
else
flags = 0;
- /* If insn is a conditional branch call mostly_true_jump to get
- determine the branch prediction.
-
- Non conditional branches are predicted as very likely taken. */
- if (JUMP_P (insn)
- && (condjump_p (insn) || condjump_in_parallel_p (insn)))
- {
- int prediction;
-
- prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
- switch (prediction)
- {
- case 2:
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
- break;
- case 1:
- flags |= ATTR_FLAG_likely;
- break;
- case 0:
- flags |= ATTR_FLAG_unlikely;
- break;
- case -1:
- flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
-
return flags;
}
-/* Return 1 if INSN is a destination that will be branched to rarely (the
- return point of a function); return 2 if DEST will be branched to very
- rarely (a call to a function that doesn't return). Otherwise,
- return 0. */
-
-static int
-rare_destination (rtx insn)
-{
- int jump_count = 0;
- rtx next;
-
- for (; insn; insn = next)
- {
- if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = XVECEXP (PATTERN (insn), 0, 0);
-
- next = NEXT_INSN (insn);
-
- switch (GET_CODE (insn))
- {
- case CODE_LABEL:
- return 0;
- case BARRIER:
- /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We
- don't scan past JUMP_INSNs, so any barrier we find here must
- have been after a CALL_INSN and hence mean the call doesn't
- return. */
- return 2;
- case JUMP_INSN:
- if (GET_CODE (PATTERN (insn)) == RETURN)
- return 1;
- else if (simplejump_p (insn)
- && jump_count++ < 10)
- next = JUMP_LABEL (insn);
- else
- return 0;
-
- default:
- break;
- }
- }
-
- /* If we got here it means we hit the end of the function. So this
- is an unlikely destination. */
-
- return 1;
-}
-
/* Return truth value of the statement that this branch
is mostly taken. If we think that the branch is extremely likely
to be taken, we return 2. If the branch is slightly more likely to be
taken, return 1. If the branch is slightly less likely to be taken,
- return 0 and if the branch is highly unlikely to be taken, return -1.
-
- CONDITION, if nonzero, is the condition that JUMP_INSN is testing. */
+ return 0 and if the branch is highly unlikely to be taken, return -1. */
static int
-mostly_true_jump (rtx jump_insn, rtx condition)
+mostly_true_jump (rtx jump_insn)
{
- rtx target_label = JUMP_LABEL (jump_insn);
- rtx note;
- int rare_dest, rare_fallthrough;
-
/* If branch probabilities are available, then use that number since it
always gives a correct answer. */
- note = find_reg_note (jump_insn, REG_BR_PROB, 0);
+ rtx note = find_reg_note (jump_insn, REG_BR_PROB, 0);
if (note)
{
- int prob = INTVAL (XEXP (note, 0));
+ int prob = XINT (note, 0);
if (prob >= REG_BR_PROB_BASE * 9 / 10)
return 2;
return -1;
}
- /* Look at the relative rarities of the fallthrough and destination. If
- they differ, we can predict the branch that way. */
- rare_dest = rare_destination (target_label);
- rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));
-
- switch (rare_fallthrough - rare_dest)
- {
- case -2:
- return -1;
- case -1:
- return 0;
- case 0:
- break;
- case 1:
- return 1;
- case 2:
- return 2;
- }
-
- /* If we couldn't figure out what this jump was, assume it won't be
- taken. This should be rare. */
- if (condition == 0)
+ /* If there is no note, assume branches are not taken.
+ This should be rare. */
return 0;
-
- /* Predict backward branches usually take, forward branches usually not. If
- we don't know whether this is forward or backward, assume the branch
- will be taken, since most are. */
- return (target_label == 0 || INSN_UID (jump_insn) > max_uid
- || INSN_UID (target_label) > max_uid
- || (uid_to_ruid[INSN_UID (jump_insn)]
- > uid_to_ruid[INSN_UID (target_label)]));
}
/* Return the condition under which INSN will branch to TARGET. If TARGET
if (condjump_in_parallel_p (insn))
pat = XVECEXP (pat, 0, 0);
- if (GET_CODE (pat) == RETURN)
- return target == 0 ? const_true_rtx : 0;
+ if (ANY_RETURN_P (pat) && pat == target)
+ return const_true_rtx;
- else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
+ if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
return 0;
src = SET_SRC (pat);
return const_true_rtx;
else if (GET_CODE (src) == IF_THEN_ELSE
- && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
- || (GET_CODE (XEXP (src, 1)) == LABEL_REF
- && XEXP (XEXP (src, 1), 0) == target))
- && XEXP (src, 2) == pc_rtx)
+ && XEXP (src, 2) == pc_rtx
+ && ((GET_CODE (XEXP (src, 1)) == LABEL_REF
+ && XEXP (XEXP (src, 1), 0) == target)
+ || (ANY_RETURN_P (XEXP (src, 1)) && XEXP (src, 1) == target)))
return XEXP (src, 0);
else if (GET_CODE (src) == IF_THEN_ELSE
- && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
- || (GET_CODE (XEXP (src, 2)) == LABEL_REF
- && XEXP (XEXP (src, 2), 0) == target))
- && XEXP (src, 1) == pc_rtx)
+ && XEXP (src, 1) == pc_rtx
+ && ((GET_CODE (XEXP (src, 2)) == LABEL_REF
+ && XEXP (XEXP (src, 2), 0) == target)
+ || (ANY_RETURN_P (XEXP (src, 2)) && XEXP (src, 2) == target)))
{
enum rtx_code rev;
rev = reversed_comparison_code (XEXP (src, 0), insn);
PNEW_THREAD points to a location that is to receive the place at which
execution should continue. */
-static rtx
-steal_delay_list_from_target (rtx insn, rtx condition, rtx seq,
- rtx delay_list, struct resources *sets,
+static rtx_insn_list *
+steal_delay_list_from_target (rtx insn, rtx condition, rtx_sequence *seq,
+ rtx_insn_list *delay_list, struct resources *sets,
struct resources *needed,
struct resources *other_needed,
int slots_to_fill, int *pslots_filled,
- int *pannul_p, rtx *pnew_thread)
+ int *pannul_p, rtx_insn **pnew_thread)
{
- rtx temp;
int slots_remaining = slots_to_fill - *pslots_filled;
int total_slots_filled = *pslots_filled;
- rtx new_delay_list = 0;
+ rtx_insn_list *new_delay_list = 0;
int must_annul = *pannul_p;
int used_annul = 0;
int i;
struct resources cc_set;
+ bool *redundant;
/* We can't do anything if there are more delay slots in SEQ than we
can handle, or if we don't know that it will be a taken branch.
will affect the direction of the jump in the sequence. */
CLEAR_RESOURCE (&cc_set);
- for (temp = delay_list; temp; temp = XEXP (temp, 1))
+ for (rtx_insn_list *temp = delay_list; temp; temp = temp->next ())
{
- rtx trial = XEXP (temp, 0);
+ rtx_insn *trial = temp->insn ();
mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
- if (insn_references_resource_p (XVECEXP (seq , 0, 0), &cc_set, 0))
+ if (insn_references_resource_p (seq->insn (0), &cc_set, false))
return delay_list;
}
if (XVECLEN (seq, 0) - 1 > slots_remaining
- || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
- || ! single_set (XVECEXP (seq, 0, 0)))
+ || ! condition_dominates_p (condition, seq->insn (0))
+ || ! single_set (seq->insn (0)))
return delay_list;
#ifdef MD_CAN_REDIRECT_BRANCH
/* On some targets, branches with delay slots can have a limited
displacement. Give the back end a chance to tell us we can't do
this. */
- if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0)))
+ if (! MD_CAN_REDIRECT_BRANCH (insn, seq->insn (0)))
return delay_list;
#endif
- for (i = 1; i < XVECLEN (seq, 0); i++)
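+ /* Remember which insns in SEQ prove redundant, so that their effects
+ can still be recorded via update_block below. */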
+ redundant = XALLOCAVEC (bool, XVECLEN (seq, 0));
+ for (i = 1; i < seq->len (); i++)
{
- rtx trial = XVECEXP (seq, 0, i);
+ rtx_insn *trial = seq->insn (i);
int flags;
- if (insn_references_resource_p (trial, sets, 0)
- || insn_sets_resource_p (trial, needed, 0)
- || insn_sets_resource_p (trial, sets, 0)
+ if (insn_references_resource_p (trial, sets, false)
+ || insn_sets_resource_p (trial, needed, false)
+ || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
/* If TRIAL sets CC0, we can't copy it, so we can't steal this
delay list. */
#endif
/* If TRIAL is from the fallthrough code of an annulled branch insn
in SEQ, we cannot use it. */
- || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
+ || (INSN_ANNULLED_BRANCH_P (seq->insn (0))
&& ! INSN_FROM_TARGET_P (trial)))
return delay_list;
/* If this insn was already done (usually in a previous delay slot),
pretend we put it in our delay slot. */
- if (redundant_insn (trial, insn, new_delay_list))
+ redundant[i] = redundant_insn (trial, insn, new_delay_list);
+ if (redundant[i])
continue;
/* We will end up re-vectoring this branch, so compute flags
based on jumping to the new label. */
- flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
+ flags = get_jump_flags (insn, JUMP_LABEL (seq->insn (0)));
if (! must_annul
&& ((condition == const_true_rtx
- || (! insn_sets_resource_p (trial, other_needed, 0)
+ || (! insn_sets_resource_p (trial, other_needed, false)
&& ! may_trap_or_fault_p (PATTERN (trial)))))
? eligible_for_delay (insn, total_slots_filled, trial, flags)
: (must_annul || (delay_list == NULL && new_delay_list == NULL))
{
if (must_annul)
used_annul = 1;
- temp = copy_rtx (trial);
+ rtx_insn *temp = copy_delay_slot_insn (trial);
INSN_FROM_TARGET_P (temp) = 1;
new_delay_list = add_to_delay_list (temp, new_delay_list);
total_slots_filled++;
return delay_list;
}
+ /* Record the effect of the instructions that were redundant and which
+ we therefore decided not to copy. */
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ if (redundant[i])
+ update_block (XVECEXP (seq, 0, i), insn);
+
/* Show the place to which we will be branching. */
- *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
+ *pnew_thread = first_active_target_insn (JUMP_LABEL_AS_INSN (seq->insn (0)));
/* Add any new insns to the delay list and update the count of the
number of slots filled. */
if (delay_list == 0)
return new_delay_list;
- for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
- delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
+ for (rtx_insn_list *temp = new_delay_list; temp; temp = temp->next ())
+ delay_list = add_to_delay_list (temp->insn (), delay_list);
return delay_list;
}
of SEQ is an unconditional branch. In that case we steal its delay slot
for INSN since unconditional branches are much easier to fill. */
-static rtx
-steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq,
- rtx delay_list, struct resources *sets,
+static rtx_insn_list *
+steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx_sequence *seq,
+ rtx_insn_list *delay_list,
+ struct resources *sets,
struct resources *needed,
struct resources *other_needed,
int slots_to_fill, int *pslots_filled,
/* We can't do anything if SEQ's delay insn isn't an
unconditional branch. */
- if (! simplejump_p (XVECEXP (seq, 0, 0))
- && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
+ if (! simplejump_or_return_p (seq->insn (0)))
return delay_list;
- for (i = 1; i < XVECLEN (seq, 0); i++)
+ for (i = 1; i < seq->len (); i++)
{
- rtx trial = XVECEXP (seq, 0, i);
+ rtx_insn *trial = seq->insn (i);
/* If TRIAL sets CC0, stealing it will move it too far from the use
of CC0. */
- if (insn_references_resource_p (trial, sets, 0)
- || insn_sets_resource_p (trial, needed, 0)
- || insn_sets_resource_p (trial, sets, 0)
+ if (insn_references_resource_p (trial, sets, false)
+ || insn_sets_resource_p (trial, needed, false)
+ || insn_sets_resource_p (trial, sets, false)
#ifdef HAVE_cc0
|| sets_cc0_p (PATTERN (trial))
#endif
/* If this insn was already done, we don't need it. */
if (redundant_insn (trial, insn, delay_list))
{
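+ /* Leave a marker for mark_target_live_regs so the effects of the
+ insn being dropped from the delay slot are still seen. */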
+ update_block (trial, insn);
delete_from_delay_slot (trial);
continue;
}
if (! must_annul
&& ((condition == const_true_rtx
- || (! insn_sets_resource_p (trial, other_needed, 0)
+ || (! insn_sets_resource_p (trial, other_needed, false)
&& ! may_trap_or_fault_p (PATTERN (trial)))))
? eligible_for_delay (insn, *pslots_filled, trial, flags)
: (must_annul || delay_list == NULL) && (must_annul = 1,
{
rtx trial, next_trial;
rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
- int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
+ int annul_p = JUMP_P (delay_insn) && INSN_ANNULLED_BRANCH_P (delay_insn);
int slot_number = 1;
int num_slots = XVECLEN (PATTERN (insn), 0);
rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
struct resources set, needed;
- rtx merged_insns = 0;
+ rtx_insn_list *merged_insns = 0;
int i;
int flags;
if (! annul_p)
for (i = 1 ; i < num_slots; i++)
if (XVECEXP (PATTERN (insn), 0, i))
- mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed, 1);
+ mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed,
+ true);
for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
{
/* We can't share an insn that sets cc0. */
&& ! sets_cc0_p (pat)
#endif
- && ! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
+ && ! insn_references_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &needed, true)
&& (trial = try_split (pat, trial, 0)) != 0
/* Update next_trial, in case try_split succeeded. */
&& (next_trial = next_nonnote_insn (trial))
}
mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, 1);
+ mark_referenced_resources (trial, &needed, true);
}
/* See if we stopped on a filled insn. If we did, try to see if its
if (slot_number != num_slots
&& trial && NONJUMP_INSN_P (trial)
&& GET_CODE (PATTERN (trial)) == SEQUENCE
- && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
+ && !(JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
+ && INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))))
{
- rtx pat = PATTERN (trial);
+ rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (trial));
rtx filled_insn = XVECEXP (pat, 0, 0);
/* Account for resources set/needed by the filled insn. */
mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (filled_insn, &needed, 1);
+ mark_referenced_resources (filled_insn, &needed, true);
- for (i = 1; i < XVECLEN (pat, 0); i++)
+ for (i = 1; i < pat->len (); i++)
{
- rtx dtrial = XVECEXP (pat, 0, i);
+ rtx_insn *dtrial = pat->insn (i);
- if (! insn_references_resource_p (dtrial, &set, 1)
- && ! insn_sets_resource_p (dtrial, &set, 1)
- && ! insn_sets_resource_p (dtrial, &needed, 1)
+ if (! insn_references_resource_p (dtrial, &set, true)
+ && ! insn_sets_resource_p (dtrial, &set, true)
+ && ! insn_sets_resource_p (dtrial, &needed, true)
#ifdef HAVE_cc0
&& ! sets_cc0_p (PATTERN (dtrial))
#endif
{
if (! annul_p)
{
- rtx new;
+ rtx_insn *new_rtx;
update_block (dtrial, thread);
- new = delete_from_delay_slot (dtrial);
+ new_rtx = delete_from_delay_slot (dtrial);
if (INSN_DELETED_P (thread))
- thread = new;
+ thread = new_rtx;
INSN_FROM_TARGET_P (next_to_match) = 0;
}
else
/* Keep track of the set/referenced resources for the delay
slots of any trial insns we encounter. */
mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (dtrial, &needed, 1);
+ mark_referenced_resources (dtrial, &needed, true);
}
}
}
target. */
if (slot_number == num_slots && annul_p)
{
- for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
+ for (; merged_insns; merged_insns = merged_insns->next ())
{
if (GET_MODE (merged_insns) == SImode)
{
- rtx new;
+ rtx_insn *new_rtx;
update_block (XEXP (merged_insns, 0), thread);
- new = delete_from_delay_slot (XEXP (merged_insns, 0));
+ new_rtx = delete_from_delay_slot (merged_insns->insn ());
if (INSN_DELETED_P (thread))
- thread = new;
+ thread = new_rtx;
}
else
{
for (trial = PREV_INSN (target),
insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
trial && insns_to_search > 0;
- trial = PREV_INSN (trial), --insns_to_search)
+ trial = PREV_INSN (trial))
{
- if (LABEL_P (trial))
+ /* (use (insn))s can come immediately after a barrier if the
+ label that used to precede them has been deleted as dead.
+ See delete_related_insns. */
+ if (LABEL_P (trial) || BARRIER_P (trial))
return 0;
- if (! INSN_P (trial))
+ if (!INSN_P (trial))
continue;
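+ /* Only count real insns against the search budget. */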
+ --insns_to_search;
pat = PATTERN (trial);
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
- if (GET_CODE (pat) == SEQUENCE)
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
{
/* Stop for a CALL and its delay slots because it is difficult to
track its resource needs correctly. */
- if (CALL_P (XVECEXP (pat, 0, 0)))
+ if (CALL_P (seq->element (0)))
return 0;
/* Stop for an INSN or JUMP_INSN with delayed effects and its delay
correctly. */
#ifdef INSN_SETS_ARE_DELAYED
- if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_SETS_ARE_DELAYED (seq->element (0)))
return 0;
#endif
#ifdef INSN_REFERENCES_ARE_DELAYED
- if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_REFERENCES_ARE_DELAYED (seq->element (0)))
return 0;
#endif
/* See if any of the insns in the delay slot match, updating
resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
- if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
- && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
- && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
+ for (i = seq->len () - 1; i > 0; i--)
+ if (GET_CODE (seq->element (i)) == GET_CODE (insn)
+ && rtx_equal_p (PATTERN (seq->element (i)), ipat)
+ && ! find_reg_note (seq->element (i), REG_UNUSED, NULL_RTX))
break;
/* If found a match, exit this loop early. */
CLEAR_RESOURCE (&needed);
CLEAR_RESOURCE (&set);
mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (insn, &needed, 1);
+ mark_referenced_resources (insn, &needed, true);
/* If TARGET is a SEQUENCE, get the main insn. */
if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
#endif
/* The insn requiring the delay may not set anything needed or set by
INSN. */
- || insn_sets_resource_p (target_main, &needed, 1)
- || insn_sets_resource_p (target_main, &set, 1))
+ || insn_sets_resource_p (target_main, &needed, true)
+ || insn_sets_resource_p (target_main, &set, true))
return 0;
/* Insns we pass may not set either NEEDED or SET, so merge them for
simpler tests. */
needed.memory |= set.memory;
- needed.unch_memory |= set.unch_memory;
IOR_HARD_REG_SET (needed.regs, set.regs);
/* This insn isn't redundant if it conflicts with an insn that either is
while (delay_list)
{
- if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1))
+ if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, true))
return 0;
delay_list = XEXP (delay_list, 1);
}
if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
- if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1))
+ if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed,
+ true))
return 0;
/* Scan backwards until we reach a label or an insn that uses something
for (trial = PREV_INSN (target),
insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
trial && !LABEL_P (trial) && insns_to_search > 0;
- trial = PREV_INSN (trial), --insns_to_search)
+ trial = PREV_INSN (trial))
{
if (!INSN_P (trial))
continue;
+ --insns_to_search;
pat = PATTERN (trial);
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
- if (GET_CODE (pat) == SEQUENCE)
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
{
+ bool annul_p = false;
+ rtx control = seq->element (0);
+
/* If this is a CALL_INSN and its delay slots, it is hard to track
the resource needs properly, so give up. */
- if (CALL_P (XVECEXP (pat, 0, 0)))
+ if (CALL_P (control))
return 0;
/* If this is an INSN or JUMP_INSN with delayed effects, it
is hard to track the resource needs properly, so give up. */
#ifdef INSN_SETS_ARE_DELAYED
- if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_SETS_ARE_DELAYED (control))
return 0;
#endif
#ifdef INSN_REFERENCES_ARE_DELAYED
- if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_REFERENCES_ARE_DELAYED (control))
return 0;
#endif
+ if (JUMP_P (control))
+ annul_p = INSN_ANNULLED_BRANCH_P (control);
+
/* See if any of the insns in the delay slot match, updating
resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ for (i = seq->len () - 1; i > 0; i--)
{
- rtx candidate = XVECEXP (pat, 0, i);
+ rtx candidate = seq->element (i);
/* If an insn will be annulled if the branch is false, it isn't
considered as a possible duplicate insn. */
if (rtx_equal_p (PATTERN (candidate), ipat)
- && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
- && INSN_FROM_TARGET_P (candidate)))
+ && ! (annul_p && INSN_FROM_TARGET_P (candidate)))
{
/* Show that this insn will be used in the sequel. */
INSN_FROM_TARGET_P (candidate) = 0;
/* Unless this is an annulled insn from the target of a branch,
we must stop if it sets anything needed or set by INSN. */
- if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
- || ! INSN_FROM_TARGET_P (candidate))
- && insn_sets_resource_p (candidate, &needed, 1))
+ if ((!annul_p || !INSN_FROM_TARGET_P (candidate))
+ && insn_sets_resource_p (candidate, &needed, true))
return 0;
}
/* If the insn requiring the delay slot conflicts with INSN, we
must stop. */
- if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1))
+ if (insn_sets_resource_p (control, &needed, true))
return 0;
}
else
return trial;
/* Can't go any further if TRIAL conflicts with INSN. */
- if (insn_sets_resource_p (trial, &needed, 1))
+ if (insn_sets_resource_p (trial, &needed, true))
return 0;
}
}
rtx insn;
/* We don't own the function end. */
- if (thread == 0)
+ if (thread == 0 || ANY_RETURN_P (thread))
return 0;
/* Get the first active insn, or THREAD, if it is an active insn. */
}
}
\f
+static vec <rtx> sibling_labels;
+
+/* Return the label before INSN, or put a new label there. If SIBLING is
+ non-zero, it is another label associated with the new label (if any),
+ typically the former target of the jump that will be redirected to
+ the new label. */
+
+static rtx_insn *
+get_label_before (rtx insn, rtx sibling)
+{
+ rtx_insn *label;
+
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ label = prev_nonnote_insn (insn);
+
+ if (label == 0 || !LABEL_P (label))
+ {
+ rtx prev = PREV_INSN (insn);
+
+ label = gen_label_rtx ();
+ emit_label_after (label, prev);
+ LABEL_NUSES (label) = 0;
+ if (sibling)
+ {
+ sibling_labels.safe_push (label);
+ sibling_labels.safe_push (sibling);
+ }
+ }
+ return label;
+}
+
/* Scan a function looking for insns that need a delay slot and find insns to
put into the delay slot.
static void
fill_simple_delay_slots (int non_jumps_p)
{
- rtx insn, pat, trial, next_trial;
+ rtx_insn *insn, *trial, *next_trial;
+ rtx pat;
int i;
int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
struct resources needed, set;
int slots_to_fill, slots_filled;
- rtx delay_list;
+ rtx_insn_list *delay_list;
for (i = 0; i < num_unfilled_slots; i++)
{
&& no_labels_between_p (insn, trial)
&& ! can_throw_internal (trial))
{
- rtx *tmp;
+ rtx_insn **tmp;
slots_filled++;
delay_list = add_to_delay_list (trial, delay_list);
if (*tmp == trial)
*tmp = 0;
{
- rtx next = NEXT_INSN (trial);
- rtx prev = PREV_INSN (trial);
+ rtx_insn *next = NEXT_INSN (trial);
+ rtx_insn *prev = PREV_INSN (trial);
if (prev)
- NEXT_INSN (prev) = next;
+ SET_NEXT_INSN (prev) = next;
if (next)
- PREV_INSN (next) = prev;
+ SET_PREV_INSN (next) = prev;
}
}
CLEAR_RESOURCE (&needed);
CLEAR_RESOURCE (&set);
mark_set_resources (insn, &set, 0, MARK_SRC_DEST);
- mark_referenced_resources (insn, &needed, 0);
+ mark_referenced_resources (insn, &needed, false);
for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
trial = next_trial)
/* This must be an INSN or CALL_INSN. */
pat = PATTERN (trial);
- /* USE and CLOBBER at this level was just for flow; ignore it. */
+ /* Stand-alone USE and CLOBBER are just for flow. */
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
/* Check for resource conflict first, to avoid unnecessary
splitting. */
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
+ if (! insn_references_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &needed, true)
#ifdef HAVE_cc0
/* Can't separate set of cc0 from its use. */
&& ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
}
mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, 1);
+ mark_referenced_resources (trial, &needed, true);
}
}
if (slots_filled != slots_to_fill
&& delay_list == 0
&& JUMP_P (insn)
- && (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && !ANY_RETURN_P (JUMP_LABEL (insn)))
{
delay_list = optimize_skip (insn);
if (delay_list)
Presumably, we should also check to see if we could get
back to this function via `setjmp'. */
&& ! can_throw_internal (insn)
- && (!JUMP_P (insn)
- || ((condjump_p (insn) || condjump_in_parallel_p (insn))
- && ! simplejump_p (insn)
- && JUMP_LABEL (insn) != 0)))
+ && !JUMP_P (insn))
{
- /* Invariant: If insn is a JUMP_INSN, the insn's jump
- label. Otherwise, zero. */
- rtx target = 0;
int maybe_never = 0;
rtx pat, trial_delay;
CLEAR_RESOURCE (&needed);
CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
+ mark_referenced_resources (insn, &needed, true);
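+
+ /* A call might not return, so insns after it may never execute;
+ MAYBE_NEVER keeps trapping insns out of the delay slots below. */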
if (CALL_P (insn))
+ maybe_never = 1;
+
+ for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1);
+ trial = next_trial)
{
- mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (insn, &needed, 1);
- maybe_never = 1;
- }
- else
- {
- mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (insn, &needed, 1);
- if (JUMP_P (insn))
- target = JUMP_LABEL (insn);
- }
+ next_trial = next_nonnote_insn (trial);
- if (target == 0)
- for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
- {
- next_trial = next_nonnote_insn (trial);
+ /* This must be an INSN or CALL_INSN. */
+ pat = PATTERN (trial);
- if (LABEL_P (trial)
- || BARRIER_P (trial))
- break;
+ /* Stand-alone USE and CLOBBER are just for flow. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
- /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
- pat = PATTERN (trial);
+ /* If this already has filled delay slots, get the insn needing
+ the delay slots. */
+ if (GET_CODE (pat) == SEQUENCE)
+ trial_delay = XVECEXP (pat, 0, 0);
+ else
+ trial_delay = trial;
- /* Stand-alone USE and CLOBBER are just for flow. */
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
+ /* Stop our search when seeing a jump. */
+ if (JUMP_P (trial_delay))
+ break;
- /* If this already has filled delay slots, get the insn needing
- the delay slots. */
- if (GET_CODE (pat) == SEQUENCE)
- trial_delay = XVECEXP (pat, 0, 0);
- else
- trial_delay = trial;
-
- /* Stop our search when seeing an unconditional jump. */
- if (JUMP_P (trial_delay))
- break;
-
- /* See if we have a resource problem before we try to
- split. */
- if (GET_CODE (pat) != SEQUENCE
- && ! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
+ /* See if we have a resource problem before we try to split. */
+ if (GET_CODE (pat) != SEQUENCE
+ && ! insn_references_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &needed, true)
#ifdef HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
+ && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
#endif
- && ! (maybe_never && may_trap_or_fault_p (pat))
- && (trial = try_split (pat, trial, 0))
- && eligible_for_delay (insn, slots_filled, trial, flags)
- && ! can_throw_internal(trial))
- {
- next_trial = next_nonnote_insn (trial);
- delay_list = add_to_delay_list (trial, delay_list);
-
+ && ! (maybe_never && may_trap_or_fault_p (pat))
+ && (trial = try_split (pat, trial, 0))
+ && eligible_for_delay (insn, slots_filled, trial, flags)
+ && ! can_throw_internal (trial))
+ {
+ next_trial = next_nonnote_insn (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
- link_cc0_insns (trial);
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
#endif
+ delete_related_insns (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
- delete_related_insns (trial);
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
-
- mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, 1);
+ mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
+ mark_referenced_resources (trial, &needed, true);
- /* Ensure we don't put insns between the setting of cc and the
- comparison by moving a setting of cc into an earlier delay
- slot since these insns could clobber the condition code. */
- set.cc = 1;
+ /* Ensure we don't put insns between the setting of cc and the
+ comparison by moving a setting of cc into an earlier delay
+ slot since these insns could clobber the condition code. */
+ set.cc = 1;
- /* If this is a call or jump, we might not get here. */
- if (CALL_P (trial_delay)
- || JUMP_P (trial_delay))
- maybe_never = 1;
- }
+ /* If this is a call, we might not get here. */
+ if (CALL_P (trial_delay))
+ maybe_never = 1;
+ }
/* If there are slots left to fill and our search was stopped by an
unconditional branch, try the insn at the branch target. We can
Don't do this if the insn at the branch target is a branch. */
if (slots_to_fill != slots_filled
&& trial
- && JUMP_P (trial)
+ && jump_to_label_p (trial)
&& simplejump_p (trial)
- && (target == 0 || JUMP_LABEL (trial) == target)
&& (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
&& ! (NONJUMP_INSN_P (next_trial)
&& GET_CODE (PATTERN (next_trial)) == SEQUENCE)
&& !JUMP_P (next_trial)
- && ! insn_references_resource_p (next_trial, &set, 1)
- && ! insn_sets_resource_p (next_trial, &set, 1)
- && ! insn_sets_resource_p (next_trial, &needed, 1)
+ && ! insn_references_resource_p (next_trial, &set, true)
+ && ! insn_sets_resource_p (next_trial, &set, true)
+ && ! insn_sets_resource_p (next_trial, &needed, true)
#ifdef HAVE_cc0
&& ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
#endif
rtx new_label = next_real_insn (next_trial);
if (new_label != 0)
- new_label = get_label_before (new_label);
+ new_label = get_label_before (new_label, JUMP_LABEL (trial));
else
- new_label = find_end_label ();
+ new_label = find_end_label (simple_return_rtx);
if (new_label)
{
delay_list
- = add_to_delay_list (copy_rtx (next_trial), delay_list);
+ = add_to_delay_list (copy_delay_slot_insn (next_trial),
+ delay_list);
slots_filled++;
reorg_redirect_jump (trial, new_label);
-
- /* If we merged because we both jumped to the same place,
- redirect the original insn also. */
- if (target)
- reorg_redirect_jump (insn, new_label);
}
}
}
note_delay_statistics (slots_filled, 0);
}
-
-#ifdef DELAY_SLOTS_FOR_EPILOGUE
- /* See if the epilogue needs any delay slots. Try to fill them if so.
- The only thing we can do is scan backwards from the end of the
- function. If we did this in a previous pass, it is incorrect to do it
- again. */
- if (current_function_epilogue_delay_list)
- return;
-
- slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
- if (slots_to_fill == 0)
- return;
-
- slots_filled = 0;
- CLEAR_RESOURCE (&set);
-
- /* The frame pointer and stack pointer are needed at the beginning of
- the epilogue, so instructions setting them can not be put in the
- epilogue delay slot. However, everything else needed at function
- end is safe, so we don't want to use end_of_function_needs here. */
- CLEAR_RESOURCE (&needed);
- if (frame_pointer_needed)
- {
- SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM);
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM);
-#endif
- if (! EXIT_IGNORE_STACK
- || current_function_sp_is_unchanging)
- SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
- }
- else
- SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
-
-#ifdef EPILOGUE_USES
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- if (EPILOGUE_USES (i))
- SET_HARD_REG_BIT (needed.regs, i);
- }
-#endif
-
- for (trial = get_last_insn (); ! stop_search_p (trial, 1);
- trial = PREV_INSN (trial))
+}
+\f
+/* Follow any unconditional jump at LABEL, for the purpose of redirecting JUMP;
+ return the ultimate label reached by any such chain of jumps.
+ Return a suitable return rtx if the chain ultimately leads to a
+ return instruction.
+ If LABEL is not followed by a jump, return LABEL.
+ If the chain loops or we can't find the end, return LABEL,
+ since that tells the caller to avoid changing the insn.
+ If the returned label is obtained by following a crossing jump,
+ set *CROSSING to true, otherwise set it to false. */
+
+static rtx_insn *
+follow_jumps (rtx_insn *label, rtx jump, bool *crossing)
+{
+ rtx_insn *insn;
+ rtx_insn *next;
+ rtx_insn *value = label;
+ int depth;
+
+ *crossing = false;
+ if (ANY_RETURN_P (label))
+ return label;
+ for (depth = 0;
+ (depth < 10
+ && (insn = next_active_insn (value)) != 0
+ && JUMP_P (insn)
+ && JUMP_LABEL (insn) != NULL_RTX
+ && ((any_uncondjump_p (insn) && onlyjump_p (insn))
+ || ANY_RETURN_P (PATTERN (insn)))
+ && (next = NEXT_INSN (insn))
+ && BARRIER_P (next));
+ depth++)
{
- if (NOTE_P (trial))
- continue;
- pat = PATTERN (trial);
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
+ rtx_insn *this_label = JUMP_LABEL_AS_INSN (insn);
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
-#ifdef HAVE_cc0
- /* Don't want to mess with cc0 here. */
- && ! reg_mentioned_p (cc0_rtx, pat)
-#endif
- && ! can_throw_internal (trial))
- {
- trial = try_split (pat, trial, 1);
- if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
- {
- /* Here as well we are searching backward, so put the
- insns we find on the head of the list. */
-
- current_function_epilogue_delay_list
- = gen_rtx_INSN_LIST (VOIDmode, trial,
- current_function_epilogue_delay_list);
- mark_end_of_function_resources (trial, 1);
- update_block (trial, trial);
- delete_related_insns (trial);
-
- /* Clear deleted bit so final.c will output the insn. */
- INSN_DELETED_P (trial) = 0;
+ /* If we have found a cycle, make the insn jump to itself. */
+ if (this_label == label)
+ return label;
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
- }
+ /* Cannot follow returns and cannot look through tablejumps. */
+ if (ANY_RETURN_P (this_label))
+ return this_label;
+ if (NEXT_INSN (this_label)
+ && JUMP_TABLE_DATA_P (NEXT_INSN (this_label)))
+ break;
- mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, 1);
+ if (!targetm.can_follow_jump (jump, insn))
+ break;
+ if (!*crossing)
+ *crossing = CROSSING_JUMP_P (jump);
+ value = this_label;
}
-
- note_delay_statistics (slots_filled, 0);
-#endif
+ if (depth == 10)
+ return label;
+ return value;
}
-\f
+
/* Try to find insns to place in delay slots.
INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
case, we can only take insns from the head of the thread for our delay
slot. We then adjust the jump to point after the insns we have taken. */
-static rtx
-fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
- rtx opposite_thread, int likely, int thread_if_true,
+static rtx_insn_list *
+fill_slots_from_thread (rtx_insn *insn, rtx condition, rtx_insn *thread,
+ rtx_insn *opposite_thread, int likely,
+ int thread_if_true,
int own_thread, int slots_to_fill,
- int *pslots_filled, rtx delay_list)
+ int *pslots_filled, rtx_insn_list *delay_list)
{
- rtx new_thread;
+ rtx_insn *new_thread;
struct resources opposite_needed, set, needed;
- rtx trial;
+ rtx_insn *trial;
int lose = 0;
int must_annul = 0;
int flags;
/* Validate our arguments. */
- gcc_assert(condition != const_true_rtx || thread_if_true);
- gcc_assert(own_thread || thread_if_true);
+ gcc_assert (condition != const_true_rtx || thread_if_true);
+ gcc_assert (own_thread || thread_if_true);
flags = get_jump_flags (insn, JUMP_LABEL (insn));
  /* If our thread is the end of the subroutine, we can't get any delay
     insns from it. */
- if (thread == 0)
+ if (thread == NULL_RTX || ANY_RETURN_P (thread))
return delay_list;
/* If this is an unconditional branch, nothing is needed at the
/* If TRIAL conflicts with the insns ahead of it, we lose. Also,
don't separate or copy insns that set and use CC0. */
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
+ if (! insn_references_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &needed, true)
#ifdef HAVE_cc0
&& ! (reg_mentioned_p (cc0_rtx, pat)
&& (! own_thread || ! sets_cc0_p (pat)))
go into an annulled delay slot. */
if (!must_annul
&& (condition == const_true_rtx
- || (! insn_sets_resource_p (trial, &opposite_needed, 1)
- && ! may_trap_or_fault_p (pat))))
+ || (! insn_sets_resource_p (trial, &opposite_needed, true)
+ && ! may_trap_or_fault_p (pat)
+ && ! RTX_FRAME_RELATED_P (trial))))
{
old_trial = trial;
trial = try_split (pat, trial, 0);
: check_annul_list_true_false (1, delay_list)
&& eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
- rtx temp;
+ rtx_insn *temp;
must_annul = 1;
winner:
/* We are moving this insn, not deleting it. We must
temporarily increment the use count on any referenced
label lest it be deleted by delete_related_insns. */
- note = find_reg_note (trial, REG_LABEL, 0);
- /* REG_LABEL could be NOTE_INSN_DELETED_LABEL too. */
- if (note && LABEL_P (XEXP (note, 0)))
- LABEL_NUSES (XEXP (note, 0))++;
+ for (note = REG_NOTES (trial);
+ note != NULL_RTX;
+ note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
+ || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
+ {
+ /* REG_LABEL_OPERAND could be
+ NOTE_INSN_DELETED_LABEL too. */
+ if (LABEL_P (XEXP (note, 0)))
+ LABEL_NUSES (XEXP (note, 0))++;
+ else
+ gcc_assert (REG_NOTE_KIND (note)
+ == REG_LABEL_OPERAND);
+ }
+ if (jump_to_label_p (trial))
+ LABEL_NUSES (JUMP_LABEL (trial))++;
delete_related_insns (trial);
- if (note && LABEL_P (XEXP (note, 0)))
- LABEL_NUSES (XEXP (note, 0))--;
+ for (note = REG_NOTES (trial);
+ note != NULL_RTX;
+ note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
+ || REG_NOTE_KIND (note) == REG_LABEL_TARGET)
+ {
+ /* REG_LABEL_OPERAND could be
+ NOTE_INSN_DELETED_LABEL too. */
+ if (LABEL_P (XEXP (note, 0)))
+ LABEL_NUSES (XEXP (note, 0))--;
+ else
+ gcc_assert (REG_NOTE_KIND (note)
+ == REG_LABEL_OPERAND);
+ }
+ if (jump_to_label_p (trial))
+ LABEL_NUSES (JUMP_LABEL (trial))--;
}
else
new_thread = next_active_insn (trial);
- temp = own_thread ? trial : copy_rtx (trial);
+ temp = own_thread ? trial : copy_delay_slot_insn (trial);
if (thread_if_true)
INSN_FROM_TARGET_P (temp) = 1;
may be branching to a location that has a
redundant insn. Skip any if so. */
while (new_thread && ! own_thread
- && ! insn_sets_resource_p (new_thread, &set, 1)
- && ! insn_sets_resource_p (new_thread, &needed, 1)
+ && ! insn_sets_resource_p (new_thread, &set, true)
+ && ! insn_sets_resource_p (new_thread, &needed,
+ true)
&& ! insn_references_resource_p (new_thread,
- &set, 1)
+ &set, true)
&& (prior_insn
= redundant_insn (new_thread, insn,
delay_list)))
/* This insn can't go into a delay slot. */
lose = 1;
mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, 1);
+ mark_referenced_resources (trial, &needed, true);
/* Ensure we don't put insns between the setting of cc and the comparison
by moving a setting of cc into an earlier delay slot since these insns
&& GET_CODE (PATTERN (trial)) == SEQUENCE
&& JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
{
+ rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (trial));
/* If this is the `true' thread, we will want to follow the jump,
so we can only do this if we have taken everything up to here. */
if (thread_if_true && trial == new_thread)
{
delay_list
- = steal_delay_list_from_target (insn, condition, PATTERN (trial),
+ = steal_delay_list_from_target (insn, condition, sequence,
delay_list, &set, &needed,
&opposite_needed, slots_to_fill,
pslots_filled, &must_annul,
else if (! thread_if_true)
delay_list
= steal_delay_list_from_fallthrough (insn, condition,
- PATTERN (trial),
+ sequence,
delay_list, &set, &needed,
&opposite_needed, slots_to_fill,
pslots_filled, &must_annul);
depend on the destination register. If so, try to place the opposite
arithmetic insn after the jump insn and put the arithmetic insn in the
delay slot. If we can't do this, return. */
- if (delay_list == 0 && likely && new_thread
+ if (delay_list == 0 && likely
+ && new_thread && !ANY_RETURN_P (new_thread)
&& NONJUMP_INSN_P (new_thread)
+ && !RTX_FRAME_RELATED_P (new_thread)
&& GET_CODE (PATTERN (new_thread)) != ASM_INPUT
&& asm_noperands (PATTERN (new_thread)) < 0)
{
dest = SET_DEST (pat), src = SET_SRC (pat);
if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
&& rtx_equal_p (XEXP (src, 0), dest)
+ && (!FLOAT_MODE_P (GET_MODE (src))
+ || flag_unsafe_math_optimizations)
&& ! reg_overlap_mentioned_p (dest, XEXP (src, 1))
&& ! side_effects_p (pat))
{
rtx other = XEXP (src, 1);
rtx new_arith;
- rtx ninsn;
+ rtx_insn *ninsn;
/* If this is a constant adjustment, use the same code with
the negated constant. Otherwise, reverse the sense of the
arithmetic. */
- if (GET_CODE (other) == CONST_INT)
+ if (CONST_INT_P (other))
new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
negate_rtx (GET_MODE (src), other));
else
else
new_thread = next_active_insn (trial);
- ninsn = own_thread ? trial : copy_rtx (trial);
+ ninsn = own_thread ? trial : copy_delay_slot_insn (trial);
if (thread_if_true)
INSN_FROM_TARGET_P (ninsn) = 1;
- delay_list = add_to_delay_list (ninsn, NULL_RTX);
+ delay_list = add_to_delay_list (ninsn, NULL);
(*pslots_filled)++;
}
}
if (new_thread != thread)
{
rtx label;
+ bool crossing = false;
gcc_assert (thread_if_true);
- if (new_thread && JUMP_P (new_thread)
- && (simplejump_p (new_thread)
- || GET_CODE (PATTERN (new_thread)) == RETURN)
+ if (new_thread && simplejump_or_return_p (new_thread)
&& redirect_with_delay_list_safe_p (insn,
JUMP_LABEL (new_thread),
delay_list))
- new_thread = follow_jumps (JUMP_LABEL (new_thread));
+ new_thread = follow_jumps (JUMP_LABEL_AS_INSN (new_thread), insn,
+ &crossing);
- if (new_thread == 0)
- label = find_end_label ();
+ if (ANY_RETURN_P (new_thread))
+ label = find_end_label (new_thread);
else if (LABEL_P (new_thread))
label = new_thread;
else
- label = get_label_before (new_thread);
+ label = get_label_before (new_thread, JUMP_LABEL (insn));
if (label)
- reorg_redirect_jump (insn, label);
+ {
+ reorg_redirect_jump (insn, label);
+ if (crossing)
+ CROSSING_JUMP_P (insn) = 1;
+ }
}
return delay_list;
static void
fill_eager_delay_slots (void)
{
- rtx insn;
+ rtx_insn *insn;
int i;
int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
for (i = 0; i < num_unfilled_slots; i++)
{
rtx condition;
- rtx target_label, insn_at_target, fallthrough_insn;
- rtx delay_list = 0;
+ rtx_insn *target_label, *insn_at_target, *fallthrough_insn;
+ rtx_insn_list *delay_list = 0;
int own_target;
int own_fallthrough;
int prediction, slots_to_fill, slots_filled;
continue;
slots_filled = 0;
- target_label = JUMP_LABEL (insn);
+ target_label = JUMP_LABEL_AS_INSN (insn);
condition = get_branch_condition (insn, target_label);
if (condition == 0)
them. Then see whether the branch is likely true. We don't need
to do a lot of this for unconditional branches. */
- insn_at_target = next_active_insn (target_label);
+ insn_at_target = first_active_target_insn (target_label);
own_target = own_thread_p (target_label, target_label, 0);
if (condition == const_true_rtx)
{
fallthrough_insn = next_active_insn (insn);
own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
- prediction = mostly_true_jump (insn, condition);
+ prediction = mostly_true_jump (insn);
}
/* If this insn is expected to branch, first try to get insns from our
we might have found a redundant insn which we deleted
from the thread that was filled. So we have to recompute
the next insn at the target. */
- target_label = JUMP_LABEL (insn);
- insn_at_target = next_active_insn (target_label);
+ target_label = JUMP_LABEL_AS_INSN (insn);
+ insn_at_target = first_active_target_insn (target_label);
delay_list
= fill_slots_from_thread (insn, condition, fallthrough_insn,
note_delay_statistics (slots_filled, 1);
}
}
+\f
+static void delete_computation (rtx insn);
+
+/* Recursively delete prior insns that compute the value (used only by INSN
+ which the caller is deleting) stored in the register mentioned by NOTE,
+ which is a REG_DEAD note associated with INSN. */
+
+static void
+delete_prior_computation (rtx note, rtx insn)
+{
+ rtx our_prev;
+ rtx reg = XEXP (note, 0);
+
+ for (our_prev = prev_nonnote_insn (insn);
+ our_prev && (NONJUMP_INSN_P (our_prev)
+ || CALL_P (our_prev));
+ our_prev = prev_nonnote_insn (our_prev))
+ {
+ rtx pat = PATTERN (our_prev);
+
+ /* If we reach a CALL which is not calling a const function
+ or the callee pops the arguments, then give up. */
+ if (CALL_P (our_prev)
+ && (! RTL_CONST_CALL_P (our_prev)
+ || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL))
+ break;
+
+ /* If we reach a SEQUENCE, it is too complex to try to
+ do anything with it, so give up. We can be run during
+ and after reorg, so SEQUENCE rtl can legitimately show
+ up here. */
+ if (GET_CODE (pat) == SEQUENCE)
+ break;
+
+ if (GET_CODE (pat) == USE
+ && NONJUMP_INSN_P (XEXP (pat, 0)))
+ /* reorg creates USEs that look like this. We leave them
+ alone because reorg needs them for its own purposes. */
+ break;
+
+ if (reg_set_p (reg, pat))
+ {
+ if (side_effects_p (pat) && !CALL_P (our_prev))
+ break;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ /* If we find a SET of something else, we can't
+ delete the insn. */
+
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx part = XVECEXP (pat, 0, i);
+
+ if (GET_CODE (part) == SET
+ && SET_DEST (part) != reg)
+ break;
+ }
+
+ if (i == XVECLEN (pat, 0))
+ delete_computation (our_prev);
+ }
+ else if (GET_CODE (pat) == SET
+ && REG_P (SET_DEST (pat)))
+ {
+ int dest_regno = REGNO (SET_DEST (pat));
+ int dest_endregno = END_REGNO (SET_DEST (pat));
+ int regno = REGNO (reg);
+ int endregno = END_REGNO (reg);
+
+ if (dest_regno >= regno
+ && dest_endregno <= endregno)
+ delete_computation (our_prev);
+
+ /* We may have a multi-word hard register and some, but not
+ all, of the words of the register are needed in subsequent
+ insns. Write REG_UNUSED notes for those parts that were not
+ needed. */
+ else if (dest_regno <= regno
+ && dest_endregno >= endregno)
+ {
+ int i;
+
+ add_reg_note (our_prev, REG_UNUSED, reg);
+
+ for (i = dest_regno; i < dest_endregno; i++)
+ if (! find_regno_note (our_prev, REG_UNUSED, i))
+ break;
+
+ if (i == dest_endregno)
+ delete_computation (our_prev);
+ }
+ }
+
+ break;
+ }
+
+ /* If PAT references the register that dies here, it is an
+ additional use. Hence any prior SET isn't dead. However, this
+ insn becomes the new place for the REG_DEAD note. */
+ if (reg_overlap_mentioned_p (reg, pat))
+ {
+ XEXP (note, 1) = REG_NOTES (our_prev);
+ REG_NOTES (our_prev) = note;
+ break;
+ }
+ }
+}
+
+/* Delete INSN and recursively delete insns that compute values used only
+ by INSN. This uses the REG_DEAD notes computed during flow analysis.
+
+ Look at all our REG_DEAD notes. If a previous insn does nothing other
+ than set a register that dies in this insn, we can delete that insn
+ as well.
+
+ On machines with CC0, if CC0 is used in this insn, we may be able to
+ delete the insn that set it. */
+
+static void
+delete_computation (rtx insn)
+{
+ rtx note, next;
+
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ {
+ rtx prev = prev_nonnote_insn (insn);
+      /* We assume that at this stage CC's are always set explicitly and
+         always immediately before the jump that will use them.  So if
+         the previous insn exists to set the CC's, delete it (unless it
+         performs auto-increments, etc.).  */
+ if (prev && NONJUMP_INSN_P (prev)
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if (sets_cc0_p (PATTERN (prev)) > 0
+ && ! side_effects_p (PATTERN (prev)))
+ delete_computation (prev);
+ else
+ /* Otherwise, show that cc0 won't be used. */
+ add_reg_note (prev, REG_UNUSED, cc0_rtx);
+ }
+ }
+#endif
+
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) != REG_DEAD
+ /* Verify that the REG_NOTE is legitimate. */
+ || !REG_P (XEXP (note, 0)))
+ continue;
+
+ delete_prior_computation (note, insn);
+ }
+
+ delete_related_insns (insn);
+}
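
[Editor's note: delete_computation and delete_prior_computation amount to reference-counted garbage collection over insns. A toy sketch of that unraveling, driven by explicit use counts where GCC uses REG_DEAD notes; all names are hypothetical:

#include <stdio.h>
#include <stdbool.h>

#define NREGS 8

struct toy_insn
{
  const char *text;
  int uses[2];		/* registers read; -1 means unused slot */
  bool deleted;
};

static int use_count[NREGS];		/* remaining readers per register */
static struct toy_insn *setter[NREGS];	/* the insn that computes each reg */

/* Delete INSN; any input register left with no readers makes its setter
   dead too, so recurse on it.  */
static void
toy_delete_computation (struct toy_insn *insn)
{
  int i;

  insn->deleted = true;
  printf ("deleted: %s\n", insn->text);
  for (i = 0; i < 2; i++)
    {
      int r = insn->uses[i];
      if (r >= 0 && --use_count[r] == 0
          && setter[r] && !setter[r]->deleted)
        toy_delete_computation (setter[r]);
    }
}

int
main (void)
{
  struct toy_insn i1 = { "r1 = 5", { -1, -1 }, false };
  struct toy_insn i2 = { "r2 = r1 + 1", { 1, -1 }, false };
  struct toy_insn jmp = { "if r2 goto L", { 2, -1 }, false };

  setter[1] = &i1; use_count[1] = 1;
  setter[2] = &i2; use_count[2] = 1;

  /* The jump is useless and gets deleted; its whole feeding
     computation unravels.  */
  toy_delete_computation (&jmp);
  return 0;
}
]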
+
+/* If all INSN does is set the pc, delete it, and delete the insn that
+   set the condition codes for it if the previous insn did exactly that. */
+
+static void
+delete_jump (rtx insn)
+{
+ rtx set = single_set (insn);
+
+ if (set && GET_CODE (SET_DEST (set)) == PC)
+ delete_computation (insn);
+}
+
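+/* Scan backwards from the first active insn after X, looking for a label.
+   Stop if we reach SCAN_LIMIT or run off the start of the chain and
+   return NULL_RTX in that case; otherwise return the label found.  */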
+static rtx
+label_before_next_insn (rtx x, rtx scan_limit)
+{
+ rtx insn = next_active_insn (x);
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == scan_limit || insn == NULL_RTX)
+ return NULL_RTX;
+ if (LABEL_P (insn))
+ break;
+ }
+ return insn;
+}
+
\f
/* Once we have tried two ways to fill a delay slot, make a pass over the
code to try to improve the results and to do such things as more jump
threading. */
static void
-relax_delay_slots (rtx first)
+relax_delay_slots (rtx_insn *first)
{
- rtx insn, next, pat;
- rtx trial, delay_insn, target_label;
+ rtx_insn *insn, *next;
+ rtx_sequence *pat;
+ rtx_insn *trial, *delay_insn, *target_label;
/* Look at every JUMP_INSN and see if we can improve it. */
for (insn = first; insn; insn = next)
{
rtx other;
+ bool crossing;
next = next_active_insn (insn);
group of consecutive labels. */
if (JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
- && (target_label = JUMP_LABEL (insn)) != 0)
+ && !ANY_RETURN_P (target_label = JUMP_LABEL_AS_INSN (insn)))
{
- target_label = skip_consecutive_labels (follow_jumps (target_label));
- if (target_label == 0)
- target_label = find_end_label ();
+ target_label
+ = skip_consecutive_labels (follow_jumps (target_label, insn,
+ &crossing));
+ if (ANY_RETURN_P (target_label))
+ target_label = find_end_label (target_label);
if (target_label && next_active_insn (target_label) == next
&& ! condjump_in_parallel_p (insn))
}
if (target_label && target_label != JUMP_LABEL (insn))
- reorg_redirect_jump (insn, target_label);
+ {
+ reorg_redirect_jump (insn, target_label);
+ if (crossing)
+ CROSSING_JUMP_P (insn) = 1;
+ }
/* See if this jump conditionally branches around an unconditional
jump. If so, invert this jump and point it to the target of the
second jump. */
- if (next && JUMP_P (next)
+ if (next && simplejump_or_return_p (next)
&& any_condjump_p (insn)
- && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
&& target_label
&& next_active_insn (target_label) == next_active_insn (next)
&& no_labels_between_p (insn, next))
invert_jump fails. */
++LABEL_NUSES (target_label);
- if (label)
+ if (!ANY_RETURN_P (label))
++LABEL_NUSES (label);
if (invert_jump (insn, label, 1))
next = insn;
}
- if (label)
+ if (!ANY_RETURN_P (label))
--LABEL_NUSES (label);
if (--LABEL_NUSES (target_label) == 0)
Don't do this if we expect the conditional branch to be true, because
we would then be making the more common case longer. */
- if (JUMP_P (insn)
- && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
+ if (simplejump_or_return_p (insn)
&& (other = prev_active_insn (insn)) != 0
&& any_condjump_p (other)
&& no_labels_between_p (other, insn)
- && 0 > mostly_true_jump (other,
- get_branch_condition (other,
- JUMP_LABEL (other))))
+ && 0 > mostly_true_jump (other))
{
rtx other_target = JUMP_LABEL (other);
- target_label = JUMP_LABEL (insn);
+ target_label = JUMP_LABEL_AS_INSN (insn);
if (invert_jump (other, target_label, 0))
reorg_redirect_jump (insn, other_target);
}
- /* Now look only at cases where we have filled a delay slot. */
- if (!NONJUMP_INSN_P (insn)
- || GET_CODE (PATTERN (insn)) != SEQUENCE)
+ /* Now look only at cases where we have a filled delay slot. */
+ if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE)
continue;
- pat = PATTERN (insn);
- delay_insn = XVECEXP (pat, 0, 0);
+ pat = as_a <rtx_sequence *> (PATTERN (insn));
+ delay_insn = pat->insn (0);
/* See if the first insn in the delay slot is redundant with some
previous insn. Remove it from the delay slot if so; then set up
to reprocess this insn. */
- if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
+ if (redundant_insn (pat->insn (1), delay_insn, 0))
{
- delete_from_delay_slot (XVECEXP (pat, 0, 1));
+ update_block (pat->insn (1), insn);
+ delete_from_delay_slot (pat->insn (1));
next = prev_active_insn (next);
continue;
}
Only do so if optimizing for size since this results in slower, but
smaller code. */
- if (optimize_size
- && GET_CODE (PATTERN (delay_insn)) == RETURN
+ if (optimize_function_for_size_p (cfun)
+ && ANY_RETURN_P (PATTERN (delay_insn))
&& next
&& JUMP_P (next)
- && GET_CODE (PATTERN (next)) == RETURN)
+ && PATTERN (next) == PATTERN (delay_insn))
{
rtx after;
int i;
We do this by deleting the INSN containing the SEQUENCE, then
re-emitting the insns separately, and then deleting the RETURN.
This allows the count of the jump target to be properly
- decremented. */
+ decremented.
- /* Clear the from target bit, since these insns are no longer
+ Note that we need to change the INSN_UID of the re-emitted insns
+ since it is used to hash the insns for mark_target_live_regs and
+ the re-emitted insns will no longer be wrapped up in a SEQUENCE.
+
+ Clear the from target bit, since these insns are no longer
in delay slots. */
for (i = 0; i < XVECLEN (pat, 0); i++)
INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
trial = PREV_INSN (insn);
delete_related_insns (insn);
gcc_assert (GET_CODE (pat) == SEQUENCE);
- after = trial;
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx this_insn = XVECEXP (pat, 0, i);
- add_insn_after (this_insn, after);
- after = this_insn;
- }
+ add_insn_after (delay_insn, trial, NULL);
+ after = delay_insn;
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
delete_scheduled_jump (delay_insn);
continue;
}
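
[Editor's note: the delete-and-re-emit dance exists because a filled branch is wrapped in a SEQUENCE; to keep the delay insns while dropping the branch, the wrapper must be dissolved into fresh, individually addressable insns. A rough stand-alone sketch of that splice, with toy types rather than GCC's rtx:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_insn
{
  char text[32];
  struct toy_insn *next;
};

/* Re-emit BODY[1..N-1] as fresh nodes after PREV, dropping BODY[0]
   (the control insn).  Fresh nodes mirror the patch's point that the
   re-emitted insns need new identities (INSN_UIDs).  */
static void
toy_dissolve_sequence (struct toy_insn *prev, const char body[][32], int n)
{
  int i;
  for (i = 1; i < n; i++)
    {
      struct toy_insn *copy = (struct toy_insn *) malloc (sizeof *copy);
      strcpy (copy->text, body[i]);
      copy->next = prev->next;
      prev->next = copy;
      prev = copy;
    }
}

int
main (void)
{
  struct toy_insn tail = { "L: ret", 0 };
  struct toy_insn head = { "r1 = 0", &tail };
  /* A filled branch: the jump plus one delay-slot insn.  The jump goes
     to the immediately following label, so only the slot insn needs to
     survive.  */
  const char seq[2][32] = { "jump L", "r2 = r1" };
  struct toy_insn *p;

  toy_dissolve_sequence (&head, seq, 2);
  for (p = &head; p; p = p->next)
    printf ("%s\n", p->text);
  return 0;
}
]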
/* Now look only at the cases where we have a filled JUMP_INSN. */
- if (!JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
- || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
- || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
+ if (!JUMP_P (delay_insn)
+ || !(condjump_p (delay_insn) || condjump_in_parallel_p (delay_insn)))
continue;
- target_label = JUMP_LABEL (delay_insn);
+ target_label = JUMP_LABEL_AS_INSN (delay_insn);
+ if (target_label && ANY_RETURN_P (target_label))
+ continue;
- if (target_label)
- {
- /* If this jump goes to another unconditional jump, thread it, but
- don't convert a jump into a RETURN here. */
- trial = skip_consecutive_labels (follow_jumps (target_label));
- if (trial == 0)
- trial = find_end_label ();
-
- if (trial && trial != target_label
- && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
- {
- reorg_redirect_jump (delay_insn, trial);
- target_label = trial;
- }
+ /* If this jump goes to another unconditional jump, thread it, but
+ don't convert a jump into a RETURN here. */
+ trial = skip_consecutive_labels (follow_jumps (target_label, delay_insn,
+ &crossing));
+ if (ANY_RETURN_P (trial))
+ trial = find_end_label (trial);
- /* If the first insn at TARGET_LABEL is redundant with a previous
- insn, redirect the jump to the following insn process again. */
- trial = next_active_insn (target_label);
- if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
- && redundant_insn (trial, insn, 0)
- && ! can_throw_internal (trial))
- {
- /* Figure out where to emit the special USE insn so we don't
- later incorrectly compute register live/death info. */
- rtx tmp = next_active_insn (trial);
- if (tmp == 0)
- tmp = find_end_label ();
+ if (trial && trial != target_label
+ && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
+ {
+ reorg_redirect_jump (delay_insn, trial);
+ target_label = trial;
+ if (crossing)
+ CROSSING_JUMP_P (insn) = 1;
+ }
- if (tmp)
- {
- /* Insert the special USE insn and update dataflow info. */
- update_block (trial, tmp);
+ /* If the first insn at TARGET_LABEL is redundant with a previous
+ insn, redirect the jump to the following insn and process again.
+ We use next_real_insn instead of next_active_insn so we
+ don't skip USE-markers, or we'll end up with incorrect
+ liveness info. */
+ trial = next_real_insn (target_label);
+ if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
+ && redundant_insn (trial, insn, 0)
+ && ! can_throw_internal (trial))
+ {
+ /* Figure out where to emit the special USE insn so we don't
+ later incorrectly compute register live/death info. */
+ rtx tmp = next_active_insn (trial);
+ if (tmp == 0)
+ tmp = find_end_label (simple_return_rtx);
- /* Now emit a label before the special USE insn, and
- redirect our jump to the new label. */
- target_label = get_label_before (PREV_INSN (tmp));
- reorg_redirect_jump (delay_insn, target_label);
- next = insn;
- continue;
- }
+ if (tmp)
+ {
+ /* Insert the special USE insn and update dataflow info. */
+ update_block (trial, tmp);
+
+ /* Now emit a label before the special USE insn, and
+ redirect our jump to the new label. */
+ target_label = get_label_before (PREV_INSN (tmp), target_label);
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
}
+ }
- /* Similarly, if it is an unconditional jump with one insn in its
- delay list and that insn is redundant, thread the jump. */
- if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
- && XVECLEN (PATTERN (trial), 0) == 2
- && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
- && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
- || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
- && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
+ /* Similarly, if it is an unconditional jump with one insn in its
+ delay list and that insn is redundant, thread the jump. */
+ if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && XVECLEN (PATTERN (trial), 0) == 2
+ && JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
+ && simplejump_or_return_p (XVECEXP (PATTERN (trial), 0, 0))
+ && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
+ {
+ rtx_sequence *trial_seq = as_a <rtx_sequence *> (PATTERN (trial));
+ target_label = JUMP_LABEL_AS_INSN (trial_seq->insn (0));
+ if (ANY_RETURN_P (target_label))
+ target_label = find_end_label (target_label);
+
+ if (target_label
+ && redirect_with_delay_slots_safe_p (delay_insn, target_label,
+ insn))
{
- target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
- if (target_label == 0)
- target_label = find_end_label ();
-
- if (target_label
- && redirect_with_delay_slots_safe_p (delay_insn, target_label,
- insn))
- {
- reorg_redirect_jump (delay_insn, target_label);
- next = insn;
- continue;
- }
+ update_block (trial_seq->insn (1), insn);
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
}
}
+ /* See if we have a simple (conditional) jump that is useless. */
if (! INSN_ANNULLED_BRANCH_P (delay_insn)
- && prev_active_insn (target_label) == insn
&& ! condjump_in_parallel_p (delay_insn)
+ && prev_active_insn (target_label) == insn
+ && ! BARRIER_P (prev_nonnote_insn (target_label))
#ifdef HAVE_cc0
/* If the last insn in the delay slot sets CC0 for some insn,
various code assumes that it is in a delay slot. We could
We do this by deleting the INSN containing the SEQUENCE, then
re-emitting the insns separately, and then deleting the jump.
This allows the count of the jump target to be properly
- decremented. */
+ decremented.
- /* Clear the from target bit, since these insns are no longer
+ Note that we need to change the INSN_UID of the re-emitted insns
+ since it is used to hash the insns for mark_target_live_regs and
+ the re-emitted insns will no longer be wrapped up in a SEQUENCE.
+
+ Clear the from target bit, since these insns are no longer
in delay slots. */
for (i = 0; i < XVECLEN (pat, 0); i++)
INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
trial = PREV_INSN (insn);
delete_related_insns (insn);
gcc_assert (GET_CODE (pat) == SEQUENCE);
- after = trial;
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx this_insn = XVECEXP (pat, 0, i);
- add_insn_after (this_insn, after);
- after = this_insn;
- }
+ add_insn_after (delay_insn, trial, NULL);
+ after = delay_insn;
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
delete_scheduled_jump (delay_insn);
continue;
}
identical to the one in its delay slot. In this case, we can just
delete the branch and the insn in its delay slot. */
if (next && NONJUMP_INSN_P (next)
- && prev_label (next_active_insn (next)) == target_label
+ && label_before_next_insn (next, insn) == target_label
&& simplejump_p (insn)
&& XVECLEN (pat, 0) == 2
- && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
+ && rtx_equal_p (PATTERN (next), PATTERN (pat->insn (1))))
{
delete_related_insns (insn);
continue;
}
- /* See if this jump (with its delay slots) branches around another
- jump (without delay slots). If so, invert this jump and point
- it to the target of the second jump. We cannot do this for
- annulled jumps, though. Again, don't convert a jump to a RETURN
- here. */
+ /* See if this jump (with its delay slots) conditionally branches
+ around an unconditional jump (without delay slots). If so, invert
+ this jump and point it to the target of the second jump. We cannot
+ do this for annulled jumps, though. Again, don't convert a jump to
+ a RETURN here. */
if (! INSN_ANNULLED_BRANCH_P (delay_insn)
&& any_condjump_p (delay_insn)
- && next && JUMP_P (next)
- && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
+ && next && simplejump_or_return_p (next)
&& next_active_insn (target_label) == next_active_insn (next)
&& no_labels_between_p (insn, next))
{
rtx label = JUMP_LABEL (next);
rtx old_label = JUMP_LABEL (delay_insn);
- if (label == 0)
- label = find_end_label ();
+ if (ANY_RETURN_P (label))
+ label = find_end_label (label);
/* find_end_label can generate a new label. Check this first. */
if (label
/* If we own the thread opposite the way this insn branches, see if we
can merge its delay slots with following insns. */
- if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ if (INSN_FROM_TARGET_P (pat->insn (1))
&& own_thread_p (NEXT_INSN (insn), 0, 1))
try_merge_delay_insns (insn, next);
- else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ else if (! INSN_FROM_TARGET_P (pat->insn (1))
&& own_thread_p (target_label, target_label, 0))
try_merge_delay_insns (insn, next_active_insn (target_label));
}
}
\f
-#ifdef HAVE_return
/* Look for filled jumps to the end-of-function label. We can try to convert
them into RETURN insns if the insns in the delay slot are valid for the
make_return_insns (rtx first)
{
rtx insn, jump_insn, pat;
- rtx real_return_label = end_of_function_label;
+ rtx real_return_label = function_return_label;
+ rtx real_simple_return_label = function_simple_return_label;
int slots, i;
-#ifdef DELAY_SLOTS_FOR_EPILOGUE
- /* If a previous pass filled delay slots in the epilogue, things get a
- bit more complicated, as those filler insns would generally (without
- data flow analysis) have to be executed after any existing branch
- delay slot filler insns. It is also unknown whether such a
- transformation would actually be profitable. Note that the existing
- code only cares for branches with (some) filled delay slots. */
- if (current_function_epilogue_delay_list != NULL)
- return;
-#endif
-
/* See if there is a RETURN insn in the function other than the one we
made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change
into a RETURN to jump to it. */
for (insn = first; insn; insn = NEXT_INSN (insn))
- if (JUMP_P (insn) && GET_CODE (PATTERN (insn)) == RETURN)
+ if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
{
- real_return_label = get_label_before (insn);
+ rtx t = get_label_before (insn, NULL_RTX);
+ if (PATTERN (insn) == ret_rtx)
+ real_return_label = t;
+ else
+ real_simple_return_label = t;
break;
}
/* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
was equal to END_OF_FUNCTION_LABEL. */
- LABEL_NUSES (real_return_label)++;
+ if (real_return_label)
+ LABEL_NUSES (real_return_label)++;
+ if (real_simple_return_label)
+ LABEL_NUSES (real_simple_return_label)++;
/* Clear the list of insns to fill so we can use it. */
obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
for (insn = first; insn; insn = NEXT_INSN (insn))
{
int flags;
+ rtx kind, real_label;
/* Only look at filled JUMP_INSNs that go to the end of function
label. */
if (!NONJUMP_INSN_P (insn)
|| GET_CODE (PATTERN (insn)) != SEQUENCE
- || !JUMP_P (XVECEXP (PATTERN (insn), 0, 0))
- || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
+ || !jump_to_label_p (XVECEXP (PATTERN (insn), 0, 0)))
+ continue;
+
+ if (JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) == function_return_label)
+ {
+ kind = ret_rtx;
+ real_label = real_return_label;
+ }
+ else if (JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0))
+ == function_simple_return_label)
+ {
+ kind = simple_return_rtx;
+ real_label = real_simple_return_label;
+ }
+ else
continue;
pat = PATTERN (insn);
/* If we can't make the jump into a RETURN, try to redirect it to the best
RETURN and go on to the next insn. */
- if (! reorg_redirect_jump (jump_insn, NULL_RTX))
+ if (!reorg_redirect_jump (jump_insn, kind))
{
/* Make sure redirecting the jump will not invalidate the delay
slot insns. */
- if (redirect_with_delay_slots_safe_p (jump_insn,
- real_return_label,
- insn))
- reorg_redirect_jump (jump_insn, real_return_label);
+ if (redirect_with_delay_slots_safe_p (jump_insn, real_label, insn))
+ reorg_redirect_jump (jump_insn, real_label);
continue;
}
RETURN, delete the SEQUENCE and output the individual insns,
followed by the RETURN. Then set things up so we try to find
insns for its delay slots, if it needs some. */
- if (GET_CODE (PATTERN (jump_insn)) == RETURN)
+ if (ANY_RETURN_P (PATTERN (jump_insn)))
{
rtx prev = PREV_INSN (insn);
else
/* It is probably more efficient to keep this with its current
delay slot as a branch to a RETURN. */
- reorg_redirect_jump (jump_insn, real_return_label);
+ reorg_redirect_jump (jump_insn, real_label);
}
/* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any
new delay slots we have created. */
- if (--LABEL_NUSES (real_return_label) == 0)
+ if (real_return_label != NULL_RTX && --LABEL_NUSES (real_return_label) == 0)
delete_related_insns (real_return_label);
+ if (real_simple_return_label != NULL_RTX
+ && --LABEL_NUSES (real_simple_return_label) == 0)
+ delete_related_insns (real_simple_return_label);
fill_simple_delay_slots (1);
fill_simple_delay_slots (0);
}
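
[Editor's note: in outline, make_return_insns retargets a filled jump-to-end-label as a RETURN when its delay insn remains valid there. A toy rendering of just that decision, with a made-up eligibility rule standing in for the real eligible_for_delay machinery:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

struct toy_branch
{
  bool is_return;
  const char *target;		/* label name; 0 once converted */
  const char *delay_insn;
};

/* Stand-in eligibility test: pretend stores may not sit in a return's
   delay slot on this toy machine.  */
static bool
slot_ok_for_return (const char *delay_insn)
{
  return strncmp (delay_insn, "store", 5) != 0;
}

static void
toy_make_return (struct toy_branch *b, const char *end_label)
{
  if (!b->is_return
      && strcmp (b->target, end_label) == 0
      && slot_ok_for_return (b->delay_insn))
    {
      b->is_return = true;
      b->target = 0;
    }
}

int
main (void)
{
  struct toy_branch b1 = { false, "END", "r1 = r1 + 1" };
  struct toy_branch b2 = { false, "END", "store r1, [r2]" };

  toy_make_return (&b1, "END");
  toy_make_return (&b2, "END");
  printf ("b1 -> %s\n", b1.is_return ? "return" : "jump END");
  printf ("b2 -> %s\n", b2.is_return ? "return" : "jump END");
  return 0;
}
]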
-#endif
\f
/* Try to find insns to place in delay slots. */
-void
-dbr_schedule (rtx first)
+static void
+dbr_schedule (rtx_insn *first)
{
- rtx insn, next, epilogue_insn = 0;
+ rtx_insn *insn, *next, *epilogue_insn = 0;
int i;
+ bool need_return_insns;
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
if (INSN_UID (insn) > max_uid)
max_uid = INSN_UID (insn);
if (NOTE_P (insn)
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ && NOTE_KIND (insn) == NOTE_INSN_EPILOGUE_BEG)
epilogue_insn = insn;
}
- uid_to_ruid = xmalloc ((max_uid + 1) * sizeof (int));
+ uid_to_ruid = XNEWVEC (int, max_uid + 1);
for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
uid_to_ruid[INSN_UID (insn)] = i;
if (unfilled_firstobj == 0)
{
gcc_obstack_init (&unfilled_slots_obstack);
- unfilled_firstobj = obstack_alloc (&unfilled_slots_obstack, 0);
+ unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
}
for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
{
rtx target;
- INSN_ANNULLED_BRANCH_P (insn) = 0;
- INSN_FROM_TARGET_P (insn) = 0;
-
/* Skip vector tables. We can't get attributes for them. */
- if (JUMP_P (insn)
- && (GET_CODE (PATTERN (insn)) == ADDR_VEC
- || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
+ if (JUMP_TABLE_DATA_P (insn))
continue;
+ if (JUMP_P (insn))
+ INSN_ANNULLED_BRANCH_P (insn) = 0;
+ INSN_FROM_TARGET_P (insn) = 0;
+
if (num_delay_slots (insn) > 0)
obstack_ptr_grow (&unfilled_slots_obstack, insn);
/* Ensure all jumps go to the last of a set of consecutive labels. */
if (JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
- && JUMP_LABEL (insn) != 0
- && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
+ && !ANY_RETURN_P (JUMP_LABEL (insn))
+ && ((target = skip_consecutive_labels (JUMP_LABEL_AS_INSN (insn)))
!= JUMP_LABEL (insn)))
redirect_jump (insn, target, 1);
}
init_resource_info (epilogue_insn);
/* Show we haven't computed an end-of-function label yet. */
- end_of_function_label = 0;
+ function_return_label = function_simple_return_label = NULL;
/* Initialize the statistics for this function. */
memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
relax_delay_slots (first);
}
+ /* If we made an end of function label, indicate that it is now
+ safe to delete it by undoing our prior adjustment to LABEL_NUSES.
+ If it is now unused, delete it. */
+ if (function_return_label && --LABEL_NUSES (function_return_label) == 0)
+ delete_related_insns (function_return_label);
+ if (function_simple_return_label
+ && --LABEL_NUSES (function_simple_return_label) == 0)
+ delete_related_insns (function_simple_return_label);
+
+ need_return_insns = false;
+#ifdef HAVE_return
+ need_return_insns |= HAVE_return && function_return_label != 0;
+#endif
+#ifdef HAVE_simple_return
+ need_return_insns |= HAVE_simple_return && function_simple_return_label != 0;
+#endif
+ if (need_return_insns)
+ make_return_insns (first);
+
/* Delete any USE insns made by update_block; subsequent passes don't need
them or know how to deal with them. */
for (insn = first; insn; insn = next)
next = delete_related_insns (insn);
}
- /* If we made an end of function label, indicate that it is now
- safe to delete it by undoing our prior adjustment to LABEL_NUSES.
- If it is now unused, delete it. */
- if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
- delete_related_insns (end_of_function_label);
-
-#ifdef HAVE_return
- if (HAVE_return && end_of_function_label != 0)
- make_return_insns (first);
-#endif
-
obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
/* It is not clear why the line below is needed, but it does seem to be. */
- unfilled_firstobj = obstack_alloc (&unfilled_slots_obstack, 0);
+ unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0);
- if (file)
+ if (dump_file)
{
int i, j, need_comma;
int total_delay_slots[MAX_DELAY_HISTOGRAM + 1];
reorg_pass_number < MAX_REORG_PASSES;
reorg_pass_number++)
{
- fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
+ fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
{
need_comma = 0;
- fprintf (file, ";; Reorg function #%d\n", i);
+ fprintf (dump_file, ";; Reorg function #%d\n", i);
- fprintf (file, ";; %d insns needing delay slots\n;; ",
+ fprintf (dump_file, ";; %d insns needing delay slots\n;; ",
num_insns_needing_delays[i][reorg_pass_number]);
for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
if (num_filled_delays[i][j][reorg_pass_number])
{
if (need_comma)
- fprintf (file, ", ");
+ fprintf (dump_file, ", ");
need_comma = 1;
- fprintf (file, "%d got %d delays",
+ fprintf (dump_file, "%d got %d delays",
num_filled_delays[i][j][reorg_pass_number], j);
}
- fprintf (file, "\n");
+ fprintf (dump_file, "\n");
}
}
memset (total_delay_slots, 0, sizeof total_delay_slots);
{
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
{
+ rtx control;
j = XVECLEN (PATTERN (insn), 0) - 1;
if (j > MAX_DELAY_HISTOGRAM)
j = MAX_DELAY_HISTOGRAM;
- if (INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (insn), 0, 0)))
+ control = XVECEXP (PATTERN (insn), 0, 0);
+ if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
total_annul_slots[j]++;
else
total_delay_slots[j]++;
total_delay_slots[0]++;
}
}
- fprintf (file, ";; Reorg totals: ");
+ fprintf (dump_file, ";; Reorg totals: ");
need_comma = 0;
for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
{
if (total_delay_slots[j])
{
if (need_comma)
- fprintf (file, ", ");
+ fprintf (dump_file, ", ");
need_comma = 1;
- fprintf (file, "%d got %d delays", total_delay_slots[j], j);
+ fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j);
}
}
- fprintf (file, "\n");
+ fprintf (dump_file, "\n");
#if defined (ANNUL_IFTRUE_SLOTS) || defined (ANNUL_IFFALSE_SLOTS)
- fprintf (file, ";; Reorg annuls: ");
+ fprintf (dump_file, ";; Reorg annuls: ");
need_comma = 0;
for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
{
if (total_annul_slots[j])
{
if (need_comma)
- fprintf (file, ", ");
+ fprintf (dump_file, ", ");
need_comma = 1;
- fprintf (file, "%d got %d delays", total_annul_slots[j], j);
+ fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j);
}
}
- fprintf (file, "\n");
+ fprintf (dump_file, "\n");
#endif
- fprintf (file, "\n");
+ fprintf (dump_file, "\n");
}
- /* For all JUMP insns, fill in branch prediction notes, so that during
- assembler output a target can set branch prediction bits in the code.
- We have to do this now, as up until this point the destinations of
- JUMPS can be moved around and changed, but past right here that cannot
- happen. */
- for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (!sibling_labels.is_empty ())
{
- int pred_flags;
-
- if (NONJUMP_INSN_P (insn))
- {
- rtx pat = PATTERN (insn);
-
- if (GET_CODE (pat) == SEQUENCE)
- insn = XVECEXP (pat, 0, 0);
- }
- if (!JUMP_P (insn))
- continue;
-
- pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED,
- GEN_INT (pred_flags),
- REG_NOTES (insn));
+ update_alignments (sibling_labels);
+ sibling_labels.release ();
}
+
free_resource_info ();
free (uid_to_ruid);
-#ifdef DELAY_SLOTS_FOR_EPILOGUE
- /* SPARC assembler, for instance, emit warning when debug info is output
- into the delay slot. */
- {
- rtx link;
-
- for (link = current_function_epilogue_delay_list;
- link;
- link = XEXP (link, 1))
- INSN_LOCATOR (XEXP (link, 0)) = 0;
- }
-#endif
+ crtl->dbr_scheduled_p = true;
}
#endif /* DELAY_SLOTS */
\f
-static bool
-gate_handle_delay_slots (void)
-{
-#ifdef DELAY_SLOTS
- return flag_delayed_branch;
-#else
- return 0;
-#endif
-}
-
/* Run delay slot optimization. */
-static void
+static unsigned int
rest_of_handle_delay_slots (void)
{
#ifdef DELAY_SLOTS
dbr_schedule (get_insns ());
#endif
-}
+ return 0;
+}
-struct tree_opt_pass pass_delay_slots =
+namespace {
+
+const pass_data pass_data_delay_slots =
{
- "dbr", /* name */
- gate_handle_delay_slots, /* gate */
- rest_of_handle_delay_slots, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_DBR_SCHED, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func |
- TODO_ggc_collect, /* todo_flags_finish */
- 'd' /* letter */
+ RTL_PASS, /* type */
+ "dbr", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_DBR_SCHED, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
-/* Machine dependent reorg pass. */
-static bool
-gate_handle_machine_reorg (void)
+class pass_delay_slots : public rtl_opt_pass
{
- return targetm.machine_dependent_reorg != 0;
+public:
+ pass_delay_slots (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_delay_slots, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *);
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_delay_slots ();
+ }
+
+}; // class pass_delay_slots
+
+bool
+pass_delay_slots::gate (function *)
+{
+#ifdef DELAY_SLOTS
+ /* At -O0 dataflow info isn't updated after RA. */
+ return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
+#else
+ return 0;
+#endif
}
+} // anon namespace
-static void
-rest_of_handle_machine_reorg (void)
+rtl_opt_pass *
+make_pass_delay_slots (gcc::context *ctxt)
{
- targetm.machine_dependent_reorg ();
+ return new pass_delay_slots (ctxt);
}
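
[Editor's note: the boilerplate above is the C++ pass API introduced by this rewrite: a pass_data descriptor plus a class with virtual gate/execute hooks, hidden in an unnamed namespace behind a factory. A self-contained miniature of the same shape; every name here is illustrative, not GCC's:

#include <cstdio>

struct toy_context
{
  bool delayed_branch;		/* stand-in for flag_delayed_branch */
  bool already_scheduled;	/* stand-in for crtl->dbr_scheduled_p */
};

class toy_pass
{
public:
  virtual ~toy_pass () {}
  virtual bool gate (toy_context *) { return true; }
  virtual unsigned int execute (toy_context *) = 0;
};

namespace {

class toy_delay_slots : public toy_pass
{
public:
  virtual bool gate (toy_context *ctxt)
  {
    return ctxt->delayed_branch && !ctxt->already_scheduled;
  }
  virtual unsigned int execute (toy_context *ctxt)
  {
    std::printf ("filling delay slots\n");
    ctxt->already_scheduled = true;
    return 0;
  }
};

} // anon namespace

toy_pass *
make_toy_delay_slots ()
{
  return new toy_delay_slots;
}

int
main ()
{
  toy_context ctxt = { true, false };
  toy_pass *p = make_toy_delay_slots ();
  if (p->gate (&ctxt))
    p->execute (&ctxt);
  delete p;
  return 0;
}
]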
-struct tree_opt_pass pass_machine_reorg =
+/* Machine dependent reorg pass. */
+
+namespace {
+
+const pass_data pass_data_machine_reorg =
{
- "mach", /* name */
- gate_handle_machine_reorg, /* gate */
- rest_of_handle_machine_reorg, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_MACH_DEP, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func |
- TODO_ggc_collect, /* todo_flags_finish */
- 'M' /* letter */
+ RTL_PASS, /* type */
+ "mach", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_MACH_DEP, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+class pass_machine_reorg : public rtl_opt_pass
+{
+public:
+ pass_machine_reorg (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_machine_reorg, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return targetm.machine_dependent_reorg != 0;
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ targetm.machine_dependent_reorg ();
+ return 0;
+ }
+
+}; // class pass_machine_reorg
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_machine_reorg (gcc::context *ctxt)
+{
+ return new pass_machine_reorg (ctxt);
+}