/* Perform instruction reorganizations for delay slot filling.
- Copyright (C) 1992-2013 Free Software Foundation, Inc.
+ Copyright (C) 1992-2014 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
Hacked by Michael Tiemann (tiemann@cygnus.com).
#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
#endif
+\f
+/* First, some functions that were used before GCC got a control flow graph.
+ These functions are now only used here in reorg.c, and have therefore
+ been moved here to avoid inadvertent misuse elsewhere in the compiler. */
+
+/* Return the last label to mark the same position as LABEL. Return LABEL
+ itself if it is null or any return rtx. */
+
+static rtx_insn *
+skip_consecutive_labels (rtx_insn *label)
+{
+ rtx_insn *insn;
+
+ if (label && ANY_RETURN_P (label))
+ return label;
+
+ for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn))
+ if (LABEL_P (insn))
+ label = insn;
+
+ return label;
+}
+
+#ifdef HAVE_cc0
+/* INSN uses CC0 and is being moved into a delay slot.  Set up REG_CC_SETTER
+   and REG_CC_USER notes so the setter/user pair can be found again later.  */
+
+static void
+link_cc0_insns (rtx insn)
+{
+ rtx user = next_nonnote_insn (insn);
+
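+  /* If the user of CC0 has already been wrapped in a delay-slot SEQUENCE,
+     the real user is the insn that needs the delay slots, element 0.  */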
+ if (NONJUMP_INSN_P (user) && GET_CODE (PATTERN (user)) == SEQUENCE)
+ user = XVECEXP (PATTERN (user), 0, 0);
+
+ add_reg_note (user, REG_CC_SETTER, insn);
+ add_reg_note (insn, REG_CC_USER, user);
+}
+#endif
+\f
/* Insns which have delay slots that have not yet been filled. */
static struct obstack unfilled_slots_obstack;
should be recomputed at each use. */
#define unfilled_slots_base \
- ((rtx *) obstack_base (&unfilled_slots_obstack))
+ ((rtx_insn **) obstack_base (&unfilled_slots_obstack))
#define unfilled_slots_next \
- ((rtx *) obstack_next_free (&unfilled_slots_obstack))
+ ((rtx_insn **) obstack_next_free (&unfilled_slots_obstack))
/* Points to the label before the end of the function, or before a
return insn. */
-static rtx function_return_label;
+static rtx_code_label *function_return_label;
/* Likewise for a simple_return. */
-static rtx function_simple_return_label;
+static rtx_code_label *function_simple_return_label;
/* Mapping between INSN_UID's and position in the code since INSN_UID's do
not always monotonically increase. */
static int resource_conflicts_p (struct resources *, struct resources *);
static int insn_references_resource_p (rtx, struct resources *, bool);
static int insn_sets_resource_p (rtx, struct resources *, bool);
-static rtx find_end_label (rtx);
-static rtx emit_delay_sequence (rtx, rtx, int);
-static rtx add_to_delay_list (rtx, rtx);
-static rtx delete_from_delay_slot (rtx);
+static rtx_code_label *find_end_label (rtx);
+static rtx_insn *emit_delay_sequence (rtx_insn *, rtx_insn_list *, int);
+static rtx_insn_list *add_to_delay_list (rtx_insn *, rtx_insn_list *);
+static rtx_insn *delete_from_delay_slot (rtx_insn *);
static void delete_scheduled_jump (rtx);
static void note_delay_statistics (int, int);
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
-static rtx optimize_skip (rtx);
+static rtx_insn_list *optimize_skip (rtx_insn *);
#endif
static int get_jump_flags (rtx, rtx);
static int mostly_true_jump (rtx);
static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
static int check_annul_list_true_false (int, rtx);
-static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *, rtx *);
-static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *);
+static rtx_insn_list *steal_delay_list_from_target (rtx, rtx,
+ rtx_sequence *,
+ rtx_insn_list *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *,
+ rtx_insn **);
+static rtx_insn_list *steal_delay_list_from_fallthrough (rtx, rtx,
+ rtx_sequence *,
+ rtx_insn_list *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *);
static void try_merge_delay_insns (rtx, rtx);
static rtx redundant_insn (rtx, rtx, rtx);
static int own_thread_p (rtx, rtx, int);
static void fix_reg_dead_note (rtx, rtx);
static void update_reg_unused_notes (rtx, rtx);
static void fill_simple_delay_slots (int);
-static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx,
- int, int, int, int,
- int *, rtx);
+static rtx_insn_list *fill_slots_from_thread (rtx_insn *, rtx,
+ rtx_insn *, rtx_insn *,
+ int, int, int, int,
+ int *, rtx_insn_list *);
static void fill_eager_delay_slots (void);
-static void relax_delay_slots (rtx);
+static void relax_delay_slots (rtx_insn *);
static void make_return_insns (rtx);
\f
/* A wrapper around next_active_insn which takes care to return ret_rtx
unchanged. */
-static rtx
-first_active_target_insn (rtx insn)
+static rtx_insn *
+first_active_target_insn (rtx_insn *insn)
{
if (ANY_RETURN_P (insn))
return insn;
resource_conflicts_p (struct resources *res1, struct resources *res2)
{
if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
- || (res1->unch_memory && res2->unch_memory)
|| res1->volatil || res2->volatil)
return 1;
KIND is either simple_return_rtx or ret_rtx, indicating which type of
return we're looking for. */
-static rtx
+static rtx_code_label *
find_end_label (rtx kind)
{
- rtx insn;
- rtx *plabel;
+ rtx_insn *insn;
+ rtx_code_label **plabel;
if (kind == ret_rtx)
plabel = &function_return_label;
&& JUMP_P (PREV_INSN (insn))
&& PATTERN (PREV_INSN (insn)) == kind)
{
- rtx temp = PREV_INSN (PREV_INSN (insn));
- rtx label = gen_label_rtx ();
+ rtx_insn *temp = PREV_INSN (PREV_INSN (insn));
+ rtx_code_label *label = gen_label_rtx ();
LABEL_NUSES (label) = 0;
/* Put the label before any USE insns that may precede the RETURN
}
else if (LABEL_P (insn))
- *plabel = insn;
+ *plabel = as_a <rtx_code_label *> (insn);
else
{
- rtx label = gen_label_rtx ();
+ rtx_code_label *label = gen_label_rtx ();
LABEL_NUSES (label) = 0;
/* If the basic block reorder pass moves the return insn to
some other place try to locate it again and put our
emit the label just before it. Since we already have
an epilogue and cannot emit a new RETURN, we cannot
emit the label at all. */
- return NULL_RTX;
+ return NULL;
#endif /* HAVE_epilogue */
/* Otherwise, make a new label and emit a RETURN and BARRIER,
/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
   INSN in the insn stream with that SEQUENCE.
- Chain the insns so that NEXT_INSN of each insn in the sequence points to
- the next and NEXT_INSN of the last insn in the sequence points to
- the first insn after the sequence. Similarly for PREV_INSN. This makes
- it easier to scan all insns.
-
- Returns the SEQUENCE that replaces INSN. */
+   Returns the insn containing the SEQUENCE that replaces INSN.
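+
+   For example, a branch with two filled delay slots becomes a single
+   INSN whose PATTERN is (sequence [insn_with_slots slot_1 slot_2]).  */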
-static rtx
-emit_delay_sequence (rtx insn, rtx list, int length)
+static rtx_insn *
+emit_delay_sequence (rtx_insn *insn, rtx_insn_list *list, int length)
{
- int i = 1;
- rtx li;
- int had_barrier = 0;
-
/* Allocate the rtvec to hold the insns and the SEQUENCE. */
rtvec seqv = rtvec_alloc (length + 1);
rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
- rtx seq_insn = make_insn_raw (seq);
- rtx first = get_insns ();
- rtx last = get_last_insn ();
+ rtx_insn *seq_insn = make_insn_raw (seq);
- /* Make a copy of the insn having delay slots. */
- rtx delay_insn = copy_rtx (insn);
+  /* If INSN has a location, use it for SEQ_INSN.  If INSN does not have
+     a location, but one of the delayed insns does, we pick up a location
+     from there later.  */
+ INSN_LOCATION (seq_insn) = INSN_LOCATION (insn);
- /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
- confuse further processing. Update LAST in case it was the last insn.
- We will put the BARRIER back in later. */
- if (NEXT_INSN (insn) && BARRIER_P (NEXT_INSN (insn)))
- {
- delete_related_insns (NEXT_INSN (insn));
- last = get_last_insn ();
- had_barrier = 1;
- }
-
- /* Splice our SEQUENCE into the insn stream where INSN used to be. */
- NEXT_INSN (seq_insn) = NEXT_INSN (insn);
- PREV_INSN (seq_insn) = PREV_INSN (insn);
-
- if (insn != last)
- PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
-
- if (insn != first)
- NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
-
- /* Note the calls to set_new_first_and_last_insn must occur after
- SEQ_INSN has been completely spliced into the insn stream.
-
- Otherwise CUR_INSN_UID will get set to an incorrect value because
- set_new_first_and_last_insn will not find SEQ_INSN in the chain. */
- if (insn == last)
- set_new_first_and_last_insn (first, seq_insn);
-
- if (insn == first)
- set_new_first_and_last_insn (seq_insn, last);
+ /* Unlink INSN from the insn chain, so that we can put it into
+ the SEQUENCE. Remember where we want to emit SEQUENCE in AFTER. */
+ rtx after = PREV_INSN (insn);
+ remove_insn (insn);
+ SET_NEXT_INSN (insn) = SET_PREV_INSN (insn) = NULL;
/* Build our SEQUENCE and rebuild the insn chain. */
- XVECEXP (seq, 0, 0) = delay_insn;
- INSN_DELETED_P (delay_insn) = 0;
- PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
-
- INSN_LOCATION (seq_insn) = INSN_LOCATION (delay_insn);
-
- for (li = list; li; li = XEXP (li, 1), i++)
+ int i = 1;
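+  /* Chain the insns inside the SEQUENCE using the normal emit machinery:
+     within start_sequence/end_sequence, each emit_insn call links the
+     emitted insn onto the end of the chain being built.  */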
+ start_sequence ();
+ XVECEXP (seq, 0, 0) = emit_insn (insn);
+ for (rtx_insn_list *li = list; li; li = li->next (), i++)
{
- rtx tem = XEXP (li, 0);
+ rtx_insn *tem = li->insn ();
rtx note, next;
/* Show that this copy of the insn isn't deleted. */
INSN_DELETED_P (tem) = 0;
- XVECEXP (seq, 0, i) = tem;
- PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
- NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
+ /* Unlink insn from its original place, and re-emit it into
+ the sequence. */
+ SET_NEXT_INSN (tem) = SET_PREV_INSN (tem) = NULL;
+ XVECEXP (seq, 0, i) = emit_insn (tem);
	  /* The SPARC assembler, for instance, emits a warning when debug
	     info is output into the delay slot.  */
}
}
}
-
- NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
-
- /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
- last insn in that SEQUENCE to point to us. Similarly for the first
- insn in the following insn if it is a SEQUENCE. */
-
- if (PREV_INSN (seq_insn) && NONJUMP_INSN_P (PREV_INSN (seq_insn))
- && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
- NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
- XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
- = seq_insn;
-
- if (NEXT_INSN (seq_insn) && NONJUMP_INSN_P (NEXT_INSN (seq_insn))
- && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
- PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
-
- /* If there used to be a BARRIER, put it back. */
- if (had_barrier)
- emit_barrier_after (seq_insn);
-
+ end_sequence ();
gcc_assert (i == length + 1);
+ /* Splice our SEQUENCE into the insn stream where INSN used to be. */
+ add_insn_after (seq_insn, after, NULL);
+
return seq_insn;
}
/* Add INSN to DELAY_LIST and return the head of the new list. The list must
be in the order in which the insns are to be executed. */
-static rtx
-add_to_delay_list (rtx insn, rtx delay_list)
+static rtx_insn_list *
+add_to_delay_list (rtx_insn *insn, rtx_insn_list *delay_list)
{
/* If we have an empty list, just make a new list element. If
INSN has its block number recorded, clear it since we may
/* Otherwise this must be an INSN_LIST. Add INSN to the end of the
list. */
- XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
+ XEXP (delay_list, 1) = add_to_delay_list (insn, delay_list->next ());
return delay_list;
}
/* Delete INSN from the delay slot of the insn that it is in, which may
produce an insn with no delay slots. Return the new insn. */
-static rtx
-delete_from_delay_slot (rtx insn)
+static rtx_insn *
+delete_from_delay_slot (rtx_insn *insn)
{
- rtx trial, seq_insn, seq, prev;
- rtx delay_list = 0;
+ rtx_insn *trial, *seq_insn, *prev;
+ rtx_sequence *seq;
+ rtx_insn_list *delay_list = 0;
int i;
int had_barrier = 0;
;
seq_insn = PREV_INSN (NEXT_INSN (trial));
- seq = PATTERN (seq_insn);
+ seq = as_a <rtx_sequence *> (PATTERN (seq_insn));
if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn)))
had_barrier = 1;
/* Create a delay list consisting of all the insns other than the one
we are deleting (unless we were the only one). */
- if (XVECLEN (seq, 0) > 2)
- for (i = 1; i < XVECLEN (seq, 0); i++)
- if (XVECEXP (seq, 0, i) != insn)
- delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
+ if (seq->len () > 2)
+ for (i = 1; i < seq->len (); i++)
+ if (seq->insn (i) != insn)
+ delay_list = add_to_delay_list (seq->insn (i), delay_list);
/* Delete the old SEQUENCE, re-emit the insn that used to have the delay
list, and rebuild the delay list if non-empty. */
prev = PREV_INSN (seq_insn);
- trial = XVECEXP (seq, 0, 0);
+ trial = seq->insn (0);
delete_related_insns (seq_insn);
add_insn_after (trial, prev, NULL);
{
if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
&& sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
- delete_from_delay_slot (XEXP (note, 0));
+ delete_from_delay_slot (as_a <rtx_insn *> (XEXP (note, 0)));
}
else
{
/* The insn setting CC0 is our previous insn, but it may be in
a delay slot. It will be the last insn in the delay slot, if
it is. */
- rtx trial = previous_insn (insn);
+ rtx_insn *trial = previous_insn (insn);
if (NOTE_P (trial))
trial = prev_nonnote_insn (trial);
if (sets_cc0_p (PATTERN (trial)) != 1
This should be expanded to skip over N insns, where N is the number
of delay slots required. */
-static rtx
-optimize_skip (rtx insn)
+static rtx_insn_list *
+optimize_skip (rtx_insn *insn)
{
- rtx trial = next_nonnote_insn (insn);
- rtx next_trial = next_active_insn (trial);
- rtx delay_list = 0;
+ rtx_insn *trial = next_nonnote_insn (insn);
+ rtx_insn *next_trial = next_active_insn (trial);
+ rtx_insn_list *delay_list = 0;
int flags;
flags = get_jump_flags (insn, JUMP_LABEL (insn));
return 0;
}
- delay_list = add_to_delay_list (trial, NULL_RTX);
+ delay_list = add_to_delay_list (trial, NULL);
next_trial = next_active_insn (trial);
update_block (trial, trial);
delete_related_insns (trial);
rtx note = find_reg_note (jump_insn, REG_BR_PROB, 0);
if (note)
{
- int prob = INTVAL (XEXP (note, 0));
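+	  /* The branch probability is now stored directly in the note as
+	     an integer operand rather than as a CONST_INT rtx.  */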
+ int prob = XINT (note, 0);
if (prob >= REG_BR_PROB_BASE * 9 / 10)
return 2;
if (condjump_in_parallel_p (insn))
pat = XVECEXP (pat, 0, 0);
- if (ANY_RETURN_P (pat))
- return pat == target ? const_true_rtx : 0;
+ if (ANY_RETURN_P (pat) && pat == target)
+ return const_true_rtx;
if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
return 0;
else if (GET_CODE (src) == IF_THEN_ELSE
&& XEXP (src, 2) == pc_rtx
- && GET_CODE (XEXP (src, 1)) == LABEL_REF
- && XEXP (XEXP (src, 1), 0) == target)
+ && ((GET_CODE (XEXP (src, 1)) == LABEL_REF
+ && XEXP (XEXP (src, 1), 0) == target)
+ || (ANY_RETURN_P (XEXP (src, 1)) && XEXP (src, 1) == target)))
return XEXP (src, 0);
else if (GET_CODE (src) == IF_THEN_ELSE
&& XEXP (src, 1) == pc_rtx
- && GET_CODE (XEXP (src, 2)) == LABEL_REF
- && XEXP (XEXP (src, 2), 0) == target)
+ && ((GET_CODE (XEXP (src, 2)) == LABEL_REF
+ && XEXP (XEXP (src, 2), 0) == target)
+ || (ANY_RETURN_P (XEXP (src, 2)) && XEXP (src, 2) == target)))
{
enum rtx_code rev;
rev = reversed_comparison_code (XEXP (src, 0), insn);
PNEW_THREAD points to a location that is to receive the place at which
execution should continue. */
-static rtx
-steal_delay_list_from_target (rtx insn, rtx condition, rtx seq,
- rtx delay_list, struct resources *sets,
+static rtx_insn_list *
+steal_delay_list_from_target (rtx insn, rtx condition, rtx_sequence *seq,
+ rtx_insn_list *delay_list, struct resources *sets,
struct resources *needed,
struct resources *other_needed,
int slots_to_fill, int *pslots_filled,
- int *pannul_p, rtx *pnew_thread)
+ int *pannul_p, rtx_insn **pnew_thread)
{
- rtx temp;
int slots_remaining = slots_to_fill - *pslots_filled;
int total_slots_filled = *pslots_filled;
- rtx new_delay_list = 0;
+ rtx_insn_list *new_delay_list = 0;
int must_annul = *pannul_p;
int used_annul = 0;
int i;
struct resources cc_set;
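+  /* For each insn in SEQ's delay slots, records whether the insn was
+     found to be redundant; used below to update the block information
+     for the insns we consequently do not copy.  */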
+ bool *redundant;
/* We can't do anything if there are more delay slots in SEQ than we
can handle, or if we don't know that it will be a taken branch.
will effect the direction of the jump in the sequence. */
CLEAR_RESOURCE (&cc_set);
- for (temp = delay_list; temp; temp = XEXP (temp, 1))
+ for (rtx_insn_list *temp = delay_list; temp; temp = temp->next ())
{
- rtx trial = XEXP (temp, 0);
+ rtx_insn *trial = temp->insn ();
mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
- if (insn_references_resource_p (XVECEXP (seq , 0, 0), &cc_set, false))
+ if (insn_references_resource_p (seq->insn (0), &cc_set, false))
return delay_list;
}
if (XVECLEN (seq, 0) - 1 > slots_remaining
- || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
- || ! single_set (XVECEXP (seq, 0, 0)))
+ || ! condition_dominates_p (condition, seq->insn (0))
+ || ! single_set (seq->insn (0)))
return delay_list;
#ifdef MD_CAN_REDIRECT_BRANCH
/* On some targets, branches with delay slots can have a limited
displacement. Give the back end a chance to tell us we can't do
this. */
- if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0)))
+ if (! MD_CAN_REDIRECT_BRANCH (insn, seq->insn (0)))
return delay_list;
#endif
- for (i = 1; i < XVECLEN (seq, 0); i++)
+ redundant = XALLOCAVEC (bool, XVECLEN (seq, 0));
+ for (i = 1; i < seq->len (); i++)
{
- rtx trial = XVECEXP (seq, 0, i);
+ rtx_insn *trial = seq->insn (i);
int flags;
if (insn_references_resource_p (trial, sets, false)
#endif
/* If TRIAL is from the fallthrough code of an annulled branch insn
in SEQ, we cannot use it. */
- || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
+ || (INSN_ANNULLED_BRANCH_P (seq->insn (0))
&& ! INSN_FROM_TARGET_P (trial)))
return delay_list;
/* If this insn was already done (usually in a previous delay slot),
pretend we put it in our delay slot. */
- if (redundant_insn (trial, insn, new_delay_list))
+ redundant[i] = redundant_insn (trial, insn, new_delay_list);
+ if (redundant[i])
continue;
/* We will end up re-vectoring this branch, so compute flags
based on jumping to the new label. */
- flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
+ flags = get_jump_flags (insn, JUMP_LABEL (seq->insn (0)));
if (! must_annul
&& ((condition == const_true_rtx
{
if (must_annul)
used_annul = 1;
- temp = copy_delay_slot_insn (trial);
+ rtx_insn *temp = copy_delay_slot_insn (trial);
INSN_FROM_TARGET_P (temp) = 1;
new_delay_list = add_to_delay_list (temp, new_delay_list);
total_slots_filled++;
return delay_list;
}
+ /* Record the effect of the instructions that were redundant and which
+ we therefore decided not to copy. */
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ if (redundant[i])
+ update_block (XVECEXP (seq, 0, i), insn);
+
/* Show the place to which we will be branching. */
- *pnew_thread = first_active_target_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
+ *pnew_thread = first_active_target_insn (JUMP_LABEL_AS_INSN (seq->insn (0)));
/* Add any new insns to the delay list and update the count of the
number of slots filled. */
if (delay_list == 0)
return new_delay_list;
- for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
- delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
+ for (rtx_insn_list *temp = new_delay_list; temp; temp = temp->next ())
+ delay_list = add_to_delay_list (temp->insn (), delay_list);
return delay_list;
}
of SEQ is an unconditional branch. In that case we steal its delay slot
for INSN since unconditional branches are much easier to fill. */
-static rtx
-steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq,
- rtx delay_list, struct resources *sets,
+static rtx_insn_list *
+steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx_sequence *seq,
+ rtx_insn_list *delay_list,
+ struct resources *sets,
struct resources *needed,
struct resources *other_needed,
int slots_to_fill, int *pslots_filled,
/* We can't do anything if SEQ's delay insn isn't an
unconditional branch. */
- if (! simplejump_or_return_p (XVECEXP (seq, 0, 0)))
+ if (! simplejump_or_return_p (seq->insn (0)))
return delay_list;
- for (i = 1; i < XVECLEN (seq, 0); i++)
+ for (i = 1; i < seq->len (); i++)
{
- rtx trial = XVECEXP (seq, 0, i);
+ rtx_insn *trial = seq->insn (i);
/* If TRIAL sets CC0, stealing it will move it too far from the use
of CC0. */
/* If this insn was already done, we don't need it. */
if (redundant_insn (trial, insn, delay_list))
{
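+	  /* TRIAL is about to disappear from this thread; leave a marker
+	     (see update_block) so mark_target_live_regs still accounts
+	     for it.  */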
+ update_block (trial, insn);
delete_from_delay_slot (trial);
continue;
}
int num_slots = XVECLEN (PATTERN (insn), 0);
rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
struct resources set, needed;
- rtx merged_insns = 0;
+ rtx_insn_list *merged_insns = 0;
int i;
int flags;
&& !(JUMP_P (XVECEXP (PATTERN (trial), 0, 0))
&& INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))))
{
- rtx pat = PATTERN (trial);
+ rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (trial));
rtx filled_insn = XVECEXP (pat, 0, 0);
/* Account for resources set/needed by the filled insn. */
mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL);
mark_referenced_resources (filled_insn, &needed, true);
- for (i = 1; i < XVECLEN (pat, 0); i++)
+ for (i = 1; i < pat->len (); i++)
{
- rtx dtrial = XVECEXP (pat, 0, i);
+ rtx_insn *dtrial = pat->insn (i);
if (! insn_references_resource_p (dtrial, &set, true)
&& ! insn_sets_resource_p (dtrial, &set, true)
{
if (! annul_p)
{
- rtx new_rtx;
+ rtx_insn *new_rtx;
update_block (dtrial, thread);
new_rtx = delete_from_delay_slot (dtrial);
target. */
if (slot_number == num_slots && annul_p)
{
- for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
+ for (; merged_insns; merged_insns = merged_insns->next ())
{
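+	  /* SImode on the list node marks an insn merged out of the delay
+	     slot of a trial SEQUENCE; VOIDmode marks a plain trial insn.  */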
if (GET_MODE (merged_insns) == SImode)
{
- rtx new_rtx;
+ rtx_insn *new_rtx;
update_block (XEXP (merged_insns, 0), thread);
- new_rtx = delete_from_delay_slot (XEXP (merged_insns, 0));
+ new_rtx = delete_from_delay_slot (merged_insns->insn ());
if (INSN_DELETED_P (thread))
thread = new_rtx;
}
trial && insns_to_search > 0;
trial = PREV_INSN (trial))
{
- if (LABEL_P (trial))
+ /* (use (insn))s can come immediately after a barrier if the
+ label that used to precede them has been deleted as dead.
+ See delete_related_insns. */
+ if (LABEL_P (trial) || BARRIER_P (trial))
return 0;
if (!INSN_P (trial))
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
- if (GET_CODE (pat) == SEQUENCE)
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
{
/* Stop for a CALL and its delay slots because it is difficult to
track its resource needs correctly. */
- if (CALL_P (XVECEXP (pat, 0, 0)))
+ if (CALL_P (seq->element (0)))
return 0;
/* Stop for an INSN or JUMP_INSN with delayed effects and its delay
correctly. */
#ifdef INSN_SETS_ARE_DELAYED
- if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_SETS_ARE_DELAYED (seq->element (0)))
return 0;
#endif
#ifdef INSN_REFERENCES_ARE_DELAYED
- if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ if (INSN_REFERENCES_ARE_DELAYED (seq->element (0)))
return 0;
#endif
/* See if any of the insns in the delay slot match, updating
resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
- if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
- && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
- && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
+ for (i = seq->len () - 1; i > 0; i--)
+ if (GET_CODE (seq->element (i)) == GET_CODE (insn)
+ && rtx_equal_p (PATTERN (seq->element (i)), ipat)
+ && ! find_reg_note (seq->element (i), REG_UNUSED, NULL_RTX))
break;
/* If found a match, exit this loop early. */
/* Insns we pass may not set either NEEDED or SET, so merge them for
simpler tests. */
needed.memory |= set.memory;
- needed.unch_memory |= set.unch_memory;
IOR_HARD_REG_SET (needed.regs, set.regs);
/* This insn isn't redundant if it conflicts with an insn that either is
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
- if (GET_CODE (pat) == SEQUENCE)
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat))
{
bool annul_p = false;
- rtx control = XVECEXP (pat, 0, 0);
+ rtx control = seq->element (0);
/* If this is a CALL_INSN and its delay slots, it is hard to track
the resource needs properly, so give up. */
/* See if any of the insns in the delay slot match, updating
resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ for (i = seq->len () - 1; i > 0; i--)
{
- rtx candidate = XVECEXP (pat, 0, i);
+ rtx candidate = seq->element (i);
/* If an insn will be annulled if the branch is false, it isn't
considered as a possible duplicate insn. */
}
}
\f
-/* Return the label before INSN, or put a new label there. */
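+/* Pairs of labels pushed by get_label_before: each new label is followed
+   by its sibling, for update_alignments to process when reorg finishes.  */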
+static vec <rtx> sibling_labels;
-static rtx
-get_label_before (rtx insn)
+/* Return the label before INSN, or put a new label there. If SIBLING is
+ non-zero, it is another label associated with the new label (if any),
+ typically the former target of the jump that will be redirected to
+ the new label. */
+
+static rtx_insn *
+get_label_before (rtx insn, rtx sibling)
{
- rtx label;
+ rtx_insn *label;
/* Find an existing label at this point
or make a new one if there is none. */
label = gen_label_rtx ();
emit_label_after (label, prev);
LABEL_NUSES (label) = 0;
+ if (sibling)
+ {
+ sibling_labels.safe_push (label);
+ sibling_labels.safe_push (sibling);
+ }
}
return label;
}
static void
fill_simple_delay_slots (int non_jumps_p)
{
- rtx insn, pat, trial, next_trial;
+ rtx_insn *insn, *trial, *next_trial;
+ rtx pat;
int i;
int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
struct resources needed, set;
int slots_to_fill, slots_filled;
- rtx delay_list;
+ rtx_insn_list *delay_list;
for (i = 0; i < num_unfilled_slots; i++)
{
&& no_labels_between_p (insn, trial)
&& ! can_throw_internal (trial))
{
- rtx *tmp;
+ rtx_insn **tmp;
slots_filled++;
delay_list = add_to_delay_list (trial, delay_list);
if (*tmp == trial)
*tmp = 0;
{
- rtx next = NEXT_INSN (trial);
- rtx prev = PREV_INSN (trial);
+ rtx_insn *next = NEXT_INSN (trial);
+ rtx_insn *prev = PREV_INSN (trial);
if (prev)
- NEXT_INSN (prev) = next;
+ SET_NEXT_INSN (prev) = next;
if (next)
- PREV_INSN (next) = prev;
+ SET_PREV_INSN (next) = prev;
}
}
Presumably, we should also check to see if we could get
back to this function via `setjmp'. */
&& ! can_throw_internal (insn)
- && (!JUMP_P (insn)
- || ((condjump_p (insn) || condjump_in_parallel_p (insn))
- && ! simplejump_p (insn)
- && !ANY_RETURN_P (JUMP_LABEL (insn)))))
+ && !JUMP_P (insn))
{
- /* Invariant: If insn is a JUMP_INSN, the insn's jump
- label. Otherwise, zero. */
- rtx target = 0;
int maybe_never = 0;
rtx pat, trial_delay;
CLEAR_RESOURCE (&needed);
CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
+ mark_referenced_resources (insn, &needed, true);
if (CALL_P (insn))
+ maybe_never = 1;
+
+ for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1);
+ trial = next_trial)
{
- mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (insn, &needed, true);
- maybe_never = 1;
- }
- else
- {
- mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (insn, &needed, true);
- if (JUMP_P (insn))
- target = JUMP_LABEL (insn);
- }
+ next_trial = next_nonnote_insn (trial);
- if (target == 0 || ANY_RETURN_P (target))
- for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1);
- trial = next_trial)
- {
- next_trial = next_nonnote_insn (trial);
+ /* This must be an INSN or CALL_INSN. */
+ pat = PATTERN (trial);
- /* This must be an INSN or CALL_INSN. */
- pat = PATTERN (trial);
+ /* Stand-alone USE and CLOBBER are just for flow. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
- /* Stand-alone USE and CLOBBER are just for flow. */
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
+ /* If this already has filled delay slots, get the insn needing
+ the delay slots. */
+ if (GET_CODE (pat) == SEQUENCE)
+ trial_delay = XVECEXP (pat, 0, 0);
+ else
+ trial_delay = trial;
- /* If this already has filled delay slots, get the insn needing
- the delay slots. */
- if (GET_CODE (pat) == SEQUENCE)
- trial_delay = XVECEXP (pat, 0, 0);
- else
- trial_delay = trial;
-
- /* Stop our search when seeing a jump. */
- if (JUMP_P (trial_delay))
- break;
-
- /* See if we have a resource problem before we try to
- split. */
- if (GET_CODE (pat) != SEQUENCE
- && ! insn_references_resource_p (trial, &set, true)
- && ! insn_sets_resource_p (trial, &set, true)
- && ! insn_sets_resource_p (trial, &needed, true)
+ /* Stop our search when seeing a jump. */
+ if (JUMP_P (trial_delay))
+ break;
+
+ /* See if we have a resource problem before we try to split. */
+ if (GET_CODE (pat) != SEQUENCE
+ && ! insn_references_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &set, true)
+ && ! insn_sets_resource_p (trial, &needed, true)
#ifdef HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
+ && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
#endif
- && ! (maybe_never && may_trap_or_fault_p (pat))
- && (trial = try_split (pat, trial, 0))
- && eligible_for_delay (insn, slots_filled, trial, flags)
- && ! can_throw_internal(trial))
- {
- next_trial = next_nonnote_insn (trial);
- delay_list = add_to_delay_list (trial, delay_list);
-
+ && ! (maybe_never && may_trap_or_fault_p (pat))
+ && (trial = try_split (pat, trial, 0))
+ && eligible_for_delay (insn, slots_filled, trial, flags)
+ && ! can_throw_internal (trial))
+ {
+ next_trial = next_nonnote_insn (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
- link_cc0_insns (trial);
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
#endif
+ delete_related_insns (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
- delete_related_insns (trial);
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
-
- mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
- mark_referenced_resources (trial, &needed, true);
+ mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL);
+ mark_referenced_resources (trial, &needed, true);
- /* Ensure we don't put insns between the setting of cc and the
- comparison by moving a setting of cc into an earlier delay
- slot since these insns could clobber the condition code. */
- set.cc = 1;
+ /* Ensure we don't put insns between the setting of cc and the
+ comparison by moving a setting of cc into an earlier delay
+ slot since these insns could clobber the condition code. */
+ set.cc = 1;
- /* If this is a call or jump, we might not get here. */
- if (CALL_P (trial_delay)
- || JUMP_P (trial_delay))
- maybe_never = 1;
- }
+ /* If this is a call, we might not get here. */
+ if (CALL_P (trial_delay))
+ maybe_never = 1;
+ }
/* If there are slots left to fill and our search was stopped by an
unconditional branch, try the insn at the branch target. We can
&& trial
&& jump_to_label_p (trial)
&& simplejump_p (trial)
- && (target == 0 || JUMP_LABEL (trial) == target)
&& (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
&& ! (NONJUMP_INSN_P (next_trial)
&& GET_CODE (PATTERN (next_trial)) == SEQUENCE)
rtx new_label = next_real_insn (next_trial);
if (new_label != 0)
- new_label = get_label_before (new_label);
+ new_label = get_label_before (new_label, JUMP_LABEL (trial));
else
new_label = find_end_label (simple_return_rtx);
delay_list);
slots_filled++;
reorg_redirect_jump (trial, new_label);
-
- /* If we merged because we both jumped to the same place,
- redirect the original insn also. */
- if (target)
- reorg_redirect_jump (insn, new_label);
}
}
}
If LABEL is not followed by a jump, return LABEL.
   If the chain loops or we can't find the end, return LABEL,
   since that tells the caller to avoid changing the insn.
- If the returned label is obtained by following a REG_CROSSING_JUMP
- jump, set *CROSSING to true, otherwise set it to false. */
+ If the returned label is obtained by following a crossing jump,
+ set *CROSSING to true, otherwise set it to false. */
-static rtx
-follow_jumps (rtx label, rtx jump, bool *crossing)
+static rtx_insn *
+follow_jumps (rtx_insn *label, rtx jump, bool *crossing)
{
- rtx insn;
- rtx next;
- rtx value = label;
+ rtx_insn *insn;
+ rtx_insn *next;
+ rtx_insn *value = label;
int depth;
*crossing = false;
&& BARRIER_P (next));
depth++)
{
- rtx this_label = JUMP_LABEL (insn);
- rtx tem;
+ rtx_insn *this_label = JUMP_LABEL_AS_INSN (insn);
/* If we have found a cycle, make the insn jump to itself. */
if (this_label == label)
return label;
+
+ /* Cannot follow returns and cannot look through tablejumps. */
if (ANY_RETURN_P (this_label))
return this_label;
- tem = next_active_insn (this_label);
- if (tem
- && (GET_CODE (PATTERN (tem)) == ADDR_VEC
- || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
+ if (NEXT_INSN (this_label)
+ && JUMP_TABLE_DATA_P (NEXT_INSN (this_label)))
break;
if (!targetm.can_follow_jump (jump, insn))
break;
if (!*crossing)
- *crossing
- = find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX) != NULL_RTX;
+ *crossing = CROSSING_JUMP_P (jump);
value = this_label;
}
if (depth == 10)
case, we can only take insns from the head of the thread for our delay
slot. We then adjust the jump to point after the insns we have taken. */
-static rtx
-fill_slots_from_thread (rtx insn, rtx condition, rtx thread,
- rtx opposite_thread, int likely, int thread_if_true,
+static rtx_insn_list *
+fill_slots_from_thread (rtx_insn *insn, rtx condition, rtx_insn *thread,
+ rtx_insn *opposite_thread, int likely,
+ int thread_if_true,
int own_thread, int slots_to_fill,
- int *pslots_filled, rtx delay_list)
+ int *pslots_filled, rtx_insn_list *delay_list)
{
- rtx new_thread;
+ rtx_insn *new_thread;
struct resources opposite_needed, set, needed;
- rtx trial;
+ rtx_insn *trial;
int lose = 0;
int must_annul = 0;
int flags;
/* Validate our arguments. */
- gcc_assert(condition != const_true_rtx || thread_if_true);
- gcc_assert(own_thread || thread_if_true);
+ gcc_assert (condition != const_true_rtx || thread_if_true);
+ gcc_assert (own_thread || thread_if_true);
flags = get_jump_flags (insn, JUMP_LABEL (insn));
: check_annul_list_true_false (1, delay_list)
&& eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
- rtx temp;
+ rtx_insn *temp;
must_annul = 1;
winner:
&& GET_CODE (PATTERN (trial)) == SEQUENCE
&& JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
{
+ rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (trial));
/* If this is the `true' thread, we will want to follow the jump,
so we can only do this if we have taken everything up to here. */
if (thread_if_true && trial == new_thread)
{
delay_list
- = steal_delay_list_from_target (insn, condition, PATTERN (trial),
+ = steal_delay_list_from_target (insn, condition, sequence,
delay_list, &set, &needed,
&opposite_needed, slots_to_fill,
pslots_filled, &must_annul,
else if (! thread_if_true)
delay_list
= steal_delay_list_from_fallthrough (insn, condition,
- PATTERN (trial),
+ sequence,
delay_list, &set, &needed,
&opposite_needed, slots_to_fill,
pslots_filled, &must_annul);
{
rtx other = XEXP (src, 1);
rtx new_arith;
- rtx ninsn;
+ rtx_insn *ninsn;
/* If this is a constant adjustment, use the same code with
the negated constant. Otherwise, reverse the sense of the
if (thread_if_true)
INSN_FROM_TARGET_P (ninsn) = 1;
- delay_list = add_to_delay_list (ninsn, NULL_RTX);
+ delay_list = add_to_delay_list (ninsn, NULL);
(*pslots_filled)++;
}
}
&& redirect_with_delay_list_safe_p (insn,
JUMP_LABEL (new_thread),
delay_list))
- new_thread = follow_jumps (JUMP_LABEL (new_thread), insn, &crossing);
+ new_thread = follow_jumps (JUMP_LABEL_AS_INSN (new_thread), insn,
+ &crossing);
if (ANY_RETURN_P (new_thread))
label = find_end_label (new_thread);
else if (LABEL_P (new_thread))
label = new_thread;
else
- label = get_label_before (new_thread);
+ label = get_label_before (new_thread, JUMP_LABEL (insn));
if (label)
{
reorg_redirect_jump (insn, label);
if (crossing)
- set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
+ CROSSING_JUMP_P (insn) = 1;
}
}
static void
fill_eager_delay_slots (void)
{
- rtx insn;
+ rtx_insn *insn;
int i;
int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
for (i = 0; i < num_unfilled_slots; i++)
{
rtx condition;
- rtx target_label, insn_at_target, fallthrough_insn;
- rtx delay_list = 0;
+ rtx_insn *target_label, *insn_at_target, *fallthrough_insn;
+ rtx_insn_list *delay_list = 0;
int own_target;
int own_fallthrough;
int prediction, slots_to_fill, slots_filled;
continue;
slots_filled = 0;
- target_label = JUMP_LABEL (insn);
+ target_label = JUMP_LABEL_AS_INSN (insn);
condition = get_branch_condition (insn, target_label);
if (condition == 0)
we might have found a redundant insn which we deleted
from the thread that was filled. So we have to recompute
the next insn at the target. */
- target_label = JUMP_LABEL (insn);
+ target_label = JUMP_LABEL_AS_INSN (insn);
insn_at_target = first_active_target_insn (target_label);
delay_list
threading. */
static void
-relax_delay_slots (rtx first)
+relax_delay_slots (rtx_insn *first)
{
- rtx insn, next, pat;
- rtx trial, delay_insn, target_label;
+ rtx_insn *insn, *next;
+ rtx_sequence *pat;
+ rtx_insn *trial, *delay_insn, *target_label;
/* Look at every JUMP_INSN and see if we can improve it. */
for (insn = first; insn; insn = next)
group of consecutive labels. */
if (JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
- && !ANY_RETURN_P (target_label = JUMP_LABEL (insn)))
+ && !ANY_RETURN_P (target_label = JUMP_LABEL_AS_INSN (insn)))
{
target_label
= skip_consecutive_labels (follow_jumps (target_label, insn,
{
reorg_redirect_jump (insn, target_label);
if (crossing)
- set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
+ CROSSING_JUMP_P (insn) = 1;
}
/* See if this jump conditionally branches around an unconditional
&& 0 > mostly_true_jump (other))
{
rtx other_target = JUMP_LABEL (other);
- target_label = JUMP_LABEL (insn);
+ target_label = JUMP_LABEL_AS_INSN (insn);
if (invert_jump (other, target_label, 0))
reorg_redirect_jump (insn, other_target);
if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != SEQUENCE)
continue;
- pat = PATTERN (insn);
- delay_insn = XVECEXP (pat, 0, 0);
+ pat = as_a <rtx_sequence *> (PATTERN (insn));
+ delay_insn = pat->insn (0);
/* See if the first insn in the delay slot is redundant with some
previous insn. Remove it from the delay slot if so; then set up
to reprocess this insn. */
- if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
+ if (redundant_insn (pat->insn (1), delay_insn, 0))
{
- delete_from_delay_slot (XVECEXP (pat, 0, 1));
+ update_block (pat->insn (1), insn);
+ delete_from_delay_slot (pat->insn (1));
next = prev_active_insn (next);
continue;
}
|| !(condjump_p (delay_insn) || condjump_in_parallel_p (delay_insn)))
continue;
- target_label = JUMP_LABEL (delay_insn);
+ target_label = JUMP_LABEL_AS_INSN (delay_insn);
if (target_label && ANY_RETURN_P (target_label))
continue;
reorg_redirect_jump (delay_insn, trial);
target_label = trial;
if (crossing)
- set_unique_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX);
+ CROSSING_JUMP_P (insn) = 1;
}
/* If the first insn at TARGET_LABEL is redundant with a previous
/* Now emit a label before the special USE insn, and
redirect our jump to the new label. */
- target_label = get_label_before (PREV_INSN (tmp));
+ target_label = get_label_before (PREV_INSN (tmp), target_label);
reorg_redirect_jump (delay_insn, target_label);
next = insn;
continue;
&& simplejump_or_return_p (XVECEXP (PATTERN (trial), 0, 0))
&& redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
{
- target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
+ rtx_sequence *trial_seq = as_a <rtx_sequence *> (PATTERN (trial));
+ target_label = JUMP_LABEL_AS_INSN (trial_seq->insn (0));
if (ANY_RETURN_P (target_label))
target_label = find_end_label (target_label);
&& redirect_with_delay_slots_safe_p (delay_insn, target_label,
insn))
{
+ update_block (trial_seq->insn (1), insn);
reorg_redirect_jump (delay_insn, target_label);
next = insn;
continue;
&& label_before_next_insn (next, insn) == target_label
&& simplejump_p (insn)
&& XVECLEN (pat, 0) == 2
- && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
+ && rtx_equal_p (PATTERN (next), PATTERN (pat->insn (1))))
{
delete_related_insns (insn);
continue;
/* If we own the thread opposite the way this insn branches, see if we
can merge its delay slots with following insns. */
- if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ if (INSN_FROM_TARGET_P (pat->insn (1))
&& own_thread_p (NEXT_INSN (insn), 0, 1))
try_merge_delay_insns (insn, next);
- else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ else if (! INSN_FROM_TARGET_P (pat->insn (1))
&& own_thread_p (target_label, target_label, 0))
try_merge_delay_insns (insn, next_active_insn (target_label));
for (insn = first; insn; insn = NEXT_INSN (insn))
if (JUMP_P (insn) && ANY_RETURN_P (PATTERN (insn)))
{
- rtx t = get_label_before (insn);
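+	/* No sibling label here: this label is brand new, not the
+	   retargeted destination of an existing jump.  */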
+ rtx t = get_label_before (insn, NULL_RTX);
if (PATTERN (insn) == ret_rtx)
real_return_label = t;
else
\f
/* Try to find insns to place in delay slots. */
-void
-dbr_schedule (rtx first)
+static void
+dbr_schedule (rtx_insn *first)
{
- rtx insn, next, epilogue_insn = 0;
+ rtx_insn *insn, *next, *epilogue_insn = 0;
int i;
bool need_return_insns;
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
{
rtx target;
- if (JUMP_P (insn))
- INSN_ANNULLED_BRANCH_P (insn) = 0;
- INSN_FROM_TARGET_P (insn) = 0;
-
/* Skip vector tables. We can't get attributes for them. */
if (JUMP_TABLE_DATA_P (insn))
continue;
+ if (JUMP_P (insn))
+ INSN_ANNULLED_BRANCH_P (insn) = 0;
+ INSN_FROM_TARGET_P (insn) = 0;
+
if (num_delay_slots (insn) > 0)
obstack_ptr_grow (&unfilled_slots_obstack, insn);
if (JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
&& !ANY_RETURN_P (JUMP_LABEL (insn))
- && ((target = skip_consecutive_labels (JUMP_LABEL (insn)))
+ && ((target = skip_consecutive_labels (JUMP_LABEL_AS_INSN (insn)))
!= JUMP_LABEL (insn)))
redirect_jump (insn, target, 1);
}
init_resource_info (epilogue_insn);
/* Show we haven't computed an end-of-function label yet. */
- function_return_label = function_simple_return_label = NULL_RTX;
+ function_return_label = function_simple_return_label = NULL;
/* Initialize the statistics for this function. */
memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays);
fprintf (dump_file, "\n");
}
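+  /* Let the labels created by get_label_before inherit the alignment
+     of their recorded sibling labels.  */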
+ if (!sibling_labels.is_empty ())
+ {
+ update_alignments (sibling_labels);
+ sibling_labels.release ();
+ }
+
free_resource_info ();
free (uid_to_ruid);
crtl->dbr_scheduled_p = true;
}
#endif /* DELAY_SLOTS */
\f
-static bool
-gate_handle_delay_slots (void)
-{
-#ifdef DELAY_SLOTS
- /* At -O0 dataflow info isn't updated after RA. */
- return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
-#else
- return 0;
-#endif
-}
-
/* Run delay slot optimization. */
static unsigned int
rest_of_handle_delay_slots (void)
return 0;
}
-struct rtl_opt_pass pass_delay_slots =
+namespace {
+
+const pass_data pass_data_delay_slots =
{
- {
- RTL_PASS,
- "dbr", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_handle_delay_slots, /* gate */
- rest_of_handle_delay_slots, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_DBR_SCHED, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_ggc_collect /* todo_flags_finish */
- }
+ RTL_PASS, /* type */
+ "dbr", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_DBR_SCHED, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
-/* Machine dependent reorg pass. */
-static bool
-gate_handle_machine_reorg (void)
+class pass_delay_slots : public rtl_opt_pass
{
- return targetm.machine_dependent_reorg != 0;
-}
+public:
+ pass_delay_slots (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_delay_slots, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *);
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_delay_slots ();
+ }
+}; // class pass_delay_slots
-static unsigned int
-rest_of_handle_machine_reorg (void)
+bool
+pass_delay_slots::gate (function *)
{
- targetm.machine_dependent_reorg ();
+#ifdef DELAY_SLOTS
+ /* At -O0 dataflow info isn't updated after RA. */
+ return optimize > 0 && flag_delayed_branch && !crtl->dbr_scheduled_p;
+#else
return 0;
+#endif
}
-struct rtl_opt_pass pass_machine_reorg =
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_delay_slots (gcc::context *ctxt)
{
- {
- RTL_PASS,
- "mach", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- gate_handle_machine_reorg, /* gate */
- rest_of_handle_machine_reorg, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_MACH_DEP, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_ggc_collect /* todo_flags_finish */
- }
+ return new pass_delay_slots (ctxt);
+}
+
+/* Machine dependent reorg pass. */
+
+namespace {
+
+const pass_data pass_data_machine_reorg =
+{
+ RTL_PASS, /* type */
+ "mach", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_MACH_DEP, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+
+class pass_machine_reorg : public rtl_opt_pass
+{
+public:
+ pass_machine_reorg (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_machine_reorg, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return targetm.machine_dependent_reorg != 0;
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ targetm.machine_dependent_reorg ();
+ return 0;
+ }
+
+}; // class pass_machine_reorg
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_machine_reorg (gcc::context *ctxt)
+{
+ return new pass_machine_reorg (ctxt);
+}