static int insn_references_resource_p (rtx, struct resources *, bool);
static int insn_sets_resource_p (rtx, struct resources *, bool);
static rtx_code_label *find_end_label (rtx);
-static rtx_insn *emit_delay_sequence (rtx_insn *, rtx_insn_list *, int);
-static rtx_insn_list *add_to_delay_list (rtx_insn *, rtx_insn_list *);
+static rtx_insn *emit_delay_sequence (rtx_insn *, const vec<rtx_insn *> &,
+ int);
+static void add_to_delay_list (rtx_insn *, vec<rtx_insn *> *);
static rtx_insn *delete_from_delay_slot (rtx_insn *);
static void delete_scheduled_jump (rtx_insn *);
static void note_delay_statistics (int, int);
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
-static rtx_insn_list *optimize_skip (rtx_jump_insn *);
+static void optimize_skip (rtx_jump_insn *, vec<rtx_insn *> *);
#endif
static int get_jump_flags (const rtx_insn *, rtx);
static int mostly_true_jump (rtx);
static rtx get_branch_condition (const rtx_insn *, rtx);
static int condition_dominates_p (rtx, const rtx_insn *);
static int redirect_with_delay_slots_safe_p (rtx_insn *, rtx, rtx);
-static int redirect_with_delay_list_safe_p (rtx_insn *, rtx, rtx_insn_list *);
-static int check_annul_list_true_false (int, rtx);
-static rtx_insn_list *steal_delay_list_from_target (rtx_insn *, rtx,
- rtx_sequence *,
- rtx_insn_list *,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *,
- rtx *);
-static rtx_insn_list *steal_delay_list_from_fallthrough (rtx_insn *, rtx,
- rtx_sequence *,
- rtx_insn_list *,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *);
+static int redirect_with_delay_list_safe_p (rtx_insn *, rtx,
+ const vec<rtx_insn *> &);
+static int check_annul_list_true_false (int, const vec<rtx_insn *> &);
+static void steal_delay_list_from_target (rtx_insn *, rtx, rtx_sequence *,
+ vec<rtx_insn *> *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *,
+ rtx *);
+static void steal_delay_list_from_fallthrough (rtx_insn *, rtx, rtx_sequence *,
+ vec<rtx_insn *> *,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *);
static void try_merge_delay_insns (rtx_insn *, rtx_insn *);
-static rtx redundant_insn (rtx, rtx_insn *, rtx);
+static rtx redundant_insn (rtx, rtx_insn *, const vec<rtx_insn *> &);
static int own_thread_p (rtx, rtx, int);
static void update_block (rtx_insn *, rtx);
static int reorg_redirect_jump (rtx_jump_insn *, rtx);
static void fix_reg_dead_note (rtx, rtx);
static void update_reg_unused_notes (rtx, rtx);
static void fill_simple_delay_slots (int);
-static rtx_insn_list *fill_slots_from_thread (rtx_jump_insn *, rtx, rtx, rtx,
- int, int, int, int,
- int *, rtx_insn_list *);
+static void fill_slots_from_thread (rtx_jump_insn *, rtx, rtx, rtx,
+ int, int, int, int,
+ int *, vec<rtx_insn *> *);
static void fill_eager_delay_slots (void);
static void relax_delay_slots (rtx_insn *);
static void make_return_insns (rtx_insn *);
Returns the insn containing the SEQUENCE that replaces INSN. */
static rtx_insn *
-emit_delay_sequence (rtx_insn *insn, rtx_insn_list *list, int length)
+emit_delay_sequence (rtx_insn *insn, const vec<rtx_insn *> &list, int length)
{
/* Allocate the rtvec to hold the insns and the SEQUENCE. */
rtvec seqv = rtvec_alloc (length + 1);
SET_NEXT_INSN (insn) = SET_PREV_INSN (insn) = NULL;
/* Build our SEQUENCE and rebuild the insn chain. */
- int i = 1;
start_sequence ();
XVECEXP (seq, 0, 0) = emit_insn (insn);
- for (rtx_insn_list *li = list; li; li = li->next (), i++)
+
+ unsigned int delay_insns = list.length ();
+ gcc_assert (delay_insns == (unsigned int) length);
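+  /* Element 0 of the SEQUENCE holds INSN itself; delay insn I therefore goes
+     into element I + 1.  */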
+ for (unsigned int i = 0; i < delay_insns; i++)
{
- rtx_insn *tem = li->insn ();
+ rtx_insn *tem = list[i];
rtx note, next;
/* Show that this copy of the insn isn't deleted. */
/* Unlink insn from its original place, and re-emit it into
the sequence. */
SET_NEXT_INSN (tem) = SET_PREV_INSN (tem) = NULL;
- XVECEXP (seq, 0, i) = emit_insn (tem);
+ XVECEXP (seq, 0, i + 1) = emit_insn (tem);
-      /* SPARC assembler, for instance, emit warning when debug info is output
-	 into the delay slot.  */
+      /* The SPARC assembler, for instance, emits a warning when debug info
+	 is output into the delay slot.  */
}
}
end_sequence ();
- gcc_assert (i == length + 1);
/* Splice our SEQUENCE into the insn stream where INSN used to be. */
add_insn_after (seq_insn, after, NULL);
-/* Add INSN to DELAY_LIST and return the head of the new list.  The list must
-   be in the order in which the insns are to be executed.  */
+/* Add INSN to DELAY_LIST.  The list must be in the order in which the insns
+   are to be executed.  */
-static rtx_insn_list *
-add_to_delay_list (rtx_insn *insn, rtx_insn_list *delay_list)
+static void
+add_to_delay_list (rtx_insn *insn, vec<rtx_insn *> *delay_list)
{
- /* If we have an empty list, just make a new list element. If
- INSN has its block number recorded, clear it since we may
+ /* If INSN has its block number recorded, clear it since we may
be moving the insn to a new block. */
-
- if (delay_list == 0)
- {
clear_hashed_info_for_insn (insn);
- return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
- }
-
- /* Otherwise this must be an INSN_LIST. Add INSN to the end of the
- list. */
- XEXP (delay_list, 1) = add_to_delay_list (insn, delay_list->next ());
-
- return delay_list;
+ delay_list->safe_push (insn);
}
\f
/* Delete INSN from the delay slot of the insn that it is in, which may
{
rtx_insn *trial, *seq_insn, *prev;
rtx_sequence *seq;
- rtx_insn_list *delay_list = 0;
int i;
int had_barrier = 0;
/* Create a delay list consisting of all the insns other than the one
we are deleting (unless we were the only one). */
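+  /* An auto_vec keeps its first few entries inside the object itself (here,
+     on the stack); safe_push falls back to the heap if a target ever needs
+     more delay insns than that.  */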
+ auto_vec<rtx_insn *, 5> delay_list;
if (seq->len () > 2)
for (i = 1; i < seq->len (); i++)
if (seq->insn (i) != insn)
- delay_list = add_to_delay_list (seq->insn (i), delay_list);
+ add_to_delay_list (seq->insn (i), &delay_list);
/* Delete the old SEQUENCE, re-emit the insn that used to have the delay
list, and rebuild the delay list if non-empty. */
-  /* If there are any delay insns, remit them.  Otherwise clear the
-     annul flag.  */
+  /* If there are any delay insns, re-emit them.  Otherwise clear the
+     annul flag.  */
- if (delay_list)
+ if (!delay_list.is_empty ())
trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
else if (JUMP_P (trial))
INSN_ANNULLED_BRANCH_P (trial) = 0;
This should be expanded to skip over N insns, where N is the number
of delay slots required. */
-static rtx_insn_list *
-optimize_skip (rtx_jump_insn *insn)
+static void
+optimize_skip (rtx_jump_insn *insn, vec<rtx_insn *> *delay_list)
{
rtx_insn *trial = next_nonnote_insn (insn);
rtx_insn *next_trial = next_active_insn (trial);
- rtx_insn_list *delay_list = 0;
int flags;
flags = get_jump_flags (insn, JUMP_LABEL (insn));
|| (! eligible_for_annul_false (insn, 0, trial, flags)
&& ! eligible_for_annul_true (insn, 0, trial, flags))
|| can_throw_internal (trial))
- return 0;
+ return;
/* There are two cases where we are just executing one insn (we assume
here that a branch requires only one insn; this should be generalized
if (invert_jump (insn, JUMP_LABEL (insn), 1))
INSN_FROM_TARGET_P (trial) = 1;
else if (! eligible_for_annul_true (insn, 0, trial, flags))
- return 0;
+ return;
}
- delay_list = add_to_delay_list (trial, NULL);
+ add_to_delay_list (trial, delay_list);
next_trial = next_active_insn (trial);
update_block (trial, trial);
delete_related_insns (trial);
INSN_ANNULLED_BRANCH_P (insn) = 1;
}
-
- return delay_list;
}
#endif
\f
static int
redirect_with_delay_list_safe_p (rtx_insn *jump, rtx newlabel,
- rtx_insn_list *delay_list)
+ const vec<rtx_insn *> &delay_list)
{
- int flags, i;
- rtx_insn_list *li;
-
/* Make sure all the insns in DELAY_LIST would still be
valid after threading the jump. If they are still
valid, then return nonzero. */
- flags = get_jump_flags (jump, newlabel);
- for (li = delay_list, i = 0; li; li = li->next (), i++)
+ int flags = get_jump_flags (jump, newlabel);
+ unsigned int delay_insns = delay_list.length ();
+ unsigned int i = 0;
+ for (; i < delay_insns; i++)
if (! (
#ifdef ANNUL_IFFALSE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && INSN_FROM_TARGET_P (li->insn ()))
- ? eligible_for_annul_false (jump, i, li->insn (), flags) :
+ && INSN_FROM_TARGET_P (delay_list[i]))
+ ? eligible_for_annul_false (jump, i, delay_list[i], flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
- ? eligible_for_annul_true (jump, i, li->insn (), flags) :
+ && ! INSN_FROM_TARGET_P (delay_list[i]))
+ ? eligible_for_annul_true (jump, i, delay_list[i], flags) :
#endif
- eligible_for_delay (jump, i, li->insn (), flags)))
+ eligible_for_delay (jump, i, delay_list[i], flags)))
break;
- return (li == NULL);
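+  /* I falls short of DELAY_INSNS only if some insn is no longer valid after
+     the retargeting.  */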
+ return i == delay_insns;
}
/* DELAY_LIST is a list of insns that have already been placed into delay
If not, return 0; otherwise return 1. */
static int
-check_annul_list_true_false (int annul_true_p, rtx delay_list)
+check_annul_list_true_false (int annul_true_p,
+ const vec<rtx_insn *> &delay_list)
{
- rtx temp;
-
- if (delay_list)
- {
- for (temp = delay_list; temp; temp = XEXP (temp, 1))
- {
- rtx trial = XEXP (temp, 0);
-
- if ((annul_true_p && INSN_FROM_TARGET_P (trial))
- || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
- return 0;
- }
- }
+ rtx_insn *trial;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (delay_list, i, trial)
+ if ((annul_true_p && INSN_FROM_TARGET_P (trial))
+ || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
+ return 0;
return 1;
}
PNEW_THREAD points to a location that is to receive the place at which
execution should continue. */
-static rtx_insn_list *
+static void
steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq,
- rtx_insn_list *delay_list, struct resources *sets,
+ vec<rtx_insn *> *delay_list, resources *sets,
struct resources *needed,
struct resources *other_needed,
int slots_to_fill, int *pslots_filled,
{
int slots_remaining = slots_to_fill - *pslots_filled;
int total_slots_filled = *pslots_filled;
- rtx_insn_list *new_delay_list = 0;
+ auto_vec<rtx_insn *, 5> new_delay_list;
int must_annul = *pannul_p;
int used_annul = 0;
int i;
-     will effect the direction of the jump in the sequence.  */
+     will affect the direction of the jump in the sequence.  */
CLEAR_RESOURCE (&cc_set);
- for (rtx_insn_list *temp = delay_list; temp; temp = temp->next ())
- {
- rtx_insn *trial = temp->insn ();
+ rtx_insn *trial;
+ FOR_EACH_VEC_ELT (*delay_list, i, trial)
+ {
mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
if (insn_references_resource_p (seq->insn (0), &cc_set, false))
- return delay_list;
+ return;
}
if (XVECLEN (seq, 0) - 1 > slots_remaining
|| ! condition_dominates_p (condition, seq->insn (0))
|| ! single_set (seq->insn (0)))
- return delay_list;
+ return;
/* On some targets, branches with delay slots can have a limited
displacement. Give the back end a chance to tell us we can't do
this. */
if (! targetm.can_follow_jump (insn, seq->insn (0)))
- return delay_list;
+ return;
redundant = XALLOCAVEC (bool, XVECLEN (seq, 0));
for (i = 1; i < seq->len (); i++)
in SEQ, we cannot use it. */
|| (INSN_ANNULLED_BRANCH_P (seq->insn (0))
&& ! INSN_FROM_TARGET_P (trial)))
- return delay_list;
+ return;
/* If this insn was already done (usually in a previous delay slot),
pretend we put it in our delay slot. */
|| (! insn_sets_resource_p (trial, other_needed, false)
&& ! may_trap_or_fault_p (PATTERN (trial)))))
? eligible_for_delay (insn, total_slots_filled, trial, flags)
- : (must_annul || (delay_list == NULL && new_delay_list == NULL))
+ : (must_annul || (delay_list->is_empty () && new_delay_list.is_empty ()))
&& (must_annul = 1,
- check_annul_list_true_false (0, delay_list)
+ check_annul_list_true_false (0, *delay_list)
&& check_annul_list_true_false (0, new_delay_list)
&& eligible_for_annul_false (insn, total_slots_filled,
trial, flags)))
used_annul = 1;
rtx_insn *temp = copy_delay_slot_insn (trial);
INSN_FROM_TARGET_P (temp) = 1;
- new_delay_list = add_to_delay_list (temp, new_delay_list);
+ add_to_delay_list (temp, &new_delay_list);
total_slots_filled++;
if (--slots_remaining == 0)
break;
}
else
- return delay_list;
+ return;
}
/* Record the effect of the instructions that were redundant and which
if (used_annul)
*pannul_p = 1;
- if (delay_list == 0)
- return new_delay_list;
-
- for (rtx_insn_list *temp = new_delay_list; temp; temp = temp->next ())
- delay_list = add_to_delay_list (temp->insn (), delay_list);
-
- return delay_list;
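+  /* The insns stolen from the target were collected in NEW_DELAY_LIST; now
+     that the whole sequence is known to be usable, append them to the
+     caller's DELAY_LIST.  */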
+ rtx_insn *temp;
+ FOR_EACH_VEC_ELT (new_delay_list, i, temp)
+ add_to_delay_list (temp, delay_list);
}
\f
/* Similar to steal_delay_list_from_target except that SEQ is on the
of SEQ is an unconditional branch. In that case we steal its delay slot
for INSN since unconditional branches are much easier to fill. */
-static rtx_insn_list *
+static void
steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition,
rtx_sequence *seq,
- rtx_insn_list *delay_list,
+ vec<rtx_insn *> *delay_list,
struct resources *sets,
struct resources *needed,
struct resources *other_needed,
unconditional branch. */
if (! simplejump_or_return_p (seq->insn (0)))
- return delay_list;
+ return;
for (i = 1; i < seq->len (); i++)
{
break;
/* If this insn was already done, we don't need it. */
- if (redundant_insn (trial, insn, delay_list))
+ if (redundant_insn (trial, insn, *delay_list))
{
update_block (trial, insn);
delete_from_delay_slot (trial);
|| (! insn_sets_resource_p (trial, other_needed, false)
&& ! may_trap_or_fault_p (PATTERN (trial)))))
? eligible_for_delay (insn, *pslots_filled, trial, flags)
- : (must_annul || delay_list == NULL) && (must_annul = 1,
- check_annul_list_true_false (1, delay_list)
+ : (must_annul || delay_list->is_empty ()) && (must_annul = 1,
+ check_annul_list_true_false (1, *delay_list)
&& eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
if (must_annul)
used_annul = 1;
delete_from_delay_slot (trial);
- delay_list = add_to_delay_list (trial, delay_list);
+ add_to_delay_list (trial, delay_list);
if (++(*pslots_filled) == slots_to_fill)
break;
if (used_annul)
*pannul_p = 1;
- return delay_list;
}
\f
/* Try merging insns starting at THREAD which match exactly the insns in
gain in rare cases. */
static rtx
-redundant_insn (rtx insn, rtx_insn *target, rtx delay_list)
+redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
{
rtx target_main = target;
rtx ipat = PATTERN (insn);
/* This insn isn't redundant if it conflicts with an insn that either is
or will be in a delay slot of TARGET. */
- while (delay_list)
- {
- if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, true))
- return 0;
- delay_list = XEXP (delay_list, 1);
- }
+ unsigned int j;
+ rtx_insn *temp;
+ FOR_EACH_VEC_ELT (delay_list, j, temp)
+ if (insn_sets_resource_p (temp, &needed, true))
+ return 0;
if (NONJUMP_INSN_P (target) && GET_CODE (PATTERN (target)) == SEQUENCE)
for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
struct resources needed, set;
int slots_to_fill, slots_filled;
- rtx_insn_list *delay_list;
+ auto_vec<rtx_insn *, 5> delay_list;
for (i = 0; i < num_unfilled_slots; i++)
{
CALL_INSNs. */
slots_filled = 0;
- delay_list = 0;
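+      /* Empty the vec but keep its storage, so the same allocation is reused
+	 for each candidate insn.  */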
+ delay_list.truncate (0);
if (JUMP_P (insn))
flags = get_jump_flags (insn, JUMP_LABEL (insn));
{
rtx_insn **tmp;
slots_filled++;
- delay_list = add_to_delay_list (trial, delay_list);
+ add_to_delay_list (trial, &delay_list);
/* TRIAL may have had its delay slot filled, then unfilled. When
the delay slot is unfilled, TRIAL is placed back on the unfilled
tail, of the list. */
update_reg_dead_notes (trial, insn);
- delay_list = gen_rtx_INSN_LIST (VOIDmode,
- trial, delay_list);
+ delay_list.safe_insert (0, trial);
update_block (trial, trial);
delete_related_insns (trial);
if (slots_to_fill == ++slots_filled)
/* Try to optimize case of jumping around a single insn. */
#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
if (slots_filled != slots_to_fill
- && delay_list == 0
+ && delay_list.is_empty ()
&& JUMP_P (insn)
&& (condjump_p (insn) || condjump_in_parallel_p (insn))
&& !ANY_RETURN_P (JUMP_LABEL (insn)))
{
- delay_list = optimize_skip (as_a <rtx_jump_insn *> (insn));
- if (delay_list)
+ optimize_skip (as_a <rtx_jump_insn *> (insn), &delay_list);
+ if (!delay_list.is_empty ())
slots_filled += 1;
}
#endif
&& ! can_throw_internal (trial))
{
next_trial = next_nonnote_insn (trial);
- delay_list = add_to_delay_list (trial, delay_list);
+ add_to_delay_list (trial, &delay_list);
if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
link_cc0_insns (trial);
if (new_label)
{
- delay_list
- = add_to_delay_list (copy_delay_slot_insn (next_trial),
- delay_list);
+ add_to_delay_list (copy_delay_slot_insn (next_trial),
+ &delay_list);
slots_filled++;
reorg_redirect_jump (as_a <rtx_jump_insn *> (trial),
new_label);
if ((jump_insn = dyn_cast <rtx_jump_insn *> (insn))
&& simplejump_p (jump_insn)
&& slots_filled != slots_to_fill)
- delay_list
- = fill_slots_from_thread (jump_insn, const_true_rtx,
- next_active_insn (JUMP_LABEL (insn)),
- NULL, 1, 1,
- own_thread_p (JUMP_LABEL (insn),
- JUMP_LABEL (insn), 0),
- slots_to_fill, &slots_filled,
- delay_list);
-
- if (delay_list)
+ fill_slots_from_thread (jump_insn, const_true_rtx,
+ next_active_insn (JUMP_LABEL (insn)), NULL, 1,
+ 1, own_thread_p (JUMP_LABEL (insn),
+ JUMP_LABEL (insn), 0),
+ slots_to_fill, &slots_filled, &delay_list);
+
+ if (!delay_list.is_empty ())
unfilled_slots_base[i]
= emit_delay_sequence (insn, delay_list, slots_filled);
case, we can only take insns from the head of the thread for our delay
slot. We then adjust the jump to point after the insns we have taken. */
-static rtx_insn_list *
+static void
fill_slots_from_thread (rtx_jump_insn *insn, rtx condition,
rtx thread_or_return, rtx opposite_thread, int likely,
int thread_if_true, int own_thread, int slots_to_fill,
- int *pslots_filled, rtx_insn_list *delay_list)
+ int *pslots_filled, vec<rtx_insn *> *delay_list)
{
rtx new_thread;
struct resources opposite_needed, set, needed;
-  /* If our thread is the end of subroutine, we can't get any delay
-     insns from that.  */
+  /* If our thread is the end of a subroutine, we can't get any delay
+     insns from that.  */
if (thread_or_return == NULL_RTX || ANY_RETURN_P (thread_or_return))
- return delay_list;
+ return;
rtx_insn *thread = as_a <rtx_insn *> (thread_or_return);
/* If TRIAL is redundant with some insn before INSN, we don't
actually need to add it to the delay list; we can merely pretend
we did. */
- if ((prior_insn = redundant_insn (trial, insn, delay_list)))
+ if ((prior_insn = redundant_insn (trial, insn, *delay_list)))
{
fix_reg_dead_note (prior_insn, insn);
if (own_thread)
if (thread == old_trial)
thread = trial;
pat = PATTERN (trial);
- if ((must_annul || delay_list == NULL) && (thread_if_true
- ? check_annul_list_true_false (0, delay_list)
+ if ((must_annul || delay_list->is_empty ()) && (thread_if_true
+ ? check_annul_list_true_false (0, *delay_list)
&& eligible_for_annul_false (insn, *pslots_filled, trial, flags)
- : check_annul_list_true_false (1, delay_list)
+ : check_annul_list_true_false (1, *delay_list)
&& eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
{
rtx_insn *temp;
if (thread_if_true)
INSN_FROM_TARGET_P (temp) = 1;
- delay_list = add_to_delay_list (temp, delay_list);
+ add_to_delay_list (temp, delay_list);
if (slots_to_fill == ++(*pslots_filled))
{
&set, true)
&& (prior_insn
= redundant_insn (new_thread, insn,
- delay_list)))
+ *delay_list)))
{
/* We know we do not own the thread, so no need
to call update_block and delete_insn. */
so we can only do this if we have taken everything up to here. */
if (thread_if_true && trial == new_thread)
{
- delay_list
- = steal_delay_list_from_target (insn, condition, sequence,
- delay_list, &set, &needed,
- &opposite_needed, slots_to_fill,
- pslots_filled, &must_annul,
- &new_thread);
+ steal_delay_list_from_target (insn, condition, sequence,
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul,
+ &new_thread);
/* If we owned the thread and are told that it branched
elsewhere, make sure we own the thread at the new location. */
if (own_thread && trial != new_thread)
own_thread = own_thread_p (new_thread, new_thread, 0);
}
else if (! thread_if_true)
- delay_list
- = steal_delay_list_from_fallthrough (insn, condition,
- sequence,
- delay_list, &set, &needed,
- &opposite_needed, slots_to_fill,
- pslots_filled, &must_annul);
+ steal_delay_list_from_fallthrough (insn, condition, sequence,
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul);
}
/* If we haven't found anything for this delay slot and it is very
depend on the destination register. If so, try to place the opposite
arithmetic insn after the jump insn and put the arithmetic insn in the
delay slot. If we can't do this, return. */
- if (delay_list == 0 && likely
+ if (delay_list->is_empty () && likely
&& new_thread && !ANY_RETURN_P (new_thread)
&& NONJUMP_INSN_P (new_thread)
&& !RTX_FRAME_RELATED_P (new_thread)
|| GET_CODE (pat) != SET
|| ! eligible_for_delay (insn, 0, trial, flags)
|| can_throw_internal (trial))
- return 0;
+ return;
dest = SET_DEST (pat), src = SET_SRC (pat);
if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
!constrain_operands (1, get_preferred_alternatives (ninsn))))
{
delete_related_insns (ninsn);
- return 0;
+ return;
}
if (own_thread)
if (thread_if_true)
INSN_FROM_TARGET_P (ninsn) = 1;
- delay_list = add_to_delay_list (ninsn, NULL);
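+      /* DELAY_LIST is still empty at this point (that was checked before
+	 attempting this transformation), so NINSN becomes its only entry.  */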
+ add_to_delay_list (ninsn, delay_list);
(*pslots_filled)++;
}
}
- if (delay_list && must_annul)
+ if (!delay_list->is_empty () && must_annul)
INSN_ANNULLED_BRANCH_P (insn) = 1;
/* If we are to branch into the middle of this thread, find an appropriate
if (new_thread && simplejump_or_return_p (new_thread)
&& redirect_with_delay_list_safe_p (insn,
JUMP_LABEL (new_thread),
- delay_list))
+ *delay_list))
new_thread = follow_jumps (JUMP_LABEL (new_thread), insn,
&crossing);
CROSSING_JUMP_P (insn) = 1;
}
}
-
- return delay_list;
}
\f
/* Make another attempt to find insns to place in delay slots.
rtx condition;
rtx target_label, insn_at_target;
rtx_insn *fallthrough_insn;
- rtx_insn_list *delay_list = 0;
+ auto_vec<rtx_insn *, 5> delay_list;
rtx_jump_insn *jump_insn;
int own_target;
int own_fallthrough;
if (prediction > 0)
{
- delay_list
- = fill_slots_from_thread (jump_insn, condition, insn_at_target,
- fallthrough_insn, prediction == 2, 1,
- own_target,
- slots_to_fill, &slots_filled, delay_list);
+ fill_slots_from_thread (jump_insn, condition, insn_at_target,
+ fallthrough_insn, prediction == 2, 1,
+ own_target,
+ slots_to_fill, &slots_filled, &delay_list);
- if (delay_list == 0 && own_fallthrough)
+ if (delay_list.is_empty () && own_fallthrough)
{
/* Even though we didn't find anything for delay slots,
we might have found a redundant insn which we deleted
target_label = JUMP_LABEL (jump_insn);
insn_at_target = first_active_target_insn (target_label);
- delay_list
- = fill_slots_from_thread (jump_insn, condition,
- fallthrough_insn,
- insn_at_target, 0, 0,
- own_fallthrough,
- slots_to_fill, &slots_filled,
- delay_list);
+ fill_slots_from_thread (jump_insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0, own_fallthrough,
+ slots_to_fill, &slots_filled,
+ &delay_list);
}
}
else
{
if (own_fallthrough)
- delay_list
- = fill_slots_from_thread (jump_insn, condition, fallthrough_insn,
- insn_at_target, 0, 0,
- own_fallthrough,
- slots_to_fill, &slots_filled,
- delay_list);
-
- if (delay_list == 0)
- delay_list
- = fill_slots_from_thread (jump_insn, condition, insn_at_target,
- next_active_insn (insn), 0, 1,
- own_target,
- slots_to_fill, &slots_filled,
- delay_list);
+ fill_slots_from_thread (jump_insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0, own_fallthrough,
+ slots_to_fill, &slots_filled, &delay_list);
+
+ if (delay_list.is_empty ())
+ fill_slots_from_thread (jump_insn, condition, insn_at_target,
+ next_active_insn (insn), 0, 1, own_target,
+ slots_to_fill, &slots_filled, &delay_list);
}
- if (delay_list)
+ if (!delay_list.is_empty ())
unfilled_slots_base[i]
= emit_delay_sequence (jump_insn, delay_list, slots_filled);
/* See if the first insn in the delay slot is redundant with some
previous insn. Remove it from the delay slot if so; then set up
to reprocess this insn. */
- if (redundant_insn (pat->insn (1), delay_insn, 0))
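+      /* vNULL converts to an empty vec<rtx_insn *>; there is no pending
+	 delay list to take into account here.  */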
+ if (redundant_insn (pat->insn (1), delay_insn, vNULL))
{
update_block (pat->insn (1), insn);
delete_from_delay_slot (pat->insn (1));
liveness info. */
trial = next_real_insn (target_label);
if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
- && redundant_insn (trial, insn, 0)
+ && redundant_insn (trial, insn, vNULL)
&& ! can_throw_internal (trial))
{
/* Figure out where to emit the special USE insn so we don't
&& trial_seq->len () == 2
&& JUMP_P (trial_seq->insn (0))
&& simplejump_or_return_p (trial_seq->insn (0))
- && redundant_insn (trial_seq->insn (1), insn, 0))
+ && redundant_insn (trial_seq->insn (1), insn, vNULL))
{
target_label = JUMP_LABEL (trial_seq->insn (0));
if (ANY_RETURN_P (target_label))