rtx loop_notes;
{
register RTX_CODE code = GET_CODE (x);
+ int schedule_barrier_found = 0;
rtx link;
int i;
if (GET_CODE (insn) == JUMP_INSN)
{
- rtx next, u, pending, pending_mem;
+ rtx next, u;
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
- {
- for (i = 0; i < deps->max_reg; i++)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
-
- for (u = reg_last->uses; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = reg_last->sets; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = reg_last->clobbers; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- }
- }
+ schedule_barrier_found = 1;
else
{
regset_head tmp;
CLEAR_REG_SET (&tmp);
}
- pending = deps->pending_write_insns;
- pending_mem = deps->pending_write_mems;
- while (pending)
- {
- add_dependence (insn, XEXP (pending, 0), 0);
-
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
- }
-
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
/* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
block, then we must be sure that no instructions are scheduled across it.
Otherwise, the reg_n_refs info (which depends on loop_depth) would
become incorrect. */
-
if (loop_notes)
{
- int schedule_barrier_found = 0;
rtx link;
/* Update loop_notes with any notes from this insn. Also determine
if any of the notes on the list correspond to instruction scheduling
- barriers (loop, eh & setjmp notes, but not range notes. */
+ barriers (loop, eh & setjmp notes, but not range notes). */
link = loop_notes;
while (XEXP (link, 1))
{
}
XEXP (link, 1) = REG_NOTES (insn);
REG_NOTES (insn) = loop_notes;
+ }
+
+ /* If this instruction can throw an exception, then moving it changes
+ where block boundaries fall. This is mighty confusing elsewhere.
+ Therefore, prevent such an instruction from being moved. */
+ if (flag_non_call_exceptions && can_throw_internal (insn))
+ schedule_barrier_found = 1;
- /* Add dependencies if a scheduling barrier was found. */
- if (schedule_barrier_found)
+ /* Add dependencies if a scheduling barrier was found. */
+ if (schedule_barrier_found)
+ {
+ rtx u, pending, pending_mem;
+
+ for (i = 0; i < deps->max_reg; i++)
{
- for (i = 0; i < deps->max_reg; i++)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- rtx u;
+ struct deps_reg *reg_last = &deps->reg_last[i];
- for (u = reg_last->uses; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = reg_last->sets; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), 0);
- for (u = reg_last->clobbers; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), 0);
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), 0);
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), 0);
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&reg_last->uses);
- }
- reg_pending_sets_all = 1;
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
+ }
+ flush_pending_lists (deps, insn, 0);
- flush_pending_lists (deps, insn, 0);
+ pending = deps->pending_write_insns;
+ pending_mem = deps->pending_write_mems;
+ while (pending)
+ {
+ add_dependence (insn, XEXP (pending, 0), 0);
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
}
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+
+ reg_pending_sets_all = 1;
}
/* Accumulate clobbers until the next set so that it will be output