/* Instruction scheduling pass. This file computes dependencies between
instructions.
- Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 1992-2014 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
-#include "toplev.h"
#include "rtl.h"
+#include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
-#include "toplev.h"
#include "recog.h"
+#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
struct sched_deps_info_def *sched_deps_info;
/* The data is specific to the Haifa scheduler. */
-VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
+vec<haifa_deps_insn_data_def>
+ h_d_i_d = vNULL;
/* Return the major type present in the DS. */
enum reg_note
if (ds & DEP_OUTPUT)
return REG_DEP_OUTPUT;
+ if (ds & DEP_CONTROL)
+ return REG_DEP_CONTROL;
+
gcc_assert (ds & DEP_ANTI);
return REG_DEP_ANTI;
case REG_DEP_OUTPUT:
return DEP_OUTPUT;
+ case REG_DEP_CONTROL:
+ return DEP_CONTROL;
+
default:
gcc_assert (dk == REG_DEP_ANTI);
return DEP_ANTI;
DEP_CON (dep) = con;
DEP_TYPE (dep) = type;
DEP_STATUS (dep) = ds;
+ DEP_COST (dep) = UNKNOWN_DEP_COST;
+ DEP_NONREG (dep) = 0;
+ DEP_MULTIPLE (dep) = 0;
+ DEP_REPLACE (dep) = NULL;
}
/* Init DEP with the arguments.
if ((current_sched_info->flags & USE_DEPS_LIST))
ds = dk_to_ds (kind);
else
- ds = -1;
+ ds = 0;
init_dep_1 (dep, pro, con, kind, ds);
}
t = 'o';
break;
+ case REG_DEP_CONTROL:
+ t = 'c';
+ break;
+
case REG_DEP_ANTI:
t = 'a';
break;
gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
&& dep_link_is_detached_p (DEP_NODE_FORW (n)));
+ XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
+
--dn_pool_diff;
pool_free (dn_pool, n);
while (1);
}
+/* Decide whether a dependency should be treated as a hard or a speculative
+ dependency. */
+static bool
+dep_spec_p (dep_t dep)
+{
+ if (current_sched_info->flags & DO_SPECULATION)
+ {
+ if (DEP_STATUS (dep) & SPECULATIVE)
+ return true;
+ }
+ if (current_sched_info->flags & DO_PREDICATION)
+ {
+ if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+ return true;
+ }
+ if (DEP_REPLACE (dep) != NULL)
+ return true;
+ return false;
+}
+
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
+static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;
/* Hard registers implicitly clobbered or used (or may be implicitly
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
+static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
+/* True if we should mark added dependencies as non-register deps.  */
+static bool mark_as_hard;
+
static int deps_may_trap_p (const_rtx);
-static void add_dependence_list (rtx, rtx, int, enum reg_note);
+static void add_dependence_1 (rtx, rtx, enum reg_note);
+static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
- rtx *, int, enum reg_note);
+ rtx *, int, enum reg_note, bool);
static void delete_all_dependences (rtx);
-static void fixup_sched_groups (rtx);
+static void chain_to_prev_insn (rtx);
static void flush_pending_lists (struct deps_desc *, rtx, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
it is set to TRUE when the returned comparison should be reversed
to get the actual condition. */
static rtx
-sched_get_condition_with_rev (const_rtx insn, bool *rev)
+sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
{
rtx pat = PATTERN (insn);
rtx src;
- if (pat == 0)
- return 0;
-
if (rev)
*rev = false;
return 0;
}
+/* Return the condition under which INSN does not execute (i.e. the
+ not-taken condition for a conditional branch), or NULL if we cannot
+ find such a condition. The caller should make a copy of the condition
+ before using it. */
+rtx
+sched_get_reverse_condition_uncached (const_rtx insn)
+{
+ bool rev;
+ rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
+ if (cond == NULL_RTX)
+ return cond;
+ if (!rev)
+ {
+ enum rtx_code revcode = reversed_comparison_code (cond, insn);
+ cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
+ XEXP (cond, 0),
+ XEXP (cond, 1));
+ }
+ return cond;
+}
+
+/* Caching variant of sched_get_condition_with_rev_uncached.
+ We only do actual work the first time we come here for an insn; the
+ results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
+static rtx
+sched_get_condition_with_rev (const_rtx insn, bool *rev)
+{
+ bool tmp;
+
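+  /* An insn with luid 0 has no per-insn scheduling data, so there is
+     nowhere to cache the result; compute it directly.  */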
+ if (INSN_LUID (insn) == 0)
+ return sched_get_condition_with_rev_uncached (insn, rev);
+
+ if (INSN_CACHED_COND (insn) == const_true_rtx)
+ return NULL_RTX;
+
+ if (INSN_CACHED_COND (insn) != NULL_RTX)
+ {
+ if (rev)
+ *rev = INSN_REVERSE_COND (insn);
+ return INSN_CACHED_COND (insn);
+ }
+
+ INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
+ INSN_REVERSE_COND (insn) = tmp;
+
+ if (INSN_CACHED_COND (insn) == NULL_RTX)
+ {
+ INSN_CACHED_COND (insn) = const_true_rtx;
+ return NULL_RTX;
+ }
+
+ if (rev)
+ *rev = INSN_REVERSE_COND (insn);
+ return INSN_CACHED_COND (insn);
+}
+
/* True when we can find a condition under which INSN is executed. */
static bool
sched_has_condition_p (const_rtx insn)
(rev1==rev2
? reversed_comparison_code (cond2, NULL)
: GET_CODE (cond2))
- && XEXP (cond1, 0) == XEXP (cond2, 0)
+ && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
&& XEXP (cond1, 1) == XEXP (cond2, 1))
return 1;
return 0;
/* The following instructions, which depend on a speculatively scheduled
   instruction, cannot themselves be speculatively scheduled.  */
{
- if (may_trap_p (PATTERN (insn)))
- /* If instruction might trap, it cannot be speculatively scheduled.
+ if (may_trap_or_fault_p (PATTERN (insn)))
+ /* If instruction might fault, it cannot be speculatively scheduled.
For control speculation it's obvious why and for data speculation
it's because the insn might get wrong input if speculation
wasn't successful. */
INSN_FORW_DEPS (insn) = create_deps_list ();
INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
- if (DEBUG_INSN_P (insn))
- DEBUG_INSN_SCHED_P (insn) = TRUE;
-
/* ??? It would be nice to allocate dependency caches here. */
}
{
/* ??? It would be nice to deallocate dependency caches here. */
- if (DEBUG_INSN_P (insn))
- {
- gcc_assert (DEBUG_INSN_SCHED_P (insn));
- DEBUG_INSN_SCHED_P (insn) = FALSE;
- }
-
free_deps_list (INSN_HARD_BACK_DEPS (insn));
INSN_HARD_BACK_DEPS (insn) = NULL;
int elem_luid = INSN_LUID (pro);
int insn_luid = INSN_LUID (con);
- gcc_assert (output_dependency_cache != NULL
- && anti_dependency_cache != NULL);
-
if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
&& !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
- && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
+ && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
+ && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
return NULL;
}
gcc_assert (true_dependency_cache != NULL
&& output_dependency_cache != NULL
- && anti_dependency_cache != NULL);
+ && anti_dependency_cache != NULL
+ && control_dependency_cache != NULL);
if (!(current_sched_info->flags & USE_DEPS_LIST))
{
present_dep_type = REG_DEP_OUTPUT;
else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
present_dep_type = REG_DEP_ANTI;
+ else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
+ present_dep_type = REG_DEP_CONTROL;
else
/* There is no existing dep so it should be created. */
return DEP_CREATED;
present_dep_types |= DEP_OUTPUT;
if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
present_dep_types |= DEP_ANTI;
+ if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
+ present_dep_types |= DEP_CONTROL;
if (present_dep_types == 0)
/* There is no existing dep so it should be created. */
bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
break;
+ case REG_DEP_CONTROL:
+ bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
+ break;
+
default:
gcc_unreachable ();
}
bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
if (ds & DEP_ANTI)
bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
+ if (ds & DEP_CONTROL)
+ bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
if (ds & SPECULATIVE)
{
bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
break;
+ case REG_DEP_CONTROL:
+ bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
+ break;
+
default:
gcc_unreachable ();
}
{
enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
enum reg_note old_type = DEP_TYPE (dep);
+ bool was_spec = dep_spec_p (dep);
+
+ DEP_NONREG (dep) |= DEP_NONREG (new_dep);
+ DEP_MULTIPLE (dep) = 1;
/* If this is a more restrictive type of dependence than the
existing one, then change the existing dependence to this
ds_t new_status = ds | dep_status;
if (new_status & SPECULATIVE)
- /* Either existing dep or a dep we're adding or both are
- speculative. */
{
+ /* Either existing dep or a dep we're adding or both are
+ speculative. */
if (!(ds & SPECULATIVE)
|| !(dep_status & SPECULATIVE))
/* The new dep can't be speculative. */
- {
- new_status &= ~SPECULATIVE;
-
- if (dep_status & SPECULATIVE)
- /* The old dep was speculative, but now it
- isn't. */
- change_spec_dep_to_hard (sd_it);
- }
+ new_status &= ~SPECULATIVE;
else
{
/* Both are speculative. Merge probabilities. */
}
}
+ if (was_spec && !dep_spec_p (dep))
+ /* The old dep was speculative, but now it isn't. */
+ change_spec_dep_to_hard (sd_it);
+
if (true_dependency_cache != NULL
&& res == DEP_CHANGED)
update_dependency_caches (dep, old_type);
if (!resolved_p)
{
- if ((current_sched_info->flags & DO_SPECULATION)
- && (DEP_STATUS (dep) & SPECULATIVE))
+ if (dep_spec_p (dep))
*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
else
*back_list_ptr = INSN_HARD_BACK_DEPS (con);
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
- if ((current_sched_info->flags & DO_SPECULATION)
- && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
+ if ((current_sched_info->flags & DO_SPECULATION) == 0
+ || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
DEP_STATUS (dep) &= ~SPECULATIVE;
copy_dep (DEP_NODE_DEP (n), dep);
rtx pro = DEP_PRO (dep);
rtx con = DEP_CON (dep);
- if ((current_sched_info->flags & DO_SPECULATION)
- && (DEP_STATUS (dep) & SPECULATIVE))
+ if (dep_spec_p (dep))
move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
INSN_RESOLVED_BACK_DEPS (con));
else
INSN_RESOLVED_FORW_DEPS (pro));
}
+/* Perform the inverse operation of sd_resolve_dep. Restore the dependence
+ pointed to by SD_IT to unresolved state. */
+void
+sd_unresolve_dep (sd_iterator_def sd_it)
+{
+ dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
+ dep_t dep = DEP_NODE_DEP (node);
+ rtx pro = DEP_PRO (dep);
+ rtx con = DEP_CON (dep);
+
+ if (dep_spec_p (dep))
+ move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
+ INSN_SPEC_BACK_DEPS (con));
+ else
+ move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
+ INSN_HARD_BACK_DEPS (con));
+
+ move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
+ INSN_FORW_DEPS (pro));
+}
+
/* Make TO depend on all the FROM's producers.
If RESOLVED_P is true add dependencies to the resolved lists. */
void
bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
+ bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
if (current_sched_info->flags & DO_SPECULATION)
fprintf (stderr, "\n");
}
-/* A convenience wrapper to operate on an entire list. */
+/* A wrapper around add_dependence_1, to add a dependence of CON on
+ PRO, with type DEP_TYPE. This function implements special handling
+ for REG_DEP_CONTROL dependencies. For these, we optionally promote
+ the type to REG_DEP_ANTI if we can determine that predication is
+ impossible; otherwise we add additional true dependencies on the
+ INSN_COND_DEPS list of the jump (which PRO must be). */
+void
+add_dependence (rtx con, rtx pro, enum reg_note dep_type)
+{
+ if (dep_type == REG_DEP_CONTROL
+ && !(current_sched_info->flags & DO_PREDICATION))
+ dep_type = REG_DEP_ANTI;
+
+ /* A REG_DEP_CONTROL dependence may be eliminated through predication,
+ so we must also make the insn dependent on the setter of the
+ condition. */
+ if (dep_type == REG_DEP_CONTROL)
+ {
+ rtx real_pro = pro;
+ rtx other = real_insn_for_shadow (real_pro);
+ rtx cond;
+
+ if (other != NULL_RTX)
+ real_pro = other;
+ cond = sched_get_reverse_condition_uncached (real_pro);
+ /* Verify that the insn does not use a different value in
+ the condition register than the one that was present at
+ the jump. */
+ if (cond == NULL_RTX)
+ dep_type = REG_DEP_ANTI;
+ else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
+ {
+ HARD_REG_SET uses;
+ CLEAR_HARD_REG_SET (uses);
+ note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
+ if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
+ dep_type = REG_DEP_ANTI;
+ }
+ if (dep_type == REG_DEP_CONTROL)
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "making DEP_CONTROL for %d\n",
+ INSN_UID (real_pro));
+ add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
+ REG_DEP_TRUE, false);
+ }
+ }
+
+ add_dependence_1 (con, pro, dep_type);
+}
+
+/* A convenience wrapper to operate on an entire list. HARD should be
+ true if DEP_NONREG should be set on newly created dependencies. */
static void
-add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
+add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
+ bool hard)
{
+ mark_as_hard = hard;
for (; list; list = XEXP (list, 1))
{
if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
add_dependence (insn, XEXP (list, 0), dep_type);
}
+ mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
- is not readonly. */
+ is not readonly. HARD should be true if DEP_NONREG should be set on
+ newly created dependencies. */
static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
- int uncond, enum reg_note dep_type)
+ int uncond, enum reg_note dep_type, bool hard)
{
- rtx list, next;
+ add_dependence_list (insn, *listp, uncond, dep_type, hard);
/* We don't want to short-circuit dependencies involving debug
insns, because they may cause actual dependencies to be
disregarded. */
if (deps->readonly || DEBUG_INSN_P (insn))
- {
- add_dependence_list (insn, *listp, uncond, dep_type);
- return;
- }
+ return;
- for (list = *listp, *listp = NULL; list ; list = next)
- {
- next = XEXP (list, 1);
- if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
- add_dependence (insn, XEXP (list, 0), dep_type);
- free_INSN_LIST_node (list);
- }
+ free_INSN_LIST_list (listp);
}
-/* Remove all occurences of INSN from LIST. Return the number of
- occurences removed. */
+/* Remove all occurrences of INSN from LIST. Return the number of
+ occurrences removed. */
static int
remove_from_dependence_list (rtx insn, rtx* listp)
the previous nonnote insn. */
static void
-fixup_sched_groups (rtx insn)
+chain_to_prev_insn (rtx insn)
{
sd_iterator_def sd_it;
dep_t dep;
if (sched_deps_info->use_cselib)
{
mem = shallow_copy_rtx (mem);
- XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
+ XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
+ GET_MODE (mem), insn);
}
link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
if (for_write)
{
add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
- 1, REG_DEP_ANTI);
+ 1, REG_DEP_ANTI, true);
if (!deps->readonly)
{
free_EXPR_LIST_list (&deps->pending_read_mems);
}
add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
+ true);
add_dependence_list_and_free (deps, insn,
&deps->last_pending_memory_flush, 1,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
+ true);
+
+ add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
+ REG_DEP_ANTI, true);
+
+ if (DEBUG_INSN_P (insn))
+ {
+ if (for_write)
+ free_INSN_LIST_list (&deps->pending_read_insns);
+ free_INSN_LIST_list (&deps->pending_write_insns);
+ free_INSN_LIST_list (&deps->last_pending_memory_flush);
+ free_INSN_LIST_list (&deps->pending_jump_insns);
+ }
+
if (!deps->readonly)
{
free_EXPR_LIST_list (&deps->pending_write_mems);
deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
deps->pending_flush_length = 1;
}
+ mark_as_hard = false;
}
\f
/* Instruction which dependencies we are analyzing. */
dep_def _dep, *dep = &_dep;
init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
- current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
+ current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
+ DEP_NONREG (dep) = 1;
maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
}
dep_t dep = &_dep;
init_dep (dep, elem, cur_insn, ds_to_dt (ds));
+ if (mark_as_hard)
+ DEP_NONREG (dep) = 1;
maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
return REG_DEP_TRUE;
else if (ds & DEP_OUTPUT)
return REG_DEP_OUTPUT;
+ else if (ds & DEP_ANTI)
+ return REG_DEP_ANTI;
else
{
- gcc_assert (ds & DEP_ANTI);
- return REG_DEP_ANTI;
+ gcc_assert (ds & DEP_CONTROL);
+ return REG_DEP_CONTROL;
}
}
return use;
}
-/* Allocate and return reg_set_data structure for REGNO and INSN. */
-static struct reg_set_data *
+/* Allocate reg_set_data structure for REGNO and INSN. */
+static void
create_insn_reg_set (int regno, rtx insn)
{
struct reg_set_data *set;
set->insn = insn;
set->next_insn_set = INSN_REG_SET_LIST (insn);
INSN_REG_SET_LIST (insn) = set;
- return set;
}
/* Set up insn register uses for INSN and dependency context DEPS. */
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
if (clobber_p)
{
new_incr = reg_pressure_info[cl].clobber_increase + incr;
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
if (clobber_p)
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
reg_pressure_info[cl].change -= incr;
}
}
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
reg_pressure_info[cl].change -= 1;
}
}
/* Set up reg pressure info related to INSN. */
-static void
-setup_insn_reg_pressure_info (rtx insn)
+void
+init_insn_reg_pressure_info (rtx insn)
{
int i, len;
enum reg_class cl;
static struct reg_pressure_data *pressure_info;
rtx link;
- gcc_assert (sched_pressure_p);
+ gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
if (! INSN_P (insn))
return;
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
reg_pressure_info[cl].clobber_increase = 0;
reg_pressure_info[cl].set_increase = 0;
reg_pressure_info[cl].unused_set_increase = 0;
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
- len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
+ len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
pressure_info
= INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
- INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
- * sizeof (int), 1);
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
+ * sizeof (int), 1);
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
pressure_info[i].clobber_increase
= reg_pressure_info[cl].clobber_increase;
pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
else
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
}
}
}
/* Treat all writes to a stack register as modifying the TOS. */
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
- int nregs;
-
/* Avoid analyzing the same register twice. */
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
- nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
- while (--nregs >= 0)
- SET_HARD_REG_BIT (implicit_reg_pending_uses,
- FIRST_STACK_REG + nregs);
+ add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
+ FIRST_STACK_REG);
}
#endif
}
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
+ enum machine_mode address_mode = get_address_mode (dest);
t = shallow_copy_rtx (dest);
- cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0)
+ = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
+ insn);
}
t = canon_rtx (t);
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
+ add_dependence_list (insn, deps->pending_jump_insns, 1,
+ REG_DEP_CONTROL, true);
if (!deps->readonly)
add_insn_mem_dependence (deps, false, insn, dest);
switch (code)
{
- case CONST_INT:
- case CONST_DOUBLE:
- case CONST_FIXED:
- case CONST_VECTOR:
+ CASE_CONST_ANY:
case SYMBOL_REF:
case CONST:
case LABEL_REF:
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode
- = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
+ enum machine_mode address_mode = get_address_mode (t);
t = shallow_copy_rtx (t);
- cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0)
+ = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
+ insn);
}
if (!DEBUG_INSN_P (insn))
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
- t, rtx_varies_p)
+ if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
&& ! sched_insns_conditions_mutex_p (insn,
XEXP (pending, 0)))
note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- {
- if (! JUMP_P (XEXP (u, 0)))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- else if (deps_may_trap_p (x))
- {
- if ((sched_deps_info->generate_spec_deps)
- && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
- {
- ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
- MAX_DEP_WEAK);
-
- note_dep (XEXP (u, 0), ds);
- }
- else
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- }
- }
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+
+ for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
+ if (deps_may_trap_p (x))
+ {
+ if ((sched_deps_info->generate_spec_deps)
+ && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
+ {
+ ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+ MAX_DEP_WEAK);
+
+ note_dep (XEXP (u, 0), ds);
+ }
+ else
+ add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
+ }
}
/* Always add these dependencies to pending_reads, since
this insn may be followed by a write. */
- if (!deps->readonly)
- add_insn_mem_dependence (deps, true, insn, x);
+ if (!deps->readonly)
+ {
+ if ((deps->pending_read_list_length
+ + deps->pending_write_list_length)
+ > MAX_PENDING_LIST_LENGTH
+ && !DEBUG_INSN_P (insn))
+ flush_pending_lists (deps, insn, true, true);
+ add_insn_mem_dependence (deps, true, insn, x);
+ }
sched_analyze_2 (deps, XEXP (x, 0), insn);
case PREFETCH:
if (PREFETCH_SCHEDULE_BARRIER_P (x))
reg_pending_barrier = TRUE_BARRIER;
+      /* A prefetch insn contains only an address.  So if the prefetch
+	 address has no registers, there will be no dependencies on
+	 the prefetch insn.  That is wrong from a correctness point of
+	 view, because such a prefetch could be moved below a jump
+	 insn, which usually generates a MOVE_BARRIER to prevent insns
+	 containing registers or memories from moving across it.  It
+	 is also bad for performance, because a prefetch without
+	 dependencies tends to be issued later instead of earlier.
+	 It is hard to generate accurate dependencies for prefetch
+	 insns, as a prefetch has only the start address, but it is
+	 better to have something than nothing.  */
+ if (!deps->readonly)
+ {
+ rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
+ if (sched_deps_info->use_cselib)
+ cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
+ add_insn_mem_dependence (deps, true, insn, x);
+ }
break;
case UNSPEC_VOLATILE:
Consider for instance a volatile asm that changes the fpu rounding
mode. An insn should not be moved across this even if it only uses
pseudo-regs because it might give an incorrectly rounded result. */
- if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ && !DEBUG_INSN_P (insn))
reg_pending_barrier = TRUE_BARRIER;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
sched_deps_info->finish_rhs ();
}
+/* Try to group a comparison and the following conditional jump INSN if
+   they are already adjacent.  This prevents the scheduler from scheduling
+   them apart.  */
+
+static void
+try_group_insn (rtx insn)
+{
+ unsigned int condreg1, condreg2;
+ rtx cc_reg_1;
+ rtx prev;
+
+ if (!any_condjump_p (insn))
+ return;
+
+ targetm.fixed_condition_code_regs (&condreg1, &condreg2);
+ cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
+ prev = prev_nonnote_nondebug_insn (insn);
+ if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
+ || !prev
+ || !modified_in_p (cc_reg_1, prev))
+ return;
+
+ /* Different microarchitectures support macro fusions for different
+ combinations of insn pairs. */
+ if (!targetm.sched.macro_fusion_pair_p
+ || !targetm.sched.macro_fusion_pair_p (prev, insn))
+ return;
+
+ SCHED_GROUP_P (insn) = 1;
+}
+
/* Analyze an INSN with pattern X to find all dependencies. */
static void
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
HARD_REG_SET temp;
extract_insn (insn);
- preprocess_constraints ();
+ preprocess_constraints (insn);
ira_implicitly_set_insn_hard_regs (&temp);
AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
&& code == SET);
+ /* Group compare and branch insns for macro-fusion. */
+ if (targetm.sched.macro_fusion_p
+ && targetm.sched.macro_fusion_p ())
+ try_group_insn (insn);
+
if (may_trap_p (x))
- /* Avoid moving trapping instructions accross function calls that might
+ /* Avoid moving trapping instructions across function calls that might
not always return. */
add_dependence_list (insn, deps->last_function_call_may_noreturn,
- 1, REG_DEP_ANTI);
+ 1, REG_DEP_ANTI, true);
+
+ /* We must avoid creating a situation in which two successors of the
+ current block have different unwind info after scheduling. If at any
+ point the two paths re-join this leads to incorrect unwind info. */
+ /* ??? There are certain situations involving a forced frame pointer in
+ which, with extra effort, we could fix up the unwind info at a later
+ CFG join. However, it seems better to notice these cases earlier
+ during prologue generation and avoid marking the frame pointer setup
+ as frame-related at all. */
+ if (RTX_FRAME_RELATED_P (insn))
+ {
+ /* Make sure prologue insn is scheduled before next jump. */
+ deps->sched_before_next_jump
+ = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
+
+ /* Make sure epilogue insn is scheduled after preceding jumps. */
+ add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
+ true);
+ }
if (code == COND_EXEC)
{
instruction so that reg-stack won't get confused. */
if (code == CLOBBER)
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, true);
}
else if (code == PARALLEL)
{
{
if (GET_CODE (XEXP (link, 0)) == CLOBBER)
sched_analyze_1 (deps, XEXP (link, 0), insn);
- else
+ else if (GET_CODE (XEXP (link, 0)) != SET)
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
- if (find_reg_note (insn, REG_SETJMP, NULL))
+  /* Don't schedule anything after a tail call; a tail call needs
+     to use at least all call-saved registers.  */
+ if (SIBLING_CALL_P (insn))
+ reg_pending_barrier = TRUE_BARRIER;
+ else if (find_reg_note (insn, REG_SETJMP, NULL))
reg_pending_barrier = MOVE_BARRIER;
}
if (sched_deps_info->compute_jump_reg_dependencies)
{
- regset_head tmp_uses, tmp_sets;
- INIT_REG_SET (&tmp_uses);
- INIT_REG_SET (&tmp_sets);
-
(*sched_deps_info->compute_jump_reg_dependencies)
- (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
+ (insn, reg_pending_control_uses);
+
/* Make latency of jump equal to 0 by using anti-dependence. */
- EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
+ false);
add_dependence_list (insn, reg_last->implicit_sets,
- 0, REG_DEP_ANTI);
+ 0, REG_DEP_ANTI, false);
add_dependence_list (insn, reg_last->clobbers, 0,
- REG_DEP_ANTI);
-
- if (!deps->readonly)
- {
- reg_last->uses_length++;
- reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
- }
+ REG_DEP_ANTI, false);
}
- IOR_REG_SET (reg_pending_sets, &tmp_sets);
-
- CLEAR_REG_SET (&tmp_uses);
- CLEAR_REG_SET (&tmp_sets);
}
/* All memory writes and volatile reads must happen before the
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
+ add_dependence_list (insn, deps->pending_jump_insns, 1,
+ REG_DEP_ANTI, true);
}
}
|| (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
reg_pending_barrier = MOVE_BARRIER;
- if (sched_pressure_p)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
{
setup_insn_reg_uses (deps, insn);
- setup_insn_reg_pressure_info (insn);
+ init_insn_reg_pressure_info (insn);
}
/* Add register dependencies for insn. */
add_dependence (insn, prev, REG_DEP_ANTI);
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! JUMP_P (XEXP (u, 0))
- || !sel_sched_p ())
+ if (!sel_sched_p ())
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
+ /* There's no point in making REG_DEP_CONTROL dependencies for
+ debug insns. */
+ add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
+ false);
if (!deps->readonly)
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
}
else
{
+ regset_head set_or_clobbered;
+
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
- add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
+ add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
+ false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
+ false);
if (!deps->readonly)
{
if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
+ false);
if (!deps->readonly)
{
}
}
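+      /* The condition register is being overwritten, so cached
+	 conditions of earlier insns that refer to it can no longer be
+	 relied upon; invalidate them.  */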
+ if (targetm.sched.exposed_pipeline)
+ {
+ INIT_REG_SET (&set_or_clobbered);
+ bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
+ reg_pending_sets);
+ EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ rtx list;
+ for (list = reg_last->uses; list; list = XEXP (list, 1))
+ {
+ rtx other = XEXP (list, 0);
+ if (INSN_CACHED_COND (other) != const_true_rtx
+ && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
+ INSN_CACHED_COND (other) = const_true_rtx;
+ }
+ }
+ }
+
/* If the current insn is conditional, we can't free any
of the lists. */
if (sched_has_condition_p (insn))
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
+ add_dependence_list (insn, reg_last->control_uses, 0,
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
+ false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
+ add_dependence_list (insn, reg_last->control_uses, 0,
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
- {
- reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
- SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
- }
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
}
}
else
|| reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
{
add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn,
®_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
- REG_DEP_ANTI);
- add_dependence_list_and_free
- (deps, insn, ®_last->clobbers, 0, REG_DEP_OUTPUT);
+ REG_DEP_ANTI, false);
+ add_dependence_list_and_free (deps, insn,
+ ®_last->control_uses, 0,
+ REG_DEP_ANTI, false);
+ add_dependence_list_and_free (deps, insn,
+ ®_last->clobbers, 0,
+ REG_DEP_OUTPUT, false);
if (!deps->readonly)
{
}
else
{
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
+ add_dependence_list (insn, reg_last->control_uses, 0,
+ REG_DEP_CONTROL, false);
}
if (!deps->readonly)
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn,
®_last->implicit_sets,
- 0, REG_DEP_ANTI);
+ 0, REG_DEP_ANTI, false);
add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->control_uses, 0,
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
- CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
}
}
}
+ if (!deps->readonly)
+ {
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ reg_last->control_uses
+ = alloc_INSN_LIST (insn, reg_last->control_uses);
+ }
+ }
}
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
+ false);
if (!deps->readonly)
reg_last->implicit_sets
CLEAR_REG_SET (reg_pending_uses);
CLEAR_REG_SET (reg_pending_clobbers);
CLEAR_REG_SET (reg_pending_sets);
+ CLEAR_REG_SET (reg_pending_control_uses);
CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ true);
add_dependence_list (insn, reg_last->sets, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI, true);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list (insn, reg_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI, true);
}
}
else
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
+ add_dependence_list_and_free (deps, insn,
+ ®_last->control_uses, 0,
+ REG_DEP_CONTROL, true);
add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI,
+ true);
add_dependence_list_and_free (deps, insn,
®_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI,
+ true);
if (!deps->readonly)
{
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
- /* Flush pending lists on jumps, but not on speculative checks. */
- if (JUMP_P (insn) && !(sel_sched_p ()
- && sel_insn_is_speculation_check (insn)))
+ /* Don't flush pending lists on speculative checks for
+ selective scheduling. */
+ if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
flush_pending_lists (deps, insn, true, true);
- if (!deps->readonly)
- CLEAR_REG_SET (&deps->reg_conditional_sets);
reg_pending_barrier = NOT_A_BARRIER;
}
instructions that follow seem like they should be part
of the call group.
- Also, if we did, fixup_sched_groups() would move the
+ Also, if we did, chain_to_prev_insn would move the
deps of the debug insn to the call insn, modifying
non-debug post-dependency counts of the debug insn
dependencies and otherwise messing with the scheduling
change_spec_dep_to_hard (sd_it);
}
}
+
+ /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
+ honor their original ordering. */
+ if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
+ {
+ if (deps->last_args_size)
+ add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
+ deps->last_args_size = insn;
+ }
}
/* Return TRUE if INSN might not always return normally (e.g. call exit,
longjmp, loop forever, ...). */
+/* FIXME: Why can't this function just use flags_from_decl_or_type and
+ test for ECF_NORETURN? */
static bool
call_may_noreturn_p (rtx insn)
{
&& !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
return false;
- call = PATTERN (insn);
- if (GET_CODE (call) == PARALLEL)
- call = XVECEXP (call, 0, 0);
- if (GET_CODE (call) == SET)
- call = SET_SRC (call);
- if (GET_CODE (call) == CALL
- && MEM_P (XEXP (call, 0))
- && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
+ call = get_call_rtx_from (insn);
+ if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
{
rtx symbol = XEXP (XEXP (call, 0), 0);
if (SYMBOL_REF_DECL (symbol)
return true;
}
+/* Return true if INSN should be made dependent on the previous instruction
+ group, and if all INSN's dependencies should be moved to the first
+ instruction of that group. */
+
+static bool
+chain_to_prev_insn_p (rtx insn)
+{
+ rtx prev, x;
+
+ /* INSN forms a group with the previous instruction. */
+ if (SCHED_GROUP_P (insn))
+ return true;
+
+ /* If the previous instruction clobbers a register R and this one sets
+ part of R, the clobber was added specifically to help us track the
+ liveness of R. There's no point scheduling the clobber and leaving
+ INSN behind, especially if we move the clobber to another block. */
+ prev = prev_nonnote_nondebug_insn (insn);
+ if (prev
+ && INSN_P (prev)
+ && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
+ && GET_CODE (PATTERN (prev)) == CLOBBER)
+ {
+ x = XEXP (PATTERN (prev), 0);
+ if (set_of (x, insn))
+ return true;
+ }
+
+ return false;
+}
+
/* Analyze INSN with DEPS as a context. */
void
deps_analyze_insn (struct deps_desc *deps, rtx insn)
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
- if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
+ /* Record the condition for this insn. */
+ if (NONDEBUG_INSN_P (insn))
+ {
+ rtx t;
+ sched_get_condition_with_rev (insn, NULL);
+ t = INSN_CACHED_COND (insn);
+ INSN_COND_DEPS (insn) = NULL_RTX;
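+      /* When predication is possible, record the insns that last set or
+	 clobbered the condition register in INSN_COND_DEPS;
+	 add_dependence consults this list when it creates
+	 REG_DEP_CONTROL dependencies.  */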
+ if (reload_completed
+ && (current_sched_info->flags & DO_PREDICATION)
+ && COMPARISON_P (t)
+ && REG_P (XEXP (t, 0))
+ && CONSTANT_P (XEXP (t, 1)))
+ {
+ unsigned int regno;
+ int nregs;
+ t = XEXP (t, 0);
+ regno = REGNO (t);
+ nregs = hard_regno_nregs[regno][GET_MODE (t)];
+ t = NULL_RTX;
+ while (nregs-- > 0)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
+ t = concat_INSN_LIST (reg_last->sets, t);
+ t = concat_INSN_LIST (reg_last->clobbers, t);
+ t = concat_INSN_LIST (reg_last->implicit_sets, t);
+ }
+ INSN_COND_DEPS (insn) = t;
+ }
+ }
+
+ if (JUMP_P (insn))
{
/* Make each JUMP_INSN (but not a speculative check)
a scheduling barrier for memory references. */
if (!deps->readonly
- && JUMP_P (insn)
&& !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
{
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, true, true);
else
- deps->last_pending_memory_flush
- = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ deps->pending_jump_insns
+ = alloc_INSN_LIST (insn, deps->pending_jump_insns);
}
+ /* For each insn which shouldn't cross a jump, add a dependence. */
+ add_dependence_list_and_free (deps, insn,
+ &deps->sched_before_next_jump, 1,
+ REG_DEP_ANTI, true);
+
+ sched_analyze_insn (deps, PATTERN (insn), insn);
+ }
+ else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
+ {
sched_analyze_insn (deps, PATTERN (insn), insn);
}
else if (CALL_P (insn))
between that insn and this call insn. */
add_dependence_list_and_free (deps, insn,
&deps->sched_before_next_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
sched_analyze_insn (deps, PATTERN (insn), insn);
if (sched_deps_info->use_cselib)
cselib_process_insn (insn);
- /* EH_REGION insn notes can not appear until well after we complete
- scheduling. */
- if (NOTE_P (insn))
- gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
- && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
-
if (sched_deps_info->finish_insn)
sched_deps_info->finish_insn ();
/* Fixup the dependencies in the sched group. */
if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
- && SCHED_GROUP_P (insn) && !sel_sched_p ())
- fixup_sched_groups (insn);
+ && chain_to_prev_insn_p (insn)
+ && !sel_sched_p ())
+ chain_to_prev_insn (insn);
}
/* Initialize DEPS for the new block beginning with HEAD. */
{
/* And initialize deps_lists. */
sd_init_insn (insn);
+	/* Clean up SCHED_GROUP_P, which may have been set by the
+	   previous scheduler pass.  */
+ if (SCHED_GROUP_P (insn))
+ SCHED_GROUP_P (insn) = 0;
}
deps_analyze_insn (deps, insn);
rtx insn;
rtx next_tail = NEXT_INSN (tail);
+ /* We make two passes since some insns may be scheduled before their
+ dependencies are resolved. */
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
if (INSN_P (insn) && INSN_LUID (insn) > 0)
{
- /* Clear resolved back deps together with its dep_nodes. */
- delete_dep_nodes_in_back_deps (insn, resolved_p);
-
/* Clear forward deps and leave the dep_nodes to the
corresponding back_deps list. */
if (resolved_p)
clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
else
clear_deps_list (INSN_FORW_DEPS (insn));
+ }
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (INSN_P (insn) && INSN_LUID (insn) > 0)
+ {
+ /* Clear resolved back deps together with its dep_nodes. */
+ delete_dep_nodes_in_back_deps (insn, resolved_p);
sd_finish_insn (insn);
}
else
deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
- INIT_REG_SET (&deps->reg_conditional_sets);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
deps->pending_write_insns = 0;
deps->pending_write_mems = 0;
+ deps->pending_jump_insns = 0;
deps->pending_read_list_length = 0;
deps->pending_write_list_length = 0;
deps->pending_flush_length = 0;
deps->last_function_call = 0;
deps->last_function_call_may_noreturn = 0;
deps->sched_before_next_call = 0;
+ deps->sched_before_next_jump = 0;
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
+ deps->last_args_size = 0;
deps->last_reg_pending_barrier = NOT_A_BARRIER;
deps->readonly = 0;
}
free_INSN_LIST_list (®_last->sets);
if (reg_last->implicit_sets)
free_INSN_LIST_list (®_last->implicit_sets);
+ if (reg_last->control_uses)
+ free_INSN_LIST_list (®_last->control_uses);
if (reg_last->clobbers)
free_INSN_LIST_list (®_last->clobbers);
}
CLEAR_REG_SET (&deps->reg_last_in_use);
- CLEAR_REG_SET (&deps->reg_conditional_sets);
/* As we initialize reg_last lazily, it is possible that we didn't allocate
it at all. */
- if (deps->reg_last)
- free (deps->reg_last);
+ free (deps->reg_last);
deps->reg_last = NULL;
deps = NULL;
}
-/* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets
- is not handled. */
+/* Remove INSN from dependence contexts DEPS. */
void
remove_from_deps (struct deps_desc *deps, rtx insn)
{
removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
&deps->pending_write_mems);
deps->pending_write_list_length -= removed;
+
+ removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
+ deps->pending_flush_length -= removed;
removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
deps->pending_flush_length -= removed;
static void
init_deps_data_vector (void)
{
- int reserve = (sched_max_luid + 1
- - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
- if (reserve > 0
- && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
- VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
- 3 * sched_max_luid / 2);
+ int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
+ if (reserve > 0 && ! h_d_i_d.space (reserve))
+ h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
/* If it is profitable to use them, initialize or extend (depending on
{
/* Average number of insns in the basic block.
'+ 1' is used to make it nonzero. */
- int insns_in_block = sched_max_luid / n_basic_blocks + 1;
+ int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
init_deps_data_vector ();
output_dependency_cache, luid);
anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
luid);
+ control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
+ luid);
if (current_sched_info->flags & DO_SPECULATION)
spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
bitmap_initialize (&true_dependency_cache[i], 0);
bitmap_initialize (&output_dependency_cache[i], 0);
bitmap_initialize (&anti_dependency_cache[i], 0);
+ bitmap_initialize (&control_dependency_cache[i], 0);
if (current_sched_info->flags & DO_SPECULATION)
bitmap_initialize (&spec_dependency_cache[i], 0);
free_alloc_pool_if_empty (&dl_pool);
gcc_assert (dn_pool == NULL && dl_pool == NULL);
- VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
+ h_d_i_d.release ();
cache_size = 0;
if (true_dependency_cache)
bitmap_clear (&true_dependency_cache[i]);
bitmap_clear (&output_dependency_cache[i]);
bitmap_clear (&anti_dependency_cache[i]);
+ bitmap_clear (&control_dependency_cache[i]);
if (sched_deps_info->generate_spec_deps)
bitmap_clear (&spec_dependency_cache[i]);
output_dependency_cache = NULL;
free (anti_dependency_cache);
anti_dependency_cache = NULL;
+ free (control_dependency_cache);
+ control_dependency_cache = NULL;
if (sched_deps_info->generate_spec_deps)
{
reg_pending_sets = ALLOC_REG_SET (®_obstack);
reg_pending_clobbers = ALLOC_REG_SET (®_obstack);
reg_pending_uses = ALLOC_REG_SET (®_obstack);
+ reg_pending_control_uses = ALLOC_REG_SET (®_obstack);
reg_pending_barrier = NOT_A_BARRIER;
if (!sel_sched_p () || sched_emulate_haifa_p)
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);
FREE_REG_SET (reg_pending_uses);
+ FREE_REG_SET (reg_pending_control_uses);
}
/* Estimate the weakness of dependence between MEM1 and MEM2. */
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
This function can handle same INSN and ELEM (INSN == ELEM).
It is a convenience wrapper. */
-void
-add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
+static void
+add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
{
ds_t ds;
bool internal;
ds = DEP_TRUE;
else if (dep_type == REG_DEP_OUTPUT)
ds = DEP_OUTPUT;
+ else if (dep_type == REG_DEP_CONTROL)
+ ds = DEP_CONTROL;
else
{
gcc_assert (dep_type == REG_DEP_ANTI);
cur_insn = NULL;
}
-/* Return weakness of speculative type TYPE in the dep_status DS. */
-dw_t
+/* Return weakness of speculative type TYPE in the dep_status DS,
+   skipping the consistency checks of get_dep_weak so that malformed
+   input does not cause an ICE.  */
+static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
ds = ds & type;
return (dw_t) ds;
}
+/* Return weakness of speculative type TYPE in the dep_status DS. */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
if (s & DEP_TRUE)
fprintf (f, "DEP_TRUE; ");
- if (s & DEP_ANTI)
- fprintf (f, "DEP_ANTI; ");
if (s & DEP_OUTPUT)
fprintf (f, "DEP_OUTPUT; ");
+ if (s & DEP_ANTI)
+ fprintf (f, "DEP_ANTI; ");
+ if (s & DEP_CONTROL)
+ fprintf (f, "DEP_CONTROL; ");
fprintf (f, "}");
}
if (!(current_sched_info->flags & USE_DEPS_LIST))
{
- gcc_assert (ds == -1);
+ gcc_assert (ds == 0);
return;
}
else if (dt == REG_DEP_OUTPUT)
gcc_assert ((ds & DEP_OUTPUT)
&& !(ds & DEP_TRUE));
- else
- gcc_assert ((dt == REG_DEP_ANTI)
- && (ds & DEP_ANTI)
+ else if (dt == REG_DEP_ANTI)
+ gcc_assert ((ds & DEP_ANTI)
&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
+ else
+ gcc_assert (dt == REG_DEP_CONTROL
+ && (ds & DEP_CONTROL)
+ && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
/* HARD_DEP can not appear in dep_status of a link. */
gcc_assert (!(ds & HARD_DEP));
}
#endif /* ENABLE_CHECKING */
+/* The following code discovers opportunities to switch a memory reference
+ and an increment by modifying the address. We ensure that this is done
+ only for dependencies that are only used to show a single register
+ dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
+ instruction involved is subject to only one dep that can cause a pattern
+ change.
+
+ When we discover a suitable dependency, we fill in the dep_replacement
+ structure to show how to modify the memory reference. */
+
+/* Holds information about a pair of memory reference and register increment
+ insns which depend on each other, but could possibly be interchanged. */
+struct mem_inc_info
+{
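+  /* The increment insn and the memory reference insn that form the
+     candidate pair.  */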
+ rtx inc_insn;
+ rtx mem_insn;
+
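+  /* The location of the memory reference inside MEM_INSN's pattern.  */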
+ rtx *mem_loc;
+ /* A register occurring in the memory address for which we wish to break
+ the dependence. This must be identical to the destination register of
+ the increment. */
+ rtx mem_reg0;
+ /* Any kind of index that is added to that register. */
+ rtx mem_index;
+ /* The constant offset used in the memory address. */
+ HOST_WIDE_INT mem_constant;
+ /* The constant added in the increment insn. Negated if the increment is
+ after the memory address. */
+ HOST_WIDE_INT inc_constant;
+ /* The source register used in the increment. May be different from mem_reg0
+ if the increment occurs before the memory address. */
+ rtx inc_input;
+};
+
+/* Verify that the memory location described in MII can be replaced with
+ one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
+ insn remains unchanged by this function. */
+
+static rtx
+attempt_change (struct mem_inc_info *mii, rtx new_addr)
+{
+ rtx mem = *mii->mem_loc;
+ rtx new_mem;
+
+ /* Jump through a lot of hoops to keep the attributes up to date. We
+ do not want to call one of the change address variants that take
+ an offset even though we know the offset in many cases. These
+ assume you are changing where the address is pointing by the
+ offset. */
+ new_mem = replace_equiv_address_nv (mem, new_addr);
+ if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "validation failure\n");
+ return NULL_RTX;
+ }
+
+ /* Put back the old one. */
+ validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
+
+ return new_mem;
+}
+
+/* Return true if INSN is of a form "a = b op c" where a and b are
+ regs. op is + if c is a reg and +|- if c is a const. Fill in
+   information in MII about what is found.
+ BEFORE_MEM indicates whether the increment is found before or after
+ a corresponding memory reference. */
+
+static bool
+parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
+{
+ rtx pat = single_set (insn);
+ rtx src, cst;
+ bool regs_equal;
+
+ if (RTX_FRAME_RELATED_P (insn) || !pat)
+ return false;
+
+ /* Result must be single reg. */
+ if (!REG_P (SET_DEST (pat)))
+ return false;
+
+ if (GET_CODE (SET_SRC (pat)) != PLUS)
+ return false;
+
+ mii->inc_insn = insn;
+ src = SET_SRC (pat);
+ mii->inc_input = XEXP (src, 0);
+
+ if (!REG_P (XEXP (src, 0)))
+ return false;
+
+ if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
+ return false;
+
+ cst = XEXP (src, 1);
+ if (!CONST_INT_P (cst))
+ return false;
+ mii->inc_constant = INTVAL (cst);
+
+ regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
+
+ if (!before_mem)
+ {
+ mii->inc_constant = -mii->inc_constant;
+ if (!regs_equal)
+ return false;
+ }
+
+ if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
+ {
+ /* Note that the sign has already been reversed for !before_mem. */
+#ifdef STACK_GROWS_DOWNWARD
+ return mii->inc_constant > 0;
+#else
+ return mii->inc_constant < 0;
+#endif
+ }
+ return true;
+}
+
+/* Once a suitable mem reference has been found and the corresponding data
+ in MII has been filled in, this function is called to find a suitable
+ add or inc insn involving the register we found in the memory
+ reference. */
+
+static bool
+find_inc (struct mem_inc_info *mii, bool backwards)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ sd_it = sd_iterator_start (mii->mem_insn,
+ backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
+ while (sd_iterator_cond (&sd_it, &dep))
+ {
+ dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
+ rtx pro = DEP_PRO (dep);
+ rtx con = DEP_CON (dep);
+ rtx inc_cand = backwards ? pro : con;
+ if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
+ goto next;
+ if (parse_add_or_inc (mii, inc_cand, backwards))
+ {
+ struct dep_replacement *desc;
+ df_ref def;
+ rtx newaddr, newmem;
+
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
+ INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
+
+	      /* We need to ensure that none of the operands of the inc
+		 instruction are assigned to by the mem insn.  */
+ FOR_EACH_INSN_DEF (def, mii->mem_insn)
+ if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
+ || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump,
+ "inc conflicts with store failure.\n");
+ goto next;
+ }
+ newaddr = mii->inc_input;
+ if (mii->mem_index != NULL_RTX)
+ newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
+ mii->mem_index);
+ newaddr = plus_constant (GET_MODE (newaddr), newaddr,
+ mii->mem_constant + mii->inc_constant);
+ newmem = attempt_change (mii, newaddr);
+ if (newmem == NULL_RTX)
+ goto next;
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "successful address replacement\n");
+ desc = XCNEW (struct dep_replacement);
+ DEP_REPLACE (dep) = desc;
+ desc->loc = mii->mem_loc;
+ desc->newval = newmem;
+ desc->orig = *desc->loc;
+ desc->insn = mii->mem_insn;
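+	      /* With DEP_REPLACE set, dep_spec_p now considers this
+		 dependence speculative, so move it from the hard to the
+		 speculative back-deps list.  */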
+ move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
+ INSN_SPEC_BACK_DEPS (con));
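+	      /* Since the mem and inc insns may now be interchanged,
+		 make the mem insn also respect the insns the inc insn
+		 depends on (or that depend on it).  */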
+ if (backwards)
+ {
+ FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
+ add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
+ REG_DEP_TRUE);
+ }
+ else
+ {
+ FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
+ add_dependence_1 (DEP_CON (dep), mii->mem_insn,
+ REG_DEP_ANTI);
+ }
+ return true;
+ }
+ next:
+ sd_iterator_next (&sd_it);
+ }
+ return false;
+}
+
+/* A recursive function that walks ADDRESS_OF_X to find memory references
+ which could be modified during scheduling. We call find_inc for each
+ one we find that has a recognizable form. MII holds information about
+ the pair of memory/increment instructions.
+ We ensure that every instruction with a memory reference (which will be
+ the location of the replacement) is assigned at most one breakable
+ dependency. */
+
+static bool
+find_mem (struct mem_inc_info *mii, rtx *address_of_x)
+{
+ rtx x = *address_of_x;
+ enum rtx_code code = GET_CODE (x);
+ const char *const fmt = GET_RTX_FORMAT (code);
+ int i;
+
+ if (code == MEM)
+ {
+ rtx reg0 = XEXP (x, 0);
+
+ mii->mem_loc = address_of_x;
+ mii->mem_index = NULL_RTX;
+ mii->mem_constant = 0;
+ if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
+ {
+ mii->mem_constant = INTVAL (XEXP (reg0, 1));
+ reg0 = XEXP (reg0, 0);
+ }
+ if (GET_CODE (reg0) == PLUS)
+ {
+ mii->mem_index = XEXP (reg0, 1);
+ reg0 = XEXP (reg0, 0);
+ }
+ if (REG_P (reg0))
+ {
+ df_ref use;
+ int occurrences = 0;
+
+ /* Make sure this reg appears only once in this insn. Can't use
+ count_occurrences since that only works for pseudos. */
+ FOR_EACH_INSN_USE (use, mii->mem_insn)
+ if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
+ if (++occurrences > 1)
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "mem count failure\n");
+ return false;
+ }
+
+ mii->mem_reg0 = reg0;
+ return find_inc (mii, true) || find_inc (mii, false);
+ }
+ return false;
+ }
+
+ if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
+ {
+ /* If REG occurs inside a MEM used in a bit-field reference,
+ that is unacceptable. */
+ return false;
+ }
+
+ /* Time for some deep diving. */
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (find_mem (mii, &XEXP (x, i)))
+ return true;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (find_mem (mii, &XVECEXP (x, i, j)))
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/* Examine the instructions between HEAD and TAIL and try to find
+ dependencies that can be broken by modifying one of the patterns. */
+
+void
+find_modifiable_mems (rtx head, rtx tail)
+{
+ rtx insn, next_tail = NEXT_INSN (tail);
+ int success_in_block = 0;
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ struct mem_inc_info mii;
+
+ if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
+ continue;
+
+ mii.mem_insn = insn;
+ if (find_mem (&mii, &PATTERN (insn)))
+ success_in_block++;
+ }
+ if (success_in_block && sched_verbose >= 5)
+ fprintf (sched_dump, "%d candidates for address modification found.\n",
+ success_in_block);
+}
+
#endif /* INSN_SCHEDULING */