/* Instruction scheduling pass. This file computes dependencies between
instructions.
- Copyright (C) 1992-2015 Free Software Foundation, Inc.
+ Copyright (C) 1992-2017 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com).  */
#include "system.h"
#include "coretypes.h"
#include "backend.h"
-#include "tree.h"
+#include "target.h"
#include "rtl.h"
+#include "tree.h"
#include "df.h"
-#include "diagnostic-core.h"
-#include "alias.h"
-#include "tm_p.h"
-#include "regs.h"
-#include "flags.h"
#include "insn-config.h"
+#include "regs.h"
+#include "memmodel.h"
+#include "ira.h"
+#include "ira-int.h"
#include "insn-attr.h"
-#include "except.h"
-#include "recog.h"
-#include "emit-rtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "params.h"
-#include "alloc-pool.h"
#include "cselib.h"
-#include "ira.h"
-#include "target.h"
#ifdef INSN_SCHEDULING
-#ifdef ENABLE_CHECKING
-#define CHECK (true)
-#else
-#define CHECK (false)
-#endif
-
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
/* Pool to hold all dependency nodes (dep_node_t). */
-static pool_allocator<_dep_node> *dn_pool;
+static object_allocator<_dep_node> *dn_pool;
/* Number of dep_nodes out there. */
static int dn_pool_diff = 0;
/* Pool to hold dependencies lists (deps_list_t). */
-static pool_allocator<_deps_list> *dl_pool;
+static object_allocator<_deps_list> *dl_pool;
/* Number of deps_lists out there. */
static int dl_pool_diff = 0;
rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
-#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
-#endif
+
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-#ifdef ENABLE_CHECKING
- check_dep (new_dep, mem1 != NULL);
-#endif
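+ /* ENABLE_CHECKING is gone; internal consistency checks are now gated
+ at run time by flag_checking (the -fchecking option).  */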
+ if (flag_checking)
+ check_dep (new_dep, mem1 != NULL);
if (true_dependency_cache != NULL)
{
add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
-#ifdef ENABLE_CHECKING
- check_dep (dep, false);
-#endif
+ if (flag_checking)
+ check_dep (dep, false);
add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = hard_regno_nregs[regno][mode];
+ int i = hard_regno_nregs (regno, mode);
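+ /* hard_regno_nregs is now a function wrapping the
+ targetm.hard_regno_nregs target hook rather than a global array,
+ hence the call syntax above.  */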
if (ref == SET)
{
while (--i >= 0)
{
if (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == ZERO_EXTRACT
- || df_read_modify_subreg_p (dest))
+ || read_modify_subreg_p (dest))
{
/* These both read and modify the result. We must handle
them as writes to get proper dependencies for following
return;
}
- /* Force pending stores to memory in case a trap handler needs them. */
+ /* Force pending stores to memory in case a trap handler needs them.
+ Also force pending loads from memory; loads and stores can segfault
+ and the signal handler won't be triggered if the trap insn was moved
+ above a load or store insn.  */
case TRAP_IF:
- flush_pending_lists (deps, insn, true, false);
+ flush_pending_lists (deps, insn, true, true);
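+ /* The added true is the FOR_WRITE argument of flush_pending_lists; it
+ makes the flush cover the pending-load list as well as the
+ pending-store list.  */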
break;
case PREFETCH:
sched_macro_fuse_insns (rtx_insn *insn)
{
rtx_insn *prev;
-
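+ /* Look up the previous real insn once, up front; both the conditional
+ jump path and the generic two-insn path below need it.  */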
+ prev = prev_nonnote_nondebug_insn (insn);
+ if (!prev)
+ return;
+
if (any_condjump_p (insn))
{
unsigned int condreg1, condreg2;
rtx cc_reg_1;
targetm.fixed_condition_code_regs (&condreg1, &condreg2);
cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
- prev = prev_nonnote_nondebug_insn (insn);
- if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
- || !prev
- || !modified_in_p (cc_reg_1, prev))
- return;
+ if (reg_referenced_p (cc_reg_1, PATTERN (insn))
+ && modified_in_p (cc_reg_1, prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
+ return;
+ }
}
- else
- {
- rtx insn_set = single_set (insn);
-
- prev = prev_nonnote_nondebug_insn (insn);
- if (!prev
- || !insn_set
- || !single_set (prev))
- return;
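+ /* A conditional jump whose condition-code test fails now falls through
+ to the generic single_set check below instead of returning early.  */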
+ if (single_set (insn) && single_set (prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
}
+}
- if (targetm.sched.macro_fusion_pair_p (prev, insn))
- SCHED_GROUP_P (insn) = 1;
-
+/* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
+void
+get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
+{
+ extract_insn (insn);
+ preprocess_constraints (insn);
+ alternative_mask preferred = get_preferred_alternatives (insn);
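+ /* ira_implicitly_set_insn_hard_regs now takes the mask of enabled
+ constraint alternatives, so only alternatives still available for
+ INSN are considered.  */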
+ ira_implicitly_set_insn_hard_regs (temp, preferred);
+ AND_COMPL_HARD_REG_SET (*temp, ira_no_alloc_regs);
}
/* Analyze an INSN with pattern X to find all dependencies. */
if (! reload_completed)
{
HARD_REG_SET temp;
-
- extract_insn (insn);
- preprocess_constraints (insn);
- ira_implicitly_set_insn_hard_regs (&temp);
- AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
+ get_implicit_reg_pending_clobbers (&temp, insn);
IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
}
{
if (deps->last_args_size)
add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
- deps->last_args_size = insn;
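+ /* When DEPS is read-only (e.g. during selective-scheduling analysis)
+ the recorded state must not be updated.  */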
+ if (!deps->readonly)
+ deps->last_args_size = insn;
+ }
+
+ /* We must not mix prologue and epilogue insns. See PR78029. */
+ if (prologue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_prologue);
+ deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
+ deps->last_logue_was_epilogue = false;
+ }
+ }
+
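+ /* Symmetrically, force every epilogue insn to follow any recorded
+ prologue insns, and drop the stale epilogue list once the direction
+ flips.  */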
+ if (epilogue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (!deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_epilogue);
+ deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
+ deps->last_logue_was_epilogue = true;
+ }
}
}
Since we only have a choice between 'might be clobbered'
and 'definitely not clobbered', we must include all
partly call-clobbered registers here. */
- else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
+ else if (targetm.hard_regno_call_part_clobbered (i,
+ reg_raw_mode[i])
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
/* We don't know what set of fixed registers might be used
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
deps->last_args_size = 0;
+ deps->last_prologue = 0;
+ deps->last_epilogue = 0;
+ deps->last_logue_was_epilogue = false;
deps->last_reg_pending_barrier = NOT_A_BARRIER;
deps->readonly = 0;
}
removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
deps->pending_flush_length -= removed;
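+ /* Clearing the current bit inside EXECUTE_IF_SET_IN_REG_SET would
+ invalidate the iterator, so remember the register number and clear it
+ on the next iteration (and once more after the loop for the final
+ register).  */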
+ unsigned to_clear = -1U;
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
+ if (to_clear != -1U)
+ {
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
+ to_clear = -1U;
+ }
struct deps_reg *reg_last = &deps->reg_last[i];
if (reg_last->uses)
remove_from_dependence_list (insn, &reg_last->uses);
remove_from_dependence_list (insn, &reg_last->clobbers);
if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
&& !reg_last->clobbers)
- CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ to_clear = i;
}
+ if (to_clear != -1U)
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
if (CALL_P (insn))
{
if (global_p)
{
- dl_pool = new pool_allocator<_deps_list> ("deps_list",
- /* Allocate lists for one block at a time. */
- insns_in_block);
- dn_pool = new pool_allocator<_dep_node> ("dep_node",
- /* Allocate nodes for one block at a time.
- We assume that average insn has
- 5 producers. */
- 5 * insns_in_block);
+ dl_pool = new object_allocator<_deps_list> ("deps_list");
+ /* Allocate lists for one block at a time. */
+ dn_pool = new object_allocator<_dep_node> ("dep_node");
+ /* Allocate nodes for one block at a time. */
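+ /* object_allocator grows its backing pool on demand, so the explicit
+ per-block size hints that pool_allocator took are no longer needed.  */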
}
}
sched_deps_finish (void)
{
gcc_assert (deps_pools_are_empty_p ());
- dn_pool->release_if_empty ();
+ delete dn_pool;
+ delete dl_pool;
dn_pool = NULL;
- dl_pool->release_if_empty ();
dl_pool = NULL;
h_d_i_d.release ();
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
- rtx r1, r2;
-
if (mem1 == mem2)
/* MEMs are the same - don't speculate. */
return MIN_DEP_WEAK;
- r1 = XEXP (mem1, 0);
- r2 = XEXP (mem2, 0);
+ rtx r1 = XEXP (mem1, 0);
+ rtx r2 = XEXP (mem2, 0);
+
+ if (sched_deps_info->use_cselib)
+ {
+ /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
+ dangling at this point, since we never preserve them. Instead we
+ canonicalize manually to get stable VALUEs out of hashing. */
+ if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
+ r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
+ if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
+ r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
+ }
if (r1 == r2
- || (REG_P (r1) && REG_P (r2)
- && REGNO (r1) == REGNO (r2)))
+ || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
/* Again, MEMs are the same. */
return MIN_DEP_WEAK;
- else if ((REG_P (r1) && !REG_P (r2))
- || (!REG_P (r1) && REG_P (r2)))
+ else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
/* Different addressing modes - reason to be more speculative
than usual. */
return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
fprintf (stderr, "\n");
}
-#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
If RELAXED_P is true, then skip dep_weakness checks. */
static void
gcc_assert (ds & BEGIN_CONTROL);
}
}
-#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
and an increment by modifying the address. We ensure that this is done
if (RTX_FRAME_RELATED_P (insn) || !pat)
return false;
+ /* Do not allow breaking data dependencies for insns that are marked
+ with REG_STACK_CHECK. */
+ if (find_reg_note (insn, REG_STACK_CHECK, NULL))
+ return false;
+
/* Result must be single reg. */
if (!REG_P (SET_DEST (pat)))
return false;