/* Instruction scheduling pass.
- Copyright (C) 1992-2014 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "diagnostic-core.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
-#include "regs.h"
-#include "function.h"
-#include "flags.h"
#include "insn-config.h"
+#include "emit-rtl.h"
+#include "recog.h"
+#include "profile.h"
#include "insn-attr.h"
#include "except.h"
-#include "recog.h"
-#include "params.h"
+#include "cfganal.h"
#include "sched-int.h"
#include "sel-sched.h"
-#include "target.h"
#include "tree-pass.h"
#include "dbgcnt.h"
+#include "pretty-print.h"
+#include "print-rtl.h"
+
+/* Disable warnings about quoting issues in the pp_xxx calls below
+ that (intentionally) don't follow GCC diagnostic conventions. */
+#if __GNUC__ >= 10
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wformat-diag"
+#endif
#ifdef INSN_SCHEDULING
while other blocks in the region from which insns can be moved to the
target are called "source" blocks. The candidate structure holds info
about such sources: are they valid? Speculative? Etc. */
-typedef struct
+struct bblst
{
basic_block *first_member;
int nr_members;
-}
-bblst;
+};
-typedef struct
+struct candidate
{
char is_valid;
char is_speculative;
int src_prob;
bblst split_bbs;
bblst update_bbs;
-}
-candidate;
+};
static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
int target_bb;
/* List of edges. */
-typedef struct
+struct edgelst
{
edge *first_member;
int nr_members;
-}
-edgelst;
+};
static edge *edgelst_table;
static int edgelst_last;
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
-static int find_conditional_protection (rtx, int);
+static int find_conditional_protection (rtx_insn *, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
-static int is_exception_free (rtx, int, int);
+static int is_exception_free (rtx_insn *, int, int);
static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
-static void add_branch_dependences (rtx, rtx);
+static void add_branch_dependences (rtx_insn *, rtx_insn *);
static void compute_block_dependences (int);
static void schedule_region (int);
-static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
-static void propagate_deps (int, struct deps_desc *);
+static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
+ rtx_insn_list **, rtx_expr_list **);
+static void propagate_deps (int, class deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
is_cfg_nonregular (void)
{
basic_block b;
- rtx insn;
+ rtx_insn *insn;
/* If we have a label that could be the target of a nonlocal goto, then
the cfg is not well structured. */
FOR_EACH_BB_FN (b, cfun)
FOR_BB_INSNS (b, insn)
{
- rtx note, next, set, dest;
+ rtx note, set, dest;
+ rtx_insn *next;
/* If this function has a computed jump, then we consider the cfg
not well structured. */
if (ebbs_p)
  {
int probability_cutoff;
- if (profile_info && flag_branch_probabilities)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
+ probability_cutoff = param_tracer_min_branch_probability_feedback;
else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = param_tracer_min_branch_probability;
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
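+  /* Worked example (illustrative values): with REG_BR_PROB_BASE == 10000
+     and a cutoff parameter of 50, the scaled cutoff is
+     10000 / 100 * 50 == 5000, so the extended basic block below only
+     continues across a fall-through edge taken with more than 50%
+     probability.  */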
FOR_EACH_BB_FN (ebb_start, cfun)
e = find_fallthru_edge (bb->succs);
if (! e)
break;
- if (e->probability <= probability_cutoff)
+ if (e->probability.initialized_p ()
+ && e->probability.to_reg_br_prob_base () <= probability_cutoff)
break;
}
if (MAY_HAVE_DEBUG_INSNS)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (DEBUG_INSN_P (insn))
(*num_insns) += (common_sched_info->estimate_number_of_insns
(BASIC_BLOCK_FOR_FN (cfun, block)));
- return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
- || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
+ return ((*num_bbs > param_max_sched_region_blocks)
+ || (*num_insns > param_max_sched_region_insns));
}
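+/* Both limits are user-tunable, e.g.:
+
+     gcc -O2 -fschedule-insns --param max-sched-region-blocks=20 \
+         --param max-sched-region-insns=200 ...
+
+   (the values above are illustrative; the defaults are 10 blocks and
+   100 insns).  */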
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
int too_large_failure;
basic_block bb;
- /* Note if a block is a natural loop header. */
- sbitmap header;
-
- /* Note if a block is a natural inner loop header. */
- sbitmap inner;
-
- /* Note if a block is in the block queue. */
- sbitmap in_queue;
-
- /* Note if a block is in the block queue. */
- sbitmap in_stack;
-
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
and a mapping from block to its loop header (if the block is contained
in a loop, else -1).
dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
- inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is a natural inner loop header. */
+ auto_sbitmap inner (last_basic_block_for_fn (cfun));
bitmap_ones (inner);
- header = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is a natural loop header. */
+ auto_sbitmap header (last_basic_block_for_fn (cfun));
bitmap_clear (header);
- in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is in the block queue. */
+ auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
bitmap_clear (in_queue);
- in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is in the block queue. */
+ auto_sbitmap in_stack (last_basic_block_for_fn (cfun));
bitmap_clear (in_stack);
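+  /* The allocations above replace matched sbitmap_alloc/sbitmap_free pairs
+     with auto_sbitmap, which releases its storage when it goes out of
+     scope.  A minimal sketch of the idiom:
+
+       {
+         auto_sbitmap tmp (last_basic_block_for_fn (cfun));  // RAII alloc
+         bitmap_clear (tmp);      // usable wherever an sbitmap is expected
+       }                          // storage released automatically here
+  */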
for (i = 0; i < last_basic_block_for_fn (cfun); i++)
queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
- extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+ extend_regions_p = param_max_sched_extend_regions_iters > 0;
if (extend_regions_p)
{
degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
The algorithm in the DFS traversal may not mark B & D as part
of the loop (i.e. they will not have max_hdr set to A).
- We know they can not be loop latches (else they would have
+ We know they cannot be loop latches (else they would have
had max_hdr set since they'd have a backedge to a dominator
block). So we don't need them on the initial queue.
free (max_hdr);
free (degree);
free (stack);
- sbitmap_free (header);
- sbitmap_free (inner);
- sbitmap_free (in_queue);
- sbitmap_free (in_stack);
}
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
- max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+ max_iter = param_max_sched_extend_regions_iters;
max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));
- prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
+ prob[bb] += combine_probabilities
+ (prob[pred_bb],
+ in_edge->probability.initialized_p ()
+ ? in_edge->probability.to_reg_br_prob_base ()
+ : 0);
// The rounding divide in combine_probabilities can result in an extra
// probability increment propagating along 50-50 edges. Eventually when
// the edges re-merge, the accumulated probability can go slightly above
static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
- sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
+ auto_sbitmap src (SBITMAP_SIZE (pot_split[bb_src]));
bitmap_copy (src, pot_split[bb_src]);
bitmap_and_compl (src, src, pot_split[bb_trg]);
extract_edgelst (src, bl);
- sbitmap_free (src);
}
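+/* In set terms, the above computes BL = pot_split[BB_SRC] minus
+   pot_split[BB_TRG].  For instance (edge names purely illustrative), with
+   pot_split[src] = {e1, e2, e3} and pot_split[trg] = {e2} the extracted
+   list is {e1, e3}: the candidate split edges of the source block that
+   the target block does not already account for.  */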
/* Find the valid candidate-source-blocks for the target block TRG, compute
edgelst el = { NULL, 0 };
int i, j, k, update_idx;
basic_block block;
- sbitmap visited;
edge_iterator ei;
edge e;
sp->is_speculative = 0;
sp->src_prob = REG_BR_PROB_BASE;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
for (i = trg + 1; i < current_nr_blocks; i++)
{
sp->src_prob = 0;
}
}
-
- sbitmap_free (visited);
}
/* Free the computed target info. */
if (regno < FIRST_PSEUDO_REGISTER)
{
/* Check for hard registers. */
- int j = hard_regno_nregs[regno][GET_MODE (reg)];
+ int j = REG_NREGS (reg);
while (--j >= 0)
{
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
{
basic_block b = candidate_table[src].update_bbs.first_member[i];
-
- if (HARD_REGISTER_NUM_P (regno))
- bitmap_set_range (df_get_live_in (b), regno,
- hard_regno_nregs[regno][GET_MODE (reg)]);
- else
- bitmap_set_bit (df_get_live_in (b), regno);
+ bitmap_set_range (df_get_live_in (b), regno, REG_NREGS (reg));
}
}
}
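+/* REG_NREGS (reg) reads the register count directly from the REG rtx and
+   is 1 for a pseudo, so the single bitmap_set_range call above subsumes
+   both arms of the deleted HARD_REGISTER_NUM_P test as well as the old
+   hard_regno_nregs[regno][GET_MODE (reg)] lookup.  */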
ready-list or before the scheduling. */
static int
-check_live (rtx insn, int src)
+check_live (rtx_insn *insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
block src to trg. */
static void
-update_live (rtx insn, int src)
+update_live (rtx_insn *insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
branch depending on insn, that guards the speculative load. */
static int
-find_conditional_protection (rtx insn, int load_insn_bb)
+find_conditional_protection (rtx_insn *insn, int load_insn_bb)
{
sd_iterator_def sd_it;
dep_t dep;
/* Iterate through DEF-USE forward dependences. */
FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
{
- rtx next = DEP_CON (dep);
+ rtx_insn *next = DEP_CON (dep);
if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
{
- rtx insn1 = DEP_PRO (dep);
+ rtx_insn *insn1 = DEP_PRO (dep);
/* Must be a DEF-USE dependence upon non-branch. */
if (DEP_TYPE (dep) != REG_DEP_TRUE
FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
{
- rtx insn1 = DEP_PRO (back_dep);
+ rtx_insn *insn1 = DEP_PRO (back_dep);
if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
/* Found a DEF-USE dependence (insn1, load_insn). */
FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
{
- rtx insn2 = DEP_CON (fore_dep);
+ rtx_insn *insn2 = DEP_CON (fore_dep);
if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
{
and 0 otherwise. */
static int
-is_exception_free (rtx insn, int bb_src, int bb_trg)
+is_exception_free (rtx_insn *insn, int bb_src, int bb_trg)
{
int insn_class = haifa_classify_insn (insn);
if (is_pfree (insn, bb_src, bb_trg))
return 1;
/* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
+ /* FALLTHRU */
case PRISKY_CANDIDATE:
if (!flag_schedule_speculative_load_dangerous
|| is_prisky (insn, bb_src, bb_trg))
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
-static int can_schedule_ready_p (rtx);
-static void begin_schedule_ready (rtx);
-static ds_t new_ready (rtx, ds_t);
+static int can_schedule_ready_p (rtx_insn *);
+static void begin_schedule_ready (rtx_insn *);
+static ds_t new_ready (rtx_insn *, ds_t);
static int schedule_more_p (void);
-static const char *rgn_print_insn (const_rtx, int);
-static int rgn_rank (rtx, rtx);
+static const char *rgn_print_insn (const rtx_insn *, int);
+static int rgn_rank (rtx_insn *, rtx_insn *);
static void compute_jump_reg_dependencies (rtx, regset);
/* Functions for speculative scheduling. */
-static void rgn_add_remove_insn (rtx, int);
+static void rgn_add_remove_insn (rtx_insn *, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
-static basic_block advance_target_bb (basic_block, rtx);
+static basic_block advance_target_bb (basic_block, rtx_insn *);
/* Return nonzero if there are more insns that should be scheduled. */
static void
init_ready_list (void)
{
- rtx prev_head = current_sched_info->prev_head;
- rtx next_tail = current_sched_info->next_tail;
+ rtx_insn *prev_head = current_sched_info->prev_head;
+ rtx_insn *next_tail = current_sched_info->next_tail;
int bb_src;
- rtx insn;
+ rtx_insn *insn;
target_n_insns = 0;
sched_target_n_insns = 0;
for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
if (IS_VALID (bb_src))
{
- rtx src_head;
- rtx src_next_tail;
- rtx tail, head;
+ rtx_insn *src_head;
+ rtx_insn *src_next_tail;
+ rtx_insn *tail, *head;
get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
&head, &tail);
insn can be scheduled, zero if we should silently discard it. */
static int
-can_schedule_ready_p (rtx insn)
+can_schedule_ready_p (rtx_insn *insn)
{
/* An interblock motion? */
- if (INSN_BB (insn) != target_bb
- && IS_SPECULATIVE_INSN (insn)
- && !check_live (insn, INSN_BB (insn)))
- return 0;
- else
- return 1;
+ if (INSN_BB (insn) != target_bb && IS_SPECULATIVE_INSN (insn))
+ {
+ /* Cannot schedule this insn unless all operands are live. */
+ if (!check_live (insn, INSN_BB (insn)))
+ return 0;
+
+ /* Should not move expensive instructions speculatively. */
+ if (GET_CODE (PATTERN (insn)) != CLOBBER
+ && !targetm.sched.can_speculate_insn (insn))
+ return 0;
+ }
+
+ return 1;
}
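+/* The second test above consults the TARGET_SCHED_CAN_SPECULATE_INSN hook.
+   A minimal sketch of a target implementation (hypothetical target;
+   get_attr_type and TYPE_DIV stand in for whatever attributes the
+   target's .md files actually define):
+
+     static bool
+     example_can_speculate_insn (rtx_insn *insn)
+     {
+       // Never hoist long-latency divides above their guarding branch.
+       if (recog_memoized (insn) >= 0
+           && get_attr_type (insn) == TYPE_DIV)
+         return false;
+       return true;
+     }
+*/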
/* Updates counter and other information.  Split from can_schedule_ready_p ()
   because when we schedule insn speculatively then insn passed to
   can_schedule_ready_p () differs from the one passed to
   begin_schedule_ready (). */
static void
-begin_schedule_ready (rtx insn)
+begin_schedule_ready (rtx_insn *insn)
{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
Return nonzero if it should be moved to the ready list or the queue, or zero
if we should silently discard it. */
static ds_t
-new_ready (rtx next, ds_t ts)
+new_ready (rtx_insn *next, ds_t ts)
{
if (INSN_BB (next) != target_bb)
{
|| (IS_SPECULATIVE_INSN (next)
&& ((recog_memoized (next) >= 0
&& min_insn_conflict_delay (curr_state, next, next)
- > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ > param_max_sched_insn_conflict_delay)
|| IS_SPECULATION_CHECK_P (next)
|| !check_live (next, INSN_BB (next))
|| (not_ex_free = !is_exception_free (next, INSN_BB (next),
to be formatted so that multiple output lines will line up nicely. */
static const char *
-rgn_print_insn (const_rtx insn, int aligned)
+rgn_print_insn (const rtx_insn *insn, int aligned)
{
static char tmp[80];
is to be preferred. Zero if they are equally good. */
static int
-rgn_rank (rtx insn1, rtx insn2)
+rgn_rank (rtx_insn *insn1, rtx_insn *insn2)
{
/* Some comparison make sense in interblock scheduling only. */
if (INSN_BB (insn1) != INSN_BB (insn2))
calculations. */
int
-contributes_to_priority (rtx next, rtx insn)
+contributes_to_priority (rtx_insn *next, rtx_insn *insn)
{
/* NEXT and INSN reside in one ebb. */
return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
/* Return true if scheduling INSN will trigger finish of scheduling
current block. */
static bool
-rgn_insn_finishes_block_p (rtx insn)
+rgn_insn_finishes_block_p (rtx_insn *insn)
{
if (INSN_BB (insn) == target_bb
&& sched_target_n_insns + 1 == target_n_insns)
sets_likely_spilled (rtx pat)
{
bool ret = false;
- note_stores (pat, sets_likely_spilled_1, &ret);
+ note_pattern_stores (pat, sets_likely_spilled_1, &ret);
return ret;
}
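+/* note_pattern_stores is the pattern-taking variant of the old note_stores
+   interface (note_stores itself now takes an insn); it invokes
+   sets_likely_spilled_1 once for every destination set or clobbered by
+   PAT.  */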
/* Add dependences so that branches are scheduled to run last in their
block. */
static void
-add_branch_dependences (rtx head, rtx tail)
+add_branch_dependences (rtx_insn *head, rtx_insn *tail)
{
- rtx insn, last;
+ rtx_insn *insn, *last;
/* For all branches, calls, uses, clobbers, cc0 setters, and instructions
that can throw exceptions, force them to remain in order at the end of
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
-#ifdef HAVE_cc0
- || sets_cc0_p (PATTERN (insn))
-#endif
+ || (HAVE_cc0 && sets_cc0_p (PATTERN (insn)))
|| (!reload_completed
&& sets_likely_spilled (PATTERN (insn)))))
|| NOTE_P (insn)
while (insn != head && DEBUG_INSN_P (insn));
}
+ /* Selective scheduling handles control dependencies by itself, and
+ CANT_MOVE flags ensure that other insns will be kept in place. */
+ if (sel_sched_p ())
+ return;
+
/* Make sure these insns are scheduled last in their block. */
insn = last;
if (insn != 0)
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
-static struct deps_desc *bb_deps;
+static class deps_desc *bb_deps;
static void
-concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
- rtx *old_mems_p)
+concat_insn_mem_list (rtx_insn_list *copy_insns,
+ rtx_expr_list *copy_mems,
+ rtx_insn_list **old_insns_p,
+ rtx_expr_list **old_mems_p)
{
- rtx new_insns = *old_insns_p;
- rtx new_mems = *old_mems_p;
+ rtx_insn_list *new_insns = *old_insns_p;
+ rtx_expr_list *new_mems = *old_mems_p;
while (copy_insns)
{
- new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
- new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
- copy_insns = XEXP (copy_insns, 1);
- copy_mems = XEXP (copy_mems, 1);
+ new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns);
+ new_mems = alloc_EXPR_LIST (VOIDmode, copy_mems->element (), new_mems);
+ copy_insns = copy_insns->next ();
+ copy_mems = copy_mems->next ();
}
*old_insns_p = new_insns;
/* Join PRED_DEPS to the SUCC_DEPS. */
void
-deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
+deps_join (class deps_desc *succ_deps, class deps_desc *pred_deps)
{
unsigned reg;
reg_set_iterator rsi;
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (int bb, struct deps_desc *pred_deps)
+propagate_deps (int bb, class deps_desc *pred_deps)
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
edge_iterator ei;
static void
compute_block_dependences (int bb)
{
- rtx head, tail;
- struct deps_desc tmp_deps;
+ rtx_insn *head, *tail;
+ class deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
sched_analyze (&tmp_deps, head, tail);
- /* Selective scheduling handles control dependencies by itself. */
- if (!sel_sched_p ())
- add_branch_dependences (head, tail);
+ add_branch_dependences (head, tail);
if (current_nr_blocks > 1)
propagate_deps (bb, &tmp_deps);
static void
free_block_dependencies (int bb)
{
- rtx head;
- rtx tail;
+ rtx_insn *head;
+ rtx_insn *tail;
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
for (bb = from_bb; bb < current_nr_blocks; bb++)
{
- rtx head, tail;
+ rtx_insn *head, *tail;
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
/* Print dependencies information for instructions between HEAD and TAIL.
??? This function would probably fit best in haifa-sched.c. */
-void debug_dependencies (rtx head, rtx tail)
+void debug_dependencies (rtx_insn *head, rtx_insn *tail)
{
- rtx insn;
- rtx next_tail = NEXT_INSN (tail);
+ rtx_insn *insn;
+ rtx_insn *next_tail = NEXT_INSN (tail);
fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
"insn", "code", "bb", "dep", "prio", "cost",
: INSN_PRIORITY (insn))
: INSN_PRIORITY (insn)),
(sel_sched_p () ? (sched_emulate_haifa_p ? -1
- : insn_cost (insn))
- : insn_cost (insn)));
+ : insn_sched_cost (insn))
+ : insn_sched_cost (insn)));
if (recog_memoized (insn) < 0)
fprintf (sched_dump, "nothing");
fprintf (sched_dump, "\n");
}
+
+/* Dump dependency graph for the current region to a file using dot syntax. */
+
+void
+dump_rgn_dependencies_dot (FILE *file)
+{
+ rtx_insn *head, *tail, *con, *pro;
+ sd_iterator_def sd_it;
+ dep_t dep;
+ int bb;
+ pretty_printer pp;
+
+ pp.buffer->stream = file;
+ pp_printf (&pp, "digraph SchedDG {\n");
+
+ for (bb = 0; bb < current_nr_blocks; ++bb)
+ {
+ /* Begin subgraph (basic block). */
+ pp_printf (&pp, "subgraph cluster_block_%d {\n", bb);
+ pp_printf (&pp, "\t" "color=blue;" "\n");
+ pp_printf (&pp, "\t" "style=bold;" "\n");
+ pp_printf (&pp, "\t" "label=\"BB #%d\";\n", BB_TO_BLOCK (bb));
+
+ /* Setup head and tail (no support for EBBs). */
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
+ tail = NEXT_INSN (tail);
+
+ /* Dump all insns. */
+ for (con = head; con != tail; con = NEXT_INSN (con))
+ {
+ if (!INSN_P (con))
+ continue;
+
+ /* Pretty print the insn. */
+ pp_printf (&pp, "\t%d [label=\"{", INSN_UID (con));
+ pp_write_text_to_stream (&pp);
+ print_insn (&pp, con, /*verbose=*/false);
+ pp_write_text_as_dot_label_to_stream (&pp, /*for_record=*/true);
+ pp_write_text_to_stream (&pp);
+
+ /* Dump instruction attributes. */
+ pp_printf (&pp, "|{ uid:%d | luid:%d | prio:%d }}\",shape=record]\n",
+ INSN_UID (con), INSN_LUID (con), INSN_PRIORITY (con));
+
+ /* Dump all deps. */
+ FOR_EACH_DEP (con, SD_LIST_BACK, sd_it, dep)
+ {
+ int weight = 0;
+ const char *color;
+ pro = DEP_PRO (dep);
+
+ switch (DEP_TYPE (dep))
+ {
+ case REG_DEP_TRUE:
+ color = "black";
+ weight = 1;
+ break;
+ case REG_DEP_OUTPUT:
+ case REG_DEP_ANTI:
+ color = "orange";
+ break;
+ case REG_DEP_CONTROL:
+ color = "blue";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ pp_printf (&pp, "\t%d -> %d [color=%s",
+ INSN_UID (pro), INSN_UID (con), color);
+ if (int cost = dep_cost (dep))
+ pp_printf (&pp, ",label=%d", cost);
+ pp_printf (&pp, ",weight=%d", weight);
+ pp_printf (&pp, "];\n");
+ }
+ }
+ pp_printf (&pp, "}\n");
+ }
+
+ pp_printf (&pp, "}\n");
+ pp_flush (&pp);
+}
+
+/* Dump dependency graph for the current region to the file named FNAME,
+   using dot syntax.  */
+
+DEBUG_FUNCTION void
+dump_rgn_dependencies_dot (const char *fname)
+{
+ FILE *fp;
+
+ fp = fopen (fname, "w");
+ if (!fp)
+ {
+ perror ("fopen");
+ return;
+ }
+
+ dump_rgn_dependencies_dot (fp);
+ fclose (fp);
+}
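+
+/* Both entry points are convenient from the debugger while a region is
+   being scheduled; a typical (illustrative) session:
+
+     (gdb) call dump_rgn_dependencies_dot ("/tmp/rgn.dot")
+     $ dot -Tpdf /tmp/rgn.dot -o rgn.pdf
+
+   Any writable file name works; graphviz is only needed for rendering.  */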
+
\f
/* Returns true if all the basic blocks of the current region have
NOTE_DISABLE_SCHED_OF_BLOCK which means not to schedule that region. */
for (bb = 0; bb < current_nr_blocks; bb++)
{
- rtx head, tail;
+ rtx_insn *head, *tail;
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
current_sched_info->sched_max_insns_priority = 0;
for (bb = 0; bb < current_nr_blocks; bb++)
{
- rtx head, tail;
+ rtx_insn *head, *tail;
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
for (bb = 0; bb < current_nr_blocks; bb++)
{
basic_block first_bb, last_bb;
- rtx head, tail;
+ rtx_insn *head, *tail;
first_bb = EBB_FIRST_BB (bb);
last_bb = EBB_LAST_BB (bb);
for (bb = 0; bb < current_nr_blocks; bb++)
{
basic_block first_bb, last_bb, curr_bb;
- rtx head, tail;
+ rtx_insn *head, *tail;
first_bb = EBB_FIRST_BB (bb);
last_bb = EBB_LAST_BB (bb);
sched_rgn_n_insns += sched_n_insns;
realloc_bb_state_array (saved_last_basic_block);
f = find_fallthru_edge (last_bb->succs);
- if (f && f->probability * 100 / REG_BR_PROB_BASE >=
- PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
+ if (f
+ && (!f->probability.initialized_p ()
+ || (f->probability.to_reg_br_prob_base () * 100
+ / REG_BR_PROB_BASE
+ >= param_sched_state_edge_prob_cutoff)))
{
memcpy (bb_state[f->dest->index], curr_state,
dfa_state_size);
void
sched_rgn_init (bool single_blocks_p)
{
- min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
/ 100);
nr_inter = 0;
free_dominance_info (CDI_DOMINATORS);
}
- gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
+ gcc_assert (nr_regions > 0 && nr_regions <= n_basic_blocks_for_fn (cfun));
- RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
- RGN_NR_BLOCKS (nr_regions - 1));
+ RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1)
+ + RGN_NR_BLOCKS (nr_regions - 1));
nr_regions_initial = nr_regions;
}
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ bb_deps = XNEWVEC (class deps_desc, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb, false);
haifa_sched_init ();
sched_rgn_init (reload_completed);
- bitmap_initialize (¬_in_df, 0);
- bitmap_clear (¬_in_df);
+ bitmap_initialize (¬_in_df, &bitmap_default_obstack);
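+  /* The default-obstack initialization pairs with the bitmap_release call
+     in the cleanup path below: release returns the elements and detaches
+     the head from the obstack, whereas the old
+     bitmap_initialize (&not_in_df, 0) / bitmap_clear combination left a
+     GC-style head behind.  */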
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
/* Clean up. */
sched_rgn_finish ();
- bitmap_clear (¬_in_df);
+ bitmap_release (¬_in_df);
haifa_sched_finish ();
}
/* INSN has been added to/removed from current region. */
static void
-rgn_add_remove_insn (rtx insn, int remove_p)
+rgn_add_remove_insn (rtx_insn *insn, int remove_p)
{
if (!remove_p)
rgn_n_insns++;
/* Return next block in ebb chain. For parameter meaning please refer to
sched-int.h: struct sched_info: advance_target_bb. */
static basic_block
-advance_target_bb (basic_block bb, rtx insn)
+advance_target_bb (basic_block bb, rtx_insn *insn)
{
if (insn)
return 0;
return 0;
}
+static unsigned int
+rest_of_handle_sched_fusion (void)
+{
+#ifdef INSN_SCHEDULING
+ sched_fusion = true;
+ schedule_insns ();
+ sched_fusion = false;
+#endif
+ return 0;
+}
+
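+/* A minimal sketch of the target hook that drives this pass (hypothetical
+   target; real implementations, e.g. aarch64's, compare base registers
+   and offsets so that pairable loads/stores get equal fusion priorities
+   and consecutive intra-group priorities):
+
+     static void
+     example_fusion_priority (rtx_insn *insn, int max_pri,
+                              int *fusion_pri, int *pri)
+     {
+       *fusion_pri = max_pri;                  // one fusion group for all
+       *pri = max_pri - (INSN_UID (insn) & 0xffff);  // stable in-group order
+     }
+
+   The scheduler sorts the ready list by these priorities so that fusible
+   insns become adjacent; the actual merging is left to peephole2.  */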
namespace {
const pass_data pass_data_live_range_shrinkage =
RTL_PASS, /* type */
"lr_shrinkage", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_execute */
TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_df_finish | TODO_verify_rtl_sharing
- | TODO_verify_flow ), /* todo_flags_finish */
+ TODO_df_finish, /* todo_flags_finish */
};
class pass_live_range_shrinkage : public rtl_opt_pass
RTL_PASS, /* type */
"sched1", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_execute */
TV_SCHED, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_df_finish | TODO_verify_rtl_sharing
- | TODO_verify_flow ), /* todo_flags_finish */
+ TODO_df_finish, /* todo_flags_finish */
};
class pass_sched : public rtl_opt_pass
RTL_PASS, /* type */
"sched2", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_execute */
TV_SCHED2, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_df_finish | TODO_verify_rtl_sharing
- | TODO_verify_flow ), /* todo_flags_finish */
+ TODO_df_finish, /* todo_flags_finish */
};
class pass_sched2 : public rtl_opt_pass
{
return new pass_sched2 (ctxt);
}
+
+namespace {
+
+const pass_data pass_data_sched_fusion =
+{
+ RTL_PASS, /* type */
+ "sched_fusion", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_SCHED_FUSION, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_df_finish, /* todo_flags_finish */
+};
+
+class pass_sched_fusion : public rtl_opt_pass
+{
+public:
+ pass_sched_fusion (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_sched_fusion, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *);
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_sched_fusion ();
+ }
+
+}; // class pass_sched_fusion
+
+bool
+pass_sched_fusion::gate (function *)
+{
+#ifdef INSN_SCHEDULING
+ /* Scheduling fusion relies on peephole2 to do real fusion work,
+ so only enable it if peephole2 is in effect. */
+ return (optimize > 0 && flag_peephole2
+ && flag_schedule_fusion && targetm.sched.fusion_priority != NULL);
+#else
+ return false;
+#endif
+}
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_sched_fusion (gcc::context *ctxt)
+{
+ return new pass_sched_fusion (ctxt);
+}
+
+#if __GNUC__ >= 10
+# pragma GCC diagnostic pop
+#endif