/* Instruction scheduling pass.
- Copyright (C) 1992-2018 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "profile.h"
#include "insn-attr.h"
#include "except.h"
-#include "params.h"
#include "cfganal.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "pretty-print.h"
#include "print-rtl.h"
+/* Disable warnings about quoting issues in the pp_xxx calls below
+ that (intentionally) don't follow GCC diagnostic conventions. */
+#if __GNUC__ >= 10
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wformat-diag"
+#endif
+
#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file. */
static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
rtx_insn_list **, rtx_expr_list **);
-static void propagate_deps (int, struct deps_desc *);
+static void propagate_deps (int, class deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
if (ebbs_p) {
int probability_cutoff;
if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+ probability_cutoff = param_tracer_min_branch_probability_feedback;
else
- probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+ probability_cutoff = param_tracer_min_branch_probability;
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
FOR_EACH_BB_FN (ebb_start, cfun)
(*num_insns) += (common_sched_info->estimate_number_of_insns
(BASIC_BLOCK_FOR_FN (cfun, block)));
- return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
- || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
+ return ((*num_bbs > param_max_sched_region_blocks)
+ || (*num_insns > param_max_sched_region_insns));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
- extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+ extend_regions_p = param_max_sched_extend_regions_iters > 0;
if (extend_regions_p)
{
degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
The algorithm in the DFS traversal may not mark B & D as part
of the loop (i.e. they will not have max_hdr set to A).
- We know they can not be loop latches (else they would have
+ We know they cannot be loop latches (else they would have
had max_hdr set since they'd have a backedge to a dominator
block). So we don't need them on the initial queue.
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
- max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+ max_iter = param_max_sched_extend_regions_iters;
max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
|| (IS_SPECULATIVE_INSN (next)
&& ((recog_memoized (next) >= 0
&& min_insn_conflict_delay (curr_state, next, next)
- > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ > param_max_sched_insn_conflict_delay)
|| IS_SPECULATION_CHECK_P (next)
|| !check_live (next, INSN_BB (next))
|| (not_ex_free = !is_exception_free (next, INSN_BB (next),
sets_likely_spilled (rtx pat)
{
bool ret = false;
- note_stores (pat, sets_likely_spilled_1, &ret);
+ note_pattern_stores (pat, sets_likely_spilled_1, &ret);
return ret;
}
while (insn != head && DEBUG_INSN_P (insn));
}
+ /* Selective scheduling handles control dependencies by itself, and
+ CANT_MOVE flags ensure that other insns will be kept in place. */
+ if (sel_sched_p ())
+ return;
+
/* Make sure these insns are scheduled last in their block. */
insn = last;
if (insn != 0)
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
-static struct deps_desc *bb_deps;
+static class deps_desc *bb_deps;
static void
concat_insn_mem_list (rtx_insn_list *copy_insns,
/* Join PRED_DEPS to the SUCC_DEPS. */
void
-deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
+deps_join (class deps_desc *succ_deps, class deps_desc *pred_deps)
{
unsigned reg;
reg_set_iterator rsi;
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (int bb, struct deps_desc *pred_deps)
+propagate_deps (int bb, class deps_desc *pred_deps)
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
edge_iterator ei;
compute_block_dependences (int bb)
{
rtx_insn *head, *tail;
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
sched_analyze (&tmp_deps, head, tail);
- /* Selective scheduling handles control dependencies by itself. */
- if (!sel_sched_p ())
- add_branch_dependences (head, tail);
+ add_branch_dependences (head, tail);
if (current_nr_blocks > 1)
propagate_deps (bb, &tmp_deps);
f = find_fallthru_edge (last_bb->succs);
if (f
&& (!f->probability.initialized_p ()
- || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE >=
- PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
+ || (f->probability.to_reg_br_prob_base () * 100
+ / REG_BR_PROB_BASE
+ >= param_sched_state_edge_prob_cutoff)))
{
memcpy (bb_state[f->dest->index], curr_state,
dfa_state_size);
void
sched_rgn_init (bool single_blocks_p)
{
- min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+ min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
/ 100);
nr_inter = 0;
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ bb_deps = XNEWVEC (class deps_desc, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb, false);
haifa_sched_init ();
sched_rgn_init (reload_completed);
-  bitmap_initialize (&not_in_df, 0);
-  bitmap_clear (&not_in_df);
+  bitmap_initialize (&not_in_df, &bitmap_default_obstack);
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
/* Clean up. */
sched_rgn_finish ();
-  bitmap_clear (&not_in_df);
+  bitmap_release (&not_in_df);
haifa_sched_finish ();
}
{
return new pass_sched_fusion (ctxt);
}
+
+#if __GNUC__ >= 10
+# pragma GCC diagnostic pop
+#endif