/* Natural loop analysis code for GNU compiler.
- Copyright (C) 2002-2016 Free Software Foundation, Inc.
+ Copyright (C) 2002-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "rtl.h"
#include "tree.h"
#include "predict.h"
+#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgloop.h"
#include "explow.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"
+#include "sreal.h"
+#include "regs.h"
+#include "function-abi.h"
struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
/* Checks whether BB is executed exactly once in each LOOP iteration. */
bool
-just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
+just_once_each_iteration_p (const class loop *loop, const_basic_block bb)
{
/* It must be executed at least once each iteration. */
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
unsigned depth;
struct graph *g;
int num = number_of_loops (cfun);
- struct loop *cloop;
+ class loop *cloop;
bool irred_loop_found = false;
int i;
/* Counts number of insns inside LOOP. */
int
-num_loop_insns (const struct loop *loop)
+num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
unsigned i, ninsns = 0;
/* Counts number of insns executed on average per iteration LOOP. */
int
-average_num_loop_insns (const struct loop *loop)
+average_num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
- unsigned i, binsns, ninsns, ratio;
+ unsigned i, binsns;
+ sreal ninsns;
rtx_insn *insn;
ninsns = 0;
if (NONDEBUG_INSN_P (insn))
binsns++;
- ratio = loop->header->frequency == 0
- ? BB_FREQ_MAX
- : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
- ninsns += binsns * ratio;
+ ninsns += (sreal)binsns * bb->count.to_sreal_scale (loop->header->count);
+ /* Avoid overflows. */
+ if (ninsns > 1000000)
+ return 100000;
}
free (bbs);
- ninsns /= BB_FREQ_MAX;
- if (!ninsns)
- ninsns = 1; /* To avoid division by zero. */
+ int64_t ret = ninsns.to_int ();
+ if (!ret)
+ ret = 1; /* To avoid division by zero. */
- return ninsns;
+ return ret;
}
/* Returns expected number of iterations of LOOP, according to
- measured or guessed profile. No bounding is done on the
- value. */
+ measured or guessed profile.
+
+ This function attempts to return a "sane" value even if the profile
+ information is not good enough to derive something.
+ If BY_PROFILE_ONLY is set, this logic is bypassed and the function
+ returns -1 in those scenarios. */
gcov_type
-expected_loop_iterations_unbounded (struct loop *loop)
+expected_loop_iterations_unbounded (const class loop *loop,
+ bool *read_profile_p,
+ bool by_profile_only)
{
edge e;
edge_iterator ei;
- gcov_type expected;
+ gcov_type expected = -1;
+ if (read_profile_p)
+ *read_profile_p = false;
- /* Average loop rolls about 3 times. If we have no profile at all, it is
- best we can do. */
+ /* If we have no profile at all, use AVG_LOOP_NITER. */
if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
- expected = 3;
- else if (loop->latch->count || loop->header->count)
{
- gcov_type count_in, count_latch;
-
- count_in = 0;
- count_latch = 0;
-
- FOR_EACH_EDGE (e, ei, loop->header->preds)
- if (e->src == loop->latch)
- count_latch = e->count;
- else
- count_in += e->count;
-
- if (count_in == 0)
- expected = count_latch * 2;
- else
- expected = (count_latch + count_in - 1) / count_in;
+ if (by_profile_only)
+ return -1;
+ expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
}
- else
+ else if (loop->latch && (loop->latch->count.initialized_p ()
+ || loop->header->count.initialized_p ()))
{
- int freq_in, freq_latch;
-
- freq_in = 0;
- freq_latch = 0;
+ profile_count count_in = profile_count::zero (),
+ count_latch = profile_count::zero ();
FOR_EACH_EDGE (e, ei, loop->header->preds)
if (e->src == loop->latch)
- freq_latch = EDGE_FREQUENCY (e);
+ count_latch = e->count ();
else
- freq_in += EDGE_FREQUENCY (e);
+ count_in += e->count ();
- if (freq_in == 0)
+ if (!count_latch.initialized_p ())
{
- /* If we have no profile at all, expect 3 iterations. */
- if (!freq_latch)
- expected = 3;
- else
- expected = freq_latch * 2;
+ if (by_profile_only)
+ return -1;
+ expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+ }
+ else if (!count_in.nonzero_p ())
+ {
+ if (by_profile_only)
+ return -1;
+ expected = count_latch.to_gcov_type () * 2;
}
else
- expected = (freq_latch + freq_in - 1) / freq_in;
+ {
+ expected = (count_latch.to_gcov_type () + count_in.to_gcov_type ()
+ - 1) / count_in.to_gcov_type ();
+ if (read_profile_p
+ && count_latch.reliable_p () && count_in.reliable_p ())
+ *read_profile_p = true;
+ }
+ }
+ else
+ {
+ if (by_profile_only)
+ return -1;
+ expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
}
- HOST_WIDE_INT max = get_max_loop_iterations_int (loop);
- if (max != -1 && max < expected)
- return max;
+ if (!by_profile_only)
+ {
+ HOST_WIDE_INT max = get_max_loop_iterations_int (loop);
+ if (max != -1 && max < expected)
+ return max;
+ }
+
return expected;
}
by REG_BR_PROB_BASE. */
unsigned
-expected_loop_iterations (struct loop *loop)
+expected_loop_iterations (class loop *loop)
{
gcov_type expected = expected_loop_iterations_unbounded (loop);
return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
/* Returns the maximum level of nesting of subloops of LOOP. */
unsigned
-get_loop_level (const struct loop *loop)
+get_loop_level (const class loop *loop)
{
- const struct loop *ploop;
+ const class loop *ploop;
unsigned mx = 0, l;
for (ploop = loop->inner; ploop; ploop = ploop->next)
&& !fixed_regs[i])
{
target_avail_regs++;
- if (call_used_regs[i])
+ /* ??? This is only a rough heuristic. It doesn't cope well
+ with alternative ABIs, but that's an optimization rather than a
+ correctness issue. */
+ if (default_function_abi.clobbers_full_reg_p (i))
target_clobbered_regs++;
}
to noreturn call. */
edge
-single_likely_exit (struct loop *loop)
+single_likely_exit (class loop *loop)
{
edge found = single_exit (loop);
vec<edge> exits;
exits = get_loop_exit_edges (loop);
FOR_EACH_VEC_ELT (exits, i, ex)
{
- if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
- continue;
- /* The constant of 5 is set in a way so noreturn calls are
- ruled out by this test. The static branch prediction algorithm
- will not assign such a low probability to conditionals for usual
- reasons. */
- if (profile_status_for_fn (cfun) != PROFILE_ABSENT
- && ex->probability < 5 && !ex->count)
+ if (probably_never_executed_edge_p (cfun, ex)
+ /* We want to rule out paths to noreturns but not low probabilities
+ resulting from adjustments or combining.
+ FIXME: once we have better quality tracking, make this more
+ robust. */
+ || ex->probability <= profile_probability::very_unlikely ())
continue;
if (!found)
found = ex;
header != latch, latch is the 1-st block. */
vec<basic_block>
-get_loop_hot_path (const struct loop *loop)
+get_loop_hot_path (const class loop *loop)
{
basic_block bb = loop->header;
vec<basic_block> path = vNULL;