+2019-08-01 Eric Botcazou <ebotcazou@adacore.com>
+
+ * cgraph.h (cgraph_edge::maybe_hot_p): Tweak comment.
+ * cgraph.c (cgraph_edge::maybe_hot_p): Likewise. Remove useless test.
+ * predict.c (maybe_hot_count_p): Likewise.
+ (maybe_hot_bb_p): Tweak comment.
+ (maybe_hot_edge_p): Likewise.
+ (probably_never_executed): Likewise. Minor tweak.
+ (probably_never_executed_bb_p): Likewise.
+ (unlikely_executed_edge_p): Likewise.
+ (probably_never_executed_edge_p): Likewise.
+ (optimize_function_for_size_p): Likewise.
+ (optimize_function_for_speed_p): Likewise.
+ (function_optimization_type): Likewise.
+ (optimize_bb_for_size_p): Likewise.
+ (optimize_bb_for_speed_p): Likewise.
+ (bb_optimization_type): Likewise.
+ (optimize_edge_for_size_p): Likewise.
+ (optimize_edge_for_speed_p): Likewise.
+ (optimize_insn_for_size_p): Likewise.
+ (optimize_insn_for_speed_p): Likewise.
+ (optimize_loop_for_size_p): Likewise.
+ (optimize_loop_for_speed_p): Likewise.
+ (optimize_loop_nest_for_speed_p): Likewise.
+ (optimize_loop_nest_for_size_p): Likewise.
+ (predictable_edge_p): Likewise.
+ (handle_missing_profiles): Minor tweak.
+
2019-08-01 Michael Meissner <meissner@linux.ibm.com>

 * config/rs6000/predicates.md (pcrel_external_address): Update
min_count = min;
}
-/* Return TRUE if frequency FREQ is considered to be hot. */
+/* Return TRUE if COUNT is considered to be hot in function FUN. */
bool
maybe_hot_count_p (struct function *fun, profile_count count)
if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
&& count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
return false;
- if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
- return false;
if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
< ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
return false;
return (count.to_gcov_type () >= get_hot_bb_threshold ());
}
-/* Return true in case BB can be CPU intensive and should be optimized
- for maximal performance. */
+/* Return true if basic block BB of function FUN can be CPU intensive
+ and should thus be optimized for maximum performance. */
bool
maybe_hot_bb_p (struct function *fun, const_basic_block bb)
return maybe_hot_count_p (fun, bb->count);
}
-/* Return true in case BB can be CPU intensive and should be optimized
- for maximal performance. */
+/* Return true if edge E can be CPU intensive and should thus be optimized
+ for maximum performance. */
bool
maybe_hot_edge_p (edge e)
return maybe_hot_count_p (cfun, e->count ());
}
-/* Return true if profile COUNT and FREQUENCY, or function FUN static
- node frequency reflects never being executed. */
+/* Return true if COUNT is considered to be never executed in function FUN
+ or if function FUN is considered so in the static profile. */
static bool
-probably_never_executed (struct function *fun,
- profile_count count)
+probably_never_executed (struct function *fun, profile_count count)
{
gcc_checking_assert (fun);
if (count.ipa () == profile_count::zero ())
desirable. */
if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
{
- int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
- if (count.apply_scale (unlikely_count_fraction, 1) >= profile_info->runs)
+ const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+ if (count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
return false;
return true;
}
return false;
}
-
-/* Return true in case BB is probably never executed. */
+/* Return true if basic block BB of function FUN is probably never executed. */
bool
probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
return probably_never_executed (fun, bb->count);
}
-
-/* Return true if E is unlikely executed for obvious reasons. */
+/* Return true if edge E is unlikely to be executed for obvious reasons. */
static bool
unlikely_executed_edge_p (edge e)
|| (e->flags & (EDGE_EH | EDGE_FAKE));
}
-/* Return true in case edge E is probably never executed. */
+/* Return true if edge E of function FUN is probably never executed. */
bool
probably_never_executed_edge_p (struct function *fun, edge e)
return probably_never_executed (fun, e->count ());
}
-/* Return true when current function should always be optimized for size. */
+/* Return true if function FUN should always be optimized for size. */
bool
optimize_function_for_size_p (struct function *fun)
return n && n->optimize_for_size_p ();
}
-/* Return true when current function should always be optimized for speed. */
+/* Return true if function FUN should always be optimized for speed. */
bool
optimize_function_for_speed_p (struct function *fun)
return !optimize_function_for_size_p (fun);
}
-/* Return the optimization type that should be used for the function FUN. */
+/* Return the optimization type that should be used for function FUN. */
optimization_type
function_optimization_type (struct function *fun)
: OPTIMIZE_FOR_SIZE);
}
-/* Return TRUE when BB should be optimized for size. */
+/* Return TRUE if basic block BB should be optimized for size. */
bool
optimize_bb_for_size_p (const_basic_block bb)
|| (bb && !maybe_hot_bb_p (cfun, bb)));
}
-/* Return TRUE when BB should be optimized for speed. */
+/* Return TRUE if basic block BB should be optimized for speed. */
bool
optimize_bb_for_speed_p (const_basic_block bb)
return !optimize_bb_for_size_p (bb);
}
-/* Return the optimization type that should be used for block BB. */
+/* Return the optimization type that should be used for basic block BB. */
optimization_type
bb_optimization_type (const_basic_block bb)
: OPTIMIZE_FOR_SIZE);
}
-/* Return TRUE when BB should be optimized for size. */
+/* Return TRUE if edge E should be optimized for size. */
bool
optimize_edge_for_size_p (edge e)
return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
}
-/* Return TRUE when BB should be optimized for speed. */
+/* Return TRUE if edge E should be optimized for speed. */
bool
optimize_edge_for_speed_p (edge e)
return !optimize_edge_for_size_p (e);
}
-/* Return TRUE when BB should be optimized for size. */
+/* Return TRUE if the current instruction should be optimized for size. */
bool
optimize_insn_for_size_p (void)
return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
}
-/* Return TRUE when BB should be optimized for speed. */
+/* Return TRUE if the current instruction should be optimized for speed. */
bool
optimize_insn_for_speed_p (void)
return !optimize_insn_for_size_p ();
}
-/* Return TRUE when LOOP should be optimized for size. */
+/* Return TRUE if LOOP should be optimized for size. */
bool
optimize_loop_for_size_p (class loop *loop)
return optimize_bb_for_size_p (loop->header);
}
-/* Return TRUE when LOOP should be optimized for speed. */
+/* Return TRUE if LOOP should be optimized for speed. */
bool
optimize_loop_for_speed_p (class loop *loop)
return optimize_bb_for_speed_p (loop->header);
}
-/* Return TRUE when LOOP nest should be optimized for speed. */
+/* Return TRUE if the loop nest rooted at LOOP should be optimized for speed. */
bool
optimize_loop_nest_for_speed_p (class loop *loop)
return false;
}
-/* Return TRUE when LOOP nest should be optimized for size. */
+/* Return TRUE if the loop nest rooted at LOOP should be optimized for size. */
bool
optimize_loop_nest_for_size_p (class loop *loop)
return !optimize_loop_nest_for_speed_p (loop);
}
-/* Return true when edge E is likely to be well predictable by branch
+/* Return true if edge E is likely to be well predictable by branch
predictor. */
bool
void
handle_missing_profiles (void)
{
+ const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
struct cgraph_node *node;
- int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
auto_vec<struct cgraph_node *, 64> worklist;
/* See if 0 count function has non-0 count callers. In this case we
if (call_count > 0
&& fn && fn->cfg
- && (call_count.apply_scale (unlikely_count_fraction, 1)
- >= profile_info->runs))
+ && call_count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
{
drop_profile (node, call_count);
worklist.safe_push (node);