+ inline_summary (e->callee)->estimated_self_stack_size;
if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
e->callee->global.inlined_to->global.estimated_stack_size = peak;
+ cgraph_propagate_frequency (e->callee);
/* Recursively clone all bodies. */
for (e = e->callee->callees; e; e = e->next_callee)
struct cgraph_node *to = NULL, *what;
struct cgraph_edge *curr = e;
int freq;
- bool duplicate = false;
- int orig_size = e->callee->global.size;
gcc_assert (e->inline_failed);
e->inline_failed = CIF_OK;
DECL_POSSIBLY_INLINED (e->callee->decl) = true;
e->callee->global.inlined = true;
- if (e->callee->callers->next_caller
- || !cgraph_can_remove_if_no_direct_calls_p (e->callee)
- || e->callee->same_comdat_group)
- duplicate = true;
cgraph_clone_inlined_nodes (e, true, update_original);
what = e->callee;
gcc_assert (what->global.inlined_to == to);
if (new_size > old_size)
overall_size += new_size - old_size;
- if (!duplicate)
- overall_size -= orig_size;
ncalls_inlined++;
if (flag_indirect_inlining)
of the function or function body size. */
static int
-cgraph_edge_badness (struct cgraph_edge *edge)
+cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
gcov_type badness;
int growth =
- cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
+ - edge->caller->global.size);
- growth -= edge->caller->global.size;
+ if (dump)
+ {
+ fprintf (dump_file, " Badness calculcation for %s -> %s\n",
+ cgraph_node_name (edge->caller),
+ cgraph_node_name (edge->callee));
+ fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
+ growth,
+ edge->callee->global.time,
+ inline_summary (edge->callee)->time_inlining_benefit,
+ edge->callee->global.size,
+ inline_summary (edge->callee)->size_inlining_benefit);
+ }
/* Always prefer inlining saving code size. */
if (growth <= 0)
- badness = INT_MIN - growth;
+ {
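+ /* Edges whose inlining shrinks code get keys near INT_MIN,
+ ahead of all growth-causing candidates. */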
+ badness = INT_MIN - growth;
+ if (dump)
+ fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
+ growth);
+ }
/* When profiling is available, base priorities on -(#calls / growth).
So we optimize for overall number of "executed" inlined calls. */
else if (max_count)
- badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
- * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+ {
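+ /* Roughly: badness = INT_MIN * (count / max_count) *
+ (benefit + 1) / (max_benefit + 1) / growth; the hotter the
+ call and the bigger its time benefit, the more negative
+ (i.e. better) the key gets. */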
+ badness =
+ ((int)
+ ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
+ (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+ if (dump)
+ {
+ fprintf (dump_file,
+ " %i (relative %f): profile info. Relative count %f"
+ " * Relative benefit %f\n",
+ (int) badness, (double) badness / INT_MIN,
+ (double) edge->count / max_count,
+ (double) (inline_summary (edge->callee)->
+ time_inlining_benefit + 1) / (max_benefit + 1));
+ }
+ }
/* When function local profile is available, base priorities on
growth / frequency, so we optimize for overall frequency of inlined
else if (flag_guess_branch_prob)
{
int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
+ int benefitperc;
+ int growth_for_all;
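+ /* The key is growth scaled by 10000 and divided by the call
+ frequency times the estimated percentage of callee time
+ saved by inlining, so frequent calls with a big relative
+ benefit sort first; the callee's overall estimated growth
+ is added on top as a penalty. */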
badness = growth * 10000;
- div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
- / (edge->callee->global.time + 1) + 1, 100);
+ benefitperc =
+ MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
+ (edge->callee->global.time + 1) + 1, 100);
+ div *= benefitperc;
/* Decrease badness if call is nested. */
div = 1;
if (badness > 0)
badness /= div;
- badness += cgraph_estimate_growth (edge->callee);
+ growth_for_all = cgraph_estimate_growth (edge->callee);
+ badness += growth_for_all;
if (badness > INT_MAX)
- badness = INT_MAX;
+ badness = INT_MAX;
+ if (dump)
+ {
+ fprintf (dump_file,
+ " %i: guessed profile. frequency %i, overall growth %i,"
+ " benefit %i%%, divisor %i\n",
+ (int) badness, edge->frequency, growth_for_all, benefitperc, div);
+ }
}
/* When function local profile is not available or it does not give
useful information (i.e. frequency is zero), base the cost on
if (badness > 0)
badness >>= nest;
else
- {
+ {
badness <<= nest;
- }
+ }
+ if (dump)
+ fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
+ nest);
}
+
+ /* Ensure that we did not overflow in all the fixed-point math above. */
+ gcc_assert (badness >= INT_MIN);
+ gcc_assert (badness <= INT_MAX - 1);
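+ /* The upper bound is INT_MAX - 1 rather than INT_MAX so that
+ the "badness + 1" adjustment for recursive inlining below
+ cannot overflow. */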
/* Make recursive inlining happen always after other inlining is done. */
if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
return badness + 1;
for (edge = node->callers; edge; edge = edge->next_caller)
if (edge->inline_failed)
{
- int badness = cgraph_edge_badness (edge);
+ int badness = cgraph_edge_badness (edge, false);
if (edge->aux)
{
fibnode_t n = (fibnode_t) edge->aux;
continue;
/* fibheap_replace_key only decreases the keys. */
- if (fibheap_replace_key (heap, n, badness))
- continue;
+ if (badness < n->key)
+ {
+ fibheap_replace_key (heap, n, badness);
+ gcc_assert (n->key == badness);
+ continue;
+ }
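+ /* A fibonacci heap can only decrease keys cheaply, so when
+ the badness went up, delete the node here and re-insert
+ the edge with its new key below. */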
fibheap_delete_node (heap, (fibnode_t) edge->aux);
}
edge->aux = fibheap_insert (heap, badness, edge);
struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
gcc_assert (!edge->aux);
- edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
}
}
if (edge->inline_failed)
{
gcc_assert (!edge->aux);
- edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
+ edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
}
}
min_size = overall_size;
while (overall_size <= max_size
- && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
+ && !fibheap_empty (heap))
{
int old_size = overall_size;
- struct cgraph_node *where;
- int growth =
- cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ struct cgraph_node *where, *callee;
+ int badness = fibheap_min_key (heap);
+ int growth;
cgraph_inline_failed_t not_good = CIF_OK;
- growth -= edge->caller->global.size;
+ edge = (struct cgraph_edge *) fibheap_extract_min (heap);
+ gcc_assert (edge->aux);
+ edge->aux = NULL;
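+ /* Edges inlined as a consequence of an earlier decision may
+ still sit in the heap with stale keys; just drop them. */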
+ if (!edge->inline_failed)
+ continue;
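+ /* With checking enabled, verify that the cached heap key
+ still matches the edge's current badness; a mismatch
+ indicates a missed key update. */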
+#ifdef ENABLE_CHECKING
+ gcc_assert (cgraph_edge_badness (edge, false) == badness);
+#endif
+ callee = edge->callee;
+
+ growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
+ - edge->caller->global.size);
if (dump_file)
{
" Estimated growth after inlined into all callees is %+i insns.\n"
" Estimated badness is %i, frequency %.2f.\n",
cgraph_node_name (edge->caller),
- gimple_filename ((const_gimple) edge->call_stmt),
- gimple_lineno ((const_gimple) edge->call_stmt),
+ flag_wpa ? "unknown"
+ : gimple_filename ((const_gimple) edge->call_stmt),
+ flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
cgraph_estimate_growth (edge->callee),
- cgraph_edge_badness (edge),
+ badness,
edge->frequency / (double)CGRAPH_FREQ_BASE);
if (edge->count)
fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
+ if (dump_flags & TDF_DETAILS)
+ cgraph_edge_badness (edge, true);
}
- gcc_assert (edge->aux);
- edge->aux = NULL;
- if (!edge->inline_failed)
- continue;
/* When profile info is not ready we don't weight in any way the
position of the call in the procedure itself. This means if call of
called by function we inlined (since number of it inlinable callers
might change). */
update_caller_keys (heap, where, updated_nodes);
+
+ /* We removed one call of the function we just inlined. If offline
+ copy is still needed, be sure to update the keys. */
+ if (callee != where && !callee->global.inlined_to)
+ update_caller_keys (heap, callee, updated_nodes);
bitmap_clear (updated_nodes);
if (dump_file)
fprintf (dump_file, "New minimal size reached: %i\n", min_size);
}
}
- while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
+ while (!fibheap_empty (heap))
{
+ int badness = fibheap_min_key (heap);
+
+ edge = (struct cgraph_edge *) fibheap_extract_min (heap);
gcc_assert (edge->aux);
edge->aux = NULL;
+ if (!edge->inline_failed)
+ continue;
+#ifdef ENABLE_CHECKING
+ gcc_assert (cgraph_edge_badness (edge, false) == badness);
+#endif
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "\nSkipping %s with %i size\n",
+ cgraph_node_name (edge->callee),
+ edge->callee->global.size);
+ fprintf (dump_file,
+ " called by %s in %s:%i\n"
+ " Estimated growth after inlined into all callees is %+i insns.\n"
+ " Estimated badness is %i, frequency %.2f.\n",
+ cgraph_node_name (edge->caller),
+ flag_wpa ? "unknown"
+ : gimple_filename ((const_gimple) edge->call_stmt),
+ flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
+ cgraph_estimate_growth (edge->callee),
+ badness,
+ edge->frequency / (double)CGRAPH_FREQ_BASE);
+ if (edge->count)
+ fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
+ if (dump_flags & TDF_DETAILS)
+ cgraph_edge_badness (edge, true);
+ }
if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
&& !cgraph_recursive_inlining_p (edge->caller, edge->callee,
&edge->inline_failed))
cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
if (in_lto_p && flag_indirect_inlining)
ipa_update_after_lto_read ();
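+ /* Allocate the ipa-prop structures used by indirect
+ inlining; they are freed again once inlining is done. */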
+ if (flag_indirect_inlining)
+ ipa_create_all_structures_for_iinln ();
max_count = 0;
max_benefit = 0;
if (cgraph_check_inline_limits (node->callers->caller, node,
&reason, false))
{
+ struct cgraph_node *caller = node->callers->caller;
cgraph_mark_inline (node->callers);
if (dump_file)
fprintf (dump_file,
" Inlined into %s which now has %i size"
" for a net change of %+i size.\n",
- cgraph_node_name (node->callers->caller),
- node->callers->caller->global.size,
+ cgraph_node_name (caller),
+ caller->global.size,
overall_size - old_size);
}
else
/* Free ipa-prop structures if they are no longer needed. */
if (flag_indirect_inlining)
- free_all_ipa_structures_after_iinln ();
+ ipa_free_all_structures_after_iinln ();
if (dump_file)
fprintf (dump_file,
}
else
{
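+ /* Functions carrying the flatten attribute have their whole
+ call tree inlined by cgraph_flatten and the result is
+ applied right away via optimize_inline_calls. */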
+ if (lookup_attribute ("flatten",
+ DECL_ATTRIBUTES (node->decl)) != NULL)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "Flattening %s\n", cgraph_node_name (node));
+ cgraph_flatten (node);
+ timevar_push (TV_INTEGRATION);
+ todo |= optimize_inline_calls (current_function_decl);
+ timevar_pop (TV_INTEGRATION);
+ }
/* We iterate incremental inlining to get trivial cases of indirect
inlining. */
while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
active, we don't need to write them twice. */
static void
-inline_write_summary (cgraph_node_set set)
+inline_write_summary (cgraph_node_set set,
+ varpool_node_set vset ATTRIBUTE_UNUSED)
{
if (flag_indirect_inlining && !flag_ipa_cp)
ipa_prop_write_jump_functions (set);
inline_generate_summary, /* generate_summary */
inline_write_summary, /* write_summary */
inline_read_summary, /* read_summary */
- NULL, /* function_read_summary */
- lto_ipa_fixup_call_notes, /* stmt_fixup */
+ NULL, /* write_optimization_summary */
+ NULL, /* read_optimization_summary */
+ NULL, /* stmt_fixup */
0, /* TODOs */
inline_transform, /* function_transform */
NULL, /* variable_transform */