+2018-06-08 Martin Liska <mliska@suse.cz>
+
+ * config/i386/i386.c (ix86_can_inline_p): Use get_create instead
+ of get.
+ * hsa-common.c (hsa_summary_t::link_functions): Likewise.
+ (hsa_register_kernel): Likewise.
+ * hsa-common.h (hsa_gpu_implementation_p): Likewise.
+ * hsa-gen.c (hsa_get_host_function): Likewise.
+ (get_brig_function_name): Likewise.
+ (generate_hsa): Likewise.
+ (pass_gen_hsail::execute): Likewise.
+ * ipa-cp.c (ipcp_cloning_candidate_p): Likewise.
+ (devirtualization_time_bonus): Likewise.
+ (ipcp_propagate_stage): Likewise.
+ * ipa-fnsummary.c (redirect_to_unreachable): Likewise.
+ (edge_set_predicate): Likewise.
+ (evaluate_conditions_for_known_args): Likewise.
+ (evaluate_properties_for_edge): Likewise.
+ (ipa_fn_summary::reset): Likewise.
+ (ipa_fn_summary_t::duplicate): Likewise.
+ (dump_ipa_call_summary): Likewise.
+ (ipa_dump_fn_summary): Likewise.
+ (analyze_function_body): Likewise.
+ (compute_fn_summary): Likewise.
+ (estimate_edge_devirt_benefit): Likewise.
+ (estimate_edge_size_and_time): Likewise.
+ (estimate_calls_size_and_time): Likewise.
+ (estimate_node_size_and_time): Likewise.
+ (inline_update_callee_summaries): Likewise.
+ (remap_edge_change_prob): Likewise.
+ (remap_edge_summaries): Likewise.
+ (ipa_merge_fn_summary_after_inlining): Likewise.
+ (ipa_update_overall_fn_summary): Likewise.
+ (read_ipa_call_summary): Likewise.
+ (inline_read_section): Likewise.
+ (write_ipa_call_summary): Likewise.
+ (ipa_fn_summary_write): Likewise.
+ (ipa_free_fn_summary): Likewise.
+ * ipa-hsa.c (process_hsa_functions): Likewise.
+ (ipa_hsa_write_summary): Likewise.
+ (ipa_hsa_read_section): Likewise.
+ * ipa-icf.c (sem_function::merge): Likewise.
+ * ipa-inline-analysis.c (simple_edge_hints): Likewise.
+ (do_estimate_edge_time): Likewise.
+ (estimate_size_after_inlining): Likewise.
+ (estimate_growth): Likewise.
+ (growth_likely_positive): Likewise.
+ * ipa-inline-transform.c (clone_inlined_nodes): Likewise.
+ (inline_call): Likewise.
+ * ipa-inline.c (caller_growth_limits): Likewise.
+ (can_inline_edge_p): Likewise.
+ (can_inline_edge_by_limits_p): Likewise.
+ (compute_uninlined_call_time): Likewise.
+ (compute_inlined_call_time): Likewise.
+ (want_inline_small_function_p): Likewise.
+ (edge_badness): Likewise.
+ (update_caller_keys): Likewise.
+ (update_callee_keys): Likewise.
+ (recursive_inlining): Likewise.
+ (inline_small_functions): Likewise.
+ (inline_to_all_callers_1): Likewise.
+ (dump_overall_stats): Likewise.
+ (early_inline_small_functions): Likewise.
+ (early_inliner): Likewise.
+ * ipa-inline.h (estimate_edge_growth): Likewise.
+ * ipa-profile.c (ipa_propagate_frequency_1): Likewise.
+ * ipa-prop.c (ipa_make_edge_direct_to_target): Likewise.
+ * ipa-prop.h (IPA_NODE_REF): Likewise.
+ (IPA_EDGE_REF): Likewise.
+ * ipa-pure-const.c (malloc_candidate_p): Likewise.
+ (propagate_malloc): Likewise.
+ * ipa-split.c (execute_split_functions): Likewise.
+ * symbol-summary.h (get): Rename to ...
+ (get_create): ... this.
+ * tree-sra.c (ipa_sra_preliminary_function_checks): Likewise.
+
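The rename does not change behaviour: the accessor still creates the summary lazily on first use, via hash_map::get_or_insert as seen in the symbol-summary.h hunk below. A minimal standalone sketch of that create-on-first-access pattern, using invented names (toy_summary, toy_summary_map) rather than the real function_summary template:

#include <cstdio>
#include <map>

/* Invented stand-in for an IPA summary record.  */
struct toy_summary
{
  int self_size = 0;
};

/* Minimal sketch of a lazily-creating summary map.  get_create returns
   the record for UID, allocating it on the first lookup, which is the
   guarantee function_summary::get_create gives via get_or_insert.  */
struct toy_summary_map
{
  toy_summary *get_create (int uid)
  {
    /* operator[] value-initializes the slot to nullptr if UID is new.  */
    toy_summary *&slot = m_map[uid];
    if (slot == nullptr)
      slot = new toy_summary ();
    return slot;
  }

  std::map<int, toy_summary *> m_map;
};

int
main ()
{
  toy_summary_map summaries;
  summaries.get_create (42)->self_size = 8;      /* first access creates it */
  std::printf ("%d\n", summaries.get_create (42)->self_size);  /* prints 8 */
  return 0;
}

Callers such as ipa_fn_summaries->get_create (node) rely on exactly this guarantee: the returned pointer is always valid, even for nodes whose summary has not been computed yet.
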
2018-06-08 Martin Liska <mliska@suse.cz>
* symbol-summary.h (release): Move definition out of class
for multi-versioning call optimization, so beware of
ipa_fn_summaries not available. */
&& (! ipa_fn_summaries
- || ipa_fn_summaries->get
+ || ipa_fn_summaries->get_create
(cgraph_node::get (callee))->fp_expressions))
ret = false;
hsa_summary_t::link_functions (cgraph_node *gpu, cgraph_node *host,
hsa_function_kind kind, bool gridified_kernel_p)
{
- hsa_function_summary *gpu_summary = get (gpu);
- hsa_function_summary *host_summary = get (host);
+ hsa_function_summary *gpu_summary = get_create (gpu);
+ hsa_function_summary *host_summary = get_create (host);
gpu_summary->m_kind = kind;
host_summary->m_kind = kind;
{
if (hsa_summaries == NULL)
hsa_summaries = new hsa_summary_t (symtab);
- hsa_function_summary *s = hsa_summaries->get (host);
+ hsa_function_summary *s = hsa_summaries->get_create (host);
s->m_kind = HSA_KERNEL;
}
if (hsa_summaries == NULL)
return false;
- hsa_function_summary *s = hsa_summaries->get (cgraph_node::get_create (decl));
+ hsa_function_summary *s
+ = hsa_summaries->get_create (cgraph_node::get_create (decl));
return s->m_gpu_implementation_p;
}
hsa_get_host_function (tree decl)
{
hsa_function_summary *s
- = hsa_summaries->get (cgraph_node::get_create (decl));
+ = hsa_summaries->get_create (cgraph_node::get_create (decl));
gcc_assert (s->m_kind != HSA_NONE);
gcc_assert (s->m_gpu_implementation_p);
{
tree d = decl;
- hsa_function_summary *s = hsa_summaries->get (cgraph_node::get_create (d));
+ hsa_function_summary *s
+ = hsa_summaries->get_create (cgraph_node::get_create (d));
if (s->m_kind != HSA_NONE
&& s->m_gpu_implementation_p
&& s->m_bound_function)
if (hsa_cfun->m_kern_p)
{
hsa_function_summary *s
- = hsa_summaries->get (cgraph_node::get (hsa_cfun->m_decl));
+ = hsa_summaries->get_create (cgraph_node::get (hsa_cfun->m_decl));
hsa_add_kern_decl_mapping (current_function_decl, hsa_cfun->m_name,
hsa_cfun->m_maximum_omp_data_size,
s->m_gridified_kernel_p);
unsigned int
pass_gen_hsail::execute (function *)
{
- hsa_function_summary *s
- = hsa_summaries->get (cgraph_node::get_create (current_function_decl));
+ cgraph_node *node = cgraph_node::get_create (current_function_decl);
+ hsa_function_summary *s = hsa_summaries->get_create (node);
expand_builtins ();
generate_hsa (s->m_kind == HSA_KERNEL);
init_caller_stats (&stats);
node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats, false);
- if (ipa_fn_summaries->get (node)->self_size < stats.n_calls)
+ if (ipa_fn_summaries->get_create (node)->self_size < stats.n_calls)
{
if (dump_file)
fprintf (dump_file, "Considering %s for cloning; code might shrink.\n",
callee = callee->function_symbol (&avail);
if (avail < AVAIL_AVAILABLE)
continue;
- isummary = ipa_fn_summaries->get (callee);
+ isummary = ipa_fn_summaries->get_create (callee);
if (!isummary->inlinable)
continue;
initialize_node_lattices (node);
}
if (node->definition && !node->alias)
- overall_size += ipa_fn_summaries->get (node)->self_size;
+ overall_size += ipa_fn_summaries->get_create (node)->self_size;
max_count = max_count.max (node->count.ipa ());
}
e->make_direct (target);
else
e->redirect_callee (target);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
e->inline_failed = CIF_UNREACHABLE;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
&& (!e->speculative || e->callee))
e = redirect_to_unreachable (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
if (predicate && *predicate != true)
{
if (!es->predicate)
{
clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
int i;
struct condition *c;
vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
- struct ipa_fn_summary *info = ipa_fn_summaries->get (callee);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (callee);
vec<tree> known_vals = vNULL;
vec<ipa_agg_jump_function_p> known_aggs = vNULL;
{
struct ipa_node_params *caller_parms_info, *callee_pi;
struct ipa_edge_args *args = IPA_EDGE_REF (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
int i, count = ipa_get_cs_argument_count (args);
if (e->caller->global.inlined_to)
vec_free (conds);
vec_free (size_time_table);
for (e = node->callees; e; e = e->next_callee)
- ipa_call_summaries->get (e)->reset ();
+ ipa_call_summaries->get_create (e)->reset ();
for (e = node->indirect_calls; e; e = e->next_callee)
- ipa_call_summaries->get (e)->reset ();
+ ipa_call_summaries->get_create (e)->reset ();
fp_expressions = false;
}
ipa_fn_summary *,
ipa_fn_summary *info)
{
- memcpy (info, ipa_fn_summaries->get (src), sizeof (ipa_fn_summary));
+ memcpy (info, ipa_fn_summaries->get_create (src), sizeof (ipa_fn_summary));
/* TODO: as an optimization, we may avoid copying conditions
that are known to be false or true. */
info->conds = vec_safe_copy (info->conds);
for (edge = dst->callees; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
if (!edge->inline_failed)
for (edge = dst->indirect_calls; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
gcc_checking_assert (edge->inline_failed);
struct cgraph_edge *edge;
for (edge = node->callees; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
int i;
? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
indent, "", es->loop_depth, edge->sreal_frequency ().to_double (),
es->call_stmt_size, es->call_stmt_time,
- (int) ipa_fn_summaries->get (callee)->size / ipa_fn_summary::size_scale,
- (int) ipa_fn_summaries->get (callee)->estimated_stack_size);
+ (int) (ipa_fn_summaries->get_create (callee)->size
+ / ipa_fn_summary::size_scale),
+ (int) ipa_fn_summaries->get_create (callee)->estimated_stack_size);
if (es->predicate)
{
}
if (!edge->inline_failed)
{
+ ipa_fn_summary *s = ipa_fn_summaries->get_create (callee);
fprintf (f, "%*sStack frame offset %i, callee self size %i,"
" callee size %i\n",
indent + 2, "",
- (int) ipa_fn_summaries->get (callee)->stack_frame_offset,
- (int) ipa_fn_summaries->get (callee)->estimated_self_stack_size,
- (int) ipa_fn_summaries->get (callee)->estimated_stack_size);
+ (int) s->stack_frame_offset,
+ (int) s->estimated_self_stack_size,
+ (int) s->estimated_stack_size);
dump_ipa_call_summary (f, indent + 2, callee, info);
}
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
{
if (node->definition)
{
- struct ipa_fn_summary *s = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *s = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int i;
fprintf (f, "IPA function summary for %s/%i", node->name (),
basic_block bb;
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
sreal freq;
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
predicate bb_predicate;
struct ipa_func_body_info fbi;
vec<predicate> nonconstant_names = vNULL;
&& !gimple_call_internal_p (stmt))
{
struct cgraph_edge *edge = node->get_edge (stmt);
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ ipa_call_summary *es = ipa_call_summaries->get_create (edge);
/* Special case: results of BUILT_IN_CONSTANT_P will be always
resolved as constant. We however don't want to optimize
}
}
}
- set_hint_predicate (&ipa_fn_summaries->get (node)->array_index, array_index);
+ set_hint_predicate (&ipa_fn_summaries->get_create (node)->array_index,
+ array_index);
free (order);
if (nonconstant_names.exists () && !early)
}
free (body);
}
- set_hint_predicate (&ipa_fn_summaries->get (node)->loop_iterations,
+ set_hint_predicate (&ipa_fn_summaries->get_create (node)->loop_iterations,
loop_iterations);
- set_hint_predicate (&ipa_fn_summaries->get (node)->loop_stride,
+ set_hint_predicate (&ipa_fn_summaries->get_create (node)->loop_stride,
loop_stride);
scev_finalize ();
}
e->aux = NULL;
}
}
- ipa_fn_summaries->get (node)->time = time;
- ipa_fn_summaries->get (node)->self_size = size;
+ ipa_fn_summaries->get_create (node)->time = time;
+ ipa_fn_summaries->get_create (node)->self_size = size;
nonconstant_names.release ();
ipa_release_body_info (&fbi);
if (opt_for_fn (node->decl, optimize))
if (!ipa_fn_summaries)
ipa_fn_summary_alloc ();
- info = ipa_fn_summaries->get (node);
+ info = ipa_fn_summaries->get_create (node);
info->reset (node);
/* Estimate the stack size for the function if we're optimizing. */
if (node->thunk.thunk_p)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (node->callees);
+ ipa_call_summary *es = ipa_call_summaries->get_create (node->callees);
predicate t = true;
node->local.can_change_signature = false;
callee = callee->function_symbol (&avail);
if (avail < AVAIL_AVAILABLE)
return false;
- isummary = ipa_fn_summaries->get (callee);
+ isummary = ipa_fn_summaries->get_create (callee);
return isummary->inlinable;
}
vec<ipa_agg_jump_function_p> known_aggs,
ipa_hints *hints)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
int call_size = es->call_stmt_size;
int call_time = es->call_stmt_time;
int cur_size;
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
/* Do not care about zero sized builtins. */
if (e->inline_failed && !es->call_stmt_size)
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
estimate_edge_size_and_time (e, size,
vec<inline_param_summary>
inline_param_summary)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int size = 0;
sreal time = 0;
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
struct cgraph_edge *e;
- struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (node);
- struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (node->callers->caller);
+ ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (node);
+ ipa_fn_summary *caller_info
+ = ipa_fn_summaries->get_create (node->callers->caller);
HOST_WIDE_INT peak;
callee_info->stack_frame_offset
+ caller_info->estimated_self_stack_size;
peak = callee_info->stack_frame_offset
+ callee_info->estimated_self_stack_size;
- if (ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
- ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
+
+ ipa_fn_summary *s = ipa_fn_summaries->get_create (node->global.inlined_to);
+ if (s->estimated_stack_size < peak)
+ s->estimated_stack_size = peak;
ipa_propagate_frequency (node);
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
inline_update_callee_summaries (e->callee, depth);
- ipa_call_summaries->get (e)->loop_depth += depth;
+ ipa_call_summaries->get_create (e)->loop_depth += depth;
}
for (e = node->indirect_calls; e; e = e->next_callee)
- ipa_call_summaries->get (e)->loop_depth += depth;
+ ipa_call_summaries->get_create (e)->loop_depth += depth;
}
/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
{
int i;
struct ipa_edge_args *args = IPA_EDGE_REF (edge);
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
struct ipa_call_summary *inlined_es
- = ipa_call_summaries->get (inlined_edge);
+ = ipa_call_summaries->get_create (inlined_edge);
for (i = 0; i < ipa_get_cs_argument_count (args); i++)
{
struct cgraph_edge *e, *next;
for (e = node->callees; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
predicate p;
next = e->next_callee;
}
for (e = node->indirect_calls; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
predicate p;
next = e->next_callee;
void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
- struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
+ ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (edge->callee);
struct cgraph_node *to = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to : edge->caller);
- struct ipa_fn_summary *info = ipa_fn_summaries->get (to);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
vec<int> operand_map = vNULL;
int i;
predicate toplev_predicate;
predicate true_p = true;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
if (es->predicate)
toplev_predicate = *es->predicate;
&callee_info->array_index,
operand_map, offset_map, clause, &toplev_predicate);
- inline_update_callee_summaries (edge->callee,
- ipa_call_summaries->get (edge)->loop_depth);
+ ipa_call_summary *s = ipa_call_summaries->get_create (edge);
+ inline_update_callee_summaries (edge->callee, s->loop_depth);
/* We do not maintain predicates of inlined edges, free it. */
edge_set_predicate (edge, &true_p);
void
ipa_update_overall_fn_summary (struct cgraph_node *node)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int i;
static void
read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
predicate p;
int length, i;
encoder = file_data->symtab_node_encoder;
node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
index));
- info = ipa_fn_summaries->get (node);
+ info = ipa_fn_summaries->get_create (node);
info->estimated_stack_size
= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
int i;
streamer_write_uhwi (ob, es->call_stmt_size);
cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
if (cnode && cnode->definition && !cnode->alias)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (cnode);
struct bitpack_d bp;
struct cgraph_edge *edge;
int i;
return;
FOR_EACH_DEFINED_FUNCTION (node)
if (!node->alias)
- ipa_fn_summaries->get (node)->reset (node);
+ ipa_fn_summaries->get_create (node)->reset (node);
ipa_fn_summaries->release ();
ipa_fn_summaries = NULL;
ipa_call_summaries->release ();
FOR_EACH_DEFINED_FUNCTION (node)
{
- hsa_function_summary *s = hsa_summaries->get (node);
+ hsa_function_summary *s = hsa_summaries->get_create (node);
/* A linked function is skipped. */
if (s->m_bound_function != NULL)
while (e)
{
- hsa_function_summary *src = hsa_summaries->get (node);
+ hsa_function_summary *src = hsa_summaries->get_create (node);
if (src->m_kind != HSA_NONE && src->m_gpu_implementation_p)
{
- hsa_function_summary *dst = hsa_summaries->get (e->callee);
+ hsa_function_summary *dst = hsa_summaries->get_create (e->callee);
if (dst->m_kind != HSA_NONE && !dst->m_gpu_implementation_p)
{
e->redirect_callee (dst->m_bound_function);
lsei_next_function_in_partition (&lsei))
{
node = lsei_cgraph_node (lsei);
- hsa_function_summary *s = hsa_summaries->get (node);
+ hsa_function_summary *s = hsa_summaries->get_create (node);
if (s->m_kind != HSA_NONE)
count++;
lsei_next_function_in_partition (&lsei))
{
node = lsei_cgraph_node (lsei);
- hsa_function_summary *s = hsa_summaries->get (node);
+ hsa_function_summary *s = hsa_summaries->get_create (node);
if (s->m_kind != HSA_NONE)
{
node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
index));
gcc_assert (node->definition);
- hsa_function_summary *s = hsa_summaries->get (node);
+ hsa_function_summary *s = hsa_summaries->get_create (node);
struct bitpack_d bp = streamer_read_bitpack (&ib_main);
s->m_kind = (hsa_function_kind) bp_unpack_value (&bp, 2);
"can not create wrapper of stdarg function.\n");
}
else if (ipa_fn_summaries
- && ipa_fn_summaries->get (alias)->self_size <= 2)
+ && ipa_fn_summaries->get_create (alias)->self_size <= 2)
{
if (dump_file)
fprintf (dump_file, "Wrapper creation is not "
struct cgraph_node *to = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to : edge->caller);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
- if (ipa_fn_summaries->get (to)->scc_no
- && ipa_fn_summaries->get (to)->scc_no
- == ipa_fn_summaries->get (callee)->scc_no
+ if (ipa_fn_summaries->get_create (to)->scc_no
+ && ipa_fn_summaries->get_create (to)->scc_no
+ == ipa_fn_summaries->get_create (callee)->scc_no
&& !edge->recursive_p ())
hints |= INLINE_HINT_same_scc;
vec<tree> known_vals;
vec<ipa_polymorphic_call_context> known_contexts;
vec<ipa_agg_jump_function_p> known_aggs;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
int min_size;
callee = edge->callee->ultimate_alias_target ();
/* When caching, update the cache entry. */
if (edge_growth_cache.exists ())
{
- ipa_fn_summaries->get (edge->callee)->min_size = min_size;
+ ipa_fn_summaries->get_create (edge->callee)->min_size = min_size;
if ((int) edge_growth_cache.length () <= edge->uid)
edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
edge_growth_cache[edge->uid].time = time;
estimate_size_after_inlining (struct cgraph_node *node,
struct cgraph_edge *edge)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
+ ipa_fn_summary *s = ipa_fn_summaries->get_create (node);
if (!es->predicate || *es->predicate != false)
{
- int size = ipa_fn_summaries->get (node)->size + estimate_edge_growth (edge);
+ int size = s->size + estimate_edge_growth (edge);
gcc_assert (size >= 0);
return size;
}
- return ipa_fn_summaries->get (node)->size;
+ return s->size;
}
estimate_growth (struct cgraph_node *node)
{
struct growth_data d = { node, false, false, 0 };
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
|| node->address_taken)
return true;
- max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;
+ max_callers = ipa_fn_summaries->get_create (node)->size * 4 / edge_growth + 2;
for (e = node->callers; e; e = e->next_caller)
{
{
gcc_assert (!e->callee->alias);
if (overall_size)
- *overall_size -= ipa_fn_summaries->get (e->callee)->size;
+ *overall_size -= ipa_fn_summaries->get_create (e->callee)->size;
nfunctions_inlined++;
}
duplicate = false;
reload_optimization_node = true;
}
- ipa_fn_summary *caller_info = ipa_fn_summaries->get (to);
- ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ ipa_fn_summary *caller_info = ipa_fn_summaries->get_create (to);
+ ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (callee);
if (!caller_info->fp_expressions && callee_info->fp_expressions)
{
caller_info->fp_expressions = true;
gcc_assert (curr->callee->global.inlined_to == to);
- old_size = ipa_fn_summaries->get (to)->size;
+ old_size = ipa_fn_summaries->get_create (to)->size;
ipa_merge_fn_summary_after_inlining (e);
if (e->in_polymorphic_cdtor)
mark_all_inlined_calls_cdtor (e->callee);
work for further inlining into this function. Before inlining
the function we inlined to again we expect the caller to update
the overall summary. */
- ipa_fn_summaries->get (to)->size += estimated_growth;
- new_size = ipa_fn_summaries->get (to)->size;
+ ipa_fn_summaries->get_create (to)->size += estimated_growth;
+ new_size = ipa_fn_summaries->get_create (to)->size;
if (callee->calls_comdat_local)
to->calls_comdat_local = true;
int newsize;
int limit = 0;
HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
- ipa_fn_summary *info, *what_info, *outer_info = ipa_fn_summaries->get (to);
+ ipa_fn_summary *info, *what_info;
+ ipa_fn_summary *outer_info = ipa_fn_summaries->get_create (to);
/* Look for function e->caller is inlined to. While doing
so work out the largest function body on the way. As
too much in order to prevent compiler from exploding". */
while (true)
{
- info = ipa_fn_summaries->get (to);
+ info = ipa_fn_summaries->get_create (to);
if (limit < info->self_size)
limit = info->self_size;
if (stack_size_limit < info->estimated_self_stack_size)
break;
}
- what_info = ipa_fn_summaries->get (what);
+ what_info = ipa_fn_summaries->get_create (what);
if (limit < what_info->self_size)
limit = what_info->self_size;
e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
inlinable = false;
}
- else if (!ipa_fn_summaries->get (callee)->inlinable)
+ else if (!ipa_fn_summaries->get_create (callee)->inlinable)
{
e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
inlinable = false;
(DECL_DISREGARD_INLINE_LIMITS (callee->decl)
&& lookup_attribute ("always_inline",
DECL_ATTRIBUTES (callee->decl)));
- ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
- ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ ipa_fn_summary *caller_info = ipa_fn_summaries->get_create (caller);
+ ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (callee);
/* Until GCC 4.9 we did not check the semantics alterning flags
bellow and inline across optimization boundry.
> opt_for_fn (caller->decl, optimize)))
{
if (estimate_edge_time (e)
- >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
+ >= 20 + ipa_call_summaries->get_create (e)->call_stmt_time)
{
e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
inlinable = false;
else
uninlined_call_time = uninlined_call_time >> 11;
- sreal caller_time = ipa_fn_summaries->get (caller)->time;
+ sreal caller_time = ipa_fn_summaries->get_create (caller)->time;
return uninlined_call_time + caller_time;
}
cgraph_node *caller = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to
: edge->caller);
- sreal caller_time = ipa_fn_summaries->get (caller)->time;
+ sreal caller_time = ipa_fn_summaries->get_create (caller)->time;
sreal freq = edge->sreal_frequency ();
if (freq > 0)
/* This calculation should match one in ipa-inline-analysis.c
(estimate_edge_size_and_time). */
- time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
+ time -= (sreal)ipa_call_summaries->get_create (edge)->call_stmt_time * freq;
time += caller_time;
if (time <= 0)
time = ((sreal) 1) >> 8;
MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
else if ((!DECL_DECLARED_INLINE_P (callee->decl)
&& (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
- && ipa_fn_summaries->get (callee)->min_size
- - ipa_call_summaries->get (e)->call_stmt_size
+ && ipa_fn_summaries->get_create (callee)->min_size
+ - ipa_call_summaries->get_create (e)->call_stmt_size
> MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
{
e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
}
else if ((DECL_DECLARED_INLINE_P (callee->decl)
|| e->count.ipa ().nonzero_p ())
- && ipa_fn_summaries->get (callee)->min_size
- - ipa_call_summaries->get (e)->call_stmt_size
+ && ipa_fn_summaries->get_create (callee)->min_size
+ - ipa_call_summaries->get_create (e)->call_stmt_size
> 16 * MAX_INLINE_INSNS_SINGLE)
{
e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
int growth;
sreal edge_time, unspec_edge_time;
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
- struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ struct ipa_fn_summary *callee_info = ipa_fn_summaries->get_create (callee);
ipa_hints hints;
cgraph_node *caller = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to
&& (!DECL_DECLARED_INLINE_P (edge->callee->decl)
|| DECL_DECLARED_INLINE_P (caller->decl)))))
{
- struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
+ ipa_fn_summary *caller_info = ipa_fn_summaries->get_create (caller);
int caller_growth = caller_info->growth;
/* Only apply the penalty when caller looks like inline candidate,
of functions fully inlined in program. */
else
{
- int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
+ int nest = MIN (ipa_call_summaries->get_create (edge)->loop_depth, 8);
badness = growth;
/* Decrease badness if call is nested. */
struct cgraph_edge *edge;
struct ipa_ref *ref;
- if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
+ if ((!node->alias && !ipa_fn_summaries->get_create (node)->inlinable)
|| node->global.inlined_to)
return;
if (!bitmap_set_bit (updated_nodes, node->uid))
don't need updating. */
if (e->inline_failed
&& (callee = e->callee->ultimate_alias_target (&avail, e->caller))
- && ipa_fn_summaries->get (callee)->inlinable
+ && ipa_fn_summaries->get_create (callee)->inlinable
&& avail >= AVAIL_AVAILABLE
&& !bitmap_bit_p (updated_nodes, callee->uid))
{
fprintf (dump_file,
"\n Inlined %i times, "
"body grown from size %i to %i, time %f to %f\n", n,
- ipa_fn_summaries->get (master_clone)->size,
- ipa_fn_summaries->get (node)->size,
- ipa_fn_summaries->get (master_clone)->time.to_double (),
- ipa_fn_summaries->get (node)->time.to_double ());
+ ipa_fn_summaries->get_create (master_clone)->size,
+ ipa_fn_summaries->get_create (node)->size,
+ ipa_fn_summaries->get_create (master_clone)->time.to_double (),
+ ipa_fn_summaries->get_create (node)->time.to_double ());
/* Remove master clone we used for inlining. We rely that clones inlined
into master clone gets queued just before master clone so we don't
&& (node->has_gimple_body_p () || node->thunk.thunk_p)
&& opt_for_fn (node->decl, optimize))
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
/* Do not account external functions, they will be optimized out
n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
if (opt_for_fn (n2->decl, optimize))
{
- struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
+ ipa_fn_summary *info2 = ipa_fn_summaries->get_create (n2);
if (info2->scc_no)
break;
info2->scc_no = id;
fprintf (dump_file,
"\nConsidering %s with %i size\n",
callee->dump_name (),
- ipa_fn_summaries->get (callee)->size);
+ ipa_fn_summaries->get_create (callee)->size);
fprintf (dump_file,
" to be inlined into %s in %s:%i\n"
" Estimated badness is %f, frequency %.2f.\n",
if (dump_file)
{
+ ipa_fn_summary *s = ipa_fn_summaries->get_create (edge->caller);
fprintf (dump_file,
" Inlined %s into %s which now has time %f and size %i, "
"net change of %+i.\n",
xstrdup_for_dump (edge->callee->name ()),
xstrdup_for_dump (edge->caller->name ()),
- ipa_fn_summaries->get (edge->caller)->time.to_double (),
- ipa_fn_summaries->get (edge->caller)->size,
+ s->time.to_double (),
+ s->size,
overall_size - old_size);
}
if (min_size > overall_size)
fprintf (dump_file,
"\nInlining %s size %i.\n",
node->name (),
- ipa_fn_summaries->get (node)->size);
+ ipa_fn_summaries->get_create (node)->size);
fprintf (dump_file,
" Called once from %s %i insns.\n",
node->callers->caller->name (),
- ipa_fn_summaries->get (node->callers->caller)->size);
+ ipa_fn_summaries->get_create (node->callers->caller)->size);
}
/* Remember which callers we inlined to, delaying updating the
fprintf (dump_file,
" Inlined into %s which now has %i size\n",
caller->name (),
- ipa_fn_summaries->get (caller)->size);
+ ipa_fn_summaries->get_create (caller)->size);
if (!(*num_calls)--)
{
if (dump_file)
if (!node->global.inlined_to
&& !node->alias)
{
- sreal time = ipa_fn_summaries->get (node)->time;
+ sreal time = ipa_fn_summaries->get_create (node)->time;
sum += time;
if (node->count.ipa ().initialized_p ())
sum_weighted += time * node->count.ipa ().to_gcov_type ();
for (e = node->callees; e; e = e->next_callee)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
- if (!ipa_fn_summaries->get (callee)->inlinable
+ if (!ipa_fn_summaries->get_create (callee)->inlinable
|| !e->inline_failed)
continue;
statements that don't have inline parameters computed. */
for (edge = node->callees; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ ipa_call_summary *es = ipa_call_summaries->get_create (edge);
es->call_stmt_size
= estimate_num_insns (edge->call_stmt, &eni_size_weights);
es->call_stmt_time
for (edge = node->callees; edge; edge = edge->next_callee)
{
/* We have no summary for new bound store calls yet. */
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ ipa_call_summary *es = ipa_call_summaries->get_create (edge);
es->call_stmt_size
= estimate_num_insns (edge->call_stmt, &eni_size_weights);
es->call_stmt_time
static inline int
estimate_edge_growth (struct cgraph_edge *edge)
{
- gcc_checking_assert (ipa_call_summaries->get (edge)->call_stmt_size
+ gcc_checking_assert (ipa_call_summaries->get_create (edge)->call_stmt_size
|| !edge->callee->analyzed);
return (estimate_edge_size (edge)
- - ipa_call_summaries->get (edge)->call_stmt_size);
+ - ipa_call_summaries->get_create (edge)->call_stmt_size);
}
/* Return estimated callee runtime increase after inlining
fprintf (dump_file, " Called by %s that is executed once\n",
edge->caller->name ());
d->maybe_unlikely_executed = false;
- if (ipa_call_summaries->get (edge)->loop_depth)
+ if (ipa_call_summaries->get_create (edge)->loop_depth)
{
d->maybe_executed_once = false;
if (dump_file && (dump_flags & TDF_DETAILS))
bool speculative)
{
struct cgraph_node *callee;
- struct ipa_call_summary *es = ipa_call_summaries->get (ie);
+ struct ipa_call_summary *es = ipa_call_summaries->get_create (ie);
bool unreachable = false;
if (TREE_CODE (target) == ADDR_EXPR)
for direct call (adjusted by inline_edge_duplication_hook). */
if (ie == orig)
{
- es = ipa_call_summaries->get (ie);
+ es = ipa_call_summaries->get_create (ie);
es->call_stmt_size -= (eni_size_weights.indirect_call_cost
- eni_size_weights.call_cost);
es->call_stmt_time -= (eni_time_weights.indirect_call_cost
/* Return the associated parameter/argument info corresponding to the given
node/edge. */
-#define IPA_NODE_REF(NODE) (ipa_node_params_sum->get (NODE))
-#define IPA_EDGE_REF(EDGE) (ipa_edge_args_sum->get (EDGE))
+#define IPA_NODE_REF(NODE) (ipa_node_params_sum->get_create (NODE))
+#define IPA_EDGE_REF(EDGE) (ipa_edge_args_sum->get_create (EDGE))
/* This macro checks validity of index returned by
ipa_get_param_decl_index function. */
#define IS_VALID_JUMP_FUNC_INDEX(I) ((I) != -1)
cgraph_edge *cs = node->get_edge (call_stmt);
if (cs)
{
- ipa_call_summary *es = ipa_call_summaries->get (cs);
+ ipa_call_summary *es = ipa_call_summaries->get_create (cs);
gcc_assert (es);
es->is_return_callee_uncaptured = true;
}
cgraph_edge *cs = node->get_edge (call_stmt);
if (cs)
{
- ipa_call_summary *es = ipa_call_summaries->get (cs);
+ ipa_call_summary *es = ipa_call_summaries->get_create (cs);
gcc_assert (es);
es->is_return_callee_uncaptured = true;
}
vec<cgraph_node *> callees = vNULL;
for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
{
- ipa_call_summary *es = ipa_call_summaries->get (cs);
+ ipa_call_summary *es = ipa_call_summaries->get_create (cs);
if (es && es->is_return_callee_uncaptured)
callees.safe_push (cs->callee);
}
/* This can be relaxed; function might become inlinable after splitting
away the uninlinable part. */
if (ipa_fn_summaries
- && !ipa_fn_summaries->get (node)->inlinable)
+ && !ipa_fn_summaries->get_create (node)->inlinable)
{
if (dump_file)
fprintf (dump_file, "Not splitting: not inlinable.\n");
+2018-06-08 Martin Liska <mliska@suse.cz>
+
+ * lto-partition.c (add_symbol_to_partition_1): Use get_create instead
+ of get.
+ (undo_partition): Likewise.
+ (lto_balanced_map): Likewise.
+
2018-06-08 Martin Liska <mliska@suse.cz>
* lto-partition.c (add_references_to_partition): Remove support
{
struct cgraph_edge *e;
if (!node->alias && c == SYMBOL_PARTITION)
- part->insns += ipa_fn_summaries->get (cnode)->size;
+ part->insns += ipa_fn_summaries->get_create (cnode)->size;
/* Add all inline clones and callees that are duplicated. */
for (e = cnode->callees; e; e = e->next_callee)
if (!node->alias && (cnode = dyn_cast <cgraph_node *> (node))
&& node->get_partitioning_class () == SYMBOL_PARTITION)
- partition->insns -= ipa_fn_summaries->get (cnode)->size;
+ partition->insns -= ipa_fn_summaries->get_create (cnode)->size;
lto_symtab_encoder_delete_node (partition->encoder, node);
node->aux = (void *)((size_t)node->aux - 1);
}
else
order[n_nodes++] = node;
if (!node->alias)
- total_size += ipa_fn_summaries->get (node)->size;
+ total_size += ipa_fn_summaries->get_create (node)->size;
}
original_total_size = total_size;
/* Release an item that is stored within map. */
void release (T *item);
- /* Getter for summary callgraph node pointer. */
- T* get (cgraph_node *node)
+ /* Getter for summary callgraph node pointer. If a summary for a node
+ does not exist, it will be created. */
+ T* get_create (cgraph_node *node)
{
gcc_checking_assert (node->summary_uid);
- return get (node->summary_uid);
+ return get_create (node->summary_uid);
}
/* Return number of elements handled by data structure. */
typedef int_hash <int, 0, -1> map_hash;
/* Getter for summary callgraph ID. */
- T* get (int uid);
+ T* get_create (int uid);
/* Indicates if insertion hook is enabled. */
bool m_insertion_enabled;
function_summary *summary = (function_summary <T *> *) (data);
if (summary->m_insertion_enabled)
- summary->insert (node, summary->get (node));
+ summary->insert (node, summary->get_create (node));
}
template <typename T>
template <typename T>
T*
-function_summary<T *>::get (int uid)
+function_summary<T *>::get_create (int uid)
{
bool existed;
T **v = &m_map.get_or_insert (uid, &existed);
/* Release an item that is stored within map. */
void release (T *item);
- /* Getter for summary callgraph edge pointer. */
- T* get (cgraph_edge *edge)
+ /* Getter for summary callgraph edge pointer.
+ If a summary for an edge does not exist, it will be created. */
+ T* get_create (cgraph_edge *edge)
{
- return get (hashable_uid (edge));
+ return get_create (hashable_uid (edge));
}
/* Return number of elements handled by data structure. */
typedef int_hash <int, 0, -1> map_hash;
/* Getter for summary callgraph ID. */
- T* get (int uid)
+ T* get_create (int uid)
{
bool existed;
T **v = &m_map.get_or_insert (uid, &existed);
}
if ((DECL_ONE_ONLY (node->decl) || DECL_EXTERNAL (node->decl))
- && ipa_fn_summaries->get (node)->size >= MAX_INLINE_INSNS_AUTO)
+ && ipa_fn_summaries->get_create (node)->size >= MAX_INLINE_INSNS_AUTO)
{
if (dump_file)
fprintf (dump_file, "Function too big to be made truly local.\n");