/* Interprocedural constant propagation
- Copyright (C) 2005-2013 Free Software Foundation, Inc.
+ Copyright (C) 2005-2014 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <RAZYA@il.ibm.com> and Martin Jambor
<mjambor@suse.cz>
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "gimple-fold.h"
+#include "gimple-expr.h"
#include "target.h"
-#include "gimple.h"
-#include "cgraph.h"
#include "ipa-prop.h"
-#include "tree-flow.h"
+#include "bitmap.h"
#include "tree-pass.h"
#include "flags.h"
#include "diagnostic.h"
return true;
}
-/* Return true iff the CS is an edge within a strongly connected component as
- computed by ipa_reduced_postorder. */
-
-static inline bool
-edge_within_scc (struct cgraph_edge *cs)
-{
- struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->symbol.aux;
- struct ipa_dfs_info *callee_dfs;
- struct cgraph_node *callee = cgraph_function_node (cs->callee, NULL);
-
- callee_dfs = (struct ipa_dfs_info *) callee->symbol.aux;
- return (caller_dfs
- && callee_dfs
- && caller_dfs->scc_no == callee_dfs->scc_no);
-}
-
/* Print V which is extracted from a value in a lattice to F. */
static void
fprintf (f, " [from:");
for (s = val->sources; s; s = s->next)
- fprintf (f, " %i(%i)", s->cs->caller->uid,s->cs->frequency);
+ fprintf (f, " %i(%i)", s->cs->caller->order,
+ s->cs->frequency);
fprintf (f, "]");
}
struct ipa_node_params *info;
info = IPA_NODE_REF (node);
- fprintf (f, " Node: %s/%i:\n", cgraph_node_name (node), node->uid);
+ fprintf (f, " Node: %s/%i:\n", node->name (),
+ node->order);
count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
{
reason = "alias or thunk";
else if (!node->local.versionable)
reason = "not a tree_versionable_function";
- else if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
+ else if (node->get_availability () <= AVAIL_INTERPOSABLE)
reason = "insufficient body availability";
+ else if (!opt_for_fn (node->decl, optimize)
+ || !opt_for_fn (node->decl, flag_ipa_cp))
+ reason = "non-optimized function";
+ else if (lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (node->decl)))
+ {
+ /* Ideally we should clone the SIMD clones themselves and create
+ vector copies of them, so IPA-cp and SIMD clones can happily
+ coexist, but that may not be worth the effort. */
+ reason = "function has SIMD clones";
+ }
+ /* Don't clone decls local to a comdat group; it breaks and for C++
+ decloned constructors, inlining is always better anyway. */
+ else if (node->comdat_local_p ())
+ reason = "comdat-local function";
if (reason && dump_file && !node->alias && !node->thunk.thunk_p)
fprintf (dump_file, "Function %s/%i is not versionable, reason: %s.\n",
- cgraph_node_name (node), node->uid, reason);
+ node->name (), node->order, reason);
node->local.versionable = (reason == NULL);
}
for (cs = node->callers; cs; cs = cs->next_caller)
if (cs->caller->thunk.thunk_p)
- cgraph_for_node_and_aliases (cs->caller, gather_caller_stats,
- stats, false);
+ cs->caller->call_for_symbol_thunks_and_aliases (gather_caller_stats,
+ stats, false);
else
{
stats->count_sum += cs->count;
stats->freq_sum += cs->frequency;
stats->n_calls++;
- if (cgraph_maybe_hot_edge_p (cs))
+ if (cs->maybe_hot_p ())
stats->n_hot_calls ++;
}
return false;
{
struct caller_statistics stats;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (node));
+ gcc_checking_assert (node->has_gimple_body_p ());
if (!flag_ipa_cp_clone)
{
if (dump_file)
fprintf (dump_file, "Not considering %s for cloning; "
"-fipa-cp-clone disabled.\n",
- cgraph_node_name (node));
+ node->name ());
return false;
}
- if (!optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->symbol.decl)))
+ if (!optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->decl)))
{
if (dump_file)
fprintf (dump_file, "Not considering %s for cloning; "
"optimizing it for size.\n",
- cgraph_node_name (node));
+ node->name ());
return false;
}
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (node, gather_caller_stats, &stats, false);
+ node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats, false);
if (inline_summary (node)->self_size < stats.n_calls)
{
if (dump_file)
fprintf (dump_file, "Considering %s for cloning; code might shrink.\n",
- cgraph_node_name (node));
+ node->name ());
return true;
}
if (dump_file)
fprintf (dump_file, "Considering %s for cloning; "
"usually called directly.\n",
- cgraph_node_name (node));
+ node->name ());
return true;
}
}
{
if (dump_file)
fprintf (dump_file, "Not considering %s for cloning; no hot calls.\n",
- cgraph_node_name (node));
+ node->name ());
return false;
}
if (dump_file)
fprintf (dump_file, "Considering %s for cloning.\n",
- cgraph_node_name (node));
+ node->name ());
return true;
}
static void
build_toporder_info (struct topo_info *topo)
{
- topo->order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
- topo->stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
+ topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
+ topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
+
topo->stack_top = 0;
topo->nnodes = ipa_reduced_postorder (topo->order, true, true, NULL);
}
bool disable = false, variable = false;
int i;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (node));
+ gcc_checking_assert (node->has_gimple_body_p ());
if (!node->local.local)
{
/* When cloning is allowed, we can assume that externally visible
if (dump_file && (dump_flags & TDF_DETAILS)
&& !node->alias && !node->thunk.thunk_p)
fprintf (dump_file, "Marking all lattices of %s/%i as %s\n",
- cgraph_node_name (node), node->uid,
+ node->name (), node->order,
disable ? "BOTTOM" : "VARIABLE");
}
- if (!disable)
- for (i = 0; i < ipa_get_param_count (info) ; i++)
- {
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- tree t = TREE_TYPE (ipa_get_param(info, i));
-
- if (POINTER_TYPE_P (t) && TYPE_RESTRICT (t)
- && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
- {
- set_lattice_to_bottom (&plats->itself);
- if (dump_file && (dump_flags & TDF_DETAILS)
- && !node->alias && !node->thunk.thunk_p)
- fprintf (dump_file, "Going to ignore param %i of of %s/%i.\n",
- i, cgraph_node_name (node), node->uid);
- }
- }
for (ie = node->indirect_calls; ie; ie = ie->next_callee)
- if (ie->indirect_info->polymorphic)
+ if (ie->indirect_info->polymorphic
+ && ie->indirect_info->param_index >= 0)
{
gcc_checking_assert (ie->indirect_info->param_index >= 0);
ipa_get_parm_lattices (info,
/* Return the result of a (possibly arithmetic) pass through jump function
JFUNC on the constant value INPUT. Return NULL_TREE if that cannot be
- determined or itself is considered an interprocedural invariant. */
+ determined or be considered an interprocedural invariant. */
static tree
ipa_get_jf_pass_through_result (struct ipa_jump_func *jfunc, tree input)
{
tree restype, res;
+ if (TREE_CODE (input) == TREE_BINFO)
+ {
+ if (ipa_get_jf_pass_through_type_preserved (jfunc))
+ {
+ gcc_checking_assert (ipa_get_jf_pass_through_operation (jfunc)
+ == NOP_EXPR);
+ return input;
+ }
+ return NULL_TREE;
+ }
+
if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
return input;
- else if (TREE_CODE (input) == TREE_BINFO)
- return NULL_TREE;
gcc_checking_assert (is_gimple_ip_invariant (input));
if (TREE_CODE_CLASS (ipa_get_jf_pass_through_operation (jfunc))
ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
{
if (TREE_CODE (input) == TREE_BINFO)
- return get_binfo_at_offset (input,
- ipa_get_jf_ancestor_offset (jfunc),
- ipa_get_jf_ancestor_type (jfunc));
+ {
+ if (!ipa_get_jf_ancestor_type_preserved (jfunc))
+ return NULL;
+ /* FIXME: At LTO we can't propagate to non-polymorphic type, because
+ we have no ODR equivalency on those. This should be fixed by
+ propagating on types rather than binfos that would make type
+	 matching here unnecessary.  */
+ if (in_lto_p
+ && (TREE_CODE (ipa_get_jf_ancestor_type (jfunc)) != RECORD_TYPE
+ || !TYPE_BINFO (ipa_get_jf_ancestor_type (jfunc))
+ || !BINFO_VTABLE (TYPE_BINFO (ipa_get_jf_ancestor_type (jfunc)))))
+ {
+ if (!ipa_get_jf_ancestor_offset (jfunc))
+ return input;
+ return NULL;
+ }
+ return get_binfo_at_offset (input,
+ ipa_get_jf_ancestor_offset (jfunc),
+ ipa_get_jf_ancestor_type (jfunc));
+ }
else if (TREE_CODE (input) == ADDR_EXPR)
{
tree t = TREE_OPERAND (input, 0);
t = build_ref_for_offset (EXPR_LOCATION (t), t,
ipa_get_jf_ancestor_offset (jfunc),
- ipa_get_jf_ancestor_type (jfunc), NULL, false);
+ ipa_get_jf_ancestor_type (jfunc)
+ ? ipa_get_jf_ancestor_type (jfunc)
+ : ptr_type_node, NULL, false);
return build_fold_addr_expr (t);
}
else
return NULL_TREE;
}
-/* Extract the acual BINFO being described by JFUNC which must be a known type
- jump function. */
-
-static tree
-ipa_value_from_known_type_jfunc (struct ipa_jump_func *jfunc)
-{
- tree base_binfo = TYPE_BINFO (ipa_get_jf_known_type_base_type (jfunc));
- if (!base_binfo)
- return NULL_TREE;
- return get_binfo_at_offset (base_binfo,
- ipa_get_jf_known_type_offset (jfunc),
- ipa_get_jf_known_type_component_type (jfunc));
-}
-
/* Determine whether JFUNC evaluates to a known value (that is either a
constant or a binfo) and if so, return it. Otherwise return NULL. INFO
describes the caller node so that pass-through jump functions can be
if (jfunc->type == IPA_JF_CONST)
return ipa_get_jf_constant (jfunc);
else if (jfunc->type == IPA_JF_KNOWN_TYPE)
- return ipa_value_from_known_type_jfunc (jfunc);
+ return ipa_binfo_from_known_type_jfunc (jfunc);
else if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
{
if (dump_file)
{
+ symtab_node::dump_table (dump_file);
fprintf (dump_file, "\nIPA lattices after constant "
- "propagation:\n");
+ "propagation, before gcc_unreachable:\n");
print_all_lattices (dump_file, true, false);
}
for (val = lat->values; val; val = val->next)
if (values_equal_for_ipcp_p (val->value, newval))
{
- if (edge_within_scc (cs))
+ if (ipa_edge_within_scc (cs))
{
struct ipcp_value_source *s;
for (s = val->sources; s ; s = s->next)
struct ipcp_value *src_val;
bool ret = false;
- if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
- for (src_val = src_lat->values; src_val; src_val = src_val->next)
- ret |= add_scalar_value_to_lattice (dest_lat, src_val->value, cs,
- src_val, src_idx);
/* Do not create new values when propagating within an SCC because if there
are arithmetic functions with circular dependencies, there is infinite
number of them and we would just make lattices bottom. */
- else if (edge_within_scc (cs))
+ if ((ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
+ && ipa_edge_within_scc (cs))
ret = set_lattice_contains_variable (dest_lat);
else
for (src_val = src_lat->values; src_val; src_val = src_val->next)
{
- tree cstval = src_val->value;
-
- if (TREE_CODE (cstval) == TREE_BINFO)
- {
- ret |= set_lattice_contains_variable (dest_lat);
- continue;
- }
- cstval = ipa_get_jf_pass_through_result (jfunc, cstval);
+ tree cstval = ipa_get_jf_pass_through_result (jfunc, src_val->value);
if (cstval)
ret |= add_scalar_value_to_lattice (dest_lat, cstval, cs, src_val,
struct ipcp_value *src_val;
bool ret = false;
- if (edge_within_scc (cs))
+ if (ipa_edge_within_scc (cs))
return set_lattice_contains_variable (dest_lat);
for (src_val = src_lat->values; src_val; src_val = src_val->next)
if (jfunc->type == IPA_JF_KNOWN_TYPE)
{
- val = ipa_value_from_known_type_jfunc (jfunc);
+ val = ipa_binfo_from_known_type_jfunc (jfunc);
if (!val)
return set_lattice_contains_variable (dest_lat);
}
if (item->offset < 0)
continue;
gcc_checking_assert (is_gimple_ip_invariant (item->value));
- val_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (item->value)), 1);
+ val_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (item->value)));
if (merge_agg_lats_step (dest_plats, item->offset, val_size,
&aglat, pre_existing, &ret))
bool ret = false;
int i, args_count, parms_count;
- callee = cgraph_function_node (cs->callee, &availability);
- if (!callee->analyzed)
+ callee = cs->callee->function_symbol (&availability);
+ if (!callee->definition)
return false;
- gcc_checking_assert (cgraph_function_with_gimple_body_p (callee));
+ gcc_checking_assert (callee->has_gimple_body_p ());
callee_info = IPA_NODE_REF (callee);
args = IPA_EDGE_REF (cs);
args_count = ipa_get_cs_argument_count (args);
parms_count = ipa_get_param_count (callee_info);
+ if (parms_count == 0)
+ return false;
/* If this call goes through a thunk we must not propagate to the first (0th)
parameter. However, we might need to uncover a thunk from below a series
of aliases first. */
alias_or_thunk = cs->callee;
while (alias_or_thunk->alias)
- alias_or_thunk = cgraph_alias_aliased_node (alias_or_thunk);
+ alias_or_thunk = alias_or_thunk->get_alias_target ();
if (alias_or_thunk->thunk.thunk_p)
{
ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
struct ipcp_param_lattices *dest_plats;
dest_plats = ipa_get_parm_lattices (callee_info, i);
- if (availability == AVAIL_OVERWRITABLE)
+ if (availability == AVAIL_INTERPOSABLE)
ret |= set_all_contains_variable (dest_plats);
else
{
}
/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
- (which can contain both constants and binfos) or KNOWN_BINFOS (which can be
- NULL) return the destination. */
+ (which can contain both constants and binfos), KNOWN_BINFOS, KNOWN_AGGS or
+ AGG_REPS return the destination. The latter three can be NULL. If AGG_REPS
+ is not NULL, KNOWN_AGGS is ignored. */
-tree
-ipa_get_indirect_edge_target (struct cgraph_edge *ie,
- vec<tree> known_vals,
- vec<tree> known_binfos,
- vec<ipa_agg_jump_function_p> known_aggs)
+static tree
+ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs,
+ struct ipa_agg_replacement_value *agg_reps)
{
int param_index = ie->indirect_info->param_index;
HOST_WIDE_INT token, anc_offset;
tree otr_type;
tree t;
+ tree target = NULL;
- if (param_index == -1)
+ if (param_index == -1
+ || known_vals.length () <= (unsigned int) param_index)
return NULL_TREE;
if (!ie->indirect_info->polymorphic)
if (ie->indirect_info->agg_contents)
{
- if (known_aggs.length ()
- > (unsigned int) param_index)
+ if (agg_reps)
+ {
+ t = NULL;
+ while (agg_reps)
+ {
+ if (agg_reps->index == param_index
+ && agg_reps->offset == ie->indirect_info->offset
+ && agg_reps->by_ref == ie->indirect_info->by_ref)
+ {
+ t = agg_reps->value;
+ break;
+ }
+ agg_reps = agg_reps->next;
+ }
+ }
+ else if (known_aggs.length () > (unsigned int) param_index)
{
struct ipa_agg_jump_function *agg;
agg = known_aggs[param_index];
t = NULL;
}
else
- t = (known_vals.length () > (unsigned int) param_index
- ? known_vals[param_index] : NULL);
+ t = known_vals[param_index];
if (t &&
TREE_CODE (t) == ADDR_EXPR
return NULL_TREE;
}
+ if (!flag_devirtualize)
+ return NULL_TREE;
+
gcc_assert (!ie->indirect_info->agg_contents);
token = ie->indirect_info->otr_token;
anc_offset = ie->indirect_info->offset;
otr_type = ie->indirect_info->otr_type;
- t = known_vals[param_index];
+ t = NULL;
+
+  /* Try to work out value of virtual table pointer value in replacements.  */
+ if (!t && agg_reps && !ie->indirect_info->by_ref)
+ {
+ while (agg_reps)
+ {
+ if (agg_reps->index == param_index
+ && agg_reps->offset == ie->indirect_info->offset
+ && agg_reps->by_ref)
+ {
+ t = agg_reps->value;
+ break;
+ }
+ agg_reps = agg_reps->next;
+ }
+ }
+
+ /* Try to work out value of virtual table pointer value in known
+ aggregate values. */
+ if (!t && known_aggs.length () > (unsigned int) param_index
+ && !ie->indirect_info->by_ref)
+ {
+ struct ipa_agg_jump_function *agg;
+ agg = known_aggs[param_index];
+ t = ipa_find_agg_cst_for_param (agg, ie->indirect_info->offset,
+ true);
+ }
+
+ /* If we found the virtual table pointer, lookup the target. */
+ if (t)
+ {
+ tree vtable;
+ unsigned HOST_WIDE_INT offset;
+ if (vtable_pointer_value_to_vtable (t, &vtable, &offset))
+ {
+ target = gimple_get_virt_method_for_vtable (ie->indirect_info->otr_token,
+ vtable, offset);
+ if (target)
+ {
+ if ((TREE_CODE (TREE_TYPE (target)) == FUNCTION_TYPE
+ && DECL_FUNCTION_CODE (target) == BUILT_IN_UNREACHABLE)
+ || !possible_polymorphic_call_target_p
+ (ie, cgraph_node::get (target)))
+ target = ipa_impossible_devirt_target (ie, target);
+ return target;
+ }
+ }
+ }
+
+ /* Did we work out BINFO via type propagation? */
if (!t && known_binfos.length () > (unsigned int) param_index)
t = known_binfos[param_index];
+ /* Or do we know the constant value of pointer? */
+ if (!t)
+ t = known_vals[param_index];
if (!t)
return NULL_TREE;
if (TREE_CODE (t) != TREE_BINFO)
{
- tree binfo;
- binfo = gimple_extract_devirt_binfo_from_cst (t);
- if (!binfo)
+ ipa_polymorphic_call_context context;
+ vec <cgraph_node *>targets;
+ bool final;
+
+ if (!get_polymorphic_call_info_from_invariant
+ (&context, t, ie->indirect_info->otr_type,
+ anc_offset))
return NULL_TREE;
- binfo = get_binfo_at_offset (binfo, anc_offset, otr_type);
- if (!binfo)
+ targets = possible_polymorphic_call_targets
+ (ie->indirect_info->otr_type,
+ ie->indirect_info->otr_token,
+ context, &final);
+ if (!final || targets.length () > 1)
return NULL_TREE;
- return gimple_get_virt_method_for_binfo (token, binfo);
+ if (targets.length () == 1)
+ target = targets[0]->decl;
+ else
+ target = ipa_impossible_devirt_target (ie, NULL_TREE);
}
else
{
binfo = get_binfo_at_offset (t, anc_offset, otr_type);
if (!binfo)
return NULL_TREE;
- return gimple_get_virt_method_for_binfo (token, binfo);
+ target = gimple_get_virt_method_for_binfo (token, binfo);
}
+
+ if (target && !possible_polymorphic_call_target_p (ie,
+ cgraph_node::get (target)))
+ target = ipa_impossible_devirt_target (ie, target);
+
+ return target;
+}
+
+
+/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
+ (which can contain both constants and binfos), KNOWN_BINFOS (which can be
+ NULL) or KNOWN_AGGS (which also can be NULL) return the destination. */
+
+tree
+ipa_get_indirect_edge_target (struct cgraph_edge *ie,
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs)
+{
+ return ipa_get_indirect_edge_target_1 (ie, known_vals, known_binfos,
+ known_aggs, NULL);
}
/* Calculate devirtualization time bonus for NODE, assuming we know KNOWN_CSTS
static int
devirtualization_time_bonus (struct cgraph_node *node,
vec<tree> known_csts,
- vec<tree> known_binfos)
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs)
{
struct cgraph_edge *ie;
int res = 0;
{
struct cgraph_node *callee;
struct inline_summary *isummary;
+ enum availability avail;
tree target;
target = ipa_get_indirect_edge_target (ie, known_csts, known_binfos,
- vNULL);
+ known_aggs);
if (!target)
continue;
/* Only bare minimum benefit for clearly un-inlineable targets. */
res += 1;
- callee = cgraph_get_node (target);
- if (!callee || !callee->analyzed)
+ callee = cgraph_node::get (target);
+ if (!callee || !callee->definition)
+ continue;
+ callee = callee->function_symbol (&avail);
+ if (avail < AVAIL_AVAILABLE)
continue;
isummary = inline_summary (callee);
if (!isummary->inlinable)
else if (isummary->size <= MAX_INLINE_INSNS_AUTO / 2)
res += 15;
else if (isummary->size <= MAX_INLINE_INSNS_AUTO
- || DECL_DECLARED_INLINE_P (callee->symbol.decl))
+ || DECL_DECLARED_INLINE_P (callee->decl))
res += 7;
}
static int
hint_time_bonus (inline_hints hints)
{
+ int result = 0;
if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
- return PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
- return 0;
+ result += PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
+ if (hints & INLINE_HINT_array_index)
+ result += PARAM_VALUE (PARAM_IPA_CP_ARRAY_INDEX_HINT_BONUS);
+ return result;
}
/* Return true if cloning NODE is a good idea, given the estimated TIME_BENEFIT
{
if (time_benefit == 0
|| !flag_ipa_cp_clone
- || !optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->symbol.decl)))
+ || !optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->decl)))
return false;
gcc_assert (size_cost > 0);
if (max_count)
{
int factor = (count_sum * 1000) / max_count;
- HOST_WIDEST_INT evaluation = (((HOST_WIDEST_INT) time_benefit * factor)
+ int64_t evaluation = (((int64_t) time_benefit * factor)
/ size_cost);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
"size: %i, count_sum: " HOST_WIDE_INT_PRINT_DEC
- ") -> evaluation: " HOST_WIDEST_INT_PRINT_DEC
+ ") -> evaluation: " "%"PRId64
", threshold: %i\n",
time_benefit, size_cost, (HOST_WIDE_INT) count_sum,
evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
}
else
{
- HOST_WIDEST_INT evaluation = (((HOST_WIDEST_INT) time_benefit * freq_sum)
+ int64_t evaluation = (((int64_t) time_benefit * freq_sum)
/ size_cost);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
"size: %i, freq_sum: %i) -> evaluation: "
- HOST_WIDEST_INT_PRINT_DEC ", threshold: %i\n",
+ "%"PRId64 ", threshold: %i\n",
time_benefit, size_cost, freq_sum, evaluation,
PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
/* Return all context independent values from aggregate lattices in PLATS in a
vector. Return NULL if there are none. */
-static vec<ipa_agg_jf_item_t, va_gc> *
+static vec<ipa_agg_jf_item, va_gc> *
context_independent_aggregate_values (struct ipcp_param_lattices *plats)
{
- vec<ipa_agg_jf_item_t, va_gc> *res = NULL;
+ vec<ipa_agg_jf_item, va_gc> *res = NULL;
if (plats->aggs_bottom
|| plats->aggs_contain_variable
gather_context_independent_values (struct ipa_node_params *info,
vec<tree> *known_csts,
vec<tree> *known_binfos,
- vec<ipa_agg_jump_function_t> *known_aggs,
+ vec<ipa_agg_jump_function> *known_aggs,
int *removable_params_cost)
{
int i, count = ipa_get_param_count (info);
(*known_csts)[i] = val->value;
if (removable_params_cost)
*removable_params_cost
- += estimate_move_cost (TREE_TYPE (val->value));
+ += estimate_move_cost (TREE_TYPE (val->value), false);
ret = true;
}
else if (plats->virt_call)
}
else if (removable_params_cost
&& !ipa_is_param_used (info, i))
- *removable_params_cost
- += estimate_move_cost (TREE_TYPE (ipa_get_param (info, i)));
+ *removable_params_cost += ipa_get_param_move_cost (info, i);
}
else if (removable_params_cost
&& !ipa_is_param_used (info, i))
*removable_params_cost
- += estimate_move_cost (TREE_TYPE (ipa_get_param (info, i)));
+ += ipa_get_param_move_cost (info, i);
if (known_aggs)
{
- vec<ipa_agg_jf_item_t, va_gc> *agg_items;
+ vec<ipa_agg_jf_item, va_gc> *agg_items;
struct ipa_agg_jump_function *ajf;
agg_items = context_independent_aggregate_values (plats);
issue. */
static vec<ipa_agg_jump_function_p>
-agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function_t> known_aggs)
+agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function> known_aggs)
{
vec<ipa_agg_jump_function_p> ret;
struct ipa_agg_jump_function *ajf;
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs;
+ vec<ipa_agg_jump_function> known_aggs;
vec<ipa_agg_jump_function_p> known_aggs_ptrs;
bool always_const;
int base_time = inline_summary (node)->time;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nEstimating effects for %s/%i, base_time: %i.\n",
- cgraph_node_name (node), node->uid, base_time);
+ node->name (), node->order, base_time);
always_const = gather_context_independent_values (info, &known_csts,
&known_binfos, &known_aggs,
int time, size;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (node, gather_caller_stats, &stats, false);
+ node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
estimate_ipcp_clone_size_and_time (node, known_csts, known_binfos,
known_aggs_ptrs, &size, &time, &hints);
- time -= devirtualization_time_bonus (node, known_csts, known_binfos);
+ time -= devirtualization_time_bonus (node, known_csts, known_binfos,
+ known_aggs_ptrs);
time -= hint_time_bonus (hints);
time -= removable_params_cost;
size -= stats.n_calls * removable_params_cost;
"time_benefit: %i\n", size, base_time - time);
if (size <= 0
- || cgraph_will_be_removed_from_program_if_no_direct_calls (node))
+ || node->will_be_removed_from_program_if_no_direct_calls_p ())
{
info->do_clone_for_all_contexts = true;
base_time = time;
{
known_csts[i] = val->value;
known_binfos[i] = NULL_TREE;
- emc = estimate_move_cost (TREE_TYPE (val->value));
+ emc = estimate_move_cost (TREE_TYPE (val->value), true);
}
else if (plats->virt_call)
{
known_aggs_ptrs, &size, &time,
&hints);
time_benefit = base_time - time
- + devirtualization_time_bonus (node, known_csts, known_binfos)
+ + devirtualization_time_bonus (node, known_csts, known_binfos,
+ known_aggs_ptrs)
+ hint_time_bonus (hints)
+ removable_params_cost + emc;
{
fprintf (dump_file, " - estimates for value ");
print_ipcp_constant_value (dump_file, val->value);
- fprintf (dump_file, " for parameter ");
- print_generic_expr (dump_file, ipa_get_param (info, i), 0);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, info, i);
fprintf (dump_file, ": time_benefit: %i, size: %i\n",
time_benefit, size);
}
known_aggs_ptrs, &size, &time,
&hints);
time_benefit = base_time - time
- + devirtualization_time_bonus (node, known_csts, known_binfos)
+ + devirtualization_time_bonus (node, known_csts, known_binfos,
+ known_aggs_ptrs)
+ hint_time_bonus (hints);
gcc_checking_assert (size >=0);
if (size == 0)
{
fprintf (dump_file, " - estimates for value ");
print_ipcp_constant_value (dump_file, val->value);
- fprintf (dump_file, " for parameter ");
- print_generic_expr (dump_file, ipa_get_param (info, i), 0);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, info, i);
fprintf (dump_file, "[%soffset: " HOST_WIDE_INT_PRINT_DEC
"]: time_benefit: %i, size: %i\n",
plats->aggs_by_ref ? "ref " : "",
for (i = topo->nnodes - 1; i >= 0; i--)
{
+ unsigned j;
struct cgraph_node *v, *node = topo->order[i];
- struct ipa_dfs_info *node_dfs_info;
-
- if (!cgraph_function_with_gimple_body_p (node))
- continue;
+ vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (node);
- node_dfs_info = (struct ipa_dfs_info *) node->symbol.aux;
/* First, iteratively propagate within the strongly connected component
until all lattices stabilize. */
- v = node_dfs_info->next_cycle;
- while (v)
- {
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ if (v->has_gimple_body_p ())
push_node_to_stack (topo, v);
- v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle;
- }
- v = node;
+ v = pop_node_from_stack (topo);
while (v)
{
struct cgraph_edge *cs;
for (cs = v->callees; cs; cs = cs->next_callee)
- if (edge_within_scc (cs)
+ if (ipa_edge_within_scc (cs)
&& propagate_constants_accross_call (cs))
push_node_to_stack (topo, cs->callee);
v = pop_node_from_stack (topo);
/* Afterwards, propagate along edges leading out of the SCC, calculates
the local effects of the discovered constants and all valid values to
their topological sort. */
- v = node;
- while (v)
- {
- struct cgraph_edge *cs;
-
- estimate_local_effects (v);
- add_all_node_vals_to_toposort (v);
- for (cs = v->callees; cs; cs = cs->next_callee)
- if (!edge_within_scc (cs))
- propagate_constants_accross_call (cs);
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ if (v->has_gimple_body_p ())
+ {
+ struct cgraph_edge *cs;
- v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle;
- }
+ estimate_local_effects (v);
+ add_all_node_vals_to_toposort (v);
+ for (cs = v->callees; cs; cs = cs->next_callee)
+ if (!ipa_edge_within_scc (cs))
+ propagate_constants_accross_call (cs);
+ }
+ cycle_nodes.release ();
}
}
for (val = base; val; val = val->scc_next)
for (src = val->sources; src; src = src->next)
if (src->val
- && cgraph_maybe_hot_edge_p (src->cs))
+ && src->cs->maybe_hot_p ())
{
src->val->prop_time_benefit = safe_add (time,
src->val->prop_time_benefit);
struct ipa_node_params *info = IPA_NODE_REF (node);
determine_versionability (node);
- if (cgraph_function_with_gimple_body_p (node))
+ if (node->has_gimple_body_p ())
{
info->lattices = XCNEWVEC (struct ipcp_param_lattices,
ipa_get_param_count (info));
initialize_node_lattices (node);
}
+ if (node->definition && !node->alias)
+ overall_size += inline_summary (node)->self_size;
if (node->count > max_count)
max_count = node->count;
- overall_size += inline_summary (node)->self_size;
}
max_new_size = overall_size;
static void
ipcp_discover_new_direct_edges (struct cgraph_node *node,
- vec<tree> known_vals)
+ vec<tree> known_vals,
+ struct ipa_agg_replacement_value *aggvals)
{
struct cgraph_edge *ie, *next_ie;
bool found = false;
tree target;
next_ie = ie->next_callee;
- target = ipa_get_indirect_edge_target (ie, known_vals, vNULL, vNULL);
+ target = ipa_get_indirect_edge_target_1 (ie, known_vals, vNULL, vNULL,
+ aggvals);
if (target)
{
- ipa_make_edge_direct_to_target (ie, target);
+ bool agg_contents = ie->indirect_info->agg_contents;
+ bool polymorphic = ie->indirect_info->polymorphic;
+ int param_index = ie->indirect_info->param_index;
+ struct cgraph_edge *cs = ipa_make_edge_direct_to_target (ie, target);
found = true;
+
+ if (cs && !agg_contents && !polymorphic)
+ {
+ struct ipa_node_params *info = IPA_NODE_REF (node);
+ int c = ipa_get_controlled_uses (info, param_index);
+ if (c != IPA_UNDESCRIBED_USE)
+ {
+ struct ipa_ref *to_del;
+
+ c--;
+ ipa_set_controlled_uses (info, param_index, c);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " controlled uses count of param "
+ "%i bumped down to %i\n", param_index, c);
+ if (c == 0
+ && (to_del = node->find_reference (cs->callee, NULL, 0)))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " and even removing its "
+ "cloning-created reference\n");
+ to_del->remove_reference ();
+ }
+ }
+ }
}
}
/* Turning calls to direct calls will improve overall summary. */
/* Vector of pointers which for linked lists of clones of an original crgaph
edge. */
-static vec<cgraph_edge_p> next_edge_clone;
+static vec<cgraph_edge *> next_edge_clone;
+static vec<cgraph_edge *> prev_edge_clone;
static inline void
-grow_next_edge_clone_vector (void)
+grow_edge_clone_vectors (void)
{
if (next_edge_clone.length ()
- <= (unsigned) cgraph_edge_max_uid)
- next_edge_clone.safe_grow_cleared (cgraph_edge_max_uid + 1);
+ <= (unsigned) symtab->edges_max_uid)
+ next_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
+ if (prev_edge_clone.length ()
+ <= (unsigned) symtab->edges_max_uid)
+ prev_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
}
/* Edge duplication hook to grow the appropriate linked list in
static void
ipcp_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
- __attribute__((unused)) void *data)
+ void *)
{
- grow_next_edge_clone_vector ();
- next_edge_clone[dst->uid] = next_edge_clone[src->uid];
+ grow_edge_clone_vectors ();
+
+ struct cgraph_edge *old_next = next_edge_clone[src->uid];
+ if (old_next)
+ prev_edge_clone[old_next->uid] = dst;
+ prev_edge_clone[dst->uid] = src;
+
+ next_edge_clone[dst->uid] = old_next;
next_edge_clone[src->uid] = dst;
}
+/* Hook that is called by cgraph.c when an edge is removed. */
+
+static void
+ipcp_edge_removal_hook (struct cgraph_edge *cs, void *)
+{
+ grow_edge_clone_vectors ();
+
+ struct cgraph_edge *prev = prev_edge_clone[cs->uid];
+ struct cgraph_edge *next = next_edge_clone[cs->uid];
+ if (prev)
+ next_edge_clone[prev->uid] = next;
+ if (next)
+ prev_edge_clone[next->uid] = prev;
+}
+
/* See if NODE is a clone with a known aggregate value at a given OFFSET of a
parameter with the given INDEX. */
static tree
-get_clone_agg_value (struct cgraph_node *node, HOST_WIDEST_INT offset,
+get_clone_agg_value (struct cgraph_node *node, HOST_WIDE_INT offset,
int index)
{
struct ipa_agg_replacement_value *aggval;
struct ipcp_value_source *src)
{
struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
- struct ipa_node_params *dst_info = IPA_NODE_REF (cs->callee);
+ cgraph_node *real_dest = cs->callee->function_symbol ();
+ struct ipa_node_params *dst_info = IPA_NODE_REF (real_dest);
if ((dst_info->ipcp_orig_node && !dst_info->is_all_contexts_clone)
|| caller_info->node_dead)
count++;
freq += cs->frequency;
cnt += cs->count;
- hot |= cgraph_maybe_hot_edge_p (cs);
+ hot |= cs->maybe_hot_p ();
}
cs = get_next_cgraph_edge_clone (cs);
}
/* Return a vector of incoming edges that do bring value VAL. It is assumed
their number is known and equal to CALLER_COUNT. */
-static vec<cgraph_edge_p>
+static vec<cgraph_edge *>
gather_edges_for_value (struct ipcp_value *val, int caller_count)
{
struct ipcp_value_source *src;
- vec<cgraph_edge_p> ret;
+ vec<cgraph_edge *> ret;
ret.create (caller_count);
for (src = val->sources; src; src = src->next)
Return it or NULL if for some reason it cannot be created. */
static struct ipa_replace_map *
-get_replacement_map (tree value, tree parm)
+get_replacement_map (struct ipa_node_params *info, tree value, int parm_num)
{
- tree req_type = TREE_TYPE (parm);
struct ipa_replace_map *replace_map;
- if (!useless_type_conversion_p (req_type, TREE_TYPE (value)))
- {
- if (fold_convertible_p (req_type, value))
- value = fold_build1 (NOP_EXPR, req_type, value);
- else if (TYPE_SIZE (req_type) == TYPE_SIZE (TREE_TYPE (value)))
- value = fold_build1 (VIEW_CONVERT_EXPR, req_type, value);
- else
- {
- if (dump_file)
- {
- fprintf (dump_file, " const ");
- print_generic_expr (dump_file, value, 0);
- fprintf (dump_file, " can't be converted to param ");
- print_generic_expr (dump_file, parm, 0);
- fprintf (dump_file, "\n");
- }
- return NULL;
- }
- }
- replace_map = ggc_alloc_ipa_replace_map ();
+ replace_map = ggc_alloc<ipa_replace_map> ();
if (dump_file)
{
- fprintf (dump_file, " replacing param ");
- print_generic_expr (dump_file, parm, 0);
+ fprintf (dump_file, " replacing ");
+ ipa_dump_param (dump_file, info, parm_num);
+
fprintf (dump_file, " with const ");
print_generic_expr (dump_file, value, 0);
fprintf (dump_file, "\n");
}
- replace_map->old_tree = parm;
+ replace_map->old_tree = NULL;
+ replace_map->parm_num = parm_num;
replace_map->new_tree = value;
replace_map->replace_p = true;
replace_map->ref_p = false;
for (cs = new_node->callees; cs ; cs = cs->next_callee)
fprintf (dump_file, " edge to %s has count "
HOST_WIDE_INT_PRINT_DEC "\n",
- cgraph_node_name (cs->callee), (HOST_WIDE_INT) cs->count);
+ cs->callee->name (), (HOST_WIDE_INT) cs->count);
fprintf (dump_file, " setting count of the original node to "
HOST_WIDE_INT_PRINT_DEC "\n", (HOST_WIDE_INT) orig_node->count);
for (cs = orig_node->callees; cs ; cs = cs->next_callee)
fprintf (dump_file, " edge to %s is left with "
HOST_WIDE_INT_PRINT_DEC "\n",
- cgraph_node_name (cs->callee), (HOST_WIDE_INT) cs->count);
+ cs->callee->name (), (HOST_WIDE_INT) cs->count);
}
/* After a specialized NEW_NODE version of ORIG_NODE has been created, update
return;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (orig_node, gather_caller_stats, &stats, false);
+ orig_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
orig_sum = stats.count_sum;
init_caller_stats (&stats);
- cgraph_for_node_and_aliases (new_node, gather_caller_stats, &stats, false);
+ new_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
+ false);
new_sum = stats.count_sum;
if (orig_node_count < orig_sum + new_sum)
fprintf (dump_file, " Problem: node %s/%i has too low count "
HOST_WIDE_INT_PRINT_DEC " while the sum of incoming "
"counts is " HOST_WIDE_INT_PRINT_DEC "\n",
- cgraph_node_name (orig_node), orig_node->uid,
+ orig_node->name (), orig_node->order,
(HOST_WIDE_INT) orig_node_count,
(HOST_WIDE_INT) (orig_sum + new_sum));
for (cs = new_node->callees; cs ; cs = cs->next_callee)
if (cs->frequency)
- cs->count = cs->count * (new_sum * REG_BR_PROB_BASE
- / orig_node_count) / REG_BR_PROB_BASE;
+ cs->count = apply_probability (cs->count,
+ GCOV_COMPUTE_SCALE (new_sum,
+ orig_node_count));
else
cs->count = 0;
for (cs = orig_node->callees; cs ; cs = cs->next_callee)
- cs->count = cs->count * (remainder * REG_BR_PROB_BASE
- / orig_node_count) / REG_BR_PROB_BASE;
+ cs->count = apply_probability (cs->count,
+ GCOV_COMPUTE_SCALE (remainder,
+ orig_node_count));
if (dump_file)
dump_profile_updates (orig_node, new_node);
for (cs = new_node->callees; cs ; cs = cs->next_callee)
if (cs->frequency)
- cs->count += cs->count * redirected_sum / new_node_count;
+ cs->count += apply_probability (cs->count,
+ GCOV_COMPUTE_SCALE (redirected_sum,
+ new_node_count));
else
cs->count = 0;
for (cs = orig_node->callees; cs ; cs = cs->next_callee)
{
- gcov_type dec = cs->count * (redirected_sum * REG_BR_PROB_BASE
- / orig_node_count) / REG_BR_PROB_BASE;
+ gcov_type dec = apply_probability (cs->count,
+ GCOV_COMPUTE_SCALE (redirected_sum,
+ orig_node_count));
if (dec < cs->count)
cs->count -= dec;
else
create_specialized_node (struct cgraph_node *node,
vec<tree> known_vals,
struct ipa_agg_replacement_value *aggvals,
- vec<cgraph_edge_p> callers)
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
- vec<ipa_replace_map_p, va_gc> *replace_trees = NULL;
+ vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
+ struct ipa_agg_replacement_value *av;
struct cgraph_node *new_node;
int i, count = ipa_get_param_count (info);
bitmap args_to_skip;
{
struct ipa_replace_map *replace_map;
- replace_map = get_replacement_map (t, ipa_get_param (info, i));
+ replace_map = get_replacement_map (info, t, i);
if (replace_map)
vec_safe_push (replace_trees, replace_map);
}
}
- new_node = cgraph_create_virtual_clone (node, callers, replace_trees,
- args_to_skip, "constprop");
+ new_node = node->create_virtual_clone (callers, replace_trees,
+ args_to_skip, "constprop");
ipa_set_node_agg_value_chain (new_node, aggvals);
+ for (av = aggvals; av; av = av->next)
+ new_node->maybe_create_reference (av->value, IPA_REF_ADDR, NULL);
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " the new node is %s/%i.\n",
- cgraph_node_name (new_node), new_node->uid);
+ new_node->name (), new_node->order);
if (aggvals)
ipa_dump_agg_replacement_values (dump_file, aggvals);
}
- gcc_checking_assert (ipa_node_params_vector.exists ()
- && (ipa_node_params_vector.length ()
- > (unsigned) cgraph_max_uid));
+ ipa_check_create_node_params ();
update_profiling_info (node, new_node);
new_info = IPA_NODE_REF (new_node);
new_info->ipcp_orig_node = node;
new_info->known_vals = known_vals;
- ipcp_discover_new_direct_edges (new_node, known_vals);
+ ipcp_discover_new_direct_edges (new_node, known_vals, aggvals);
callers.release ();
return new_node;
static void
find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
vec<tree> known_vals,
- vec<cgraph_edge_p> callers)
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
{
fprintf (dump_file, " adding an extra known scalar value ");
print_ipcp_constant_value (dump_file, newval);
- fprintf (dump_file, " for parameter ");
- print_generic_expr (dump_file, ipa_get_param (info, i), 0);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, info, i);
fprintf (dump_file, "\n");
}
/* Go through PLATS and create a vector of values consisting of values and
offsets (minus OFFSET) of lattices that contain only a single value. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
return vNULL;
static void
intersect_with_plats (struct ipcp_param_lattices *plats,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipcp_agg_lattice *aglat;
/* Copy agggregate replacement values of NODE (which is an IPA-CP clone) to the
vector result while subtracting OFFSET from the individual value offsets. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
agg_replacements_to_vector (struct cgraph_node *node, int index,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *av;
- vec<ipa_agg_jf_item_t> res = vNULL;
+ vec<ipa_agg_jf_item> res = vNULL;
for (av = ipa_get_agg_replacements_for_node (node); av; av = av->next)
if (av->index == index
static void
intersect_with_agg_replacements (struct cgraph_node *node, int index,
- vec<ipa_agg_jf_item_t> *inter,
+ vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *srcvals;
copy all incoming values to it. If we determine we ended up with no values
whatsoever, return a released vector. */
-static vec<ipa_agg_jf_item_t>
+static vec<ipa_agg_jf_item>
intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
- vec<ipa_agg_jf_item_t> inter)
+ vec<ipa_agg_jf_item> inter)
{
struct ipa_jump_func *jfunc;
jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), index);
intersect_with_agg_replacements (cs->caller, src_idx,
&inter, 0);
}
+ else
+ {
+ inter.release ();
+ return vNULL;
+ }
}
else
{
else
intersect_with_plats (src_plats, &inter, 0);
}
+ else
+ {
+ inter.release ();
+ return vNULL;
+ }
}
}
else if (jfunc->type == IPA_JF_ANCESTOR
}
else
{
- inter.release();
- return vec<ipa_agg_jf_item_t>();
+ inter.release ();
+ return vec<ipa_agg_jf_item>();
}
return inter;
}
static struct ipa_agg_replacement_value *
find_aggregate_values_for_callers_subset (struct cgraph_node *node,
- vec<cgraph_edge_p> callers)
+ vec<cgraph_edge *> callers)
{
struct ipa_node_params *dest_info = IPA_NODE_REF (node);
- struct ipa_agg_replacement_value *res = NULL;
+ struct ipa_agg_replacement_value *res;
+ struct ipa_agg_replacement_value **tail = &res;
struct cgraph_edge *cs;
int i, j, count = ipa_get_param_count (dest_info);
for (i = 0; i < count ; i++)
{
struct cgraph_edge *cs;
- vec<ipa_agg_jf_item_t> inter = vNULL;
+ vec<ipa_agg_jf_item> inter = vNULL;
struct ipa_agg_jf_item *item;
+ struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
int j;
/* Among other things, the following check should deal with all by_ref
mismatches. */
- if (ipa_get_parm_lattices (dest_info, i)->aggs_bottom)
+ if (plats->aggs_bottom)
continue;
FOR_EACH_VEC_ELT (callers, j, cs)
if (!item->value)
continue;
- v = ggc_alloc_ipa_agg_replacement_value ();
+ v = ggc_alloc<ipa_agg_replacement_value> ();
v->index = i;
v->offset = item->offset;
v->value = item->value;
- v->next = res;
- res = v;
+ v->by_ref = plats->aggs_by_ref;
+ *tail = v;
+ tail = &v->next;
}
next_param:
if (inter.exists ())
inter.release ();
}
+ *tail = NULL;
return res;
}
/* Turn KNOWN_AGGS into a list of aggreate replacement values. */
static struct ipa_agg_replacement_value *
-known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function_t> known_aggs)
+known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function> known_aggs)
{
- struct ipa_agg_replacement_value *res = NULL;
+ struct ipa_agg_replacement_value *res;
+ struct ipa_agg_replacement_value **tail = &res;
struct ipa_agg_jump_function *aggjf;
struct ipa_agg_jf_item *item;
int i, j;
FOR_EACH_VEC_SAFE_ELT (aggjf->items, j, item)
{
struct ipa_agg_replacement_value *v;
- v = ggc_alloc_ipa_agg_replacement_value ();
+ v = ggc_alloc<ipa_agg_replacement_value> ();
v->index = i;
v->offset = item->offset;
v->value = item->value;
- v->next = res;
- res = v;
+ v->by_ref = aggjf->by_ref;
+ *tail = v;
+ tail = &v->next;
}
+ *tail = NULL;
return res;
}
struct cgraph_node *node)
{
struct ipa_node_params *orig_caller_info = IPA_NODE_REF (cs->caller);
+ struct ipa_node_params *orig_node_info;
struct ipa_agg_replacement_value *aggval;
int i, ec, count;
if (aggval->index >= ec)
return false;
+ orig_node_info = IPA_NODE_REF (IPA_NODE_REF (node)->ipcp_orig_node);
if (orig_caller_info->ipcp_orig_node)
orig_caller_info = IPA_NODE_REF (orig_caller_info->ipcp_orig_node);
for (i = 0; i < count; i++)
{
- static vec<ipa_agg_jf_item_t> values = vec<ipa_agg_jf_item_t>();
+ static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
struct ipcp_param_lattices *plats;
bool interesting = false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (!interesting)
continue;
- plats = ipa_get_parm_lattices (orig_caller_info, aggval->index);
+ plats = ipa_get_parm_lattices (orig_node_info, aggval->index);
if (plats->aggs_bottom)
return false;
values = intersect_aggregates_with_edge (cs, i, values);
- if (!values.exists())
+ if (!values.exists ())
return false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (item->value
&& item->offset == av->offset
&& values_equal_for_ipcp_p (item->value, av->value))
- found = true;
+ {
+ found = true;
+ break;
+ }
if (!found)
{
- values.release();
+ values.release ();
return false;
}
}
while (cs)
{
enum availability availability;
- struct cgraph_node *dst = cgraph_function_node (cs->callee,
- &availability);
+ struct cgraph_node *dst = cs->callee->function_symbol (&availability);
if ((dst == node || IPA_NODE_REF (dst)->is_all_contexts_clone)
- && availability > AVAIL_OVERWRITABLE
+ && availability > AVAIL_INTERPOSABLE
&& cgraph_edge_brings_value_p (cs, src))
{
if (cgraph_edge_brings_all_scalars_for_node (cs, val->spec_node)
if (dump_file)
fprintf (dump_file, " - adding an extra caller %s/%i"
" of %s/%i\n",
- xstrdup (cgraph_node_name (cs->caller)),
- cs->caller->uid,
- xstrdup (cgraph_node_name (val->spec_node)),
- val->spec_node->uid);
+ xstrdup (cs->caller->name ()),
+ cs->caller->order,
+ xstrdup (val->spec_node->name ()),
+ val->spec_node->order);
- cgraph_redirect_edge_callee (cs, val->spec_node);
+ cs->redirect_callee (val->spec_node);
redirected_sum += cs->count;
}
}
struct ipa_agg_replacement_value *aggvals;
int freq_sum, caller_count;
gcov_type count_sum;
- vec<cgraph_edge_p> callers;
+ vec<cgraph_edge *> callers;
vec<tree> kv;
if (val->spec_node)
{
fprintf (dump_file, " - considering value ");
print_ipcp_constant_value (dump_file, val->value);
- fprintf (dump_file, " for parameter ");
- print_generic_expr (dump_file, ipa_get_param (IPA_NODE_REF (node),
- index), 0);
+ fprintf (dump_file, " for ");
+ ipa_dump_param (dump_file, IPA_NODE_REF (node), index);
if (offset != -1)
fprintf (dump_file, ", offset: " HOST_WIDE_INT_PRINT_DEC, offset);
fprintf (dump_file, " (caller_count: %i)\n", caller_count);
if (dump_file)
fprintf (dump_file, " Creating a specialized node of %s/%i.\n",
- cgraph_node_name (node), node->uid);
+ node->name (), node->order);
callers = gather_edges_for_value (val, caller_count);
kv = known_csts.copy ();
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts, known_binfos;
- vec<ipa_agg_jump_function_t> known_aggs = vNULL;
+ vec<ipa_agg_jump_function> known_aggs = vNULL;
bool ret = false;
if (count == 0)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nEvaluating opportunities for %s/%i.\n",
- cgraph_node_name (node), node->uid);
+ node->name (), node->order);
gather_context_independent_values (info, &known_csts, &known_binfos,
info->do_clone_for_all_contexts ? &known_aggs
if (info->do_clone_for_all_contexts)
{
struct cgraph_node *clone;
- vec<cgraph_edge_p> callers;
+ vec<cgraph_edge *> callers;
if (dump_file)
fprintf (dump_file, " - Creating a specialized node of %s/%i "
- "for all known contexts.\n", cgraph_node_name (node),
- node->uid);
+ "for all known contexts.\n", node->name (),
+ node->order);
- callers = collect_callers_of_node (node);
+ callers = node->collect_callers ();
move_binfos_to_values (known_csts, known_binfos);
clone = create_specialized_node (node, known_csts,
known_aggs_to_agg_replacement_list (known_aggs),
info = IPA_NODE_REF (node);
info->do_clone_for_all_contexts = false;
IPA_NODE_REF (clone)->is_all_contexts_clone = true;
+ for (i = 0; i < count ; i++)
+ vec_free (known_aggs[i].items);
+ known_aggs.release ();
ret = true;
}
else
struct cgraph_edge *cs;
for (cs = node->callees; cs; cs = cs->next_callee)
- if (edge_within_scc (cs))
+ if (ipa_edge_within_scc (cs))
{
struct cgraph_node *callee;
struct ipa_node_params *info;
- callee = cgraph_function_node (cs->callee, NULL);
+ callee = cs->callee->function_symbol (NULL);
info = IPA_NODE_REF (callee);
if (info->node_dead)
for (cs = node->callers; cs; cs = cs->next_caller)
if (cs->caller->thunk.thunk_p
- && cgraph_for_node_and_aliases (cs->caller,
- has_undead_caller_from_outside_scc_p,
- NULL, true))
+ && cs->caller->call_for_symbol_thunks_and_aliases
+ (has_undead_caller_from_outside_scc_p, NULL, true))
return true;
- else if (!edge_within_scc (cs)
+ else if (!ipa_edge_within_scc (cs)
&& !IPA_NODE_REF (cs->caller)->node_dead)
return true;
return false;
identify_dead_nodes (struct cgraph_node *node)
{
struct cgraph_node *v;
- for (v = node; v ; v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle)
- if (cgraph_will_be_removed_from_program_if_no_direct_calls (v)
- && !cgraph_for_node_and_aliases (v,
- has_undead_caller_from_outside_scc_p,
- NULL, true))
+ for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
+ if (v->will_be_removed_from_program_if_no_direct_calls_p ()
+ && !v->call_for_symbol_thunks_and_aliases
+ (has_undead_caller_from_outside_scc_p, NULL, true))
IPA_NODE_REF (v)->node_dead = 1;
- for (v = node; v ; v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle)
+ for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
if (!IPA_NODE_REF (v)->node_dead)
spread_undeadness (v);
if (dump_file && (dump_flags & TDF_DETAILS))
{
- for (v = node; v ; v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle)
+ for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
if (IPA_NODE_REF (v)->node_dead)
fprintf (dump_file, " Marking node as dead: %s/%i.\n",
- cgraph_node_name (v), v->uid);
+ v->name (), v->order);
}
}
{
struct cgraph_node *v;
iterate = false;
- for (v = node; v ; v = ((struct ipa_dfs_info *) v->symbol.aux)->next_cycle)
- if (cgraph_function_with_gimple_body_p (v)
+ for (v = node; v ; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
+ if (v->has_gimple_body_p ()
&& ipcp_versionable_function_p (v))
iterate |= decide_whether_version_node (v);
ipcp_driver (void)
{
struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
+ struct cgraph_edge_hook_list *edge_removal_hook_holder;
struct topo_info topo;
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
- grow_next_edge_clone_vector ();
+ grow_edge_clone_vectors ();
edge_duplication_hook_holder =
- cgraph_add_edge_duplication_hook (&ipcp_edge_duplication_hook, NULL);
+ symtab->add_edge_duplication_hook (&ipcp_edge_duplication_hook, NULL);
+ edge_removal_hook_holder =
+ symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
+
ipcp_values_pool = create_alloc_pool ("IPA-CP values",
sizeof (struct ipcp_value), 32);
ipcp_sources_pool = create_alloc_pool ("IPA-CP value sources",
/* Free all IPCP structures. */
free_toporder_info (&topo);
next_edge_clone.release ();
- cgraph_remove_edge_duplication_hook (edge_duplication_hook_holder);
+ symtab->remove_edge_removal_hook (edge_removal_hook_holder);
+ symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
ipa_free_all_structures_after_ipa_cp ();
if (dump_file)
fprintf (dump_file, "\nIPA constant propagation end\n");
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
node->local.versionable
- = tree_versionable_function_p (node->symbol.decl);
+ = tree_versionable_function_p (node->decl);
ipa_analyze_node (node);
}
}
ipa_prop_read_jump_functions ();
}
-/* Gate for IPCP optimization. */
+namespace {
-static bool
-cgraph_gate_cp (void)
-{
- /* FIXME: We should remove the optimize check after we ensure we never run
- IPA passes when not optimizing. */
- return flag_ipa_cp && optimize;
-}
-
-struct ipa_opt_pass_d pass_ipa_cp =
-{
- {
- IPA_PASS,
- "cp", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- cgraph_gate_cp, /* gate */
- ipcp_driver, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_IPA_CONSTANT_PROP, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_symtab |
- TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
- },
- ipcp_generate_summary, /* generate_summary */
- ipcp_write_summary, /* write_summary */
- ipcp_read_summary, /* read_summary */
- ipa_prop_write_all_agg_replacement, /* write_optimization_summary */
- ipa_prop_read_all_agg_replacement, /* read_optimization_summary */
- NULL, /* stmt_fixup */
- 0, /* TODOs */
- ipcp_transform_function, /* function_transform */
- NULL, /* variable_transform */
+const pass_data pass_data_ipa_cp =
+{
+ IPA_PASS, /* type */
+ "cp", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_IPA_CONSTANT_PROP, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_dump_symtab | TODO_remove_functions ), /* todo_flags_finish */
};
+
+class pass_ipa_cp : public ipa_opt_pass_d
+{
+public:
+ pass_ipa_cp (gcc::context *ctxt)
+ : ipa_opt_pass_d (pass_data_ipa_cp, ctxt,
+ ipcp_generate_summary, /* generate_summary */
+ ipcp_write_summary, /* write_summary */
+ ipcp_read_summary, /* read_summary */
+ ipa_prop_write_all_agg_replacement, /*
+ write_optimization_summary */
+ ipa_prop_read_all_agg_replacement, /*
+ read_optimization_summary */
+ NULL, /* stmt_fixup */
+ 0, /* function_transform_todo_flags_start */
+ ipcp_transform_function, /* function_transform */
+ NULL) /* variable_transform */
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* FIXME: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ return flag_ipa_cp && optimize;
+ }
+
+ virtual unsigned int execute (function *) { return ipcp_driver (); }
+
+}; // class pass_ipa_cp
+
+} // anon namespace
+
+ipa_opt_pass_d *
+make_pass_ipa_cp (gcc::context *ctxt)
+{
+ return new pass_ipa_cp (ctxt);
+}