{
*tp = build1 (INDIRECT_REF, type, new);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
+ TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
}
}
*walk_subtrees = 0;
later */
static basic_block
-copy_bb (copy_body_data *id, basic_block bb, int frequency_scale, int count_scale)
+copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
+ gcov_type count_scale)
{
block_stmt_iterator bsi, copy_bsi;
basic_block copy_basic_block;
pointer_set_insert (id->statements_to_fold, stmt);
/* We're duplicating a CALL_EXPR. Find any corresponding
callgraph edges and update or duplicate them. */
- if (call && (decl = get_callee_fndecl (call)))
+ if (call)
{
struct cgraph_node *node;
struct cgraph_edge *edge;
edge = cgraph_edge (id->src_node, orig_stmt);
if (edge)
cgraph_clone_edge (edge, id->dst_node, stmt,
- REG_BR_PROB_BASE, 1, edge->frequency, true);
+ REG_BR_PROB_BASE, 1,
+ edge->frequency, true);
break;
case CB_CGE_MOVE_CLONES:
node = node->next_clone)
{
edge = cgraph_edge (node, orig_stmt);
- gcc_assert (edge);
- cgraph_set_call_stmt (edge, stmt);
+ if (edge)
+ cgraph_set_call_stmt (edge, stmt);
}
/* FALLTHRU */
accordingly. Edges will be taken care of later. Assume aux
pointers point to the copies of each BB. */
static void
-copy_edges_for_bb (basic_block bb, int count_scale, basic_block ret_bb)
+copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb)
{
basic_block new_bb = (basic_block) bb->aux;
edge_iterator ei;
static void
copy_phis_for_bb (basic_block bb, copy_body_data *id)
{
- basic_block new_bb = bb->aux;
+ basic_block const new_bb = (basic_block) bb->aux;
edge_iterator ei;
tree phi;
= new_phi = create_phi_node (new_res, new_bb);
FOR_EACH_EDGE (new_edge, ei, new_bb->preds)
{
- edge old_edge = find_edge (new_edge->src->aux, bb);
+ edge const old_edge = find_edge ((basic_block) new_edge->src->aux, bb);
tree arg = PHI_ARG_DEF_FROM_EDGE (phi, old_edge);
tree new_arg = arg;
struct function *new_cfun
= (struct function *) ggc_alloc_cleared (sizeof (struct function));
struct function *src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
- int count_scale, frequency_scale;
+ gcov_type count_scale, frequency_scale;
if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
count_scale = (REG_BR_PROB_BASE * count
struct function *cfun_to_copy;
basic_block bb;
tree new_fndecl = NULL;
- int count_scale, frequency_scale;
+ gcov_type count_scale, frequency_scale;
int last;
if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
|| !is_gimple_reg (var))
{
tree_stmt_iterator i;
+ struct gimplify_ctx gctx;
- push_gimplify_context ();
+ push_gimplify_context (&gctx);
gimplify_stmt (&init_stmt);
if (gimple_in_ssa_p (cfun)
&& init_stmt && TREE_CODE (init_stmt) == STATEMENT_LIST)
static tree
estimate_num_insns_1 (tree *tp, int *walk_subtrees, void *data)
{
- struct eni_data *d = data;
+ struct eni_data *const d = (struct eni_data *) data;
tree x = *tp;
unsigned cost;
BLOCK_SUPERCONTEXT (new_block) = current_block;
}
+/* Fetch the callee declaration from the call graph edge going from NODE and
+ associated with the call statement STMT. Return NULL_TREE if not found. */
+static tree
+get_indirect_callee_fndecl (struct cgraph_node *node, tree stmt)
+{
+ struct cgraph_edge *cs;
+
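+ /* Look up the call graph edge that corresponds to STMT in NODE. */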
+ cs = cgraph_edge (node, stmt);
+ if (cs)
+ return cs->callee->decl;
+
+ return NULL_TREE;
+}
+
/* If *TP is a CALL_EXPR, replace it with its inline expansion. */
static bool
If we cannot, then there is no hope of inlining the function. */
fn = get_callee_fndecl (t);
if (!fn)
- goto egress;
+ {
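+ /* The callee is not named directly; see whether the call graph
+ recorded a callee for this (originally indirect) call. */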
+ fn = get_indirect_callee_fndecl (id->dst_node, stmt);
+ if (!fn)
+ goto egress;
+ }
/* Turn forward declarations into real ones. */
fn = cgraph_node (fn)->decl;
inlining. */
if (!cgraph_inline_p (cg_edge, &reason))
{
+ /* If this call was originally indirect, we do not want to emit any
+ inlining-related warnings or sorry messages, because nothing ever
+ guaranteed that an indirect call could be inlined. */
+ if (cg_edge->indirect_call)
+ goto egress;
+
if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn))
/* Avoid warnings during early inline pass. */
&& (!flag_unit_at_a_time || cgraph_global_info_ready))
tree prev_fn;
basic_block bb;
int last = n_basic_blocks;
+ struct gimplify_ctx gctx;
+
/* There is no point in performing inlining if errors have already
occurred -- and we might crash if we try to inline invalid
code. */
id.transform_lang_insert_block = NULL;
id.statements_to_fold = pointer_set_create ();
- push_gimplify_context ();
+ push_gimplify_context (&gctx);
/* We make no attempts to keep dominance info up-to-date. */
free_dominance_info (CDI_DOMINATORS);
if (tree_map)
for (i = 0; i < VARRAY_ACTIVE_SIZE (tree_map); i++)
{
- replace_info = VARRAY_GENERIC_PTR (tree_map, i);
+ replace_info = (struct ipa_replace_map *) VARRAY_GENERIC_PTR (tree_map, i);
if (replace_info->replace_p)
insert_decl_map (&id, replace_info->old_tree,
replace_info->new_tree);
return type;
}
+
+/* Return whether it is safe to inline a function even though it may have
+ been compiled with different target-specific or optimization options
+ than its caller. */
+bool
+tree_can_inline_p (tree caller, tree callee)
+{
+ /* Don't inline a function compiled with a lower optimization level than
+ its caller, or with different space constraints (hot/cold functions). */
+ tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller);
+ tree callee_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee);
+
+ if (caller_tree != callee_tree)
+ {
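+ /* A function without its own optimization node falls back to the
+ global default options. */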
+ struct cl_optimization *caller_opt
+ = TREE_OPTIMIZATION ((caller_tree)
+ ? caller_tree
+ : optimization_default_node);
+
+ struct cl_optimization *callee_opt
+ = TREE_OPTIMIZATION ((callee_tree)
+ ? callee_tree
+ : optimization_default_node);
+
+ if ((caller_opt->optimize > callee_opt->optimize)
+ || (caller_opt->optimize_size != callee_opt->optimize_size))
+ return false;
+ }
+
+ /* Allow the backend to decide if inlining is ok. */
+ return targetm.target_option.can_inline_p (caller, callee);
+}