From 2bf86c845a89fce00ccb219adbf6002443b5b1cb Mon Sep 17 00:00:00 2001
From: Jan Hubicka
Date: Tue, 18 Nov 2014 21:44:16 +0100
Subject: [PATCH] ipa-cp.c (ipcp_cloning_candidate_p): Use opt_for_fn.

	* ipa-cp.c (ipcp_cloning_candidate_p): Use opt_for_fn.
	(ipa_value_from_jfunc, ipa_context_from_jfunc): Skip sanity check.
	(ipa_get_indirect_edge_target_1): Use opt_for_fn.
	(good_cloning_opportunity_p): Likewise.
	(ipa-cp gate): Enable ipa-cp with LTO.
	* ipa-profile.c (ipa_propagate_frequency): Use opt_for_fn.
	* ipa.c (symbol_table::remove_unreachable_nodes): Always build type
	inheritance.
	* ipa-inline-transform.c (inline_transform): Check if there are inlines
	to apply even at -O0.
	* cgraphunit.c (cgraph_node::finalize_function): Use opt_for_fn.
	(analyze_functions): Build type inheritance graph.
	* ipa-inline.c (can_inline_edge_p): Use opt_for_fn.
	(want_early_inline_function_p, want_inline_small_function_p):
	Likewise.
	(check_callers): Likewise.
	(edge_badness): Likewise.
	(inline_small_functions): Always be ready for indirect inlining
	to happen.
	(ipa_inline): Always use want_inline_function_to_all_callers_p.
	(early_inline_small_functions): Use opt_for_fn.
	* ipa-inline-analysis.c (estimate_function_body_sizes): Use opt_for_fn.
	(estimate_function_body_sizes): Likewise.
	(compute_inline_parameters): Likewise.
	(estimate_edge_devirt_benefit): Likewise.
	(inline_analyze_function): Likewise.
	* ipa-devirt.c (ipa_devirt): Likewise.
	(gate): Use in_lto_p.
	* ipa-prop.c (ipa_func_spec_opts_forbid_analysis_p): Use opt_for_fn.
	(try_make_edge_direct_virtual_call): Likewise.
	(update_indirect_edges_after_inlining): Likewise.
	(ipa_free_all_structures_after_ipa_cp): Add in_lto_p check.
	* common.opt (findirect-inlining): Turn into optimization.
	* ipa-pure-const.c (add_new_function): Use opt_for_fn.
	(pure_const_generate_summary): Likewise.
	(gate_pure_const): Always enable with in_lto_p.

From-SVN: r217737
---
 gcc/ChangeLog              | 39 +++++++++++++++++++++++++++++++++
 gcc/cgraphunit.c           | 11 +++++-----
 gcc/common.opt             |  2 +-
 gcc/ipa-cp.c               | 22 +++++++------------
 gcc/ipa-devirt.c           |  8 ++++++-
 gcc/ipa-inline-analysis.c  | 13 +++++------
 gcc/ipa-inline-transform.c |  5 ++++-
 gcc/ipa-inline.c           | 44 ++++++++++++++++----------------------
 gcc/ipa-profile.c          |  5 +++--
 gcc/ipa-prop.c             | 15 ++++++-------
 gcc/ipa-pure-const.c       | 10 ++++-----
 gcc/ipa.c                  |  6 +++---
 12 files changed, 108 insertions(+), 72 deletions(-)
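Commentary (outside the applied diff): the recurring idiom in this patch is
replacing tests of global option flags with per-function lookups, so each
function is judged by the options it was compiled with; under LTO, or with a
per-function `optimize' attribute, these can differ from the global settings.
A minimal sketch of the pattern, lifted from the ipa_get_indirect_edge_target_1
hunk below (opt_for_fn is GCC's existing accessor for function-specific
options, falling back to the defaults when the decl carries none):

    /* Before: tests the global flag, which need not match the options
       the caller was actually compiled with.  */
    if (!flag_devirtualize)
      return NULL_TREE;

    /* After: tests the options attached to the caller's decl.  */
    if (!opt_for_fn (ie->caller->decl, flag_devirtualize))
      return NULL_TREE;

The gate changes follow the same logic: with in_lto_p the IPA passes now run
unconditionally, and per-function checks like the one above decide which
functions actually participate.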
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 2fa58caeb38..647902bb678 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,42 @@
+2014-11-18  Jan Hubicka
+
+	* ipa-cp.c (ipcp_cloning_candidate_p): Use opt_for_fn.
+	(ipa_value_from_jfunc, ipa_context_from_jfunc): Skip sanity check.
+	(ipa_get_indirect_edge_target_1): Use opt_for_fn.
+	(good_cloning_opportunity_p): Likewise.
+	(ipa-cp gate): Enable ipa-cp with LTO.
+	* ipa-profile.c (ipa_propagate_frequency): Use opt_for_fn.
+	* ipa.c (symbol_table::remove_unreachable_nodes): Always build type
+	inheritance.
+	* ipa-inline-transform.c (inline_transform): Check if there are inlines
+	to apply even at -O0.
+	* cgraphunit.c (cgraph_node::finalize_function): Use opt_for_fn.
+	(analyze_functions): Build type inheritance graph.
+	* ipa-inline.c (can_inline_edge_p): Use opt_for_fn.
+	(want_early_inline_function_p, want_inline_small_function_p):
+	Likewise.
+	(check_callers): Likewise.
+	(edge_badness): Likewise.
+	(inline_small_functions): Always be ready for indirect inlining
+	to happen.
+	(ipa_inline): Always use want_inline_function_to_all_callers_p.
+	(early_inline_small_functions): Use opt_for_fn.
+	* ipa-inline-analysis.c (estimate_function_body_sizes): Use opt_for_fn.
+	(estimate_function_body_sizes): Likewise.
+	(compute_inline_parameters): Likewise.
+	(estimate_edge_devirt_benefit): Likewise.
+	(inline_analyze_function): Likewise.
+	* ipa-devirt.c (ipa_devirt): Likewise.
+	(gate): Use in_lto_p.
+	* ipa-prop.c (ipa_func_spec_opts_forbid_analysis_p): Use opt_for_fn.
+	(try_make_edge_direct_virtual_call): Likewise.
+	(update_indirect_edges_after_inlining): Likewise.
+	(ipa_free_all_structures_after_ipa_cp): Add in_lto_p check.
+	* common.opt (findirect-inlining): Turn into optimization.
+	* ipa-pure-const.c (add_new_function): Use opt_for_fn.
+	(pure_const_generate_summary): Likewise.
+	(gate_pure_const): Always enable with in_lto_p.
+
 2014-11-18  Maciej W. Rozycki
 
 	* config/mips/mips.md (compression): Add `micromips32' setting.
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 6695ae3ed20..e380e841dfd 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -450,7 +450,7 @@ cgraph_node::finalize_function (tree decl, bool no_collect)
      declared inline and nested functions.  These were optimized out
      in the original implementation and it is unclear whether we want
      to change the behavior here.  */
-  if ((!optimize
+  if ((!opt_for_fn (decl, optimize)
        && !node->cpp_implicit_alias
        && !DECL_DISREGARD_INLINE_LIMITS (decl)
        && !DECL_DECLARED_INLINE_P (decl)
@@ -929,8 +929,7 @@ analyze_functions (void)
   FOR_EACH_SYMBOL (node)
     if (node->cpp_implicit_alias)
       node->fixup_same_cpp_alias_visibility (node->get_alias_target ());
-  if (optimize && flag_devirtualize)
-    build_type_inheritance_graph ();
+  build_type_inheritance_graph ();
 
   /* Analysis adds static variables that in turn adds references to new functions.
      So we need to iterate the process until it stabilize.  */
@@ -1001,7 +1000,8 @@ analyze_functions (void)
 	  for (edge = cnode->callees; edge; edge = edge->next_callee)
 	    if (edge->callee->definition)
 	      enqueue_node (edge->callee);
-	  if (optimize && opt_for_fn (cnode->decl, flag_devirtualize))
+	  if (opt_for_fn (cnode->decl, optimize)
+	      && opt_for_fn (cnode->decl, flag_devirtualize))
 	    {
 	      cgraph_edge *next;
 
@@ -1046,8 +1046,7 @@ analyze_functions (void)
 	  symtab->process_new_functions ();
 	}
     }
-  if (optimize && flag_devirtualize)
-    update_type_inheritance_graph ();
+  update_type_inheritance_graph ();
 
   /* Collect entry points to the unit.  */
   if (symtab->dump_file)
diff --git a/gcc/common.opt b/gcc/common.opt
index 3a6d7e10c2c..41c8d4ed76d 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1392,7 +1392,7 @@ Common Report Var(flag_inhibit_size_directive)
 Do not generate .size directives
 
 findirect-inlining
-Common Report Var(flag_indirect_inlining)
+Common Report Var(flag_indirect_inlining) Optimization
 Perform indirect inlining
 
 ; General flag to enable inlining.  Specifying -fno-inline will disable
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index e598241c01d..f97912ba72d 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -566,7 +566,7 @@ ipcp_cloning_candidate_p (struct cgraph_node *node)
 
   gcc_checking_assert (node->has_gimple_body_p ());
 
-  if (!flag_ipa_cp_clone)
+  if (!opt_for_fn (node->decl, flag_ipa_cp_clone))
     {
       if (dump_file)
 	fprintf (dump_file, "Not considering %s for cloning; "
@@ -902,10 +902,7 @@ ipa_value_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc)
       ipcp_lattice<tree> *lat;
 
       if (!info->lattices)
-	{
-	  gcc_checking_assert (!flag_ipa_cp);
-	  return NULL_TREE;
-	}
+	return NULL_TREE;
       lat = ipa_get_scalar_lat (info, idx);
       if (!lat->is_single_const ())
 	return NULL_TREE;
@@ -967,10 +964,7 @@ ipa_context_from_jfunc (ipa_node_params *info, cgraph_edge *cs, int csidx,
       else
 	{
 	  if (!info->lattices)
-	    {
-	      gcc_checking_assert (!flag_ipa_cp);
-	      return ctx;
-	    }
+	    return ctx;
 	  ipcp_lattice<ipa_polymorphic_call_context> *lat;
 	  lat = ipa_get_poly_ctx_lat (info, srcidx);
 	  if (!lat->is_single_const ())
@@ -1786,7 +1780,7 @@ ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
 	  return NULL_TREE;
 	}
 
-  if (!flag_devirtualize)
+  if (!opt_for_fn (ie->caller->decl, flag_devirtualize))
     return NULL_TREE;
 
   gcc_assert (!ie->indirect_info->agg_contents);
@@ -1884,8 +1878,8 @@ ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
       struct cgraph_node *node;
       if (*speculative)
 	return target;
-      if (!flag_devirtualize_speculatively || ie->speculative
-	  || !ie->maybe_hot_p ())
+      if (!opt_for_fn (ie->caller->decl, flag_devirtualize_speculatively)
+	  || ie->speculative || !ie->maybe_hot_p ())
 	return NULL;
       node = try_speculative_devirtualization (ie->indirect_info->otr_type,
 					       ie->indirect_info->otr_token,
@@ -2003,7 +1997,7 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
 			    int freq_sum, gcov_type count_sum, int size_cost)
 {
   if (time_benefit == 0
-      || !flag_ipa_cp_clone
+      || !opt_for_fn (node->decl, flag_ipa_cp_clone)
       || !optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->decl)))
     return false;
 
@@ -4315,7 +4309,7 @@ public:
   {
     /* FIXME: We should remove the optimize check after we ensure we never run
        IPA passes when not optimizing.  */
-    return flag_ipa_cp && optimize;
+    return (flag_ipa_cp && optimize) || in_lto_p;
   }
 
   virtual unsigned int execute (function *) { return ipcp_driver (); }
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index 5e1f5713122..99475f6b640 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -2818,6 +2818,8 @@ ipa_devirt (void)
   FOR_EACH_DEFINED_FUNCTION (n)
     {
       bool update = false;
+      if (!opt_for_fn (n->decl, flag_devirtualize))
+	continue;
       if (dump_file && n->indirect_calls)
 	fprintf (dump_file, "\n\nProcesing function %s/%i\n",
 		 n->name (), n->order);
@@ -2846,7 +2848,7 @@ ipa_devirt (void)
 
 	  npolymorphic++;
 
-	  if (!flag_devirtualize_speculatively)
+	  if (!opt_for_fn (n->decl, flag_devirtualize_speculatively))
 	    continue;
 
 	  if (!e->maybe_hot_p ())
@@ -3116,6 +3118,10 @@ public:
   /* opt_pass methods: */
   virtual bool gate (function *)
     {
+      /* In LTO, always run the IPA passes and decide on function basis if the
+	 pass is enabled.  */
+      if (in_lto_p)
+	return true;
       return (flag_devirtualize
 	      && (flag_devirtualize_speculatively
 		  || (warn_suggest_final_methods
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index dace2fc78be..0494e0456e1 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -2474,7 +2474,7 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
   info->conds = NULL;
   info->entry = NULL;
 
-  if (optimize && !early)
+  if (opt_for_fn (node->decl, optimize) && !early)
     {
       calculate_dominance_info (CDI_DOMINATORS);
       loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
@@ -2815,7 +2815,7 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
   inline_summary (node)->self_time = time;
   inline_summary (node)->self_size = size;
   nonconstant_names.release ();
-  if (optimize && !early)
+  if (opt_for_fn (node->decl, optimize) && !early)
     {
       loop_optimizer_finalize ();
       free_dominance_info (CDI_DOMINATORS);
@@ -2872,8 +2872,9 @@ compute_inline_parameters (struct cgraph_node *node, bool early)
       info->stack_frame_offset = 0;
 
       /* Can this function be inlined at all?  */
-      if (!optimize && !lookup_attribute ("always_inline",
-					  DECL_ATTRIBUTES (node->decl)))
+      if (!opt_for_fn (node->decl, optimize)
+	  && !lookup_attribute ("always_inline",
+				DECL_ATTRIBUTES (node->decl)))
 	info->inlinable = false;
       else
 	info->inlinable = tree_inlinable_function_p (node->decl);
@@ -2990,7 +2991,7 @@ estimate_edge_devirt_benefit (struct cgraph_edge *ie,
 
   if (!known_vals.exists () && !known_contexts.exists ())
     return false;
-  if (!flag_indirect_inlining)
+  if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
     return false;
 
   target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
@@ -3986,7 +3987,7 @@ inline_analyze_function (struct cgraph_node *node)
   if (dump_file)
     fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
 	     node->name (), node->order);
-  if (optimize && !node->thunk.thunk_p)
+  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
     inline_indirect_intraprocedural_analysis (node);
   compute_inline_parameters (node, false);
   if (!optimize)
diff --git a/gcc/ipa-inline-transform.c b/gcc/ipa-inline-transform.c
index dbc56c599b0..9b806c1bf66 100644
--- a/gcc/ipa-inline-transform.c
+++ b/gcc/ipa-inline-transform.c
@@ -467,6 +467,7 @@ inline_transform (struct cgraph_node *node)
 {
   unsigned int todo = 0;
   struct cgraph_edge *e, *next;
+  bool has_inline = false;
 
   /* FIXME: Currently the pass manager is adding inline transform more than
      once to some clones.  This needs revisiting after WPA cleanups.  */
@@ -480,13 +481,15 @@ inline_transform (struct cgraph_node *node)
 
   for (e = node->callees; e; e = next)
     {
+      if (!e->inline_failed)
+	has_inline = true;
       next = e->next_callee;
      e->redirect_call_stmt_to_callee ();
     }
   node->remove_all_references ();
 
   timevar_push (TV_INTEGRATION);
-  if (node->callees && optimize)
+  if (node->callees && (optimize || has_inline))
     todo = optimize_inline_calls (current_function_decl);
   timevar_pop (TV_INTEGRATION);
 
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index ca50ad5268c..72c0715dc56 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -378,18 +378,10 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
      optimization attribute.  */
   else if (caller_tree != callee_tree)
     {
-      struct cl_optimization *caller_opt
-	= TREE_OPTIMIZATION ((caller_tree)
-			     ? caller_tree
-			     : optimization_default_node);
-
-      struct cl_optimization *callee_opt
-	= TREE_OPTIMIZATION ((callee_tree)
-			     ? callee_tree
-			     : optimization_default_node);
-
-      if (((caller_opt->x_optimize > callee_opt->x_optimize)
-	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
+      if (((opt_for_fn (e->caller->decl, optimize)
+	    > opt_for_fn (e->callee->decl, optimize))
+	   || (opt_for_fn (e->caller->decl, optimize_size)
+	       != opt_for_fn (e->callee->decl, optimize_size)))
 	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
 	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
 	{
@@ -469,7 +461,7 @@ want_early_inline_function_p (struct cgraph_edge *e)
   else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
     ;
   else if (!DECL_DECLARED_INLINE_P (callee->decl)
-	   && !flag_inline_small_functions)
+	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
     {
       e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
       report_inline_failed_reason (e);
@@ -587,7 +579,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
   if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
     ;
   else if (!DECL_DECLARED_INLINE_P (callee->decl)
-	   && !flag_inline_small_functions)
+	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
     {
       e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
       want_inline = false;
@@ -639,7 +631,7 @@ want_inline_small_function_p (struct cgraph_edge *e, bool report)
 	  want_inline = false;
 	}
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
-	       && !flag_inline_functions)
+	       && !opt_for_fn (e->caller->decl, flag_inline_functions))
 	{
 	  /* growth_likely_positive is expensive, always test it last.  */
 	  if (growth >= MAX_INLINE_INSNS_SINGLE
@@ -816,6 +808,8 @@ check_callers (struct cgraph_node *node, void *has_hot_call)
   struct cgraph_edge *e;
   for (e = node->callers; e; e = e->next_caller)
     {
+      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
+	return true;
       if (!can_inline_edge_p (e, true))
 	return true;
       if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
@@ -1010,6 +1004,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
 	 compensated by the inline hints.
   */
 
+  /* TODO: We ought to support mixing units where some functions are profiled
+     and some are not.  */
   else if (flag_guess_branch_prob)
     {
       badness = (relative_time_benefit (callee_info, edge, edge_time)
@@ -1575,8 +1571,7 @@ inline_small_functions (void)
   int initial_size = 0;
   struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
   struct cgraph_edge_hook_list *edge_removal_hook_holder;
-  if (flag_indirect_inlining)
-    new_indirect_edges.create (8);
+  new_indirect_edges.create (8);
 
   edge_removal_hook_holder
     = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
@@ -1773,7 +1768,8 @@ inline_small_functions (void)
 	  if (where->global.inlined_to)
 	    where = where->global.inlined_to;
 	  if (!recursive_inlining (edge,
-				   flag_indirect_inlining
+				   opt_for_fn (edge->caller->decl,
+					       flag_indirect_inlining)
 				   ? &new_indirect_edges : NULL))
 	    {
 	      edge->inline_failed = CIF_RECURSIVE_INLINING;
@@ -1783,7 +1779,7 @@ inline_small_functions (void)
 	  reset_edge_caches (where);
 	  /* Recursive inliner inlines all recursive calls of the function
 	     at once. Consequently we need to update all callee keys.  */
-	  if (flag_indirect_inlining)
+	  if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
 	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 	  update_callee_keys (&edge_heap, where, updated_nodes);
 	  bitmap_clear (updated_nodes);
@@ -1821,8 +1817,7 @@ inline_small_functions (void)
 	  gcc_checking_assert (!callee->global.inlined_to);
 
 	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
-	  if (flag_indirect_inlining)
-	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
+	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 
 	  reset_edge_caches (edge->callee);
 	  reset_node_growth_cache (callee);
@@ -2246,8 +2241,7 @@ ipa_inline (void)
 	      reset_edge_caches (where);
 	      inline_update_overall_summary (where);
 	    }
-	  if (flag_inline_functions_called_once
-	      && want_inline_function_to_all_callers_p (node, cold))
+	  if (want_inline_function_to_all_callers_p (node, cold))
 	    {
 	      int num_calls = 0;
 	      node->call_for_symbol_thunks_and_aliases (sum_callers, &num_calls,
@@ -2345,8 +2339,8 @@ early_inline_small_functions (struct cgraph_node *node)
 
       /* Do not consider functions not declared inline.  */
       if (!DECL_DECLARED_INLINE_P (callee->decl)
-	  && !flag_inline_small_functions
-	  && !flag_inline_functions)
+	  && !opt_for_fn (node->decl, flag_inline_small_functions)
+	  && !opt_for_fn (node->decl, flag_inline_functions))
 	continue;
 
       if (dump_file)
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index 99d13098534..340d033b6a5 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -418,7 +418,8 @@ ipa_propagate_frequency (struct cgraph_node *node)
      nor about virtuals.  */
   if (!node->local.local
       || node->alias
-      || (flag_devirtualize && DECL_VIRTUAL_P (node->decl)))
+      || (opt_for_fn (node->decl, flag_devirtualize)
+	  && DECL_VIRTUAL_P (node->decl)))
     return false;
   gcc_assert (node->analyzed);
   if (dump_file && (dump_flags & TDF_DETAILS))
@@ -754,7 +755,7 @@ public:
   {}
 
   /* opt_pass methods: */
-  virtual bool gate (function *) { return flag_ipa_profile; }
+  virtual bool gate (function *) { return flag_ipa_profile || in_lto_p; }
   virtual unsigned int execute (function *) { return ipa_profile (); }
 
 }; // class pass_ipa_profile
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index f87243ce939..6905f0cc79c 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -168,12 +168,10 @@ static bool
 ipa_func_spec_opts_forbid_analysis_p (struct cgraph_node *node)
 {
   tree fs_opts = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (node->decl);
-  struct cl_optimization *os;
 
   if (!fs_opts)
     return false;
-  os = TREE_OPTIMIZATION (fs_opts);
-  return !os->x_optimize || !os->x_flag_ipa_cp;
+  return !opt_for_fn (node->decl, optimize) || !opt_for_fn (node->decl, flag_ipa_cp);
 }
 
 /* Return index of the formal whose tree is PTREE in function which corresponds
@@ -2896,13 +2894,14 @@ try_make_edge_direct_virtual_call (struct cgraph_edge *ie,
   tree target = NULL;
   bool speculative = false;
 
-  if (!flag_devirtualize)
+  if (!opt_for_fn (ie->caller->decl, flag_devirtualize))
     return NULL;
 
   gcc_assert (!ie->indirect_info->by_ref);
 
   /* Try to do lookup via known virtual table pointer value.  */
-  if (!ie->indirect_info->vptr_changed || flag_devirtualize_speculatively)
+  if (!ie->indirect_info->vptr_changed
+      || opt_for_fn (ie->caller->decl, flag_devirtualize_speculatively))
     {
       tree vtable;
       unsigned HOST_WIDE_INT offset;
@@ -2953,7 +2952,7 @@ try_make_edge_direct_virtual_call (struct cgraph_edge *ie,
       else
 	target = ipa_impossible_devirt_target (ie, NULL_TREE);
     }
-  else if (!target && flag_devirtualize_speculatively
+  else if (!target && opt_for_fn (ie->caller->decl, flag_devirtualize_speculatively)
 	   && !ie->speculative && ie->maybe_hot_p ())
     {
       cgraph_node *n;
@@ -3025,7 +3024,7 @@ update_indirect_edges_after_inlining (struct cgraph_edge *cs,
       param_index = ici->param_index;
       jfunc = ipa_get_ith_jump_func (top, param_index);
 
-      if (!flag_indirect_inlining)
+      if (!opt_for_fn (node->decl, flag_indirect_inlining))
 	new_direct_edge = NULL;
       else if (ici->polymorphic)
 	{
@@ -3579,7 +3578,7 @@ ipa_unregister_cgraph_hooks (void)
 void
 ipa_free_all_structures_after_ipa_cp (void)
 {
-  if (!optimize)
+  if (!optimize && !in_lto_p)
     {
       ipa_free_all_edge_args ();
       ipa_free_all_node_params ();
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index a55288d7ba4..50d7300cc4f 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -914,7 +914,8 @@ add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
      static declarations.  We do not need to scan them more than once
      since all we would be interested in are the addressof
      operations.  */
-  if (node->get_availability () > AVAIL_INTERPOSABLE)
+  if (node->get_availability () > AVAIL_INTERPOSABLE
+      && opt_for_fn (node->decl, flag_ipa_pure_const))
     set_function_state (node, analyze_function (node, true));
 }
 
@@ -984,7 +985,8 @@ pure_const_generate_summary (void)
      when function got cloned and the clone is AVAILABLE.  */
 
   FOR_EACH_DEFINED_FUNCTION (node)
-    if (node->get_availability () >= AVAIL_INTERPOSABLE)
+    if (node->get_availability () >= AVAIL_INTERPOSABLE
+	&& opt_for_fn (node->decl, flag_ipa_pure_const))
       set_function_state (node, analyze_function (node, true));
 }
 
@@ -1595,9 +1597,7 @@ execute (function *)
 static bool
 gate_pure_const (void)
 {
-  return (flag_ipa_pure_const
-	  /* Don't bother doing anything if the program has errors.  */
-	  && !seen_error ());
+  return flag_ipa_pure_const || in_lto_p;
 }
 
 pass_ipa_pure_const::pass_ipa_pure_const(gcc::context *ctxt)
diff --git a/gcc/ipa.c b/gcc/ipa.c
index a6086d808b0..54b30aab83c 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -304,8 +304,7 @@ symbol_table::remove_unreachable_nodes (bool before_inlining_p, FILE *file)
   hash_set<void *> reachable_call_targets;
 
   timevar_push (TV_IPA_UNREACHABLE);
-  if (optimize && flag_devirtualize)
-    build_type_inheritance_graph ();
+  build_type_inheritance_graph ();
   if (file)
     fprintf (file, "\nReclaiming functions:");
 #ifdef ENABLE_CHECKING
@@ -391,7 +390,8 @@ symbol_table::remove_unreachable_nodes (bool before_inlining_p, FILE *file)
 	{
 	  struct cgraph_edge *e;
 	  /* Keep alive possible targets for devirtualization.  */
-	  if (optimize && flag_devirtualize)
+	  if (opt_for_fn (cnode->decl, optimize)
+	      && opt_for_fn (cnode->decl, flag_devirtualize))
 	    {
 	      struct cgraph_edge *next;
 	      for (e = cnode->indirect_calls; e; e = next)
-- 
2.30.2