* ipa-comdats.c: Remove optimize check from gate.
* ipa-fnsummary.c (ipa_fn_summary_generate): Do not generate summary
for functions not optimized.
(ipa_fn_summary_read): Skip optimize check.
(ipa_fn_summary_write): Likewise.
* ipa-inline-analysis.c (do_estimate_growth_1): Check that caller
is optimized.
* ipa-inline.c (can_inline_edge_p): Not optimized functions are
uninlinable.
(can_inline_edge_p): Check flag_pcc_struct_return for match.
(check_callers): Give up on caller which is not optimized.
(inline_small_functions): Likewise.
(ipa_inline): Do not give up when not optimizing.
* ipa-visibility.c (function_and_variable_visibility): Do not optimize
away unoptimized cdtors.
(whole_program_function_and_variable_visibility): Do
ipa_discover_readonly_nonaddressable_vars in LTO mode.
* ipa.c (process_references): Do not check optimize.
(symbol_table::remove_unreachable_nodes): Update optimize check.
(set_writeonly_bit): Update optimize check.
(pass_ipa_cdtor_merge::gate): Do not check optimize.
(pass_ipa_single_use::gate): Remove.
From-SVN: r250048
+2017-07-06 Jan Hubicka <hubicka@ucw.cz>
+
+ * ipa-comdats.c: Remove optimize check from gate.
+	* ipa-fnsummary.c (ipa_fn_summary_generate): Do not generate summary
+	for functions not optimized.
+ (ipa_fn_summary_read): Skip optimize check.
+ (ipa_fn_summary_write): Likewise.
+ * ipa-inline-analysis.c (do_estimate_growth_1): Check that caller
+ is optimized.
+ * ipa-inline.c (can_inline_edge_p): Not optimized functions are
+ uninlinable.
+ (can_inline_edge_p): Check flag_pcc_struct_return for match.
+ (check_callers): Give up on caller which is not optimized.
+ (inline_small_functions): Likewise.
+ (ipa_inline): Do not give up when not optimizing.
+	* ipa-visibility.c (function_and_variable_visibility): Do not optimize
+	away unoptimized cdtors.
+ (whole_program_function_and_variable_visibility): Do
+ ipa_discover_readonly_nonaddressable_vars in LTO mode.
+ * ipa.c (process_references): Do not check optimize.
+ (symbol_table::remove_unreachable_nodes): Update optimize check.
+ (set_writeonly_bit): Update optimize check.
+ (pass_ipa_cdtor_merge::gate): Do not check optimize.
+ (pass_ipa_single_use::gate): Remove.
+
2017-07-06 Aaron Sawdey <acsawdey@linux.vnet.ibm.com>
* config/rs6000/rs6000.c (union_defs, union_uses, insn_is_load_p,
bool
pass_ipa_comdats::gate (function *)
{
- return HAVE_COMDAT_GROUP && optimize;
+ return HAVE_COMDAT_GROUP;
}
} // anon namespace
FOR_EACH_DEFINED_FUNCTION (node)
if (DECL_STRUCT_FUNCTION (node->decl))
- node->local.versionable = tree_versionable_function_p (node->decl);
-
- /* When not optimizing, do not bother to analyze. Inlining is still done
- because edge redirection needs to happen there. */
- if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
- return;
+ node->local.versionable =
+ (opt_for_fn (node->decl, optimize)
+ && tree_versionable_function_p (node->decl));
ipa_fn_summary_alloc ();
ipa_fn_summaries->enable_insertion_hook ();
ipa_register_cgraph_hooks ();
- ipa_free_fn_summary ();
FOR_EACH_DEFINED_FUNCTION (node)
- if (!node->alias)
+ if (!node->alias
+ && (flag_generate_lto || flag_generate_offload|| flag_wpa
+ || opt_for_fn (node->decl, optimize)))
inline_analyze_function (node);
}
fatal_error (input_location,
"ipa inline summary is missing in input file");
}
- if (optimize)
- {
- ipa_register_cgraph_hooks ();
- if (!flag_ipa_cp)
- ipa_prop_read_jump_functions ();
- }
+ ipa_register_cgraph_hooks ();
+ if (!flag_ipa_cp)
+ ipa_prop_read_jump_functions ();
gcc_assert (ipa_fn_summaries);
ipa_fn_summaries->enable_insertion_hook ();
produce_asm (ob, NULL);
destroy_output_block (ob);
- if (optimize && !flag_ipa_cp)
+ if (!flag_ipa_cp)
ipa_prop_write_jump_functions ();
}
{
gcc_checking_assert (e->inline_failed);
- if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
+ if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
+ || !opt_for_fn (e->caller->decl, optimize))
{
d->uninlinable = true;
continue;
e->inline_failed = CIF_BODY_NOT_AVAILABLE;
inlinable = false;
}
+ if (!early && !opt_for_fn (callee->decl, optimize))
+ {
+ e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
+ inlinable = false;
+ }
else if (callee->calls_comdat_local)
{
e->inline_failed = CIF_USES_COMDAT_LOCAL;
Not even for always_inline declared functions. */
else if (check_match (flag_wrapv)
|| check_match (flag_trapv)
+ || check_match (flag_pcc_struct_return)
/* When caller or callee does FP math, be sure FP codegen flags
compatible. */
|| ((caller_info->fp_expressions && callee_info->fp_expressions)
struct cgraph_edge *e;
for (e = node->callers; e; e = e->next_caller)
{
- if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
+ if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
+ || !opt_for_fn (e->caller->decl, optimize))
return true;
if (!can_inline_edge_p (e, true))
return true;
if (!node->global.inlined_to)
{
if (!node->alias && node->analyzed
- && (node->has_gimple_body_p () || node->thunk.thunk_p))
+ && (node->has_gimple_body_p () || node->thunk.thunk_p)
+ && opt_for_fn (node->decl, optimize))
{
struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
int id = dfs->scc_no + 1;
for (n2 = node; n2;
n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
- {
- struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
- if (info2->scc_no)
- break;
- info2->scc_no = id;
- }
+ if (opt_for_fn (n2->decl, optimize))
+ {
+ struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
+ if (info2->scc_no)
+ break;
+ info2->scc_no = id;
+ }
}
}
struct cgraph_edge *next = NULL;
bool has_speculative = false;
+ if (!opt_for_fn (node->decl, optimize))
+ continue;
+
if (dump_file)
fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
int cold;
bool remove_functions = false;
- if (!optimize)
- return 0;
-
cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
percent_rec = (sreal) 1 / (sreal) 100;
struct cgraph_edge *edge, *next;
bool update=false;
+ if (!opt_for_fn (node->decl, optimize)
+ || !opt_for_fn (node->decl, flag_inline_functions_called_once))
+ continue;
+
for (edge = node->callees; edge; edge = next)
{
next = edge->next_callee;
}
/* Free ipa-prop structures if they are no longer needed. */
- if (optimize)
- ipa_free_all_structures_after_iinln ();
+ ipa_free_all_structures_after_iinln ();
if (dump_file)
{
int flags = flags_from_decl_or_type (node->decl);
/* Optimize away PURE and CONST constructors and destructors. */
- if (optimize
+ if (node->analyzed
+ && (DECL_STATIC_CONSTRUCTOR (node->decl)
+ || DECL_STATIC_CONSTRUCTOR (node->decl))
&& (flags & (ECF_CONST | ECF_PURE))
- && !(flags & ECF_LOOPING_CONST_OR_PURE))
+ && !(flags & ECF_LOOPING_CONST_OR_PURE)
+ && opt_for_fn (node->decl, optimize))
{
DECL_STATIC_CONSTRUCTOR (node->decl) = 0;
DECL_STATIC_DESTRUCTOR (node->decl) = 0;
whole_program_function_and_variable_visibility (void)
{
function_and_variable_visibility (flag_whole_program);
- if (optimize)
+ if (optimize || in_lto_p)
ipa_discover_readonly_nonaddressable_vars ();
return 0;
}
if (node->definition && !node->in_other_partition
&& ((!DECL_EXTERNAL (node->decl) || node->alias)
|| (((before_inlining_p
- && ((TREE_CODE (node->decl) != FUNCTION_DECL
- && optimize)
+ && (TREE_CODE (node->decl) != FUNCTION_DECL
|| (TREE_CODE (node->decl) == FUNCTION_DECL
&& opt_for_fn (body->decl, optimize))
|| (symtab->state < IPA_SSA
hash_set<symtab_node *> reachable;
hash_set<tree> body_needed_for_clonning;
hash_set<void *> reachable_call_targets;
- bool before_inlining_p = symtab->state < (!optimize ? IPA_SSA
+ bool before_inlining_p = symtab->state < (!optimize && !in_lto_p ? IPA_SSA
: IPA_SSA_AFTER_INLINING);
timevar_push (TV_IPA_UNREACHABLE);
symtab_node::checking_verify_symtab_nodes ();
/* If we removed something, perhaps profile could be improved. */
- if (changed && optimize && ipa_call_summaries)
+ if (changed && (optimize || in_lto_p) && ipa_call_summaries)
FOR_EACH_DEFINED_FUNCTION (node)
ipa_propagate_frequency (node);
set_writeonly_bit (varpool_node *vnode, void *data)
{
vnode->writeonly = true;
- if (optimize)
+ if (optimize || in_lto_p)
{
DECL_INITIAL (vnode->decl) = NULL;
if (!vnode->alias)
/* Perform the pass when we have no ctors/dtors support
or at LTO time to merge multiple constructors into single
function. */
- return !targetm.have_ctors_dtors || (optimize && in_lto_p);
+ return !targetm.have_ctors_dtors || in_lto_p;
}
} // anon namespace
{}
/* opt_pass methods: */
- virtual bool gate (function *);
virtual unsigned int execute (function *) { return ipa_single_use (); }
}; // class pass_ipa_single_use
-bool
-pass_ipa_single_use::gate (function *)
-{
- return optimize;
-}
-
} // anon namespace
ipa_opt_pass_d *