From 9cec31f43afbdb326fddbc144e9aea63986828e4 Mon Sep 17 00:00:00 2001
From: Martin Liska
Date: Tue, 17 Dec 2013 22:20:12 +0000
Subject: [PATCH] Time profile-based function reordering (phase 2).

Co-Authored-By: Jan Hubicka

From-SVN: r206070
---
 gcc/ChangeLog           | 11 +++++++++++
 gcc/cgraphunit.c        | 40 +++++++++++++++++++++++++++++++++++++++-
 gcc/common.opt          |  4 ++++
 gcc/config/darwin.c     | 26 +++++++++++++++++++-------
 gcc/doc/invoke.texi     | 10 +++++++++-
 gcc/ipa-split.c         |  4 ++++
 gcc/ipa-utils.c         |  5 +++++
 gcc/lto/lto-partition.c | 30 +++++++++++++++++++++++++++---
 gcc/lto/lto.c           |  5 ++++-
 gcc/opts.c              |  2 ++
 gcc/predict.c           | 12 ++++++++++++
 gcc/varasm.c            | 14 ++++++++++++--
 12 files changed, 148 insertions(+), 15 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 58613eef093..2c10c04358d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2013-12-18  Martin Liska
+	    Jan Hubicka
+
+	* cgraphunit.c (node_cmp): New function.
+	(expand_all_functions): Function ordering added.
+	* common.opt: New profile-based function reordering flag introduced.
+	* lto-partition.c: Support for time profile added.
+	* lto.c: Likewise.
+	* predict.c (handle_missing_profiles): Time profile handled in
+	missing profiles.
+
 2013-12-17  Jakub Jelinek
 
 	PR tree-optimization/59523
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 44f3afd6e4a..28f51162bba 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -1831,6 +1831,23 @@ expand_function (struct cgraph_node *node)
 
   ipa_remove_all_references (&node->ref_list);
 }
 
+/* Node comparator producing the order that corresponds to the time
+   when a function was first executed.  */
+
+static int
+node_cmp (const void *pa, const void *pb)
+{
+  const struct cgraph_node *a = *(const struct cgraph_node * const *) pa;
+  const struct cgraph_node *b = *(const struct cgraph_node * const *) pb;
+
+  /* Functions with time profile must come before those without it.  */
+  if (!a->tp_first_run || !b->tp_first_run)
+    return a->tp_first_run - b->tp_first_run;
+
+  return a->tp_first_run != b->tp_first_run
+	 ? b->tp_first_run - a->tp_first_run
+	 : b->order - a->order;
+}
 
 /* Expand all functions that must be output.
@@ -1847,6 +1864,7 @@ expand_all_functions (void)
 {
   struct cgraph_node *node;
   struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
+  unsigned int expanded_func_count = 0, profiled_func_count = 0;
   int order_pos, new_order_pos = 0;
   int i;
 
@@ -1859,20 +1877,39 @@ expand_all_functions (void)
     if (order[i]->process)
       order[new_order_pos++] = order[i];
 
+  if (flag_profile_reorder_functions)
+    qsort (order, new_order_pos, sizeof (struct cgraph_node *), node_cmp);
+
   for (i = new_order_pos - 1; i >= 0; i--)
     {
       node = order[i];
+
       if (node->process)
 	{
+	  expanded_func_count++;
+	  if (node->tp_first_run)
+	    profiled_func_count++;
+
+	  if (cgraph_dump_file)
+	    fprintf (cgraph_dump_file, "Time profile order in expand_all_functions:%s:%d\n", node->asm_name (), node->tp_first_run);
+
 	  node->process = 0;
 	  expand_function (node);
 	}
     }
+
+  if (dump_file)
+    fprintf (dump_file, "Expanded functions with time profile (%s):%u/%u\n",
+	     main_input_filename, profiled_func_count, expanded_func_count);
+
+  if (cgraph_dump_file && flag_profile_reorder_functions)
+    fprintf (cgraph_dump_file, "Expanded functions with time profile:%u/%u\n",
+	     profiled_func_count, expanded_func_count);
+
   cgraph_process_new_functions ();
   free_gimplify_stack ();
   free (order);
-
 }
 
 /* This is used to sort the node types by the cgraph order number.
    */
@@ -2194,6 +2231,7 @@ compile (void)
 #endif
 
   cgraph_state = CGRAPH_STATE_EXPANSION;
+
   if (!flag_toplevel_reorder)
     output_in_order ();
   else
diff --git a/gcc/common.opt b/gcc/common.opt
index 0cd1fddd4ad..ea323fdc9c3 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1712,6 +1712,10 @@ fprofile-report
 Common Report Var(profile_report)
 Report on consistency of profile
 
+fprofile-reorder-functions
+Common Report Var(flag_profile_reorder_functions)
+Enable function reordering that improves code placement
+
 frandom-seed
 Common Var(common_deferred_options) Defer
 
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index ea056a9ab9d..4267c89dc06 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -3621,9 +3621,16 @@ darwin_function_section (tree decl, enum node_frequency freq,
      unlikely executed (this happens especially with function splitting
      where we can split away unnecessary parts of static constructors).  */
   if (startup && freq != NODE_FREQUENCY_UNLIKELY_EXECUTED)
-    return (weak)
-	   ? darwin_sections[text_startup_coal_section]
-	   : darwin_sections[text_startup_section];
+    {
+      /* If we do have a profile and the LTO phase is executed, we do not
+	 need these sections.  */
+      if (!in_lto_p || !flag_profile_values)
+	return (weak)
+	       ? darwin_sections[text_startup_coal_section]
+	       : darwin_sections[text_startup_section];
+      else
+	return text_section;
+    }
 
   /* Similarly for exit.  */
   if (exit && freq != NODE_FREQUENCY_UNLIKELY_EXECUTED)
@@ -3640,10 +3647,15 @@ darwin_function_section (tree decl, enum node_frequency freq,
 	     : darwin_sections[text_cold_section];
       break;
     case NODE_FREQUENCY_HOT:
-      return (weak)
-	     ? darwin_sections[text_hot_coal_section]
-	     : darwin_sections[text_hot_section];
-      break;
+      {
+	/* If we do have a profile and the LTO phase is executed, we do not
+	   need these sections.  */
+	if (!in_lto_p || !flag_profile_values)
+	  return (weak)
+		 ? darwin_sections[text_hot_coal_section]
+		 : darwin_sections[text_hot_section];
+	break;
+      }
     default:
       return (weak)
 	     ? darwin_sections[text_coal_section]
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index b102e13b0e8..99ec1d2dce6 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -394,7 +394,7 @@ Objective-C and Objective-C++ Dialects}.
 -fprefetch-loop-arrays -fprofile-report @gol
 -fprofile-correction -fprofile-dir=@var{path} -fprofile-generate @gol
 -fprofile-generate=@var{path} @gol
--fprofile-use -fprofile-use=@var{path} -fprofile-values @gol
+-fprofile-use -fprofile-use=@var{path} -fprofile-values -fprofile-reorder-functions @gol
 -freciprocal-math -free -frename-registers -freorder-blocks @gol
 -freorder-blocks-and-partition -freorder-functions @gol
 -frerun-cse-after-loop -freschedule-modulo-scheduled-loops @gol
@@ -9071,6 +9071,14 @@ from profiling values of expressions for usage in optimizations.
 
 Enabled with @option{-fprofile-generate} and @option{-fprofile-use}.
 
+@item -fprofile-reorder-functions
+@opindex fprofile-reorder-functions
+Function reordering based on profile instrumentation collects the
+time of first execution of each function and orders these functions
+in ascending order.
+
+Enabled with @option{-fprofile-use}.
+
 @item -fvpt
 @opindex fvpt
 If combined with @option{-fprofile-arcs}, this option instructs the compiler
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 6d9334882ed..43758b6db7a 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -1234,6 +1234,10 @@ split_function (struct split_point *split_point)
 				 !split_part_return_p,
 				 split_point->split_bbs,
 				 split_point->entry_bb, "part");
+
+  /* Set the time profile for the split function.  */
+  node->tp_first_run = cur_node->tp_first_run + 1;
+
   /* For usual cloning it is enough to clear builtin only when signature
      changes.  For partial inlining we however can not expect the part
      of builtin implementation to have same semantic as the whole.  */
diff --git a/gcc/ipa-utils.c b/gcc/ipa-utils.c
index 92972803ba0..66416268f57 100644
--- a/gcc/ipa-utils.c
+++ b/gcc/ipa-utils.c
@@ -655,6 +655,11 @@ ipa_merge_profiles (struct cgraph_node *dst,
     return;
   if (src->frequency < dst->frequency)
     src->frequency = dst->frequency;
+
+  /* Time profiles are merged.  */
+  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
+    dst->tp_first_run = src->tp_first_run;
+
   if (!dst->count)
     return;
   if (cgraph_dump_file)
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index 5b46af9d907..5e0335ea5a4 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -286,9 +286,11 @@ add_symbol_to_partition (ltrans_partition part, symtab_node *node)
      Be lax about comdats; they may or may not be duplicated and we may
      end up in need to duplicate keyed comdat because it has unkeyed alias.  */
+
   gcc_assert (get_symbol_class (node) == SYMBOL_DUPLICATE
 	      || DECL_COMDAT (node->decl)
 	      || !symbol_partitioned_p (node));
+
   add_symbol_to_partition_1 (part, node);
 }
 
@@ -401,6 +403,25 @@ node_cmp (const void *pa, const void *pb)
 {
   const struct cgraph_node *a = *(const struct cgraph_node * const *) pa;
   const struct cgraph_node *b = *(const struct cgraph_node * const *) pb;
+
+  /* The profile-reorder flag enables function reordering based on the first
+     execution of a function.  All functions with a time profile are placed
+     in ascending order at the beginning.  */
+
+  if (flag_profile_reorder_functions)
+    {
+      /* Functions with time profile are sorted in ascending order.  */
+      if (a->tp_first_run && b->tp_first_run)
+	return a->tp_first_run != b->tp_first_run
+	       ? a->tp_first_run - b->tp_first_run
+	       : a->order - b->order;
+
+      /* Functions with time profile are sorted before the functions
+	 that do not have the profile.  */
+      if (a->tp_first_run || b->tp_first_run)
+	return b->tp_first_run - a->tp_first_run;
+    }
+
   return b->order - a->order;
 }
 
@@ -487,10 +508,13 @@ lto_balanced_map (void)
      get better about minimizing the function boundary, but until that
      things work smoother if we order in source order.  */
   qsort (order, n_nodes, sizeof (struct cgraph_node *), node_cmp);
+
+  if (cgraph_dump_file)
+    for (i = 0; i < n_nodes; i++)
+      fprintf (cgraph_dump_file, "Balanced map symbol order:%s:%u\n", order[i]->name (), order[i]->tp_first_run);
+
   if (!flag_toplevel_reorder)
     {
-      qsort (order, n_nodes, sizeof (struct cgraph_node *), node_cmp);
-
       FOR_EACH_VARIABLE (vnode)
 	if (get_symbol_class (vnode) == SYMBOL_PARTITION)
 	  n_varpool_nodes++;
@@ -855,7 +879,7 @@ may_need_named_section_p (lto_symtab_encoder_t encoder, symtab_node *node)
    of the same name in partition ENCODER (or in whole compilation unit if
    ENCODER is NULL) and if so, mangle the statics.  Always mangle all
    conflicting statics, so we reduce chances of silently miscompiling
-   asm statemnets referring to them by symbol name.  */
+   asm statements referring to them by symbol name.  */
 
 static void
 rename_statics (lto_symtab_encoder_t encoder, symtab_node *node)
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index f322a0071d2..8e5eeb3d11f 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -2503,9 +2503,12 @@ lto_wpa_write_files (void)
   /* Sort partitions by size so small ones are compiled last.
      FIXME: Even when not reordering we may want to output one list for
      parallel make and other for final link command.  */
-  ltrans_partitions.qsort (flag_toplevel_reorder
+
+  if (!flag_profile_reorder_functions || !flag_profile_use)
+    ltrans_partitions.qsort (flag_toplevel_reorder
 			   ? cmp_partitions_size
 			   : cmp_partitions_order);
+
   for (i = 0; i < n_sets; i++)
     {
       size_t len;
diff --git a/gcc/opts.c b/gcc/opts.c
index 4cb2cdf4eff..5be03faa703 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -1710,6 +1710,8 @@ common_handle_option (struct gcc_options *opts,
 	opts->x_flag_vect_cost_model = VECT_COST_MODEL_DYNAMIC;
       if (!opts_set->x_flag_tree_loop_distribute_patterns)
 	opts->x_flag_tree_loop_distribute_patterns = value;
+      if (!opts_set->x_flag_profile_reorder_functions)
+	opts->x_flag_profile_reorder_functions = value;
       /* Indirect call profiling should do all useful transformations
 	 speculative devirtualization does.  */
       if (!opts_set->x_flag_devirtualize_speculatively
diff --git a/gcc/predict.c b/gcc/predict.c
index a5ad34f601a..1826a0699ec 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -2839,12 +2839,24 @@ handle_missing_profiles (void)
     {
       struct cgraph_edge *e;
       gcov_type call_count = 0;
+      gcov_type max_tp_first_run = 0;
       struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
 
       if (node->count)
 	continue;
       for (e = node->callers; e; e = e->next_caller)
+	{
 	  call_count += e->count;
+
+	  if (e->caller->tp_first_run > max_tp_first_run)
+	    max_tp_first_run = e->caller->tp_first_run;
+	}
+
+      /* If the time profile is missing, assign the maximum that comes from
+	 the caller functions.  */
+      if (!node->tp_first_run && max_tp_first_run)
+	node->tp_first_run = max_tp_first_run + 1;
+
       if (call_count
 	  && fn && fn->cfg
 	  && (call_count * unlikely_count_fraction >= profile_info->runs))
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 5c5025ac5e6..1d2c03e6fbb 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -552,7 +552,14 @@ default_function_section (tree decl, enum node_frequency freq,
      unlikely executed (this happens especially with function splitting
      where we can split away unnecessary parts of static constructors.  */
   if (startup && freq != NODE_FREQUENCY_UNLIKELY_EXECUTED)
-    return get_named_text_section (decl, ".text.startup", NULL);
+    {
+      /* If we do have a profile and the LTO phase is executed, we do not
+	 need these ELF sections.  */
+      if (!in_lto_p || !flag_profile_values)
+	return get_named_text_section (decl, ".text.startup", NULL);
+      else
+	return NULL;
+    }
 
   /* Similarly for exit.  */
   if (exit && freq != NODE_FREQUENCY_UNLIKELY_EXECUTED)
@@ -564,7 +571,10 @@ default_function_section (tree decl, enum node_frequency freq,
     case NODE_FREQUENCY_UNLIKELY_EXECUTED:
       return get_named_text_section (decl, ".text.unlikely", NULL);
     case NODE_FREQUENCY_HOT:
-      return get_named_text_section (decl, ".text.hot", NULL);
+      /* If we do have a profile and the LTO phase is executed, we do not
+	 need these ELF sections.  */
+      if (!in_lto_p || !flag_profile_values)
+	return get_named_text_section (decl, ".text.hot", NULL);
     default:
       return NULL;
     }
-- 
2.30.2