Optimize updating of badness after applying inline
authorJan Hubicka <jh@suse.cz>
Wed, 20 Nov 2019 11:46:41 +0000 (12:46 +0100)
committerJan Hubicka <hubicka@gcc.gnu.org>
Wed, 20 Nov 2019 11:46:41 +0000 (11:46 +0000)
        * ipa-inline.c (wrapper_heuristics_may_apply): Break out from ...
(edge_badness): ... here.
(inline_small_functions): Use monotonicity of badness calculation
to avoid redundant updates.

From-SVN: r278496

gcc/ChangeLog
gcc/ipa-inline.c

index eacaee3638c95be3c3a9a479de24f6f9da4f44fc..79b8210eccc6548c24b6635a49c27c059f2e6fbe 100644 (file)
@@ -1,3 +1,10 @@
+2019-11-20  Jan Hubicka  <jh@suse.cz>
+
+       * ipa-inline.c (wrapper_heuristics_may_apply): Break out from ...
+       (edge_badness): ... here.
+       (inline_small_functions): Use monotonicity of badness calculation
+       to avoid redundant updates.
+
 2019-11-20  Richard Biener  <rguenther@suse.de>
 
        * tree-vect-slp.c (vect_analyze_slp_instance): Dump
index becea8a3e8ecf6995800ca330ffdd8ac0bb54f9b..5d8b87a7bc3f3a5c25914f68dacb399eb18c1fba 100644 (file)
@@ -1097,6 +1097,17 @@ want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
   return true;
 }
 
+/* Return true if WHERE of SIZE is a possible candidate for wrapper heuristics
+   in estimate_edge_badness.  */
+
+static bool
+wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
+{
+  return size < (DECL_DECLARED_INLINE_P (where->decl)
+                ? inline_insns_single (where, false)
+                : inline_insns_auto (where, false));
+}
+
 /* A cost model driving the inlining heuristics in a way so the edges with
    smallest badness are inlined first.  After each inlining is performed
    the costs of all caller edges of nodes affected are recomputed so the
@@ -1227,10 +1238,8 @@ edge_badness (struct cgraph_edge *edge, bool dump)
             and it is not called once.  */
          if (!caller_info->single_caller && overall_growth < caller_growth
              && caller_info->inlinable
-             && ipa_size_summaries->get (caller)->size
-                < (DECL_DECLARED_INLINE_P (caller->decl)
-                   ? inline_insns_single (caller, false)
-                   : inline_insns_auto (caller, false)))
+             && wrapper_heuristics_may_apply
+                (caller, ipa_size_summaries->get (caller)->size))
            {
              if (dump)
                fprintf (dump_file,
@@ -2158,11 +2167,24 @@ inline_small_functions (void)
            fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
 
          gcc_checking_assert (!callee->inlined_to);
+
+         int old_size = ipa_size_summaries->get (where)->size;
+         sreal old_time = ipa_fn_summaries->get (where)->time;
+
          inline_call (edge, true, &new_indirect_edges, &overall_size, true);
          reset_edge_caches (edge->callee);
          add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 
-         update_callee_keys (&edge_heap, where, updated_nodes);
+         /* If the caller's size and time increased, we do not need to update
+            all edges because badness is not going to decrease.  */
+         if (old_size <= ipa_size_summaries->get (where)->size
+             && old_time <= ipa_fn_summaries->get (where)->time
+            /* Wrapper penalty may be non-monotonic in this respect.
+                Fortunately it only affects small functions.  */
+             && !wrapper_heuristics_may_apply (where, old_size))
+           update_callee_keys (&edge_heap, edge->callee, updated_nodes);
+         else
+           update_callee_keys (&edge_heap, where, updated_nodes);
        }
       where = edge->caller;
       if (where->inlined_to)