/* Analysis used by inlining decision heuristics.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"
/* Cached node/edge growths.  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;

/* The context cache remembers estimated time/size and hints for a given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  ipa_call_context ctx;
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    ctx.release ();
  }
};
/* At the moment we implement a primitive single-entry LRU cache.  */
class node_context_summary
{
public:
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};

/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
  *node_context_cache = NULL;
/* Statistics about the context cache effectiveness.  */
static long node_context_cache_hit, node_context_cache_miss,
  node_context_cache_clear;
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}

/* Allocate edge growth caches.  */

void
initialize_growth_caches ()
{
  edge_growth_cache
    = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
  node_context_cache
    = new fast_function_summary<node_context_summary *, va_heap> (symtab);
}

/* Free growth caches.  */

void
free_growth_caches (void)
{
  delete edge_growth_cache;
  delete node_context_cache;
  edge_growth_cache = NULL;
  node_context_cache = NULL;
  if (dump_file)
    fprintf (dump_file, "node context cache: %li hits, %li misses,"
	     " %li initializations\n",
	     node_context_cache_hit, node_context_cache_miss,
	     node_context_cache_clear);
  node_context_cache_hit = 0;
  node_context_cache_miss = 0;
  node_context_cache_clear = 0;
}
/* Return hints derived from EDGE.  */

int
simple_edge_hints (struct cgraph_edge *edge)
{
  int hints = 0;
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
  int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;

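  /* Calls within a strongly connected component (mutual recursion) are
     flagged so that the inliner can take the recursion into account.  */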
  if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
    hints |= INLINE_HINT_same_scc;

  if (callee->lto_file_data && edge->caller->lto_file_data
      && edge->caller->lto_file_data != callee->lto_file_data
      && !callee->merged_comdat && !callee->icf_merged)
    hints |= INLINE_HINT_cross_module;

  return hints;
}

/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, which handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
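  /* min_size stays negative on a context-cache hit; only a freshly computed
     value is propagated to the function summary below.  */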
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause, &known_vals,
				&known_contexts, &known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, es->param);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	}
      else
	{
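	  /* A non-empty entry whose context differs is a genuine miss;
	     an empty entry is counted as an initialization.  */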
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release (true);
	  ctx.estimate_size_and_time (&size, &min_size,
				      &time, &nonspec_time, &hints);
	  e->entry.size = size;
	  e->entry.time = time;
	  e->entry.nonspec_time = nonspec_time;
	  e->entry.hints = hints;
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    ctx.estimate_size_and_time (&size, &min_size,
				&time, &nonspec_time, &hints);

  /* When we have profile feedback, we can quite safely identify hot
     edges and for those we disable size limits.  Don't do that when
     the probability that the caller will call the callee is low,
     however, since it may hurt optimization of the caller's hot path.  */
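  /* The apply_scale (2, 1) test below checks whether the edge carries
     more than half of the caller's total profile count.  */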
  if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa ().apply_scale (2, 1)
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
    hints |= INLINE_HINT_known_hot;

  ctx.release ();
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	  = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

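      /* Bias the cached size and hints by one so that zero can serve as
	 the "no cached value" marker; readers undo the bias.  */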
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  return time;
}

/* Reset cache for NODE.
   This must be done each time NODE's body is modified.  */
void
reset_node_cache (struct cgraph_node *node)
{
  if (node_context_cache)
    node_context_cache->remove (node);
}

/* Remove EDGE from caches once it has been inlined.  */
void
ipa_remove_from_growth_caches (struct cgraph_edge *edge)
{
  if (node_context_cache)
    node_context_cache->remove (edge->callee);
  if (edge_growth_cache)
    edge_growth_cache->remove (edge);
}

/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      gcc_checking_assert (size);
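      /* Undo the +1 bias that distinguishes a cached size from an empty
	 entry.  */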
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (&size, NULL, NULL, NULL, NULL);
  ctx.release ();
  return size;
}


/* Return estimated hints for inlining EDGE.
   Only to be called via estimate_edge_hints.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  vec<tree> known_vals;
  vec<ipa_polymorphic_call_context> known_contexts;
  vec<ipa_agg_jump_function_p> known_aggs;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      hints = edge_growth_cache->get (edge)->hints;
      gcc_checking_assert (hints);
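      /* Cached hints are stored with a +1 bias; undo it.  */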
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  evaluate_properties_for_edge (edge, true,
				&clause, &nonspec_clause,
				&known_vals, &known_contexts,
				&known_aggs);
  ipa_call_context ctx (callee, clause, nonspec_clause, known_vals,
			known_contexts, known_aggs, vNULL);
  ctx.estimate_size_and_time (NULL, NULL, NULL, NULL, &hints);
  ctx.release ();
  hints |= simple_edge_hints (edge);
  return hints;
}

/* Estimate the size of NODE after inlining EDGE which should be an
   edge to either NODE or a call inlined into NODE.  */

int
estimate_size_after_inlining (struct cgraph_node *node,
			      struct cgraph_edge *edge)
{
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  ipa_size_summary *s = ipa_size_summaries->get (node);
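  /* A predicate of false means the call is known never to be executed, so
     inlining it cannot grow NODE.  */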
  if (!es->predicate || *es->predicate != false)
    {
      int size = s->size + estimate_edge_growth (edge);
      gcc_assert (size >= 0);
      return size;
    }
  return s->size;
}


struct growth_data
{
  struct cgraph_node *node;
  /* Set when NODE calls itself, possibly through aliases.  */
  bool self_recursive;
  /* Set when some caller cannot inline NODE.  */
  bool uninlinable;
  /* Accumulated growth estimate.  */
  int growth;
  /* Stop the walk once growth exceeds this cap.  */
  int cap;
};


/* Worker for do_estimate_growth.  Collect growth for all callers.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}

/* Return estimated savings for eliminating offline copy of NODE by inlining
   it everywhere.  */

static int
offline_size (struct cgraph_node *node, ipa_size_summary *info)
{
  if (!DECL_EXTERNAL (node->decl))
    {
      if (node->will_be_removed_from_program_if_no_direct_calls_p ())
	return info->size;
      /* COMDAT functions are very often not shared across multiple units
	 since they come from various template instantiations.
	 Take this into account.  */
      else if (DECL_COMDAT (node->decl)
	       && node->can_remove_if_no_direct_calls_p ())
	return (info->size
		* (100 - param_comdat_sharing_probability)
		+ 50) / 100;
    }
  return 0;
}

/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self-recursive functions the growth estimate should really be
     infinity.  We don't want to return very large values, because growth
     plays various roles in fractions of the badness computation.  Be sure
     not to return zero or negative growth.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    d.growth -= offline_size (node, info);

  return d.growth;
}

/* Worker for growth_positive_p.  Add the estimated growth from inlining
   NODE into all of its callers except KNOWN_EDGE to *GROWTH, using
   MIN_SIZE as a lower bound for edges with no cached estimate and giving
   up once *N such edges have been examined.  Return true if the growth is
   known to exceed OFFLINE or the offline copy of NODE cannot be removed
   anyway.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
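      /* Cached sizes are biased by one so that zero means "no cached
	 value".  */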
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}


/* Decide if growth of NODE is positive.  This is cheaper than calculating
   the actual growth.  If the edge growth of KNOWN_EDGE is known, it is
   passed in EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
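  /* If removing the offline copy saves nothing and the known edge alone
     already grows the program, the overall growth must be positive.  */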
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
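	  /* Without a cached estimate, the callee's minimal size gives a
	     conservative lower bound on the growth of this edge.  */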
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}