1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* Inlining decision heuristics
23
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
28
29 There are three major parts of this file:
30
31 cgraph_mark_inline implementation
32
33 This function marks the given call edge as inlined and performs the
34 necessary modifications of the cgraph (production of the clones and
35 updating of the overall statistics).
36
37 inlining heuristics limits
38
39 These functions check whether a particular inlining is allowed
40 by the limits specified by the user (allowed function growth, overall
41 unit growth and so on).
42
43 inlining heuristics
44
45 This is the implementation of the IPA pass that aims to get as much
46 benefit from inlining as possible while obeying the limits checked above.
47
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace it with a more
50 sophisticated implementation in the future. The rest of the inlining code
51 acts as a library for modifying the callgraph and verifying that the
52 limits on code size growth are respected.
53
54 To mark a given call inline, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
57
58 The heuristic implements a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
61
62 cgraph_decide_inlining implements the heuristics taking the whole
63 callgraph into account, while cgraph_decide_inlining_incrementally
64 considers only one function at a time and is used in non-unit-at-a-time mode. */
65
66 #include "config.h"
67 #include "system.h"
68 #include "coretypes.h"
69 #include "tm.h"
70 #include "tree.h"
71 #include "tree-inline.h"
72 #include "langhooks.h"
73 #include "flags.h"
74 #include "cgraph.h"
75 #include "diagnostic.h"
76 #include "timevar.h"
77 #include "params.h"
78 #include "fibheap.h"
79 #include "intl.h"
80 #include "tree-pass.h"
81 #include "hashtab.h"
82 #include "coverage.h"
83 #include "ggc.h"
84
85 /* Statistics we collect about inlining algorithm. */
86 static int ncalls_inlined;
87 static int nfunctions_inlined;
88 static int initial_insns;
89 static int overall_insns;
90 static int max_insns;
91 static gcov_type max_count;
92
93 /* Estimate size of the function after inlining WHAT into TO. */
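/* A purely illustrative example with made-up numbers: if WHAT has 40 insns,
   each call costs 16 insns (PARAM_INLINE_CALL_COST plus the argument move
   costs) and TIMES is 2, the estimate for a 100-insn TO is
   (40 - 16) * 2 + 100 = 148 insns.  */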
94
95 static int
96 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
97 struct cgraph_node *what)
98 {
99 int size;
100 tree fndecl = what->decl, arg;
101 int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
102
103 for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
104 call_insns += estimate_move_cost (TREE_TYPE (arg));
105 size = (what->global.insns - call_insns) * times + to->global.insns;
106 gcc_assert (size >= 0);
107 return size;
108 }
109
110 /* E is expected to be an edge being inlined. Clone the destination node
111 of the edge and redirect the edge to the new clone.
112 DUPLICATE is used for bookkeeping on whether we are actually creating new
113 clones or re-using the node originally representing the out-of-line
114 function call. */
115 void
116 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
117 {
118 if (duplicate)
119 {
120 /* We may eliminate the need for out-of-line copy to be output.
121 In that case just go ahead and re-use it. */
122 if (!e->callee->callers->next_caller
123 && !e->callee->needed
124 && flag_unit_at_a_time)
125 {
126 gcc_assert (!e->callee->global.inlined_to);
127 if (DECL_SAVED_TREE (e->callee->decl))
128 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
129 duplicate = false;
130 }
131 else
132 {
133 struct cgraph_node *n;
134 n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
135 update_original);
136 cgraph_redirect_edge_callee (e, n);
137 }
138 }
139
140 if (e->caller->global.inlined_to)
141 e->callee->global.inlined_to = e->caller->global.inlined_to;
142 else
143 e->callee->global.inlined_to = e->caller;
144
145 /* Recursively clone all bodies. */
146 for (e = e->callee->callees; e; e = e->next_callee)
147 if (!e->inline_failed)
148 cgraph_clone_inlined_nodes (e, duplicate, update_original);
149 }
150
151 /* Mark edge E as inlined and update the callgraph accordingly.
152 UPDATE_ORIGINAL specifies whether the profile of the original function
153 should be updated. */
154
155 void
156 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
157 {
158 int old_insns = 0, new_insns = 0;
159 struct cgraph_node *to = NULL, *what;
160
161 if (e->callee->inline_decl)
162 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
163
164 gcc_assert (e->inline_failed);
165 e->inline_failed = NULL;
166
167 if (!e->callee->global.inlined && flag_unit_at_a_time)
168 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
169 e->callee->global.inlined = true;
170
171 cgraph_clone_inlined_nodes (e, true, update_original);
172
173 what = e->callee;
174
175 /* Now update size of caller and all functions caller is inlined into. */
176 for (;e && !e->inline_failed; e = e->caller->callers)
177 {
178 old_insns = e->caller->global.insns;
179 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
180 what);
181 gcc_assert (new_insns >= 0);
182 to = e->caller;
183 to->global.insns = new_insns;
184 }
185 gcc_assert (what->global.inlined_to == to);
186 if (new_insns > old_insns)
187 overall_insns += new_insns - old_insns;
188 ncalls_inlined++;
189 }
190
191 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
192 Return the following unredirected edge in the list of callers
193 of EDGE->CALLEE. */
194
195 static struct cgraph_edge *
196 cgraph_mark_inline (struct cgraph_edge *edge)
197 {
198 struct cgraph_node *to = edge->caller;
199 struct cgraph_node *what = edge->callee;
200 struct cgraph_edge *e, *next;
201 int times = 0;
202
203 /* Look for all calls, mark them inline and clone recursively
204 all inlined functions. */
205 for (e = what->callers; e; e = next)
206 {
207 next = e->next_caller;
208 if (e->caller == to && e->inline_failed)
209 {
210 cgraph_mark_inline_edge (e, true);
211 if (e == edge)
212 edge = next;
213 times++;
214 }
215 }
216 gcc_assert (times);
217 return edge;
218 }
219
220 /* Estimate the growth caused by inlining NODE into all of its callers. */
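/* A made-up example: a 30-insn NODE called from three places, each call
   costing 5 insns, grows the unit by 3 * (30 - 5) = 75 insns; if the
   offline copy can then be eliminated, its 30 insns are subtracted, giving
   an estimated growth of 45 insns.  */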
221
222 static int
223 cgraph_estimate_growth (struct cgraph_node *node)
224 {
225 int growth = 0;
226 struct cgraph_edge *e;
227 if (node->global.estimated_growth != INT_MIN)
228 return node->global.estimated_growth;
229
230 for (e = node->callers; e; e = e->next_caller)
231 if (e->inline_failed)
232 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
233 - e->caller->global.insns);
234
235 /* ??? Wrong for self-recursive functions or cases where we decide not to
236 inline for other reasons, but it is not a big deal: in that case we will
237 keep the body around, but we will also avoid some inlining. */
238 if (!node->needed && !DECL_EXTERNAL (node->decl))
239 growth -= node->global.insns;
240
241 node->global.estimated_growth = growth;
242 return growth;
243 }
244
245 /* Return false when inlining WHAT into TO is not a good idea
246 because it would cause too large a growth of the function bodies. */
247
248 static bool
249 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
250 const char **reason)
251 {
252 int times = 0;
253 struct cgraph_edge *e;
254 int newsize;
255 int limit;
256
257 for (e = to->callees; e; e = e->next_callee)
258 if (e->callee == what)
259 times++;
260
261 if (to->global.inlined_to)
262 to = to->global.inlined_to;
263
264 /* When inlining a large function body called once into a small function,
265 take the inlined function as the base for limiting the growth. */
266 if (to->local.self_insns > what->local.self_insns)
267 limit = to->local.self_insns;
268 else
269 limit = what->local.self_insns;
270
271 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
272
273 /* Check the size after inlining against the function limits. But allow
274 the function to shrink if it went over the limits by forced inlining. */
275 newsize = cgraph_estimate_size_after_inlining (times, to, what);
276 if (newsize >= to->global.insns
277 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
278 && newsize > limit)
279 {
280 if (reason)
281 *reason = N_("--param large-function-growth limit reached");
282 return false;
283 }
284 return true;
285 }
286
287 /* Return true when function N is small enough to be inlined. */
288
289 bool
290 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
291 {
292 tree decl = n->decl;
293
294 if (n->inline_decl)
295 decl = n->inline_decl;
296 if (!DECL_INLINE (decl))
297 {
298 if (reason)
299 *reason = N_("function not inlinable");
300 return false;
301 }
302
303 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
304 {
305 if (reason)
306 *reason = N_("function body not available");
307 return false;
308 }
309
310 if (DECL_DECLARED_INLINE_P (decl))
311 {
312 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
313 {
314 if (reason)
315 *reason = N_("--param max-inline-insns-single limit reached");
316 return false;
317 }
318 }
319 else
320 {
321 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
322 {
323 if (reason)
324 *reason = N_("--param max-inline-insns-auto limit reached");
325 return false;
326 }
327 }
328
329 return true;
330 }
331
332 /* Return true when inlining WHAT into TO would create recursive inlining.
333 We call it recursive inlining whenever the same function appears more than
334 once on a single recursion nest path in the inline graph. */
335
336 static bool
337 cgraph_recursive_inlining_p (struct cgraph_node *to,
338 struct cgraph_node *what,
339 const char **reason)
340 {
341 bool recursive;
342 if (to->global.inlined_to)
343 recursive = what->decl == to->global.inlined_to->decl;
344 else
345 recursive = what->decl == to->decl;
346 /* Marking a recursive function inline has sane semantics and thus we
347 should not warn about it. */
348 if (recursive && reason)
349 *reason = (what->local.disregard_inline_limits
350 ? N_("recursive inlining") : "");
351 return recursive;
352 }
353
354 /* Return true if the call can be hot. */
355 static bool
356 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
357 {
358 if (profile_info && flag_branch_probabilities
359 && (edge->count
360 <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
361 return false;
362 return true;
363 }
364
365 /* A cost model driving the inlining heuristics so that the edges with the
366 smallest badness are inlined first. After each inlining is performed,
367 the costs of all caller edges of the affected nodes are recomputed so that
368 the metric may accurately depend on values such as the number of inlinable
369 callers of the function or the function body size.
370 
371 With profiling we use the number of executions of each edge to drive the
372 cost. We should also distinguish hot and cold calls, where the cold calls
373 are inlined only when the overall code size is improved.
374 */
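/* A purely illustrative example with made-up numbers: with profile feedback,
   an edge executed as often as the hottest edge (count == max_count) whose
   inlining grows the caller by 10 insns gets a badness of roughly
   INT_MIN / 10, while an edge executed a tenth as often with the same growth
   gets roughly INT_MIN / 100; since the fibheap extracts the smallest key
   first, the hotter edge is inlined first.  */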
375
376 static int
377 cgraph_edge_badness (struct cgraph_edge *edge)
378 {
379 if (max_count)
380 {
381 int growth =
382 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
383 growth -= edge->caller->global.insns;
384
385 /* Always prefer inlining that saves code size. */
386 if (growth <= 0)
387 return INT_MIN - growth;
388 return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
389 }
390 else
391 {
392 int nest = MIN (edge->loop_nest, 8);
393 int badness = cgraph_estimate_growth (edge->callee) * 256;
394
395 /* Decrease badness if call is nested. */
396 if (badness > 0)
397 badness >>= nest;
398 else
399 badness <<= nest;
400
401 /* Make recursive inlining always happen after other inlining is done. */
402 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
403 return badness + 1;
404 else
405 return badness;
406 }
407 }
408
409 /* Recompute the heap nodes for each caller edge of NODE. */
410
411 static void
412 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
413 bitmap updated_nodes)
414 {
415 struct cgraph_edge *edge;
416 const char *failed_reason;
417
418 if (!node->local.inlinable || node->local.disregard_inline_limits
419 || node->global.inlined_to)
420 return;
421 if (bitmap_bit_p (updated_nodes, node->uid))
422 return;
423 bitmap_set_bit (updated_nodes, node->uid);
424 node->global.estimated_growth = INT_MIN;
425
426 if (!node->local.inlinable)
427 return;
428 /* Prune out edges we won't inline into anymore. */
429 if (!cgraph_default_inline_p (node, &failed_reason))
430 {
431 for (edge = node->callers; edge; edge = edge->next_caller)
432 if (edge->aux)
433 {
434 fibheap_delete_node (heap, edge->aux);
435 edge->aux = NULL;
436 if (edge->inline_failed)
437 edge->inline_failed = failed_reason;
438 }
439 return;
440 }
441
442 for (edge = node->callers; edge; edge = edge->next_caller)
443 if (edge->inline_failed)
444 {
445 int badness = cgraph_edge_badness (edge);
446 if (edge->aux)
447 {
448 fibnode_t n = edge->aux;
449 gcc_assert (n->data == edge);
450 if (n->key == badness)
451 continue;
452
453 /* fibheap_replace_key can only decrease keys; otherwise re-insert below. */
454 if (fibheap_replace_key (heap, n, badness))
455 continue;
456 fibheap_delete_node (heap, edge->aux);
457 }
458 edge->aux = fibheap_insert (heap, badness, edge);
459 }
460 }
461
462 /* Recompute the heap nodes for the caller edges of each of NODE's callees. */
463
464 static void
465 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
466 bitmap updated_nodes)
467 {
468 struct cgraph_edge *e;
469 node->global.estimated_growth = INT_MIN;
470
471 for (e = node->callees; e; e = e->next_callee)
472 if (e->inline_failed)
473 update_caller_keys (heap, e->callee, updated_nodes);
474 else if (!e->inline_failed)
475 update_callee_keys (heap, e->callee, updated_nodes);
476 }
477
478 /* Enqueue all recursive calls to NODE found in WHERE into the priority
479 queue HEAP, ordered by how much we want to inline them. */
480
481 static void
482 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
483 fibheap_t heap)
484 {
485 static int priority;
486 struct cgraph_edge *e;
487 for (e = where->callees; e; e = e->next_callee)
488 if (e->callee == node)
489 {
490 /* When profile feedback is available, prioritize by the expected number
491 of calls. Without profile feedback we maintain a simple queue
492 to order the candidates by recursion depth. */
493 fibheap_insert (heap,
494 !max_count ? priority++
495 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
496 e);
497 }
498 for (e = where->callees; e; e = e->next_callee)
499 if (!e->inline_failed)
500 lookup_recursive_calls (node, e->callee, heap);
501 }
502
503 /* Find callgraph nodes that close a cycle in the graph. The
504 resulting hashtab can be used to avoid walking the cycles.
505 Uses the cgraph nodes' ->aux field, which needs to be zero
506 before and will be zero after the operation. */
507
508 static void
509 cgraph_find_cycles (struct cgraph_node *node, htab_t cycles)
510 {
511 struct cgraph_edge *e;
512
513 if (node->aux)
514 {
515 void **slot;
516 slot = htab_find_slot (cycles, node, INSERT);
517 if (!*slot)
518 {
519 if (dump_file)
520 fprintf (dump_file, "Cycle contains %s\n", cgraph_node_name (node));
521 *slot = node;
522 }
523 return;
524 }
525
526 node->aux = node;
527 for (e = node->callees; e; e = e->next_callee)
528 cgraph_find_cycles (e->callee, cycles);
529 node->aux = 0;
530 }
531
532 /* Leafify the cgraph node NODE. We have to be careful when recursing
533 so as not to run endlessly around cycles of the callgraph.
534 We do so by using a hashtab of cycle-entering nodes as generated
535 by cgraph_find_cycles. */
536
537 static void
538 cgraph_flatten_node (struct cgraph_node *node, htab_t cycles)
539 {
540 struct cgraph_edge *e;
541
542 for (e = node->callees; e; e = e->next_callee)
543 {
544 /* Inline call, if possible, and recurse. Be sure we are not
545 entering callgraph circles here. */
546 if (e->inline_failed
547 && e->callee->local.inlinable
548 && !cgraph_recursive_inlining_p (node, e->callee,
549 &e->inline_failed)
550 && !htab_find (cycles, e->callee))
551 {
552 if (dump_file)
553 fprintf (dump_file, " inlining %s", cgraph_node_name (e->callee));
554 cgraph_mark_inline_edge (e, true);
555 cgraph_flatten_node (e->callee, cycles);
556 }
557 else if (dump_file)
558 fprintf (dump_file, " !inlining %s", cgraph_node_name (e->callee));
559 }
560 }
561
562 /* Decide on recursive inlining: when the function has recursive calls,
563 inline them until the body size reaches the given limit. */
564
565 static bool
566 cgraph_decide_recursive_inlining (struct cgraph_node *node)
567 {
568 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
569 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
570 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
571 fibheap_t heap;
572 struct cgraph_edge *e;
573 struct cgraph_node *master_clone, *next;
574 int depth = 0;
575 int n = 0;
576
577 if (DECL_DECLARED_INLINE_P (node->decl))
578 {
579 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
580 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
581 }
582
583 /* Make sure that function is small enough to be considered for inlining. */
584 if (!max_depth
585 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
586 return false;
587 heap = fibheap_new ();
588 lookup_recursive_calls (node, node, heap);
589 if (fibheap_empty (heap))
590 {
591 fibheap_delete (heap);
592 return false;
593 }
594
595 if (dump_file)
596 fprintf (dump_file,
597 " Performing recursive inlining on %s\n",
598 cgraph_node_name (node));
599
600 /* We need original clone to copy around. */
601 master_clone = cgraph_clone_node (node, node->count, 1, false);
602 master_clone->needed = true;
603 for (e = master_clone->callees; e; e = e->next_callee)
604 if (!e->inline_failed)
605 cgraph_clone_inlined_nodes (e, true, false);
606
607 /* Do the inlining and update the list of recursive calls during the process. */
608 while (!fibheap_empty (heap)
609 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
610 <= limit))
611 {
612 struct cgraph_edge *curr = fibheap_extract_min (heap);
613 struct cgraph_node *cnode;
614
615 depth = 1;
616 for (cnode = curr->caller;
617 cnode->global.inlined_to; cnode = cnode->callers->caller)
618 if (node->decl == curr->callee->decl)
619 depth++;
620 if (depth > max_depth)
621 {
622 if (dump_file)
623 fprintf (dump_file,
624 " maximal depth reached\n");
625 continue;
626 }
627
628 if (max_count)
629 {
630 if (!cgraph_maybe_hot_edge_p (curr))
631 {
632 if (dump_file)
633 fprintf (dump_file, " Not inlining cold call\n");
634 continue;
635 }
636 if (curr->count * 100 / node->count < probability)
637 {
638 if (dump_file)
639 fprintf (dump_file,
640 " Probability of edge is too small\n");
641 continue;
642 }
643 }
644
645 if (dump_file)
646 {
647 fprintf (dump_file,
648 " Inlining call of depth %i", depth);
649 if (node->count)
650 {
651 fprintf (dump_file, " called approx. %.2f times per call",
652 (double)curr->count / node->count);
653 }
654 fprintf (dump_file, "\n");
655 }
656 cgraph_redirect_edge_callee (curr, master_clone);
657 cgraph_mark_inline_edge (curr, false);
658 lookup_recursive_calls (node, curr->callee, heap);
659 n++;
660 }
661 if (!fibheap_empty (heap) && dump_file)
662 fprintf (dump_file, " Recursive inlining growth limit met.\n");
663
664 fibheap_delete (heap);
665 if (dump_file)
666 fprintf (dump_file,
667 "\n Inlined %i times, body grown from %i to %i insns\n", n,
668 master_clone->global.insns, node->global.insns);
669
670 /* Remove the master clone we used for inlining. We rely on the fact that
671 clones inlined into the master clone get queued just before the master
672 clone, so we don't need recursion. */
673 for (node = cgraph_nodes; node != master_clone;
674 node = next)
675 {
676 next = node->next;
677 if (node->global.inlined_to == master_clone)
678 cgraph_remove_node (node);
679 }
680 cgraph_remove_node (master_clone);
681 /* FIXME: Recursive inlining actually reduces the number of calls to the
682 function. At this point we should probably walk the function and its
683 inline clones and compensate the counts accordingly. This probably
684 doesn't matter much in practice. */
685 return n > 0;
686 }
687
688 /* Set inline_failed for all callers of given function to REASON. */
689
690 static void
691 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
692 {
693 struct cgraph_edge *e;
694
695 if (dump_file)
696 fprintf (dump_file, "Inlining failed: %s\n", reason);
697 for (e = node->callers; e; e = e->next_caller)
698 if (e->inline_failed)
699 e->inline_failed = reason;
700 }
701
702 /* We use a greedy algorithm for inlining small functions:
703 all inline candidates are put into a prioritized heap based on the
704 estimated growth of the overall number of instructions, and after each
705 inlining the estimates of the affected nodes are updated.
706 
707 Candidates are inlined in priority order until the unit growth limit is hit. */
708
709 static void
710 cgraph_decide_inlining_of_small_functions (void)
711 {
712 struct cgraph_node *node;
713 struct cgraph_edge *edge;
714 const char *failed_reason;
715 fibheap_t heap = fibheap_new ();
716 bitmap updated_nodes = BITMAP_ALLOC (NULL);
717
718 if (dump_file)
719 fprintf (dump_file, "\nDeciding on smaller functions:\n");
720
721 /* Put all inline candidates into the heap. */
722
723 for (node = cgraph_nodes; node; node = node->next)
724 {
725 if (!node->local.inlinable || !node->callers
726 || node->local.disregard_inline_limits)
727 continue;
728 if (dump_file)
729 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
730
731 node->global.estimated_growth = INT_MIN;
732 if (!cgraph_default_inline_p (node, &failed_reason))
733 {
734 cgraph_set_inline_failed (node, failed_reason);
735 continue;
736 }
737
738 for (edge = node->callers; edge; edge = edge->next_caller)
739 if (edge->inline_failed)
740 {
741 gcc_assert (!edge->aux);
742 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
743 }
744 }
745 while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
746 {
747 int old_insns = overall_insns;
748 struct cgraph_node *where;
749 int growth =
750 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
751
752 growth -= edge->caller->global.insns;
753
754 if (dump_file)
755 {
756 fprintf (dump_file,
757 "\nConsidering %s with %i insns\n",
758 cgraph_node_name (edge->callee),
759 edge->callee->global.insns);
760 fprintf (dump_file,
761 " to be inlined into %s\n"
762 " Estimated growth after inlined into all callees is %+i insns.\n"
763 " Estimated badness is %i.\n",
764 cgraph_node_name (edge->caller),
765 cgraph_estimate_growth (edge->callee),
766 cgraph_edge_badness (edge));
767 if (edge->count)
768 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
769 }
770 gcc_assert (edge->aux);
771 edge->aux = NULL;
772 if (!edge->inline_failed)
773 continue;
774
775 /* When profile info is not available we don't weight in any way the
776 position of the call in the procedure itself. This means that if a call
777 of function A from function B seems profitable to inline, the recursive
778 call of function A in the inline copy of A in B will look profitable too,
779 and we end up inlining until we reach the maximal function growth. This
780 is not a good idea, so prohibit the recursive inlining here.
781 
782 ??? When the frequencies are taken into account we might not need this
783 restriction. */
784 if (!max_count)
785 {
786 where = edge->caller;
787 while (where->global.inlined_to)
788 {
789 if (where->decl == edge->callee->decl)
790 break;
791 where = where->callers->caller;
792 }
793 if (where->global.inlined_to)
794 {
795 edge->inline_failed
796 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
797 if (dump_file)
798 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
799 continue;
800 }
801 }
802
803 if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
804 {
805 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
806 &edge->inline_failed))
807 {
808 edge->inline_failed =
809 N_("call is unlikely");
810 if (dump_file)
811 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
812 }
813 continue;
814 }
815 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
816 {
817 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
818 &edge->inline_failed))
819 {
820 if (dump_file)
821 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
822 }
823 continue;
824 }
825 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
826 &edge->inline_failed))
827 {
828 where = edge->caller;
829 if (where->global.inlined_to)
830 where = where->global.inlined_to;
831 if (!cgraph_decide_recursive_inlining (where))
832 continue;
833 update_callee_keys (heap, where, updated_nodes);
834 }
835 else
836 {
837 struct cgraph_node *callee;
838 if (!cgraph_check_inline_limits (edge->caller, edge->callee,
839 &edge->inline_failed))
840 {
841 if (dump_file)
842 fprintf (dump_file, " Not inlining into %s:%s.\n",
843 cgraph_node_name (edge->caller), edge->inline_failed);
844 continue;
845 }
846 callee = edge->callee;
847 cgraph_mark_inline_edge (edge, true);
848 update_callee_keys (heap, callee, updated_nodes);
849 }
850 where = edge->caller;
851 if (where->global.inlined_to)
852 where = where->global.inlined_to;
853
854 /* Our profitability metric can depend on local properties
855 such as the number of inlinable calls and the size of the function body.
856 After inlining, these properties might change for the function we
857 inlined into (since its body size changed) and for the functions
858 called by the function we inlined (since the number of their inlinable
859 callers might change). */
860 update_caller_keys (heap, where, updated_nodes);
861 bitmap_clear (updated_nodes);
862
863 if (dump_file)
864 {
865 fprintf (dump_file,
866 " Inlined into %s which now has %i insns, "
867 "net change of %+i insns.\n",
868 cgraph_node_name (edge->caller),
869 edge->caller->global.insns,
870 overall_insns - old_insns);
871 }
872 }
873 while ((edge = fibheap_extract_min (heap)) != NULL)
874 {
875 gcc_assert (edge->aux);
876 edge->aux = NULL;
877 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
878 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
879 &edge->inline_failed))
880 edge->inline_failed = N_("--param inline-unit-growth limit reached");
881 }
882 fibheap_delete (heap);
883 BITMAP_FREE (updated_nodes);
884 }
885
886 /* Decide on the inlining. We do so in topological order to avoid
887 the expense of updating the data structures. */
888
889 static unsigned int
890 cgraph_decide_inlining (void)
891 {
892 struct cgraph_node *node;
893 int nnodes;
894 struct cgraph_node **order =
895 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
896 int old_insns = 0;
897 int i;
898
899 timevar_push (TV_INLINE_HEURISTICS);
900 max_count = 0;
901 for (node = cgraph_nodes; node; node = node->next)
902 if (node->analyzed && (node->needed || node->reachable))
903 {
904 struct cgraph_edge *e;
905
906 /* At the moment, no IPA passes change function bodies before inlining.
907 Save some time by not recomputing function body sizes if early inlining
908 already did so. */
909 if (!flag_early_inlining)
910 node->local.self_insns = node->global.insns
911 = estimate_num_insns (node->decl);
912
913 initial_insns += node->local.self_insns;
914 gcc_assert (node->local.self_insns == node->global.insns);
915 for (e = node->callees; e; e = e->next_callee)
916 if (max_count < e->count)
917 max_count = e->count;
918 }
919 overall_insns = initial_insns;
920 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
921
922 max_insns = overall_insns;
923 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
924 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
925
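/* A made-up example: assuming --param inline-unit-growth is 30 and the unit
   starts above the large-unit-insns threshold at 10000 insns, inlining may
   continue until the unit reaches 10000 * 130 / 100 = 13000 insns.  */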
926 max_insns = ((HOST_WIDEST_INT) max_insns
927 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
928
929 nnodes = cgraph_postorder (order);
930
931 if (dump_file)
932 fprintf (dump_file,
933 "\nDeciding on inlining. Starting with %i insns.\n",
934 initial_insns);
935
936 for (node = cgraph_nodes; node; node = node->next)
937 node->aux = 0;
938
939 if (dump_file)
940 fprintf (dump_file, "\nInlining always_inline functions:\n");
941
942 /* In the first pass mark all always_inline edges. Do this with a priority
943 so none of our later choices will make this impossible. */
944 for (i = nnodes - 1; i >= 0; i--)
945 {
946 struct cgraph_edge *e, *next;
947
948 node = order[i];
949
950 /* Handle nodes to be flattened, but don't update overall unit size. */
951 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
952 {
953 int old_overall_insns = overall_insns;
954 htab_t cycles;
955 if (dump_file)
956 fprintf (dump_file,
957 "Leafifying %s\n", cgraph_node_name (node));
958 cycles = htab_create (7, htab_hash_pointer, htab_eq_pointer, NULL);
959 cgraph_find_cycles (node, cycles);
960 cgraph_flatten_node (node, cycles);
961 htab_delete (cycles);
962 overall_insns = old_overall_insns;
963 /* We don't need to consider always_inline functions inside the flattened
964 function anymore. */
965 continue;
966 }
967
968 if (!node->local.disregard_inline_limits)
969 continue;
970 if (dump_file)
971 fprintf (dump_file,
972 "\nConsidering %s %i insns (always inline)\n",
973 cgraph_node_name (node), node->global.insns);
974 old_insns = overall_insns;
975 for (e = node->callers; e; e = next)
976 {
977 next = e->next_caller;
978 if (!e->inline_failed)
979 continue;
980 if (cgraph_recursive_inlining_p (e->caller, e->callee,
981 &e->inline_failed))
982 continue;
983 cgraph_mark_inline_edge (e, true);
984 if (dump_file)
985 fprintf (dump_file,
986 " Inlined into %s which now has %i insns.\n",
987 cgraph_node_name (e->caller),
988 e->caller->global.insns);
989 }
990 if (dump_file)
991 fprintf (dump_file,
992 " Inlined for a net change of %+i insns.\n",
993 overall_insns - old_insns);
994 }
995
996 if (!flag_really_no_inline)
997 cgraph_decide_inlining_of_small_functions ();
998
999 if (!flag_really_no_inline
1000 && flag_inline_functions_called_once)
1001 {
1002 if (dump_file)
1003 fprintf (dump_file, "\nDeciding on functions called once:\n");
1004
1005 /* And finally decide what functions are called once. */
1006
1007 for (i = nnodes - 1; i >= 0; i--)
1008 {
1009 node = order[i];
1010
1011 if (node->callers && !node->callers->next_caller && !node->needed
1012 && node->local.inlinable && node->callers->inline_failed
1013 && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
1014 {
1015 bool ok = true;
1016 struct cgraph_node *node1;
1017
1018 /* Verify that we won't duplicate the caller. */
1019 for (node1 = node->callers->caller;
1020 node1->callers && !node1->callers->inline_failed
1021 && ok; node1 = node1->callers->caller)
1022 if (node1->callers->next_caller || node1->needed)
1023 ok = false;
1024 if (ok)
1025 {
1026 if (dump_file)
1027 {
1028 fprintf (dump_file,
1029 "\nConsidering %s %i insns.\n",
1030 cgraph_node_name (node), node->global.insns);
1031 fprintf (dump_file,
1032 " Called once from %s %i insns.\n",
1033 cgraph_node_name (node->callers->caller),
1034 node->callers->caller->global.insns);
1035 }
1036
1037 old_insns = overall_insns;
1038
1039 if (cgraph_check_inline_limits (node->callers->caller, node,
1040 NULL))
1041 {
1042 cgraph_mark_inline (node->callers);
1043 if (dump_file)
1044 fprintf (dump_file,
1045 " Inlined into %s which now has %i insns"
1046 " for a net change of %+i insns.\n",
1047 cgraph_node_name (node->callers->caller),
1048 node->callers->caller->global.insns,
1049 overall_insns - old_insns);
1050 }
1051 else
1052 {
1053 if (dump_file)
1054 fprintf (dump_file,
1055 " Inline limit reached, not inlined.\n");
1056 }
1057 }
1058 }
1059 }
1060 }
1061
1062 if (dump_file)
1063 fprintf (dump_file,
1064 "\nInlined %i calls, eliminated %i functions, "
1065 "%i insns turned to %i insns.\n\n",
1066 ncalls_inlined, nfunctions_inlined, initial_insns,
1067 overall_insns);
1068 free (order);
1069 timevar_pop (TV_INLINE_HEURISTICS);
1070 return 0;
1071 }
1072
1073 /* Decide on inlining of the calls in function NODE. EARLY is true when
1074 we are called from the early inliner. Return true if anything was inlined. */
1075
1076 bool
1077 cgraph_decide_inlining_incrementally (struct cgraph_node *node, bool early)
1078 {
1079 struct cgraph_edge *e;
1080 bool inlined = false;
1081 const char *failed_reason;
1082
1083 /* First of all look for always inline functions. */
1084 for (e = node->callees; e; e = e->next_callee)
1085 if (e->callee->local.disregard_inline_limits
1086 && e->inline_failed
1087 && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
1088 /* ??? It is possible that variable renaming removed the function body
1089 in duplicate_decls. See gcc.c-torture/compile/20011119-2.c. */
1090 && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
1091 {
1092 if (dump_file && early)
1093 {
1094 fprintf (dump_file, " Early inlining %s",
1095 cgraph_node_name (e->callee));
1096 fprintf (dump_file, " into %s\n", cgraph_node_name (node));
1097 }
1098 cgraph_mark_inline (e);
1099 inlined = true;
1100 }
1101
1102 /* Now do the automatic inlining. */
1103 if (!flag_really_no_inline)
1104 for (e = node->callees; e; e = e->next_callee)
1105 if (e->callee->local.inlinable
1106 && e->inline_failed
1107 && !e->callee->local.disregard_inline_limits
1108 && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
1109 && (!early
1110 || (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1111 <= e->caller->global.insns))
1112 && cgraph_check_inline_limits (node, e->callee, &e->inline_failed)
1113 && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
1114 {
1115 if (cgraph_default_inline_p (e->callee, &failed_reason))
1116 {
1117 if (dump_file && early)
1118 {
1119 fprintf (dump_file, " Early inlining %s",
1120 cgraph_node_name (e->callee));
1121 fprintf (dump_file, " into %s\n", cgraph_node_name (node));
1122 }
1123 cgraph_mark_inline (e);
1124 inlined = true;
1125 }
1126 else if (!early)
1127 e->inline_failed = failed_reason;
1128 }
1129 if (early && inlined)
1130 {
1131 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1132 tree_register_cfg_hooks ();
1133 current_function_decl = node->decl;
1134 optimize_inline_calls (current_function_decl);
1135 node->local.self_insns = node->global.insns;
1136 current_function_decl = NULL;
1137 pop_cfun ();
1138 }
1139 return inlined;
1140 }
1141
1142 /* When inlining shall be performed. */
1143 static bool
1144 cgraph_gate_inlining (void)
1145 {
1146 return flag_inline_trees;
1147 }
1148
1149 struct tree_opt_pass pass_ipa_inline =
1150 {
1151 "inline", /* name */
1152 cgraph_gate_inlining, /* gate */
1153 cgraph_decide_inlining, /* execute */
1154 NULL, /* sub */
1155 NULL, /* next */
1156 0, /* static_pass_number */
1157 TV_INTEGRATION, /* tv_id */
1158 0, /* properties_required */
1159 PROP_cfg, /* properties_provided */
1160 0, /* properties_destroyed */
1161 0, /* todo_flags_start */
1162 TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */
1163 0 /* letter */
1164 };
1165
1166 /* Because inlining might remove no-longer-reachable nodes, we need to
1167 keep the array visible to the garbage collector to avoid reading
1168 collected-out nodes. */
1169 static int nnodes;
1170 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1171
1172 /* Do inlining of small functions. Doing so early helps profiling and other
1173 passes to be somewhat more effective and avoids some code duplication in
1174 the later real inlining pass for testcases with very many function calls. */
1175 static unsigned int
1176 cgraph_early_inlining (void)
1177 {
1178 struct cgraph_node *node;
1179 int i;
1180
1181 if (sorrycount || errorcount)
1182 return 0;
1183 #ifdef ENABLE_CHECKING
1184 for (node = cgraph_nodes; node; node = node->next)
1185 gcc_assert (!node->aux);
1186 #endif
1187
1188 order = ggc_alloc (sizeof (*order) * cgraph_n_nodes);
1189 nnodes = cgraph_postorder (order);
1190 for (i = nnodes - 1; i >= 0; i--)
1191 {
1192 node = order[i];
1193 if (node->analyzed && (node->needed || node->reachable))
1194 node->local.self_insns = node->global.insns
1195 = estimate_num_insns (node->decl);
1196 }
1197 for (i = nnodes - 1; i >= 0; i--)
1198 {
1199 node = order[i];
1200 if (node->analyzed && node->local.inlinable
1201 && (node->needed || node->reachable)
1202 && node->callers)
1203 {
1204 if (cgraph_decide_inlining_incrementally (node, true))
1205 ggc_collect ();
1206 }
1207 }
1208 cgraph_remove_unreachable_nodes (true, dump_file);
1209 #ifdef ENABLE_CHECKING
1210 for (node = cgraph_nodes; node; node = node->next)
1211 gcc_assert (!node->global.inlined_to);
1212 #endif
1213 ggc_free (order);
1214 order = NULL;
1215 nnodes = 0;
1216 return 0;
1217 }
1218
1219 /* When inlining shall be performed. */
1220 static bool
1221 cgraph_gate_early_inlining (void)
1222 {
1223 return flag_inline_trees && flag_early_inlining;
1224 }
1225
1226 struct tree_opt_pass pass_early_ipa_inline =
1227 {
1228 "einline", /* name */
1229 cgraph_gate_early_inlining, /* gate */
1230 cgraph_early_inlining, /* execute */
1231 NULL, /* sub */
1232 NULL, /* next */
1233 0, /* static_pass_number */
1234 TV_INTEGRATION, /* tv_id */
1235 0, /* properties_required */
1236 PROP_cfg, /* properties_provided */
1237 0, /* properties_destroyed */
1238 0, /* todo_flags_start */
1239 TODO_dump_cgraph | TODO_dump_func, /* todo_flags_finish */
1240 0 /* letter */
1241 };
1242
1243 #include "gt-ipa-inline.h"