/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation about the particular representation of inline plans in
   the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks the given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     the overall statistics).

   inlining heuristics limits

     These functions check whether a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from
     the rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the constraints on code size growth are met.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: the estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass does no global whole-compilation-unit analysis, and thus,
     when allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  Functions are early optimized after being inlined
     into, so the early inliner sees the unoptimized function itself, but
     all considered callees are already optimized, allowing it to unfold
     the abstraction penalty of C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures that
     this first round of inlining happens before instrumentation.

   pass_ipa_inline

     This is the main pass, implementing a simple greedy algorithm for
     inlining small functions (which may result in overall growth of the
     compilation unit) and for inlining functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph); unlike early inlining, it does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly, the function body before inlining is
     saved when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"

/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);


/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

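/* Return the inline summary (size and stack estimates) kept in NODE's
   local info.  */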
static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate the size of TO after inlining WHAT into it TIMES times.  */
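/* For example, inlining a 20-insn callee once (TIMES == 1) into a
   50-insn caller yields roughly 50 + 20 - call_insns, where call_insns
   (the inline call cost parameter plus per-argument move costs) is
   subtracted because the call itself disappears.  */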

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency,
                                 e->loop_nest, update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff
   any new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update the size of the caller and of all functions the caller
     is inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
  /* Look for all calls, mark them inline and recursively clone
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all of its callers.  */
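/* For instance, a static 30-insn function with three call sites grows
   each caller by roughly 30 - call_insns; the subtraction at the end of
   the function accounts for the offline copy that can be eliminated once
   every call is inlined.  */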

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.insns);
    }

  /* ??? Wrong for non-trivially self-recursive functions or cases where
     we decide not to inline for different reasons, but it is not a big
     deal, as in that case we will keep the body around but will also
     avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}

/* Return false when inlining WHAT into TO is not a good idea because it
   would cause too large a growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined; otherwise figure out how many call sites in
   TO call WHAT and verify that all of them can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (inline_summary (to)->self_insns > inline_summary (what)->self_insns)
    limit = inline_summary (to)->self_insns;
  else
    limit = inline_summary (what)->self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
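  /* E.g. with --param large-function-growth=100 the limit becomes twice
     the larger of the two bodies, so a 40-insn function may grow to 80
     insns before CIF_LARGE_FUNCTION_GROWTH_LIMIT triggers (subject to
     the large-function-insns floor checked below).  */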

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining any case where the same function appears
   more than once in a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics, so we should
     not warn about it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in such a way that edges
   with the smallest badness are inlined first.  After each inlining the
   costs of all caller edges of the affected nodes are recomputed, so the
   metric may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining that saves code size: any such edge gets a
     badness near INT_MIN, below every size-increasing candidate.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base the priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
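  /* E.g. the hottest edge in the unit (count == max_count) with a growth
     of 10 insns gets a badness of about INT_MIN / 10; colder edges scale
     linearly towards zero.  */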
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When the function local profile is available, base the priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since, while the call might be frequent
     within the function, the function itself may be infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so less frequently
     called functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      badness = growth * 256;

      /* Compress the frequency range so we don't overflow.  */
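      /* E.g. a frequency-derived div of 1024 maps to
         256 + ceil_log2 (1024) - 8 == 258, keeping hot calls comparable
         without overflowing the division below.  */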
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When the function local profile is not available or does not give
     useful information (i.e. the frequency is zero), base the cost on the
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }
  /* Make recursive inlining always happen after all other inlining
     is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for the caller edges of each callee of NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue, ordered
   by how eagerly we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue to order the candidates by recursion depth.  */
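        /* The profile count is scaled down so that even the hottest edge
           fits in an integer heap key: the divisor splits max_count into
           at most 1<<24 buckets and the negated bucket number is used as
           the key.  */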
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (cnode->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls to the
     function.  At this point we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given the whole-compilation-unit estimate of INSNS, compute how large
   we can allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

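  /* E.g. with insns == 10000 and --param inline-unit-growth=30 the unit
     may grow to 13000 insns; smaller units are first padded up to
     large-unit-insns, so tiny programs get proportionally more
     headroom.  */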
  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}


/* We use a greedy algorithm for inlining small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated after each inlining.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

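  /* Main greedy loop: repeatedly extract the edge with the smallest
     badness and inline it while the unit stays within max_insns.
     Whenever the overall size reaches a new minimum, max_insns is
     recomputed below, so the unit-growth limit is always relative to the
     smallest unit size seen so far.  */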
  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlining into all callers is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   gimple_filename ((const_gimple) edge->call_stmt),
                   gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
                     edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When we don't have profile info ready, we don't weight in any way
         the position of the call within the procedure itself.  This means
         that if a call of function A from function B seems profitable to
         inline, the recursive call of function A in the inline copy of A
         in B will look profitable too, and we end up inlining until we
         reach the maximal function growth.  This is not a good idea, so
         prohibit the recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.

         We need to be careful here: in some testcases, e.g. directives.c
         in libcpp, we can estimate a self-recursive function to have
         negative growth for inlining completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
        {
          gimple_call_set_cannot_inline (edge->call_stmt, true);
          edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (gimple_call_cannot_inline_p (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns, "
                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating the data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;
  bool redo_always_inline = true;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += inline_summary (node)->self_insns;
        gcc_assert (inline_summary (node)->self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining. Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  while (redo_always_inline)
    {
      redo_always_inline = false;
      for (i = nnodes - 1; i >= 0; i--)
        {
          struct cgraph_edge *e, *next;

          node = order[i];

          /* Handle nodes to be flattened, but don't update overall unit
             size.  */
          if (lookup_attribute ("flatten",
                                DECL_ATTRIBUTES (node->decl)) != NULL)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Flattening %s\n", cgraph_node_name (node));
              cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
            }

          if (!node->local.disregard_inline_limits)
            continue;
          if (dump_file)
            fprintf (dump_file,
                     "\nConsidering %s %i insns (always inline)\n",
                     cgraph_node_name (node), node->global.insns);
          old_insns = overall_insns;
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
              if (!e->inline_failed
                  || gimple_call_cannot_inline_p (e->call_stmt))
                continue;
              if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                               &e->inline_failed))
                continue;
              if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
                {
                  gimple_call_set_cannot_inline (e->call_stmt, true);
                  continue;
                }
              if (cgraph_mark_inline_edge (e, true, NULL))
                redo_always_inline = true;
              if (dump_file)
                fprintf (dump_file,
                         " Inlined into %s which now has %i insns.\n",
                         cgraph_node_name (e->caller),
                         e->caller->global.insns);
            }
          /* Inlining a self-recursive function might introduce new calls
             to itself that we didn't see in the loop above.  Fill in the
             proper reason why the inlining failed.  */
          for (e = node->callers; e; e = e->next_caller)
            if (e->inline_failed)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file,
                     " Inlined for a net change of %+i insns.\n",
                     overall_insns - old_insns);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && !node->needed
              && node->local.inlinable
              && node->callers->inline_failed
              && !gimple_call_cannot_inline_p (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general, when the mode
   is set, we have recursive inlining; but as a special case we want to try
   harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with a being flattened and b being always_inline.
   Flattening 'a' will collapse a->b->c before hitting the cycle.  To
   accommodate always_inline, we however need to inline a->b->c->b.

   So after hitting the cycle for the first time, we switch into
   ALWAYS_INLINE mode and stop inlining only after hitting ALWAYS_INLINE
   in ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;

  /* Have we hit a cycle?  */
  if (callee_mode)
    {
      /* We have hit the cycle for the first time, we are not in
         ALWAYS_INLINE-only mode yet, and the function in question is
         always_inline; try harder.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
          return false;
        }
    }

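  /* Temporarily record the active mode in CALLEE->aux so that a nested
     visit of the same node can detect the cycle above; the previous
     value is restored before returning.  */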
  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    {
      cgraph_mark_inline (e);

      /* In order to fully inline always_inline functions, we need to
         recurse here, since the inlined functions might not be processed
         by incremental inlining at all yet.

         Also flattening needs to be done recursively.  */

      if (mode == INLINE_ALL || always_inline)
        cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
    }
  callee->aux = (void *)(size_t) callee_mode;
  return true;
}

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating the data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t) node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always_inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
          && (mode != INLINE_ALL || !e->callee->local.inlinable))
        continue;
      if (gimple_call_cannot_inline_p (e->call_stmt))
        continue;
      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
        {
          inlined |= try_inline (e, mode, depth);
          continue;
        }
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file,
                   "Considering always inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        }
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: recursive call.\n");
            }
          continue;
        }
      if (!tree_can_inline_p (node->decl, e->callee->decl))
        {
          gimple_call_set_cannot_inline (e->call_stmt, true);
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Target specific option mismatch.\n");
            }
          continue;
        }
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            }
          continue;
        }
      if (!e->callee->analyzed && !e->callee->inline_decl)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            }
          continue;
        }
      inlined |= try_inline (e, mode, depth);
    }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        /* When the function body would grow and inlining the function
           won't eliminate the need for an offline copy of the function,
           don't inline.  */
        if ((mode == INLINE_SIZE
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || gimple_call_cannot_inline_p (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (!e->callee->analyzed && !e->callee->inline_decl)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
  if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  return todo;
}

/* Decide whether early inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_early_inlining;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};

/* Decide whether IPA early inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph                      /* todo_flags_finish */
 }
};

/* Compute parameters of functions used by the inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function.  But not at -O0,
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);

  /* Estimate the number of instructions for this function.
     ??? At -O0 we don't use this information except for the dumps, and
     even then only for always_inline functions.  But disabling this
     causes ICEs in the inline heuristics...  */
  inline_summary (node)->self_insns
    = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);

  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.insns = inline_summary (node)->self_insns;
  return 0;
}


/* Compute parameters of functions used by the inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  NULL,                                 /* name */
  NULL,                                 /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};

/* This function performs the intraprocedural analysis on NODE that is
   required to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_initialize_node_params (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
        ipa_count_arguments (cs);
        ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note the function body size and compute the other inliner parameters
   of NODE.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}

/* Generate the inline summaries for all analyzed functions.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
    cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}

/* Apply the inline plan to the function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}

struct ipa_opt_pass pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",                             /* name */
  NULL,                                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions               /* todo_flags_finish */
 },
 inline_generate_summary,               /* generate_summary */
 NULL,                                  /* write_summary */
 NULL,                                  /* read_summary */
 NULL,                                  /* function_read_summary */
 0,                                     /* TODOs */
 inline_transform,                      /* function_transform */
 NULL,                                  /* variable_transform */
};


#include "gt-ipa-inline.h"