gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 We separate inlining decisions from the inliner itself and store them
24 inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
25 documentation for the particular representation of inline plans in the
26 callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32 This function marks a given call as inlined and performs the necessary
33 modifications of the cgraph (production of the clones and updating of the
34 overall statistics).
35
36 inlining heuristics limits
37
38 These functions check that a particular inlining is allowed by the
39 limits specified by the user (allowed function growth, overall unit
40 growth and so on).
41
42 inlining heuristics
43
44 This is the implementation of the IPA pass that aims to get as much
45 benefit from inlining as possible while obeying the limits checked above.
46
47 The implementation of the particular heuristics is separated from
48 the rest of the code to make it easier to replace it with a more
49 complicated implementation in the future.  The rest of the inlining code
50 acts as a library aimed at modifying the callgraph and verifying that
51 the limits on code size growth are respected.
52
53 To mark a given call inline, use the cgraph_mark_inline function; the
54 verification is performed by cgraph_default_inline_p and
55 cgraph_check_inline_limits.
56
57 The heuristics implement a simple knapsack-style algorithm that orders
58 all functions by their "profitability" (estimated by code size growth)
59 and inlines them in priority order.
60
61 cgraph_decide_inlining implements heuristics taking the whole callgraph
62 into account, while cgraph_decide_inlining_incrementally considers
63 only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69 This pass computes the local properties of functions that are used by the
70 inliner: estimated function body size, whether the function is inlinable
71 at all, and stack frame consumption.
72
73 Before executing any of the inliner passes, this local pass has to be
74 applied to each function in the callgraph (i.e. run as a subpass of some
75 earlier IPA pass).  The results are made out of date by any optimization
76 applied to the function body.
77
78 pass_early_inlining
79
80 A simple local inlining pass that inlines callees into the current
81 function.  This pass does no global whole-compilation-unit analysis, so
82 when it is allowed to do inlining that expands code size, it might result
83 in unbounded growth of the whole unit.
84
85 The pass is run during conversion into SSA form.  Only functions already
86 converted into SSA form are inlined, so the conversion must happen in
87 topological order on the callgraph (which is maintained by the pass manager).
88 The functions are early optimized after inlining, so the early inliner sees
89 the unoptimized function itself, but all considered callees are already
90 optimized, allowing it to unfold the C++ abstraction penalty effectively and
91 cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, the early inlining is also necessary to reduce
96 instrumentation costs on programs with a high abstraction penalty (ones
97 doing many redundant calls).  This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting already instrumented function bodies we brought in via
100 inlining.
101
102 To avoid this, this pass is executed as an IPA pass before profiling.  It is
103 a simple wrapper around pass_early_inlining and ensures that inlining happens first.
104
105 pass_ipa_inline
106
107 This is the main pass, implementing a simple greedy algorithm that does
108 inlining of small functions (which results in overall growth of the
109 compilation unit) and inlining of functions called once.  The pass computes
110 just the so-called inline plan (the representation of the inlining to be done
111 in the callgraph) and, unlike early inlining, it does not perform the inlining itself.
112
113 pass_apply_inline
114
115 This pass performs the actual inlining on a given function according to
116 pass_ipa_inline.  Possibly the function body before inlining is saved when
117 it is needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates in:
146 
147 In ALWAYS_INLINE mode only functions marked
148 always_inline are inlined.  This mode is used after detecting a cycle during
149 flattening.
150 
151 In SIZE mode, only functions that reduce the function body size after inlining
152 are inlined; this is used during early inlining.
153 
154 In ALL mode, everything is inlined.  This is used during flattening. */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
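/* Worked example of the formula above (illustrative numbers, not taken from
   any real compilation): with what->global.time == 500, a
   time_inlining_benefit of 100, a call frequency of CGRAPH_FREQ_BASE / 2 and
   to->global.time == 1000, the callee contributes
   (500 - 100) * (CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE == 200 units after
   rounding, giving an estimate of 1200, clamped to [0, MAX_TIME].  */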
198
199 /* Estimate the size of the function after inlining WHAT into TO, TIMES times. */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
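/* Worked example (illustrative numbers only): inlining a callee with
   what->global.size == 40 and a size_inlining_benefit of 10 into a caller of
   size 100 at a single call site (TIMES == 1) yields
   100 + 1 * (40 - 10) == 130 size units.  */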
209
210 /* E is expected to be an edge being inlined.  Clone the destination node of
211 the edge and redirect it to the new clone.
212 DUPLICATE is used for bookkeeping on whether we are actually creating new
213 clones or re-using the node originally representing the out-of-line function
214 call.  */
215 void
216 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
217 bool update_original)
218 {
219 HOST_WIDE_INT peak;
220
221 if (duplicate)
222 {
223 /* We may eliminate the need for out-of-line copy to be output.
224 In that case just go ahead and re-use it. */
225 if (!e->callee->callers->next_caller
226 && !e->callee->needed
227 && !cgraph_new_nodes)
228 {
229 gcc_assert (!e->callee->global.inlined_to);
230 if (e->callee->analyzed)
231 {
232 overall_size -= e->callee->global.size;
233 nfunctions_inlined++;
234 }
235 duplicate = false;
236 }
237 else
238 {
239 struct cgraph_node *n;
240 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
241 update_original, NULL);
242 cgraph_redirect_edge_callee (e, n);
243 }
244 }
245
246 if (e->caller->global.inlined_to)
247 e->callee->global.inlined_to = e->caller->global.inlined_to;
248 else
249 e->callee->global.inlined_to = e->caller;
250 e->callee->global.stack_frame_offset
251 = e->caller->global.stack_frame_offset
252 + inline_summary (e->caller)->estimated_self_stack_size;
253 peak = e->callee->global.stack_frame_offset
254 + inline_summary (e->callee)->estimated_self_stack_size;
255 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
256 e->callee->global.inlined_to->global.estimated_stack_size = peak;
257
258 /* Recursively clone all bodies. */
259 for (e = e->callee->callees; e; e = e->next_callee)
260 if (!e->inline_failed)
261 cgraph_clone_inlined_nodes (e, duplicate, update_original);
262 }
263
264 /* Mark edge E as inlined and update the callgraph accordingly.  UPDATE_ORIGINAL
265 specifies whether the profile of the original function should be updated.  If any new
266 indirect edges are discovered in the process, add them to NEW_EDGES, unless
267 it is NULL.  Return true iff any new callgraph edges were discovered as a
268 result of inlining. */
269
270 static bool
271 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
272 VEC (cgraph_edge_p, heap) **new_edges)
273 {
274 int old_size = 0, new_size = 0;
275 struct cgraph_node *to = NULL, *what;
276 struct cgraph_edge *curr = e;
277 int freq;
278 bool duplicate = false;
279 int orig_size = e->callee->global.size;
280
281 gcc_assert (e->inline_failed);
282 e->inline_failed = CIF_OK;
283
284 if (!e->callee->global.inlined)
285 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
286 e->callee->global.inlined = true;
287
288 if (e->callee->callers->next_caller
289 || e->callee->needed)
290 duplicate = true;
291 cgraph_clone_inlined_nodes (e, true, update_original);
292
293 what = e->callee;
294
295 freq = e->frequency;
296 /* Now update size of caller and all functions caller is inlined into. */
297 for (;e && !e->inline_failed; e = e->caller->callers)
298 {
299 to = e->caller;
300 old_size = e->caller->global.size;
301 new_size = cgraph_estimate_size_after_inlining (1, to, what);
302 to->global.size = new_size;
303 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
304 }
305 gcc_assert (what->global.inlined_to == to);
306 if (new_size > old_size)
307 overall_size += new_size - old_size;
308 if (!duplicate)
309 overall_size -= orig_size;
310 ncalls_inlined++;
311
312 if (flag_indirect_inlining)
313 return ipa_propagate_indirect_call_infos (curr, new_edges);
314 else
315 return false;
316 }
317
318 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
319 Return the following unredirected edge in the list of callers
320 of EDGE->CALLEE.  */
321
322 static struct cgraph_edge *
323 cgraph_mark_inline (struct cgraph_edge *edge)
324 {
325 struct cgraph_node *to = edge->caller;
326 struct cgraph_node *what = edge->callee;
327 struct cgraph_edge *e, *next;
328
329 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
330 /* Look for all calls, mark them inline and clone recursively
331 all inlined functions. */
332 for (e = what->callers; e; e = next)
333 {
334 next = e->next_caller;
335 if (e->caller == to && e->inline_failed)
336 {
337 cgraph_mark_inline_edge (e, true, NULL);
338 if (e == edge)
339 edge = next;
340 }
341 }
342
343 return edge;
344 }
345
346 /* Estimate the growth caused by inlining NODE into all of its callers. */
347
348 static int
349 cgraph_estimate_growth (struct cgraph_node *node)
350 {
351 int growth = 0;
352 struct cgraph_edge *e;
353 bool self_recursive = false;
354
355 if (node->global.estimated_growth != INT_MIN)
356 return node->global.estimated_growth;
357
358 for (e = node->callers; e; e = e->next_caller)
359 {
360 if (e->caller == node)
361 self_recursive = true;
362 if (e->inline_failed)
363 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
364 - e->caller->global.size);
365 }
366
367 /* ??? Wrong for non-trivially self-recursive functions or cases where
368 we decide not to inline for different reasons, but it is not a big deal
369 as in that case we will keep the body around, but we will also avoid
370 some inlining. */
371 if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
372 growth -= node->global.size;
373
374 node->global.estimated_growth = growth;
375 return growth;
376 }
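/* Worked example (illustrative numbers only): a node of size 30 with
   size_inlining_benefit 10 and two not-yet-inlined callers of sizes 100 and
   200 gives growth (120 - 100) + (220 - 200) == 40; if the node is neither
   needed nor external nor self-recursive, its offline body disappears, so 30
   is subtracted and the cached estimate becomes 10.  */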
377
378 /* Return false when inlining WHAT into TO is not a good idea
379 as it would cause too large a growth of function bodies.
380 When ONE_ONLY is true, assume that only one call site is going
381 to be inlined; otherwise figure out how many call sites in
382 TO call WHAT and verify that all can be inlined.
383 */
384
385 static bool
386 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
387 cgraph_inline_failed_t *reason, bool one_only)
388 {
389 int times = 0;
390 struct cgraph_edge *e;
391 int newsize;
392 int limit;
393 HOST_WIDE_INT stack_size_limit, inlined_stack;
394
395 if (one_only)
396 times = 1;
397 else
398 for (e = to->callees; e; e = e->next_callee)
399 if (e->callee == what)
400 times++;
401
402 if (to->global.inlined_to)
403 to = to->global.inlined_to;
404
405 /* When inlining a large function body called once into a small function,
406 take the inlined function as the base for limiting the growth. */
407 if (inline_summary (to)->self_size > inline_summary (what)->self_size)
408 limit = inline_summary (to)->self_size;
409 else
410 limit = inline_summary (what)->self_size;
411
412 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
413
414 /* Check the size after inlining against the function limits. But allow
415 the function to shrink if it went over the limits by forced inlining. */
416 newsize = cgraph_estimate_size_after_inlining (times, to, what);
417 if (newsize >= to->global.size
418 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
419 && newsize > limit)
420 {
421 if (reason)
422 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
423 return false;
424 }
425
426 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
427
428 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
429
430 inlined_stack = (to->global.stack_frame_offset
431 + inline_summary (to)->estimated_self_stack_size
432 + what->global.estimated_stack_size);
433 if (inlined_stack > stack_size_limit
434 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
435 {
436 if (reason)
437 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
438 return false;
439 }
440 return true;
441 }
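/* Sketch of the limit computation, assuming PARAM_LARGE_FUNCTION_GROWTH is
   100 (a typical default): with inline_summary (to)->self_size == 300 and a
   smaller callee, the limit is 300 + 300 * 100 / 100 == 600.  The growth
   check only fails when the new size also exceeds the current size and
   PARAM_LARGE_FUNCTION_INSNS; the stack check works analogously with
   PARAM_STACK_FRAME_GROWTH and PARAM_LARGE_STACK_FRAME.  */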
442
443 /* Return true when function N is small enough to be inlined. */
444
445 static bool
446 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
447 {
448 tree decl = n->decl;
449
450 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
451 {
452 if (reason)
453 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
454 return false;
455 }
456
457 if (!n->analyzed)
458 {
459 if (reason)
460 *reason = CIF_BODY_NOT_AVAILABLE;
461 return false;
462 }
463
464 if (DECL_DECLARED_INLINE_P (decl))
465 {
466 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
467 {
468 if (reason)
469 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
470 return false;
471 }
472 }
473 else
474 {
475 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
476 {
477 if (reason)
478 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
479 return false;
480 }
481 }
482
483 return true;
484 }
485
486 /* Return true when inlining WHAT into TO would create recursive inlining.
487 We call recursive inlining all cases where the same function appears more than
488 once in a single recursion nest path in the inline graph. */
489
490 static bool
491 cgraph_recursive_inlining_p (struct cgraph_node *to,
492 struct cgraph_node *what,
493 cgraph_inline_failed_t *reason)
494 {
495 bool recursive;
496 if (to->global.inlined_to)
497 recursive = what->decl == to->global.inlined_to->decl;
498 else
499 recursive = what->decl == to->decl;
500 /* Marking a recursive function inline has sane semantics and thus we should
501 not warn about it. */
502 if (recursive && reason)
503 *reason = (what->local.disregard_inline_limits
504 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
505 return recursive;
506 }
507
508 /* A cost model driving the inlining heuristics in such a way that edges with
509 the smallest badness are inlined first.  After each inlining is performed
510 the costs of all caller edges of the nodes affected are recomputed so the
511 metrics may accurately depend on values such as the number of inlinable callers
512 of the function or the function body size. */
513
514 static int
515 cgraph_edge_badness (struct cgraph_edge *edge)
516 {
517 gcov_type badness;
518 int growth =
519 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
520
521 growth -= edge->caller->global.size;
522
523 /* Always prefer inlining saving code size. */
524 if (growth <= 0)
525 badness = INT_MIN - growth;
526
527 /* When profiling is available, base priorities on -(#calls / growth).
528 So we optimize for the overall number of "executed" inlined calls. */
529 else if (max_count)
530 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
531 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
532
533 /* When a function-local profile is available, base priorities on
534 growth / frequency, so we optimize for the overall frequency of inlined
535 calls.  This is not too accurate since while the call might be frequent
536 within the function, the function itself may be infrequent.
537 
538 Another objective to optimize for is the number of different calls inlined.
539 We add the estimated growth after inlining all functions to bias the
540 priorities slightly in this direction (so functions of the same size that
541 are called fewer times get priority). */
542 else if (flag_guess_branch_prob)
543 {
544 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
545 badness = growth * 10000;
546 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
547 / (edge->callee->global.time + 1) + 1, 100);
548
549
550 /* Decrease badness if call is nested. */
551 /* Compress the range so we don't overflow. */
552 if (div > 10000)
553 div = 10000 + ceil_log2 (div) - 8;
554 if (div < 1)
555 div = 1;
556 if (badness > 0)
557 badness /= div;
558 badness += cgraph_estimate_growth (edge->callee);
559 if (badness > INT_MAX)
560 badness = INT_MAX;
561 }
562 /* When a function-local profile is not available or it does not give
563 useful information (i.e. the frequency is zero), base the cost on
564 loop nest and overall size growth, so we optimize for the overall number
565 of functions fully inlined in the program. */
566 else
567 {
568 int nest = MIN (edge->loop_nest, 8);
569 badness = cgraph_estimate_growth (edge->callee) * 256;
570
571 /* Decrease badness if call is nested. */
572 if (badness > 0)
573 badness >>= nest;
574 else
575 {
576 badness <<= nest;
577 }
578 }
579 /* Make recursive inlining always happen after other inlining is done. */
580 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
581 return badness + 1;
582 else
583 return badness;
584 }
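/* Worked example of the guessed-profile branch (assumed numbers): a growth of
   20 gives badness == 200000 before scaling; with an edge frequency of
   CGRAPH_FREQ_BASE the divisor is 100 + 1 == 101, and if the benefit factor
   is 1 the badness becomes 200000 / 101 == 1980, plus the callee's estimated
   overall growth.  Lower badness is better: the callers below keep edges in a
   min-heap (fibheap_extract_min), so cheap, hot edges are inlined first.  */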
585
586 /* Recompute heap nodes for each caller edge of NODE. */
587
588 static void
589 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
590 bitmap updated_nodes)
591 {
592 struct cgraph_edge *edge;
593 cgraph_inline_failed_t failed_reason;
594
595 if (!node->local.inlinable || node->local.disregard_inline_limits
596 || node->global.inlined_to)
597 return;
598 if (bitmap_bit_p (updated_nodes, node->uid))
599 return;
600 bitmap_set_bit (updated_nodes, node->uid);
601 node->global.estimated_growth = INT_MIN;
602
603 if (!node->local.inlinable)
604 return;
605 /* Prune out edges we won't inline into anymore. */
606 if (!cgraph_default_inline_p (node, &failed_reason))
607 {
608 for (edge = node->callers; edge; edge = edge->next_caller)
609 if (edge->aux)
610 {
611 fibheap_delete_node (heap, (fibnode_t) edge->aux);
612 edge->aux = NULL;
613 if (edge->inline_failed)
614 edge->inline_failed = failed_reason;
615 }
616 return;
617 }
618
619 for (edge = node->callers; edge; edge = edge->next_caller)
620 if (edge->inline_failed)
621 {
622 int badness = cgraph_edge_badness (edge);
623 if (edge->aux)
624 {
625 fibnode_t n = (fibnode_t) edge->aux;
626 gcc_assert (n->data == edge);
627 if (n->key == badness)
628 continue;
629
630 /* fibheap_replace_key only increase the keys. */
631 if (fibheap_replace_key (heap, n, badness))
632 continue;
633 fibheap_delete_node (heap, (fibnode_t) edge->aux);
634 }
635 edge->aux = fibheap_insert (heap, badness, edge);
636 }
637 }
638
639 /* Recompute heap nodes for the caller edges of each of NODE's callees. */
640
641 static void
642 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
643 bitmap updated_nodes)
644 {
645 struct cgraph_edge *e;
646 node->global.estimated_growth = INT_MIN;
647
648 for (e = node->callees; e; e = e->next_callee)
649 if (e->inline_failed)
650 update_caller_keys (heap, e->callee, updated_nodes);
651 else
652 update_callee_keys (heap, e->callee, updated_nodes);
653 }
654
655 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
656 how much we want to recursively inline the call. */
657
658 static void
659 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
660 fibheap_t heap)
661 {
662 static int priority;
663 struct cgraph_edge *e;
664 for (e = where->callees; e; e = e->next_callee)
665 if (e->callee == node)
666 {
667 /* When profile feedback is available, prioritize by the expected number
668 of calls.  Without profile feedback we maintain a simple queue
669 to order candidates by recursion depth. */
670 fibheap_insert (heap,
671 !max_count ? priority++
672 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
673 e);
674 }
675 for (e = where->callees; e; e = e->next_callee)
676 if (!e->inline_failed)
677 lookup_recursive_calls (node, e->callee, heap);
678 }
679
680 /* Decide on recursive inlining: in case the function has recursive calls,
681 inline until the body size reaches the given limit.  If any new indirect edges
682 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
683 is NULL. */
684
685 static bool
686 cgraph_decide_recursive_inlining (struct cgraph_node *node,
687 VEC (cgraph_edge_p, heap) **new_edges)
688 {
689 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
690 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
691 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
692 fibheap_t heap;
693 struct cgraph_edge *e;
694 struct cgraph_node *master_clone, *next;
695 int depth = 0;
696 int n = 0;
697
698 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
699 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
700 return false;
701
702 if (DECL_DECLARED_INLINE_P (node->decl))
703 {
704 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
705 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
706 }
707
708 /* Make sure that function is small enough to be considered for inlining. */
709 if (!max_depth
710 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
711 return false;
712 heap = fibheap_new ();
713 lookup_recursive_calls (node, node, heap);
714 if (fibheap_empty (heap))
715 {
716 fibheap_delete (heap);
717 return false;
718 }
719
720 if (dump_file)
721 fprintf (dump_file,
722 " Performing recursive inlining on %s\n",
723 cgraph_node_name (node));
724
725 /* We need original clone to copy around. */
726 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
727 false, NULL);
728 master_clone->needed = true;
729 for (e = master_clone->callees; e; e = e->next_callee)
730 if (!e->inline_failed)
731 cgraph_clone_inlined_nodes (e, true, false);
732
733 /* Do the inlining and update list of recursive call during process. */
734 while (!fibheap_empty (heap)
735 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
736 <= limit))
737 {
738 struct cgraph_edge *curr
739 = (struct cgraph_edge *) fibheap_extract_min (heap);
740 struct cgraph_node *cnode;
741
742 depth = 1;
743 for (cnode = curr->caller;
744 cnode->global.inlined_to; cnode = cnode->callers->caller)
745 if (node->decl == curr->callee->decl)
746 depth++;
747 if (depth > max_depth)
748 {
749 if (dump_file)
750 fprintf (dump_file,
751 " maximal depth reached\n");
752 continue;
753 }
754
755 if (max_count)
756 {
757 if (!cgraph_maybe_hot_edge_p (curr))
758 {
759 if (dump_file)
760 fprintf (dump_file, " Not inlining cold call\n");
761 continue;
762 }
763 if (curr->count * 100 / node->count < probability)
764 {
765 if (dump_file)
766 fprintf (dump_file,
767 " Probability of edge is too small\n");
768 continue;
769 }
770 }
771
772 if (dump_file)
773 {
774 fprintf (dump_file,
775 " Inlining call of depth %i", depth);
776 if (node->count)
777 {
778 fprintf (dump_file, " called approx. %.2f times per call",
779 (double)curr->count / node->count);
780 }
781 fprintf (dump_file, "\n");
782 }
783 cgraph_redirect_edge_callee (curr, master_clone);
784 cgraph_mark_inline_edge (curr, false, new_edges);
785 lookup_recursive_calls (node, curr->callee, heap);
786 n++;
787 }
788 if (!fibheap_empty (heap) && dump_file)
789 fprintf (dump_file, " Recursive inlining growth limit met.\n");
790
791 fibheap_delete (heap);
792 if (dump_file)
793 fprintf (dump_file,
794 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
795 master_clone->global.size, node->global.size,
796 master_clone->global.time, node->global.time);
797
798 /* Remove the master clone we used for inlining.  We rely on the fact that
799 clones inlined into the master clone get queued just before the master
800 clone, so we don't need recursion. */
801 for (node = cgraph_nodes; node != master_clone;
802 node = next)
803 {
804 next = node->next;
805 if (node->global.inlined_to == master_clone)
806 cgraph_remove_node (node);
807 }
808 cgraph_remove_node (master_clone);
809 /* FIXME: Recursive inlining actually reduces number of calls of the
810 function. At this place we should probably walk the function and
811 inline clones and compensate the counts accordingly. This probably
812 doesn't matter much in practice. */
813 return n > 0;
814 }
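/* Example of the profile cut-offs used above (assumed numbers): with
   PARAM_MIN_INLINE_RECURSIVE_PROBABILITY at its usual default of 10, a
   recursive edge executed 5 times while the function is entered 100 times
   (5 * 100 / 100 == 5 < 10) is skipped, as is any edge that
   cgraph_maybe_hot_edge_p considers cold.  */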
815
816 /* Set inline_failed for all callers of given function to REASON. */
817
818 static void
819 cgraph_set_inline_failed (struct cgraph_node *node,
820 cgraph_inline_failed_t reason)
821 {
822 struct cgraph_edge *e;
823
824 if (dump_file)
825 fprintf (dump_file, "Inlining failed: %s\n",
826 cgraph_inline_failed_string (reason));
827 for (e = node->callers; e; e = e->next_caller)
828 if (e->inline_failed)
829 e->inline_failed = reason;
830 }
831
832 /* Given the whole-compilation-unit size estimate INSNS, compute how large we
833 can allow the unit to grow. */
834 static int
835 compute_max_insns (int insns)
836 {
837 int max_insns = insns;
838 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
839 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
840
841 return ((HOST_WIDEST_INT) max_insns
842 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
843 }
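/* For instance, assuming PARAM_INLINE_UNIT_GROWTH is 30 (a typical default),
   a unit estimated at 10000 size units may grow to 10000 * 130 / 100 == 13000.
   Units smaller than PARAM_LARGE_UNIT_INSNS are first rounded up to that
   value, so tiny compilation units can still grow substantially.  */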
844
845 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
846 static void
847 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
848 {
849 while (VEC_length (cgraph_edge_p, new_edges) > 0)
850 {
851 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
852
853 gcc_assert (!edge->aux);
854 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
855 }
856 }
857
858
859 /* We use a greedy algorithm for inlining of small functions:
860 all inline candidates are put into a prioritized heap based on the estimated
861 growth of the overall number of instructions, and the estimates are updated
862 after each inlining is performed. */
863 
864 
865
866 static void
867 cgraph_decide_inlining_of_small_functions (void)
868 {
869 struct cgraph_node *node;
870 struct cgraph_edge *edge;
871 cgraph_inline_failed_t failed_reason;
872 fibheap_t heap = fibheap_new ();
873 bitmap updated_nodes = BITMAP_ALLOC (NULL);
874 int min_size, max_size;
875 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
876
877 if (flag_indirect_inlining)
878 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
879
880 if (dump_file)
881 fprintf (dump_file, "\nDeciding on smaller functions:\n");
882
883 /* Put all inline candidates into the heap. */
884
885 for (node = cgraph_nodes; node; node = node->next)
886 {
887 if (!node->local.inlinable || !node->callers
888 || node->local.disregard_inline_limits)
889 continue;
890 if (dump_file)
891 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
892
893 node->global.estimated_growth = INT_MIN;
894 if (!cgraph_default_inline_p (node, &failed_reason))
895 {
896 cgraph_set_inline_failed (node, failed_reason);
897 continue;
898 }
899
900 for (edge = node->callers; edge; edge = edge->next_caller)
901 if (edge->inline_failed)
902 {
903 gcc_assert (!edge->aux);
904 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
905 }
906 }
907
908 max_size = compute_max_insns (overall_size);
909 min_size = overall_size;
910
911 while (overall_size <= max_size
912 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
913 {
914 int old_size = overall_size;
915 struct cgraph_node *where;
916 int growth =
917 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
918 cgraph_inline_failed_t not_good = CIF_OK;
919
920 growth -= edge->caller->global.size;
921
922 if (dump_file)
923 {
924 fprintf (dump_file,
925 "\nConsidering %s with %i size\n",
926 cgraph_node_name (edge->callee),
927 edge->callee->global.size);
928 fprintf (dump_file,
929 " to be inlined into %s in %s:%i\n"
930 " Estimated growth after inlined into all callees is %+i insns.\n"
931 " Estimated badness is %i, frequency %.2f.\n",
932 cgraph_node_name (edge->caller),
933 gimple_filename ((const_gimple) edge->call_stmt),
934 gimple_lineno ((const_gimple) edge->call_stmt),
935 cgraph_estimate_growth (edge->callee),
936 cgraph_edge_badness (edge),
937 edge->frequency / (double)CGRAPH_FREQ_BASE);
938 if (edge->count)
939 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
940 }
941 gcc_assert (edge->aux);
942 edge->aux = NULL;
943 if (!edge->inline_failed)
944 continue;
945
946 /* When we do not have profile info ready, we do not weight in any way the
947 position of the call in the procedure itself.  This means that if a call of
948 function A from function B seems profitable to inline, the recursive
949 call of function A in the inline copy of A in B will look profitable too
950 and we end up inlining until reaching the maximal function growth.  This
951 is not a good idea, so prohibit the recursive inlining.
952 
953 ??? When the frequencies are taken into account we might not need this
954 restriction.
955 
956 We need to be careful here: in some testcases, e.g. directives.c in
957 libcpp, we can estimate a self-recursive function to have negative growth
958 for inlining completely.
959 */
960 if (!edge->count)
961 {
962 where = edge->caller;
963 while (where->global.inlined_to)
964 {
965 if (where->decl == edge->callee->decl)
966 break;
967 where = where->callers->caller;
968 }
969 if (where->global.inlined_to)
970 {
971 edge->inline_failed
972 = (edge->callee->local.disregard_inline_limits
973 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
974 if (dump_file)
975 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
976 continue;
977 }
978 }
979
980 if (!cgraph_maybe_hot_edge_p (edge))
981 not_good = CIF_UNLIKELY_CALL;
982 if (!flag_inline_functions
983 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
984 not_good = CIF_NOT_DECLARED_INLINED;
985 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
986 not_good = CIF_OPTIMIZING_FOR_SIZE;
987 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
988 {
989 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
990 &edge->inline_failed))
991 {
992 edge->inline_failed = not_good;
993 if (dump_file)
994 fprintf (dump_file, " inline_failed:%s.\n",
995 cgraph_inline_failed_string (edge->inline_failed));
996 }
997 continue;
998 }
999 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1000 {
1001 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1002 &edge->inline_failed))
1003 {
1004 if (dump_file)
1005 fprintf (dump_file, " inline_failed:%s.\n",
1006 cgraph_inline_failed_string (edge->inline_failed));
1007 }
1008 continue;
1009 }
1010 if (!tree_can_inline_p (edge))
1011 {
1012 if (dump_file)
1013 fprintf (dump_file, " inline_failed:%s.\n",
1014 cgraph_inline_failed_string (edge->inline_failed));
1015 continue;
1016 }
1017 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1018 &edge->inline_failed))
1019 {
1020 where = edge->caller;
1021 if (where->global.inlined_to)
1022 where = where->global.inlined_to;
1023 if (!cgraph_decide_recursive_inlining (where,
1024 flag_indirect_inlining
1025 ? &new_indirect_edges : NULL))
1026 continue;
1027 if (flag_indirect_inlining)
1028 add_new_edges_to_heap (heap, new_indirect_edges);
1029 update_callee_keys (heap, where, updated_nodes);
1030 }
1031 else
1032 {
1033 struct cgraph_node *callee;
1034 if (gimple_call_cannot_inline_p (edge->call_stmt)
1035 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1036 &edge->inline_failed, true))
1037 {
1038 if (dump_file)
1039 fprintf (dump_file, " Not inlining into %s:%s.\n",
1040 cgraph_node_name (edge->caller),
1041 cgraph_inline_failed_string (edge->inline_failed));
1042 continue;
1043 }
1044 callee = edge->callee;
1045 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1046 if (flag_indirect_inlining)
1047 add_new_edges_to_heap (heap, new_indirect_edges);
1048
1049 update_callee_keys (heap, callee, updated_nodes);
1050 }
1051 where = edge->caller;
1052 if (where->global.inlined_to)
1053 where = where->global.inlined_to;
1054
1055 /* Our profitability metric can depend on local properties
1056 such as the number of inlinable calls and the size of the function body.
1057 After inlining these properties might change for the function we
1058 inlined into (since its body size changed) and for the functions
1059 called by the function we inlined (since the number of their inlinable
1060 callers might change). */
1061 update_caller_keys (heap, where, updated_nodes);
1062 bitmap_clear (updated_nodes);
1063
1064 if (dump_file)
1065 {
1066 fprintf (dump_file,
1067 " Inlined into %s which now has time %i and size %i,"
1068 " net change of %+i.\n",
1069 cgraph_node_name (edge->caller),
1070 edge->caller->global.time,
1071 edge->caller->global.size,
1072 overall_size - old_size);
1073 }
1074 if (min_size > overall_size)
1075 {
1076 min_size = overall_size;
1077 max_size = compute_max_insns (min_size);
1078
1079 if (dump_file)
1080 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1081 }
1082 }
1083 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1084 {
1085 gcc_assert (edge->aux);
1086 edge->aux = NULL;
1087 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1088 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1089 &edge->inline_failed))
1090 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1091 }
1092
1093 if (new_indirect_edges)
1094 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1095 fibheap_delete (heap);
1096 BITMAP_FREE (updated_nodes);
1097 }
1098
1099 /* Decide on the inlining.  We do so in topological order to avoid
1100 the expense of updating the data structures. */
1101
1102 static unsigned int
1103 cgraph_decide_inlining (void)
1104 {
1105 struct cgraph_node *node;
1106 int nnodes;
1107 struct cgraph_node **order =
1108 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1109 int old_size = 0;
1110 int i;
1111 bool redo_always_inline = true;
1112 int initial_size = 0;
1113
1114 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1115
1116 max_count = 0;
1117 max_benefit = 0;
1118 for (node = cgraph_nodes; node; node = node->next)
1119 if (node->analyzed)
1120 {
1121 struct cgraph_edge *e;
1122
1123 gcc_assert (inline_summary (node)->self_size == node->global.size);
1124 gcc_assert (node->needed || node->reachable);
1125 initial_size += node->global.size;
1126 for (e = node->callees; e; e = e->next_callee)
1127 if (max_count < e->count)
1128 max_count = e->count;
1129 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1130 max_benefit = inline_summary (node)->time_inlining_benefit;
1131 }
1132 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1133 overall_size = initial_size;
1134
1135 nnodes = cgraph_postorder (order);
1136
1137 if (dump_file)
1138 fprintf (dump_file,
1139 "\nDeciding on inlining. Starting with size %i.\n",
1140 initial_size);
1141
1142 for (node = cgraph_nodes; node; node = node->next)
1143 node->aux = 0;
1144
1145 if (dump_file)
1146 fprintf (dump_file, "\nInlining always_inline functions:\n");
1147
1148 /* In the first pass mark all always_inline edges. Do this with a priority
1149 so none of our later choices will make this impossible. */
1150 while (redo_always_inline)
1151 {
1152 redo_always_inline = false;
1153 for (i = nnodes - 1; i >= 0; i--)
1154 {
1155 struct cgraph_edge *e, *next;
1156
1157 node = order[i];
1158
1159 /* Handle nodes to be flattened, but don't update overall unit
1160 size. */
1161 if (lookup_attribute ("flatten",
1162 DECL_ATTRIBUTES (node->decl)) != NULL)
1163 {
1164 if (dump_file)
1165 fprintf (dump_file,
1166 "Flattening %s\n", cgraph_node_name (node));
1167 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1168 }
1169
1170 if (!node->local.disregard_inline_limits)
1171 continue;
1172 if (dump_file)
1173 fprintf (dump_file,
1174 "\nConsidering %s size:%i (always inline)\n",
1175 cgraph_node_name (node), node->global.size);
1176 old_size = overall_size;
1177 for (e = node->callers; e; e = next)
1178 {
1179 next = e->next_caller;
1180 if (!e->inline_failed
1181 || gimple_call_cannot_inline_p (e->call_stmt))
1182 continue;
1183 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1184 &e->inline_failed))
1185 continue;
1186 if (!tree_can_inline_p (e))
1187 continue;
1188 if (cgraph_mark_inline_edge (e, true, NULL))
1189 redo_always_inline = true;
1190 if (dump_file)
1191 fprintf (dump_file,
1192 " Inlined into %s which now has size %i.\n",
1193 cgraph_node_name (e->caller),
1194 e->caller->global.size);
1195 }
1196 /* Inlining a self-recursive function might introduce new calls to
1197 itself that we didn't see in the loop above.  Fill in the proper
1198 reason why the inlining failed. */
1199 for (e = node->callers; e; e = e->next_caller)
1200 if (e->inline_failed)
1201 e->inline_failed = CIF_RECURSIVE_INLINING;
1202 if (dump_file)
1203 fprintf (dump_file,
1204 " Inlined for a net change of %+i size.\n",
1205 overall_size - old_size);
1206 }
1207 }
1208
1209 cgraph_decide_inlining_of_small_functions ();
1210
1211 if (flag_inline_functions_called_once)
1212 {
1213 if (dump_file)
1214 fprintf (dump_file, "\nDeciding on functions called once:\n");
1215
1216 /* And finally decide what functions are called once. */
1217 for (i = nnodes - 1; i >= 0; i--)
1218 {
1219 node = order[i];
1220
1221 if (node->callers
1222 && !node->callers->next_caller
1223 && !node->needed
1224 && node->local.inlinable
1225 && node->callers->inline_failed
1226 && node->callers->caller != node
1227 && node->callers->caller->global.inlined_to != node
1228 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1229 && !DECL_EXTERNAL (node->decl)
1230 && !DECL_COMDAT (node->decl))
1231 {
1232 old_size = overall_size;
1233 if (dump_file)
1234 {
1235 fprintf (dump_file,
1236 "\nConsidering %s size %i.\n",
1237 cgraph_node_name (node), node->global.size);
1238 fprintf (dump_file,
1239 " Called once from %s %i insns.\n",
1240 cgraph_node_name (node->callers->caller),
1241 node->callers->caller->global.size);
1242 }
1243
1244 if (cgraph_check_inline_limits (node->callers->caller, node,
1245 NULL, false))
1246 {
1247 cgraph_mark_inline (node->callers);
1248 if (dump_file)
1249 fprintf (dump_file,
1250 " Inlined into %s which now has %i size"
1251 " for a net change of %+i size.\n",
1252 cgraph_node_name (node->callers->caller),
1253 node->callers->caller->global.size,
1254 overall_size - old_size);
1255 }
1256 else
1257 {
1258 if (dump_file)
1259 fprintf (dump_file,
1260 " Inline limit reached, not inlined.\n");
1261 }
1262 }
1263 }
1264 }
1265
1266 /* Free ipa-prop structures if they are no longer needed. */
1267 if (flag_indirect_inlining)
1268 free_all_ipa_structures_after_iinln ();
1269
1270 if (dump_file)
1271 fprintf (dump_file,
1272 "\nInlined %i calls, eliminated %i functions, "
1273 "size %i turned to %i size.\n\n",
1274 ncalls_inlined, nfunctions_inlined, initial_size,
1275 overall_size);
1276 free (order);
1277 return 0;
1278 }
1279
1280 /* Try to inline edge E from the incremental inliner.  MODE specifies the mode
1281 of the inliner.
1282 
1283 We detect cycles by storing the mode of the inliner into the cgraph_node the
1284 last time we visited it in the recursion.  In general, when the mode is set, we
1285 have recursive inlining, but as a special case we want to try harder to inline
1286 ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a being
1287 flatten and b being always inline.  Flattening 'a' will collapse
1288 a->b->c before hitting the cycle.  To accommodate always inline, we however
1289 need to inline a->b->c->b.
1290 
1291 So after hitting a cycle for the first time, we switch into ALWAYS_INLINE mode
1292 and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE mode. */
1293 static bool
1294 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1295 {
1296 struct cgraph_node *callee = e->callee;
1297 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1298 bool always_inline = e->callee->local.disregard_inline_limits;
1299 bool inlined = false;
1300
1301 /* We've hit cycle? */
1302 if (callee_mode)
1303 {
1304 /* It is the first time we see it, we are not in ALWAYS_INLINE-only
1305 mode yet, and the function in question is always_inline. */
1306 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1307 {
1308 if (dump_file)
1309 {
1310 indent_to (dump_file, depth);
1311 fprintf (dump_file,
1312 "Hit cycle in %s, switching to always inline only.\n",
1313 cgraph_node_name (callee));
1314 }
1315 mode = INLINE_ALWAYS_INLINE;
1316 }
1317 /* Otherwise it is time to give up. */
1318 else
1319 {
1320 if (dump_file)
1321 {
1322 indent_to (dump_file, depth);
1323 fprintf (dump_file,
1324 "Not inlining %s into %s to avoid cycle.\n",
1325 cgraph_node_name (callee),
1326 cgraph_node_name (e->caller));
1327 }
1328 e->inline_failed = (e->callee->local.disregard_inline_limits
1329 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1330 return false;
1331 }
1332 }
1333
1334 callee->aux = (void *)(size_t) mode;
1335 if (dump_file)
1336 {
1337 indent_to (dump_file, depth);
1338 fprintf (dump_file, " Inlining %s into %s.\n",
1339 cgraph_node_name (e->callee),
1340 cgraph_node_name (e->caller));
1341 }
1342 if (e->inline_failed)
1343 {
1344 cgraph_mark_inline (e);
1345
1346 /* In order to fully inline always_inline functions, we need to
1347 recurse here, since the inlined functions might not be processed by
1348 incremental inlining at all yet.
1349
1350 Also flattening needs to be done recursively. */
1351
1352 if (mode == INLINE_ALL || always_inline)
1353 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1354 inlined = true;
1355 }
1356 callee->aux = (void *)(size_t) callee_mode;
1357 return inlined;
1358 }
1359
1360 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1361 in leaf functions. */
1362 static bool
1363 leaf_node_p (struct cgraph_node *n)
1364 {
1365 struct cgraph_edge *e;
1366 for (e = n->callees; e; e = e->next_callee)
1367 if (!DECL_BUILT_IN (e->callee->decl)
1368 || (!TREE_READONLY (e->callee->decl)
1369 || DECL_PURE_P (e->callee->decl)))
1370 return false;
1371 return true;
1372 }
1373
1374 /* Decide on the inlining. We do so in the topological order to avoid
1375 expenses on updating data structures.
1376 DEPTH is depth of recursion, used only for debug output. */
1377
1378 static bool
1379 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1380 enum inlining_mode mode,
1381 int depth)
1382 {
1383 struct cgraph_edge *e;
1384 bool inlined = false;
1385 cgraph_inline_failed_t failed_reason;
1386 enum inlining_mode old_mode;
1387
1388 #ifdef ENABLE_CHECKING
1389 verify_cgraph_node (node);
1390 #endif
1391
1392 old_mode = (enum inlining_mode) (size_t)node->aux;
1393
1394 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1395 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1396 {
1397 if (dump_file)
1398 {
1399 indent_to (dump_file, depth);
1400 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1401 }
1402 mode = INLINE_ALL;
1403 }
1404
1405 node->aux = (void *)(size_t) mode;
1406
1407 /* First of all look for always inline functions. */
1408 if (mode != INLINE_SIZE_NORECURSIVE)
1409 for (e = node->callees; e; e = e->next_callee)
1410 {
1411 if (!e->callee->local.disregard_inline_limits
1412 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1413 continue;
1414 if (gimple_call_cannot_inline_p (e->call_stmt))
1415 continue;
1416 /* When the edge is already inlined, we just need to recurse into
1417 it in order to fully flatten the leaves. */
1418 if (!e->inline_failed && mode == INLINE_ALL)
1419 {
1420 inlined |= try_inline (e, mode, depth);
1421 continue;
1422 }
1423 if (dump_file)
1424 {
1425 indent_to (dump_file, depth);
1426 fprintf (dump_file,
1427 "Considering to always inline inline candidate %s.\n",
1428 cgraph_node_name (e->callee));
1429 }
1430 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1431 {
1432 if (dump_file)
1433 {
1434 indent_to (dump_file, depth);
1435 fprintf (dump_file, "Not inlining: recursive call.\n");
1436 }
1437 continue;
1438 }
1439 if (!tree_can_inline_p (e))
1440 {
1441 if (dump_file)
1442 {
1443 indent_to (dump_file, depth);
1444 fprintf (dump_file,
1445 "Not inlining: %s.\n",
1446 cgraph_inline_failed_string (e->inline_failed));
1447 }
1448 continue;
1449 }
1450 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1451 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1452 {
1453 if (dump_file)
1454 {
1455 indent_to (dump_file, depth);
1456 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1457 }
1458 continue;
1459 }
1460 if (!e->callee->analyzed)
1461 {
1462 if (dump_file)
1463 {
1464 indent_to (dump_file, depth);
1465 fprintf (dump_file,
1466 "Not inlining: Function body no longer available.\n");
1467 }
1468 continue;
1469 }
1470 inlined |= try_inline (e, mode, depth);
1471 }
1472
1473 /* Now do the automatic inlining. */
1474 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1475 for (e = node->callees; e; e = e->next_callee)
1476 {
1477 int allowed_growth = 0;
1478 if (!e->callee->local.inlinable
1479 || !e->inline_failed
1480 || e->callee->local.disregard_inline_limits)
1481 continue;
1482 if (dump_file)
1483 fprintf (dump_file, "Considering inline candidate %s.\n",
1484 cgraph_node_name (e->callee));
1485 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1486 {
1487 if (dump_file)
1488 {
1489 indent_to (dump_file, depth);
1490 fprintf (dump_file, "Not inlining: recursive call.\n");
1491 }
1492 continue;
1493 }
1494 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1495 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1496 {
1497 if (dump_file)
1498 {
1499 indent_to (dump_file, depth);
1500 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1501 }
1502 continue;
1503 }
1504
1505 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1506 && optimize_function_for_speed_p (cfun))
1507 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1508
1509 /* When the function body would grow and inlining the function won't
1510 eliminate the need for offline copy of the function, don't inline.
1511 */
1512 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1513 || (!flag_inline_functions
1514 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1515 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1516 > e->caller->global.size + allowed_growth)
1517 && cgraph_estimate_growth (e->callee) > allowed_growth)
1518 {
1519 if (dump_file)
1520 {
1521 indent_to (dump_file, depth);
1522 fprintf (dump_file,
1523 "Not inlining: code size would grow by %i.\n",
1524 cgraph_estimate_size_after_inlining (1, e->caller,
1525 e->callee)
1526 - e->caller->global.size);
1527 }
1528 continue;
1529 }
1530 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1531 false)
1532 || gimple_call_cannot_inline_p (e->call_stmt))
1533 {
1534 if (dump_file)
1535 {
1536 indent_to (dump_file, depth);
1537 fprintf (dump_file, "Not inlining: %s.\n",
1538 cgraph_inline_failed_string (e->inline_failed));
1539 }
1540 continue;
1541 }
1542 if (!e->callee->analyzed)
1543 {
1544 if (dump_file)
1545 {
1546 indent_to (dump_file, depth);
1547 fprintf (dump_file,
1548 "Not inlining: Function body no longer available.\n");
1549 }
1550 continue;
1551 }
1552 if (!tree_can_inline_p (e))
1553 {
1554 if (dump_file)
1555 {
1556 indent_to (dump_file, depth);
1557 fprintf (dump_file,
1558 "Not inlining: %s.\n",
1559 cgraph_inline_failed_string (e->inline_failed));
1560 }
1561 continue;
1562 }
1563 if (cgraph_default_inline_p (e->callee, &failed_reason))
1564 inlined |= try_inline (e, mode, depth);
1565 }
1566 node->aux = (void *)(size_t) old_mode;
1567 return inlined;
1568 }
1569
1570 /* Because inlining might remove no-longer reachable nodes, we need to
1571 keep the array visible to garbage collector to avoid reading collected
1572 out nodes. */
1573 static int nnodes;
1574 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1575
1576 /* Do inlining of small functions. Doing so early helps profiling and other
1577 passes to be somewhat more effective and avoids some code duplication in
1578 later real inlining pass for testcases with very many function calls. */
1579 static unsigned int
1580 cgraph_early_inlining (void)
1581 {
1582 struct cgraph_node *node = cgraph_node (current_function_decl);
1583 unsigned int todo = 0;
1584 int iterations = 0;
1585
1586 if (sorrycount || errorcount)
1587 return 0;
1588 while (cgraph_decide_inlining_incrementally (node,
1589 iterations
1590 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
1591 && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
1592 {
1593 timevar_push (TV_INTEGRATION);
1594 todo |= optimize_inline_calls (current_function_decl);
1595 iterations++;
1596 timevar_pop (TV_INTEGRATION);
1597 }
1598 if (dump_file)
1599 fprintf (dump_file, "Iterations: %i\n", iterations);
1600 cfun->always_inline_functions_inlined = true;
1601 return todo;
1602 }
1603
1604 /* When inlining shall be performed. */
1605 static bool
1606 cgraph_gate_early_inlining (void)
1607 {
1608 return flag_early_inlining;
1609 }
1610
1611 struct gimple_opt_pass pass_early_inline =
1612 {
1613 {
1614 GIMPLE_PASS,
1615 "einline", /* name */
1616 cgraph_gate_early_inlining, /* gate */
1617 cgraph_early_inlining, /* execute */
1618 NULL, /* sub */
1619 NULL, /* next */
1620 0, /* static_pass_number */
1621 TV_INLINE_HEURISTICS, /* tv_id */
1622 0, /* properties_required */
1623 0, /* properties_provided */
1624 0, /* properties_destroyed */
1625 0, /* todo_flags_start */
1626 TODO_dump_func /* todo_flags_finish */
1627 }
1628 };
1629
1630 /* When inlining shall be performed. */
1631 static bool
1632 cgraph_gate_ipa_early_inlining (void)
1633 {
1634 return (flag_early_inlining
1635 && (flag_branch_probabilities || flag_test_coverage
1636 || profile_arc_flag));
1637 }
1638
1639 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1640 before tree profiling so we have stand alone IPA pass for doing so. */
1641 struct simple_ipa_opt_pass pass_ipa_early_inline =
1642 {
1643 {
1644 SIMPLE_IPA_PASS,
1645 "einline_ipa", /* name */
1646 cgraph_gate_ipa_early_inlining, /* gate */
1647 NULL, /* execute */
1648 NULL, /* sub */
1649 NULL, /* next */
1650 0, /* static_pass_number */
1651 TV_INLINE_HEURISTICS, /* tv_id */
1652 0, /* properties_required */
1653 0, /* properties_provided */
1654 0, /* properties_destroyed */
1655 0, /* todo_flags_start */
1656 TODO_dump_cgraph /* todo_flags_finish */
1657 }
1658 };
1659
1660 /* See if the statement might disappear after inlining.  We are not terribly
1661 sophisticated; basically we are looking for simple abstraction penalty wrappers. */
1662
1663 static bool
1664 likely_eliminated_by_inlining_p (gimple stmt)
1665 {
1666 enum gimple_code code = gimple_code (stmt);
1667 switch (code)
1668 {
1669 case GIMPLE_RETURN:
1670 return true;
1671 case GIMPLE_ASSIGN:
1672 if (gimple_num_ops (stmt) != 2)
1673 return false;
1674
1675 /* Casts of parameters, loads from parameters passed by reference
1676 and stores to return value or parameters are probably free after
1677 inlining. */
1678 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1679 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1680 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1681 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1682 {
1683 tree rhs = gimple_assign_rhs1 (stmt);
1684 tree lhs = gimple_assign_lhs (stmt);
1685 tree inner_rhs = rhs;
1686 tree inner_lhs = lhs;
1687 bool rhs_free = false;
1688 bool lhs_free = false;
1689
1690 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1691 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1692 while (handled_component_p (inner_rhs)
1693 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1694 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1695
1696
1697 if (TREE_CODE (inner_rhs) == PARM_DECL
1698 || (TREE_CODE (inner_rhs) == SSA_NAME
1699 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1700 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1701 rhs_free = true;
1702 if (rhs_free && is_gimple_reg (lhs))
1703 lhs_free = true;
1704 if (((TREE_CODE (inner_lhs) == PARM_DECL
1705 || (TREE_CODE (inner_lhs) == SSA_NAME
1706 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1707 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1708 && inner_lhs != lhs)
1709 || TREE_CODE (inner_lhs) == RESULT_DECL
1710 || (TREE_CODE (inner_lhs) == SSA_NAME
1711 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1712 lhs_free = true;
1713 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1714 rhs_free = true;
1715 if (lhs_free && rhs_free)
1716 return true;
1717 }
1718 return false;
1719 default:
1720 return false;
1721 }
1722 }
1723
1724 /* Compute function body size parameters for NODE. */
1725
1726 static void
1727 estimate_function_body_sizes (struct cgraph_node *node)
1728 {
1729 gcov_type time = 0;
1730 gcov_type time_inlining_benefit = 0;
1731 int size = 0;
1732 int size_inlining_benefit = 0;
1733 basic_block bb;
1734 gimple_stmt_iterator bsi;
1735 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1736 tree arg;
1737 int freq;
1738 tree funtype = TREE_TYPE (node->decl);
1739
1740 if (dump_file)
1741 fprintf (dump_file, "Analyzing function body size: %s\n",
1742 cgraph_node_name (node));
1743
1744 gcc_assert (my_function && my_function->cfg);
1745 FOR_EACH_BB_FN (bb, my_function)
1746 {
1747 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1748 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1749 {
1750 gimple stmt = gsi_stmt (bsi);
1751 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1752 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1753
1754 if (dump_file && (dump_flags & TDF_DETAILS))
1755 {
1756 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1757 freq, this_size, this_time);
1758 print_gimple_stmt (dump_file, stmt, 0, 0);
1759 }
1760 this_time *= freq;
1761 time += this_time;
1762 size += this_size;
1763 if (likely_eliminated_by_inlining_p (stmt))
1764 {
1765 size_inlining_benefit += this_size;
1766 time_inlining_benefit += this_time;
1767 if (dump_file && (dump_flags & TDF_DETAILS))
1768 fprintf (dump_file, " Likely eliminated\n");
1769 }
1770 gcc_assert (time >= 0);
1771 gcc_assert (size >= 0);
1772 }
1773 }
1774 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1775 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1776 / CGRAPH_FREQ_BASE);
1777 if (dump_file)
1778 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1779 (int)time, (int)time_inlining_benefit,
1780 size, size_inlining_benefit);
1781 time_inlining_benefit += eni_time_weights.call_cost;
1782 size_inlining_benefit += eni_size_weights.call_cost;
1783 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1784 {
1785 int cost = estimate_move_cost (TREE_TYPE (funtype));
1786 time_inlining_benefit += cost;
1787 size_inlining_benefit += cost;
1788 }
1789 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1790 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1791 {
1792 int cost = estimate_move_cost (TREE_TYPE (arg));
1793 time_inlining_benefit += cost;
1794 size_inlining_benefit += cost;
1795 }
1796 if (time_inlining_benefit > MAX_TIME)
1797 time_inlining_benefit = MAX_TIME;
1798 if (time > MAX_TIME)
1799 time = MAX_TIME;
1800 inline_summary (node)->self_time = time;
1801 inline_summary (node)->self_size = size;
1802 if (dump_file)
1803 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1804 (int)time, (int)time_inlining_benefit,
1805 size, size_inlining_benefit);
1806 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1807 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1808 }
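
/* A small worked example of the accounting above (hypothetical numbers,
   for illustration only): a statement of size 2 and time 2 in a block whose
   frequency equals CGRAPH_FREQ_BASE contributes 2 to SIZE and
   2 * CGRAPH_FREQ_BASE to TIME; the rounding division by CGRAPH_FREQ_BASE
   then yields a self time of 2.  If that statement is likely eliminated by
   inlining, the same amounts also go into the benefits, which in addition
   always include the call_cost from eni_time_weights/eni_size_weights and
   the estimated move cost of the return value and of every non-void
   argument, because the call overhead itself disappears at an inlined call
   site.  */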
1809
1810 /* Compute the parameters of NODE that are used by the inliner.  */
1811 unsigned int
1812 compute_inline_parameters (struct cgraph_node *node)
1813 {
1814 HOST_WIDE_INT self_stack_size;
1815
1816 gcc_assert (!node->global.inlined_to);
1817
1818 /* Estimate the stack size for the function, but not at -O0, because
1819 estimated_stack_frame_size is a quadratic problem.  */
1820 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1821 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1822 node->global.estimated_stack_size = self_stack_size;
1823 node->global.stack_frame_offset = 0;
1824
1825 /* Can this function be inlined at all? */
1826 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1827 if (node->local.inlinable && !node->local.disregard_inline_limits)
1828 node->local.disregard_inline_limits
1829 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1830 estimate_function_body_sizes (node);
1831 /* Inlining characteristics are maintained by cgraph_mark_inline.  */
1832 node->global.time = inline_summary (node)->self_time;
1833 node->global.size = inline_summary (node)->self_size;
1834 return 0;
1835 }
1836
1837
1838 /* Compute the inliner parameters for the function given by
1839 current_function_decl.  */
1840 static unsigned int
1841 compute_inline_parameters_for_current (void)
1842 {
1843 compute_inline_parameters (cgraph_node (current_function_decl));
1844 return 0;
1845 }
1846
1847 struct gimple_opt_pass pass_inline_parameters =
1848 {
1849 {
1850 GIMPLE_PASS,
1851 "inline_param", /* name */
1852 NULL, /* gate */
1853 compute_inline_parameters_for_current,/* execute */
1854 NULL, /* sub */
1855 NULL, /* next */
1856 0, /* static_pass_number */
1857 TV_INLINE_HEURISTICS, /* tv_id */
1858 0, /* properties_required */
1859 0, /* properties_provided */
1860 0, /* properties_destroyed */
1861 0, /* todo_flags_start */
1862 0 /* todo_flags_finish */
1863 }
1864 };
1865
1866 /* Perform the intraprocedural analysis of NODE that is required for
1867 inlining indirect calls.  */
1868 static void
1869 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1870 {
1871 struct cgraph_edge *cs;
1872
1873 if (!flag_ipa_cp)
1874 {
1875 ipa_initialize_node_params (node);
1876 ipa_detect_param_modifications (node);
1877 }
1878 ipa_analyze_params_uses (node);
1879
1880 if (!flag_ipa_cp)
1881 for (cs = node->callees; cs; cs = cs->next_callee)
1882 {
1883 ipa_count_arguments (cs);
1884 ipa_compute_jump_functions (cs);
1885 }
1886
1887 if (dump_file)
1888 {
1889 ipa_print_node_params (dump_file, node);
1890 ipa_print_node_jump_functions (dump_file, node);
1891 }
1892 }
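
/* An illustrative sketch (hypothetical code, not from these sources) of
   what the analysis above records:

     static void helper (void (*cb) (int), int n);    calls *cb internally

     static void
     walk (void (*cb) (int), int n)
     {
       helper (cb, n);
     }

   The jump function computed for the first argument of the call to helper
   notes that it is a plain pass-through of WALK's formal parameter CB.
   Once WALK is inlined into a caller passing a known function, this lets
   the indirect call made through CB inside helper be resolved to a direct
   call and considered for further inlining.  */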
1893
1894 /* Analyze the body of NODE and compute its inline parameters.  */
1895 static void
1896 analyze_function (struct cgraph_node *node)
1897 {
1898 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1899 current_function_decl = node->decl;
1900
1901 compute_inline_parameters (node);
1902 if (flag_indirect_inlining)
1903 inline_indirect_intraprocedural_analysis (node);
1904
1905 current_function_decl = NULL;
1906 pop_cfun ();
1907 }
1908
1909 /* Called when a new function is inserted into the callgraph late.  */
1910 static void
1911 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1912 {
1913 analyze_function (node);
1914 }
1915
1916 /* Compute the inline summaries of all analyzed functions in the callgraph.  */
1917 static void
1918 inline_generate_summary (void)
1919 {
1920 struct cgraph_node *node;
1921
1922 function_insertion_hook_holder =
1923 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1924
1925 if (flag_indirect_inlining)
1926 {
1927 ipa_register_cgraph_hooks ();
1928 ipa_check_create_node_params ();
1929 ipa_check_create_edge_args ();
1930 }
1931
1932 for (node = cgraph_nodes; node; node = node->next)
1933 if (node->analyzed)
1934 analyze_function (node);
1935
1936 return;
1937 }
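
/* The insertion hook registered above makes sure that functions which are
   added to the callgraph only after this walk (for example bodies created
   late by other passes) still have their inline parameters computed, via
   add_new_function.  */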
1938
1939 /* Apply the inline plan to NODE.  */
1940 static unsigned int
1941 inline_transform (struct cgraph_node *node)
1942 {
1943 unsigned int todo = 0;
1944 struct cgraph_edge *e;
1945
1946 /* We might need the body of this function so that we can expand
1947 it inline somewhere else. */
1948 if (cgraph_preserve_function_body_p (node->decl))
1949 save_inline_function_body (node);
1950
1951 for (e = node->callees; e; e = e->next_callee)
1952 if (!e->inline_failed || warn_inline)
1953 break;
1954
1955 if (e)
1956 {
1957 timevar_push (TV_INTEGRATION);
1958 todo = optimize_inline_calls (current_function_decl);
1959 timevar_pop (TV_INTEGRATION);
1960 }
1961 cfun->always_inline_functions_inlined = true;
1962 cfun->after_inlining = true;
1963 return todo | execute_fixup_cfg ();
1964 }
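
/* Illustrative note on the plan encoding used above (it is defined by the
   callgraph code, not here): an edge whose inline_failed field is cleared
   is one the heuristics decided to inline.  The loop above therefore calls
   the relatively expensive optimize_inline_calls only when at least one
   callee edge is marked for inlining, or when -Winline may require
   diagnostics about calls that could not be inlined.  */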
1965
1966 struct ipa_opt_pass_d pass_ipa_inline =
1967 {
1968 {
1969 IPA_PASS,
1970 "inline", /* name */
1971 NULL, /* gate */
1972 cgraph_decide_inlining, /* execute */
1973 NULL, /* sub */
1974 NULL, /* next */
1975 0, /* static_pass_number */
1976 TV_INLINE_HEURISTICS, /* tv_id */
1977 0, /* properties_required */
1978 0, /* properties_provided */
1979 0, /* properties_destroyed */
1980 TODO_remove_functions, /* todo_flags_start */
1981 TODO_dump_cgraph | TODO_dump_func
1982 | TODO_remove_functions /* todo_flags_finish */
1983 },
1984 inline_generate_summary, /* generate_summary */
1985 NULL, /* write_summary */
1986 NULL, /* read_summary */
1987 NULL, /* function_read_summary */
1988 0, /* TODOs */
1989 inline_transform, /* function_transform */
1990 NULL, /* variable_transform */
1991 };
1992
1993
1994 #include "gt-ipa-inline.h"