1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 We separate inlining decisions from the inliner itself and store them
24 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
25 documentation for the particular representation of inline plans in the
26 callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32 This function allows marking a given call inline and performs the necessary
33 modifications of the cgraph (production of the clones and updating of the
34 overall statistics).
35
36 inlining heuristics limits
37
38 These functions check that a particular inlining is allowed by the
39 limits specified by the user (allowed function growth, overall unit
40 growth and so on).
41
42 inlining heuristics
43
44 This is the implementation of the IPA pass aiming to get as much benefit
45 from inlining as possible while obeying the limits checked above.
46
47 The implementation of a particular heuristic is separated from
48 the rest of the code to make it easier to replace it with a more
49 complicated implementation in the future. The rest of the inlining code
50 acts as a library aimed at modifying the callgraph and verifying that
51 the limits on code size growth are satisfied.
52
53 To mark a given call inline, use the cgraph_mark_inline function; the
54 verification is performed by cgraph_default_inline_p and
55 cgraph_check_inline_limits.
56
57 The heuristics implement a simple knapsack-style algorithm ordering
58 all functions by their "profitability" (estimated by code size growth)
59 and inlining them in priority order.
60
61 cgraph_decide_inlining implements the heuristics taking the whole callgraph
62 into account, while cgraph_decide_inlining_incrementally considers
63 only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69 This pass computes the local properties of functions used by the inliner:
70 the estimated function body size, whether the function is inlinable at all,
71 and the stack frame consumption.
72
73 Before executing any of the inliner passes, this local pass has to be applied
74 to each function in the callgraph (i.e. run as a subpass of some earlier
75 IPA pass). The results are invalidated by any optimization applied
76 to the function body.
77
78 pass_early_inlining
79
80 A simple local inlining pass inlining callees into the current function.
81 This pass does no global whole-compilation-unit analysis, so when it is
82 allowed to perform inlining that expands code size, it might result in
83 unbounded growth of the whole unit.
84
85 The pass is run during conversion into SSA form. Only functions already
86 converted into SSA form are inlined, so the conversion must happen in
87 topological order on the callgraph (which is maintained by the pass manager).
88 Functions are early-optimized after inlining, so the early inliner sees
89 the unoptimized function itself, but all considered callees are already
90 optimized, allowing it to unfold the C++ abstraction penalty effectively and
91 cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, early inlining is also necessary to reduce
96 instrumentation costs on programs with a high abstraction penalty (doing
97 many redundant calls). This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting already instrumented function bodies we brought in via
100 inlining.
101
102 To avoid this, this pass is executed as an IPA pass before profiling. It is
103 a simple wrapper around pass_early_inlining and ensures the first inlining.
104
105 pass_ipa_inline
106
107 This is the main pass implementing a simple greedy algorithm that inlines
108 small functions (resulting in overall growth of the compilation unit) and
109 functions called once. The pass computes just the so-called inline plan
110 (a representation of the inlining to be done in the callgraph) and, unlike
111 early inlining, does not perform the inlining itself.
112
113 pass_apply_inline
114
115 This pass performs the actual inlining according to pass_ipa_inline on a given
116 function. Possibly the function body before inlining is saved when it is
117 needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
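/* Upper bound used below to clamp per-function execution time estimates. */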
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates on:
146 
147 In ALWAYS_INLINE mode, only functions marked
148 always_inline are inlined. This mode is used after detecting a cycle during
149 flattening.
150 
151 In SIZE mode, only functions that reduce the function body size after
152 inlining are inlined; this is used during early inlining.
153 
154 In ALL mode, everything is inlined. This is used during flattening. */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
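/* The estimate is TO's current time plus WHAT's time (minus WHAT's expected
   time benefit from inlining) scaled by the call FREQUENCY, clamped to the
   range [0, MAX_TIME]. */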
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
198
199 /* Estimate self size of the function after inlining WHAT into TO. */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
209
210 /* E is expected to be an edge being inlined. Clone the destination node of
211 the edge and redirect it to the new clone. DUPLICATE is used for bookkeeping
212 on whether we are actually creating new clones or re-using the node originally
213 representing the out-of-line function call. UPDATE_ORIGINAL specifies whether
214 the profile of the original function should be updated. */
215 void
216 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
217 bool update_original)
218 {
219 HOST_WIDE_INT peak;
220
221 if (duplicate)
222 {
223 /* We may eliminate the need for out-of-line copy to be output.
224 In that case just go ahead and re-use it. */
225 if (!e->callee->callers->next_caller
226 && !e->callee->needed
227 && !cgraph_new_nodes)
228 {
229 gcc_assert (!e->callee->global.inlined_to);
230 if (e->callee->analyzed)
231 {
232 overall_size -= e->callee->global.size;
233 nfunctions_inlined++;
234 }
235 duplicate = false;
236 }
237 else
238 {
239 struct cgraph_node *n;
240 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
241 update_original);
242 cgraph_redirect_edge_callee (e, n);
243 }
244 }
245
246 if (e->caller->global.inlined_to)
247 e->callee->global.inlined_to = e->caller->global.inlined_to;
248 else
249 e->callee->global.inlined_to = e->caller;
250 e->callee->global.stack_frame_offset
251 = e->caller->global.stack_frame_offset
252 + inline_summary (e->caller)->estimated_self_stack_size;
253 peak = e->callee->global.stack_frame_offset
254 + inline_summary (e->callee)->estimated_self_stack_size;
255 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
256 e->callee->global.inlined_to->global.estimated_stack_size = peak;
257
258 /* Recursively clone all bodies. */
259 for (e = e->callee->callees; e; e = e->next_callee)
260 if (!e->inline_failed)
261 cgraph_clone_inlined_nodes (e, duplicate, update_original);
262 }
263
264 /* Mark edge E as inlined and update the callgraph accordingly. UPDATE_ORIGINAL
265 specifies whether the profile of the original function should be updated. If
266 any new indirect edges are discovered in the process, add them to NEW_EDGES,
267 unless it is NULL. Return true iff any new callgraph edges were discovered
268 as a result of inlining. */
269
270 static bool
271 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
272 VEC (cgraph_edge_p, heap) **new_edges)
273 {
274 int old_size = 0, new_size = 0;
275 struct cgraph_node *to = NULL, *what;
276 struct cgraph_edge *curr = e;
277 int freq;
278 bool duplicate = false;
279 int orig_size = e->callee->global.size;
280
281 gcc_assert (e->inline_failed);
282 e->inline_failed = CIF_OK;
283
284 if (!e->callee->global.inlined)
285 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
286 e->callee->global.inlined = true;
287
288 if (e->callee->callers->next_caller
289 || e->callee->needed)
290 duplicate = true;
291 cgraph_clone_inlined_nodes (e, true, update_original);
292
293 what = e->callee;
294
295 freq = e->frequency;
296 /* Now update size of caller and all functions caller is inlined into. */
297 for (;e && !e->inline_failed; e = e->caller->callers)
298 {
299 to = e->caller;
300 old_size = e->caller->global.size;
301 new_size = cgraph_estimate_size_after_inlining (1, to, what);
302 to->global.size = new_size;
303 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
304 }
305 gcc_assert (what->global.inlined_to == to);
306 if (new_size > old_size)
307 overall_size += new_size - old_size;
308 if (!duplicate)
309 overall_size -= orig_size;
310 ncalls_inlined++;
311
312 if (flag_indirect_inlining)
313 return ipa_propagate_indirect_call_infos (curr, new_edges);
314 else
315 return false;
316 }
317
318 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
319 Return the following unredirected edge in the list of callers
320 of EDGE->CALLEE. */
321
322 static struct cgraph_edge *
323 cgraph_mark_inline (struct cgraph_edge *edge)
324 {
325 struct cgraph_node *to = edge->caller;
326 struct cgraph_node *what = edge->callee;
327 struct cgraph_edge *e, *next;
328
329 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
330 /* Look for all calls, mark them inline and clone recursively
331 all inlined functions. */
332 for (e = what->callers; e; e = next)
333 {
334 next = e->next_caller;
335 if (e->caller == to && e->inline_failed)
336 {
337 cgraph_mark_inline_edge (e, true, NULL);
338 if (e == edge)
339 edge = next;
340 }
341 }
342
343 return edge;
344 }
345
346 /* Estimate the growth caused by inlining NODE into all of its callers. */
347
348 static int
349 cgraph_estimate_growth (struct cgraph_node *node)
350 {
351 int growth = 0;
352 struct cgraph_edge *e;
353 bool self_recursive = false;
354
355 if (node->global.estimated_growth != INT_MIN)
356 return node->global.estimated_growth;
357
358 for (e = node->callers; e; e = e->next_caller)
359 {
360 if (e->caller == node)
361 self_recursive = true;
362 if (e->inline_failed)
363 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
364 - e->caller->global.size);
365 }
366
367 /* ??? Wrong for non-trivially self-recursive functions or cases where
368 we decide not to inline for different reasons, but it is not a big deal
369 as in that case we will keep the body around, but we will also avoid
370 some inlining. */
371 if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
372 growth -= node->global.size;
373
374 node->global.estimated_growth = growth;
375 return growth;
376 }
377
378 /* Return false when inlining WHAT into TO is not a good idea
379 as it would cause too large a growth of function bodies.
380 When ONE_ONLY is true, assume that only one call site is going
381 to be inlined; otherwise figure out how many call sites in
382 TO call WHAT and verify that all of them can be inlined.
383 */
384
385 static bool
386 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
387 cgraph_inline_failed_t *reason, bool one_only)
388 {
389 int times = 0;
390 struct cgraph_edge *e;
391 int newsize;
392 int limit;
393 HOST_WIDE_INT stack_size_limit, inlined_stack;
394
395 if (one_only)
396 times = 1;
397 else
398 for (e = to->callees; e; e = e->next_callee)
399 if (e->callee == what)
400 times++;
401
402 if (to->global.inlined_to)
403 to = to->global.inlined_to;
404
405 /* When inlining large function body called once into small function,
406 take the inlined function as base for limiting the growth. */
407 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
408 limit = inline_summary (to)->self_size;
409 else
410 limit = inline_summary (what)->self_size;
411
412 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
413
414 /* Check the size after inlining against the function limits. But allow
415 the function to shrink if it went over the limits by forced inlining. */
416 newsize = cgraph_estimate_size_after_inlining (times, to, what);
417 if (newsize >= to->global.size
418 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
419 && newsize > limit)
420 {
421 if (reason)
422 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
423 return false;
424 }
425
426 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
427
428 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
429
430 inlined_stack = (to->global.stack_frame_offset
431 + inline_summary (to)->estimated_self_stack_size
432 + what->global.estimated_stack_size);
433 if (inlined_stack > stack_size_limit
434 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
435 {
436 if (reason)
437 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
438 return false;
439 }
440 return true;
441 }
442
443 /* Return true when function N is small enough to be inlined. */
444
445 static bool
446 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
447 {
448 tree decl = n->decl;
449
450 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
451 {
452 if (reason)
453 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
454 return false;
455 }
456
457 if (!n->analyzed)
458 {
459 if (reason)
460 *reason = CIF_BODY_NOT_AVAILABLE;
461 return false;
462 }
463
464 if (DECL_DECLARED_INLINE_P (decl))
465 {
466 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
467 {
468 if (reason)
469 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
470 return false;
471 }
472 }
473 else
474 {
475 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
476 {
477 if (reason)
478 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
479 return false;
480 }
481 }
482
483 return true;
484 }
485
486 /* Return true when inlining WHAT into TO would create recursive inlining.
487 We call recursive inlining all cases where the same function appears more
488 than once on a single recursion nest path in the inline graph. */
489
490 static bool
491 cgraph_recursive_inlining_p (struct cgraph_node *to,
492 struct cgraph_node *what,
493 cgraph_inline_failed_t *reason)
494 {
495 bool recursive;
496 if (to->global.inlined_to)
497 recursive = what->decl == to->global.inlined_to->decl;
498 else
499 recursive = what->decl == to->decl;
500 /* Marking a recursive function inline has sane semantics and thus we should
501 not warn on it. */
502 if (recursive && reason)
503 *reason = (what->local.disregard_inline_limits
504 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
505 return recursive;
506 }
507
508 /* A cost model driving the inlining heuristics such that the edges with the
509 smallest badness are inlined first. After each inlining is performed,
510 the costs of all caller edges of the affected nodes are recomputed, so the
511 metrics may accurately depend on values such as the number of inlinable
512 callers of the function or the function body size. */
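/* Roughly: edges whose inlining saves code size get the most negative badness;
   with profile feedback badness is about -(call count * time benefit) / growth;
   with guessed frequencies it is the size growth scaled down by the
   frequency-weighted time benefit plus the overall estimated growth; otherwise
   it is the overall estimated growth scaled down by the loop nesting depth.
   Edges with the smallest badness are extracted from the heap first. */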
513
514 static int
515 cgraph_edge_badness (struct cgraph_edge *edge)
516 {
517 gcov_type badness;
518 int growth =
519 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
520
521 growth -= edge->caller->global.size;
522
523 /* Always prefer inlining saving code size. */
524 if (growth <= 0)
525 badness = INT_MIN - growth;
526
527 /* When profiling is available, base priorities on -(#calls / growth).
528 So we optimize for the overall number of "executed" inlined calls. */
529 else if (max_count)
530 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
531 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
532
533 /* When a function-local profile is available, base priorities on
534 growth / frequency, so we optimize for the overall frequency of inlined
535 calls. This is not too accurate since, while the call might be frequent
536 within the function, the function itself may be infrequent.
537
538 Another objective to optimize for is the number of different calls inlined.
539 We add the estimated growth from inlining the callee everywhere to bias the
540 priorities slightly in this direction (so functions of the same size that
541 are called fewer times get priority). */
542 else if (flag_guess_branch_prob)
543 {
544 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
545 badness = growth * 10000;
546 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
547 / (edge->callee->global.time + 1) + 1, 100);
548
549
550 /* Decrease badness if call is nested. */
551 /* Compress the range so we don't overflow. */
552 if (div > 10000)
553 div = 10000 + ceil_log2 (div) - 8;
554 if (div < 1)
555 div = 1;
556 if (badness > 0)
557 badness /= div;
558 badness += cgraph_estimate_growth (edge->callee);
559 if (badness > INT_MAX)
560 badness = INT_MAX;
561 }
562 /* When a function-local profile is not available or does not give
563 useful information (i.e. the frequency is zero), base the cost on
564 the loop nest and overall size growth, so we optimize for the overall
565 number of functions fully inlined in the program. */
566 else
567 {
568 int nest = MIN (edge->loop_nest, 8);
569 badness = cgraph_estimate_growth (edge->callee) * 256;
570
571 /* Decrease badness if call is nested. */
572 if (badness > 0)
573 badness >>= nest;
574 else
575 {
576 badness <<= nest;
577 }
578 }
579 /* Make recursive inlining happen always after other inlining is done. */
580 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
581 return badness + 1;
582 else
583 return badness;
584 }
585
586 /* Recompute heap nodes for each caller edge of NODE. */
587
588 static void
589 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
590 bitmap updated_nodes)
591 {
592 struct cgraph_edge *edge;
593 cgraph_inline_failed_t failed_reason;
594
595 if (!node->local.inlinable || node->local.disregard_inline_limits
596 || node->global.inlined_to)
597 return;
598 if (bitmap_bit_p (updated_nodes, node->uid))
599 return;
600 bitmap_set_bit (updated_nodes, node->uid);
601 node->global.estimated_growth = INT_MIN;
602
603 if (!node->local.inlinable)
604 return;
605 /* Prune out edges we won't inline into anymore. */
606 if (!cgraph_default_inline_p (node, &failed_reason))
607 {
608 for (edge = node->callers; edge; edge = edge->next_caller)
609 if (edge->aux)
610 {
611 fibheap_delete_node (heap, (fibnode_t) edge->aux);
612 edge->aux = NULL;
613 if (edge->inline_failed)
614 edge->inline_failed = failed_reason;
615 }
616 return;
617 }
618
619 for (edge = node->callers; edge; edge = edge->next_caller)
620 if (edge->inline_failed)
621 {
622 int badness = cgraph_edge_badness (edge);
623 if (edge->aux)
624 {
625 fibnode_t n = (fibnode_t) edge->aux;
626 gcc_assert (n->data == edge);
627 if (n->key == badness)
628 continue;
629
630 /* fibheap_replace_key only increase the keys. */
631 if (fibheap_replace_key (heap, n, badness))
632 continue;
633 fibheap_delete_node (heap, (fibnode_t) edge->aux);
634 }
635 edge->aux = fibheap_insert (heap, badness, edge);
636 }
637 }
638
639 /* Recompute heap nodes for the caller edges of each callee of NODE. */
640
641 static void
642 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
643 bitmap updated_nodes)
644 {
645 struct cgraph_edge *e;
646 node->global.estimated_growth = INT_MIN;
647
648 for (e = node->callees; e; e = e->next_callee)
649 if (e->inline_failed)
650 update_caller_keys (heap, e->callee, updated_nodes);
651 else if (!e->inline_failed)
652 update_callee_keys (heap, e->callee, updated_nodes);
653 }
654
655 /* Enqueue all recursive calls from NODE into priority queue depending on
656 how likely we want to recursively inline the call. */
657
658 static void
659 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
660 fibheap_t heap)
661 {
662 static int priority;
663 struct cgraph_edge *e;
664 for (e = where->callees; e; e = e->next_callee)
665 if (e->callee == node)
666 {
667 /* When profile feedback is available, prioritize by expected number
668 of calls. Without profile feedback we maintain simple queue
669 to order candidates via recursive depths. */
670 fibheap_insert (heap,
671 !max_count ? priority++
672 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
673 e);
674 }
675 for (e = where->callees; e; e = e->next_callee)
676 if (!e->inline_failed)
677 lookup_recursive_calls (node, e->callee, heap);
678 }
679
680 /* Decide on recursive inlining: in the case function has recursive calls,
681 inline until body size reaches given argument. If any new indirect edges
682 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
683 is NULL. */
684
685 static bool
686 cgraph_decide_recursive_inlining (struct cgraph_node *node,
687 VEC (cgraph_edge_p, heap) **new_edges)
688 {
689 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
690 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
691 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
692 fibheap_t heap;
693 struct cgraph_edge *e;
694 struct cgraph_node *master_clone, *next;
695 int depth = 0;
696 int n = 0;
697
698 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
699 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
700 return false;
701
702 if (DECL_DECLARED_INLINE_P (node->decl))
703 {
704 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
705 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
706 }
707
708 /* Make sure that function is small enough to be considered for inlining. */
709 if (!max_depth
710 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
711 return false;
712 heap = fibheap_new ();
713 lookup_recursive_calls (node, node, heap);
714 if (fibheap_empty (heap))
715 {
716 fibheap_delete (heap);
717 return false;
718 }
719
720 if (dump_file)
721 fprintf (dump_file,
722 " Performing recursive inlining on %s\n",
723 cgraph_node_name (node));
724
725 /* We need original clone to copy around. */
726 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
727 master_clone->needed = true;
728 for (e = master_clone->callees; e; e = e->next_callee)
729 if (!e->inline_failed)
730 cgraph_clone_inlined_nodes (e, true, false);
731
732 /* Do the inlining and update the list of recursive calls during the process. */
733 while (!fibheap_empty (heap)
734 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
735 <= limit))
736 {
737 struct cgraph_edge *curr
738 = (struct cgraph_edge *) fibheap_extract_min (heap);
739 struct cgraph_node *cnode;
740
741 depth = 1;
742 for (cnode = curr->caller;
743 cnode->global.inlined_to; cnode = cnode->callers->caller)
744 if (node->decl == curr->callee->decl)
745 depth++;
746 if (depth > max_depth)
747 {
748 if (dump_file)
749 fprintf (dump_file,
750 " maximal depth reached\n");
751 continue;
752 }
753
754 if (max_count)
755 {
756 if (!cgraph_maybe_hot_edge_p (curr))
757 {
758 if (dump_file)
759 fprintf (dump_file, " Not inlining cold call\n");
760 continue;
761 }
762 if (curr->count * 100 / node->count < probability)
763 {
764 if (dump_file)
765 fprintf (dump_file,
766 " Probability of edge is too small\n");
767 continue;
768 }
769 }
770
771 if (dump_file)
772 {
773 fprintf (dump_file,
774 " Inlining call of depth %i", depth);
775 if (node->count)
776 {
777 fprintf (dump_file, " called approx. %.2f times per call",
778 (double)curr->count / node->count);
779 }
780 fprintf (dump_file, "\n");
781 }
782 cgraph_redirect_edge_callee (curr, master_clone);
783 cgraph_mark_inline_edge (curr, false, new_edges);
784 lookup_recursive_calls (node, curr->callee, heap);
785 n++;
786 }
787 if (!fibheap_empty (heap) && dump_file)
788 fprintf (dump_file, " Recursive inlining growth limit met.\n");
789
790 fibheap_delete (heap);
791 if (dump_file)
792 fprintf (dump_file,
793 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
794 master_clone->global.size, node->global.size,
795 master_clone->global.time, node->global.time);
796
797 /* Remove the master clone we used for inlining. We rely on the fact that
798 clones inlined into the master clone get queued just before the master
799 clone, so we don't need recursion. */
800 for (node = cgraph_nodes; node != master_clone;
801 node = next)
802 {
803 next = node->next;
804 if (node->global.inlined_to == master_clone)
805 cgraph_remove_node (node);
806 }
807 cgraph_remove_node (master_clone);
808 /* FIXME: Recursive inlining actually reduces number of calls of the
809 function. At this place we should probably walk the function and
810 inline clones and compensate the counts accordingly. This probably
811 doesn't matter much in practice. */
812 return n > 0;
813 }
814
815 /* Set inline_failed for all callers of given function to REASON. */
816
817 static void
818 cgraph_set_inline_failed (struct cgraph_node *node,
819 cgraph_inline_failed_t reason)
820 {
821 struct cgraph_edge *e;
822
823 if (dump_file)
824 fprintf (dump_file, "Inlining failed: %s\n",
825 cgraph_inline_failed_string (reason));
826 for (e = node->callers; e; e = e->next_caller)
827 if (e->inline_failed)
828 e->inline_failed = reason;
829 }
830
831 /* Given whole compilation unit estimate of INSNS, compute how large we can
832 allow the unit to grow. */
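/* For instance, assuming --param inline-unit-growth is 30 and the unit is
   already larger than PARAM_LARGE_UNIT_INSNS, a unit estimated at 10000 insns
   may grow up to 13000 insns; smaller units are first rounded up to
   PARAM_LARGE_UNIT_INSNS before the percentage is applied. */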
833 static int
834 compute_max_insns (int insns)
835 {
836 int max_insns = insns;
837 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
838 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
839
840 return ((HOST_WIDEST_INT) max_insns
841 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
842 }
843
844 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
845 static void
846 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
847 {
848 while (VEC_length (cgraph_edge_p, new_edges) > 0)
849 {
850 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
851
852 gcc_assert (!edge->aux);
853 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
854 }
855 }
856
857
858 /* We use a greedy algorithm for inlining of small functions:
859 all inline candidates are put into a prioritized heap based on the
860 estimated growth of the overall number of instructions, and the
861 estimates are updated after each inlining. */
864
865 static void
866 cgraph_decide_inlining_of_small_functions (void)
867 {
868 struct cgraph_node *node;
869 struct cgraph_edge *edge;
870 cgraph_inline_failed_t failed_reason;
871 fibheap_t heap = fibheap_new ();
872 bitmap updated_nodes = BITMAP_ALLOC (NULL);
873 int min_size, max_size;
874 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
875
876 if (flag_indirect_inlining)
877 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
878
879 if (dump_file)
880 fprintf (dump_file, "\nDeciding on smaller functions:\n");
881
882 /* Put all inline candidates into the heap. */
883
884 for (node = cgraph_nodes; node; node = node->next)
885 {
886 if (!node->local.inlinable || !node->callers
887 || node->local.disregard_inline_limits)
888 continue;
889 if (dump_file)
890 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
891
892 node->global.estimated_growth = INT_MIN;
893 if (!cgraph_default_inline_p (node, &failed_reason))
894 {
895 cgraph_set_inline_failed (node, failed_reason);
896 continue;
897 }
898
899 for (edge = node->callers; edge; edge = edge->next_caller)
900 if (edge->inline_failed)
901 {
902 gcc_assert (!edge->aux);
903 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
904 }
905 }
906
907 max_size = compute_max_insns (overall_size);
908 min_size = overall_size;
909
910 while (overall_size <= max_size
911 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
912 {
913 int old_size = overall_size;
914 struct cgraph_node *where;
915 int growth =
916 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
917 cgraph_inline_failed_t not_good = CIF_OK;
918
919 growth -= edge->caller->global.size;
920
921 if (dump_file)
922 {
923 fprintf (dump_file,
924 "\nConsidering %s with %i size\n",
925 cgraph_node_name (edge->callee),
926 edge->callee->global.size);
927 fprintf (dump_file,
928 " to be inlined into %s in %s:%i\n"
929 " Estimated growth after inlined into all callees is %+i insns.\n"
930 " Estimated badness is %i, frequency %.2f.\n",
931 cgraph_node_name (edge->caller),
932 gimple_filename ((const_gimple) edge->call_stmt),
933 gimple_lineno ((const_gimple) edge->call_stmt),
934 cgraph_estimate_growth (edge->callee),
935 cgraph_edge_badness (edge),
936 edge->frequency / (double)CGRAPH_FREQ_BASE);
937 if (edge->count)
938 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
939 }
940 gcc_assert (edge->aux);
941 edge->aux = NULL;
942 if (!edge->inline_failed)
943 continue;
944
945 /* When we don't have profile info ready, we don't weight in any way the
946 position of the call in the procedure itself. This means that if a call of
947 function A from function B seems profitable to inline, the recursive
948 call of function A in the inline copy of A in B will look profitable too,
949 and we end up inlining until reaching the maximal function growth. This
950 is not a good idea, so prohibit the recursive inlining.
951
952 ??? When the frequencies are taken into account we might not need this
953 restriction.
954
955 We need to be careful here; in some testcases, e.g. directives.c in
956 libcpp, we can estimate a self-recursive function to have negative growth
957 for inlining completely.
958 */
959 if (!edge->count)
960 {
961 where = edge->caller;
962 while (where->global.inlined_to)
963 {
964 if (where->decl == edge->callee->decl)
965 break;
966 where = where->callers->caller;
967 }
968 if (where->global.inlined_to)
969 {
970 edge->inline_failed
971 = (edge->callee->local.disregard_inline_limits
972 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
973 if (dump_file)
974 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
975 continue;
976 }
977 }
978
979 if (!cgraph_maybe_hot_edge_p (edge))
980 not_good = CIF_UNLIKELY_CALL;
981 if (!flag_inline_functions
982 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
983 not_good = CIF_NOT_DECLARED_INLINED;
984 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
985 not_good = CIF_OPTIMIZING_FOR_SIZE;
986 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
987 {
988 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
989 &edge->inline_failed))
990 {
991 edge->inline_failed = not_good;
992 if (dump_file)
993 fprintf (dump_file, " inline_failed:%s.\n",
994 cgraph_inline_failed_string (edge->inline_failed));
995 }
996 continue;
997 }
998 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
999 {
1000 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1001 &edge->inline_failed))
1002 {
1003 if (dump_file)
1004 fprintf (dump_file, " inline_failed:%s.\n",
1005 cgraph_inline_failed_string (edge->inline_failed));
1006 }
1007 continue;
1008 }
1009 if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
1010 {
1011 gimple_call_set_cannot_inline (edge->call_stmt, true);
1012 edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
1013 if (dump_file)
1014 fprintf (dump_file, " inline_failed:%s.\n",
1015 cgraph_inline_failed_string (edge->inline_failed));
1016 continue;
1017 }
1018 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1019 &edge->inline_failed))
1020 {
1021 where = edge->caller;
1022 if (where->global.inlined_to)
1023 where = where->global.inlined_to;
1024 if (!cgraph_decide_recursive_inlining (where,
1025 flag_indirect_inlining
1026 ? &new_indirect_edges : NULL))
1027 continue;
1028 if (flag_indirect_inlining)
1029 add_new_edges_to_heap (heap, new_indirect_edges);
1030 update_callee_keys (heap, where, updated_nodes);
1031 }
1032 else
1033 {
1034 struct cgraph_node *callee;
1035 if (gimple_call_cannot_inline_p (edge->call_stmt)
1036 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1037 &edge->inline_failed, true))
1038 {
1039 if (dump_file)
1040 fprintf (dump_file, " Not inlining into %s:%s.\n",
1041 cgraph_node_name (edge->caller),
1042 cgraph_inline_failed_string (edge->inline_failed));
1043 continue;
1044 }
1045 callee = edge->callee;
1046 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1047 if (flag_indirect_inlining)
1048 add_new_edges_to_heap (heap, new_indirect_edges);
1049
1050 update_callee_keys (heap, callee, updated_nodes);
1051 }
1052 where = edge->caller;
1053 if (where->global.inlined_to)
1054 where = where->global.inlined_to;
1055
1056 /* Our profitability metric can depend on local properties
1057 such as the number of inlinable calls and the size of the function body.
1058 After inlining these properties might change for the function we
1059 inlined into (since its body size changed) and for the functions
1060 called by the function we inlined (since the number of their inlinable
1061 callers might change). */
1062 update_caller_keys (heap, where, updated_nodes);
1063 bitmap_clear (updated_nodes);
1064
1065 if (dump_file)
1066 {
1067 fprintf (dump_file,
1068 " Inlined into %s which now has size %i and self time %i,"
1069 "net change of %+i.\n",
1070 cgraph_node_name (edge->caller),
1071 edge->caller->global.time,
1072 edge->caller->global.size,
1073 overall_size - old_size);
1074 }
1075 if (min_size > overall_size)
1076 {
1077 min_size = overall_size;
1078 max_size = compute_max_insns (min_size);
1079
1080 if (dump_file)
1081 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1082 }
1083 }
1084 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1085 {
1086 gcc_assert (edge->aux);
1087 edge->aux = NULL;
1088 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1089 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1090 &edge->inline_failed))
1091 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1092 }
1093
1094 if (new_indirect_edges)
1095 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1096 fibheap_delete (heap);
1097 BITMAP_FREE (updated_nodes);
1098 }
1099
1100 /* Decide on the inlining. We do so in the topological order to avoid
1101 expenses on updating data structures. */
1102
1103 static unsigned int
1104 cgraph_decide_inlining (void)
1105 {
1106 struct cgraph_node *node;
1107 int nnodes;
1108 struct cgraph_node **order =
1109 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1110 int old_size = 0;
1111 int i;
1112 bool redo_always_inline = true;
1113 int initial_size = 0;
1114
1115 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1116
1117 max_count = 0;
1118 max_benefit = 0;
1119 for (node = cgraph_nodes; node; node = node->next)
1120 if (node->analyzed)
1121 {
1122 struct cgraph_edge *e;
1123
1124 gcc_assert (inline_summary (node)->self_size == node->global.size);
1125 gcc_assert (node->needed || node->reachable);
1126 initial_size += node->global.size;
1127 for (e = node->callees; e; e = e->next_callee)
1128 if (max_count < e->count)
1129 max_count = e->count;
1130 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1131 max_benefit = inline_summary (node)->time_inlining_benefit;
1132 }
1133 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1134 overall_size = initial_size;
1135
1136 nnodes = cgraph_postorder (order);
1137
1138 if (dump_file)
1139 fprintf (dump_file,
1140 "\nDeciding on inlining. Starting with size %i.\n",
1141 initial_size);
1142
1143 for (node = cgraph_nodes; node; node = node->next)
1144 node->aux = 0;
1145
1146 if (dump_file)
1147 fprintf (dump_file, "\nInlining always_inline functions:\n");
1148
1149 /* In the first pass mark all always_inline edges. Do this with a priority
1150 so none of our later choices will make this impossible. */
1151 while (redo_always_inline)
1152 {
1153 redo_always_inline = false;
1154 for (i = nnodes - 1; i >= 0; i--)
1155 {
1156 struct cgraph_edge *e, *next;
1157
1158 node = order[i];
1159
1160 /* Handle nodes to be flattened, but don't update overall unit
1161 size. */
1162 if (lookup_attribute ("flatten",
1163 DECL_ATTRIBUTES (node->decl)) != NULL)
1164 {
1165 if (dump_file)
1166 fprintf (dump_file,
1167 "Flattening %s\n", cgraph_node_name (node));
1168 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1169 }
1170
1171 if (!node->local.disregard_inline_limits)
1172 continue;
1173 if (dump_file)
1174 fprintf (dump_file,
1175 "\nConsidering %s size:%i (always inline)\n",
1176 cgraph_node_name (node), node->global.size);
1177 old_size = overall_size;
1178 for (e = node->callers; e; e = next)
1179 {
1180 next = e->next_caller;
1181 if (!e->inline_failed
1182 || gimple_call_cannot_inline_p (e->call_stmt))
1183 continue;
1184 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1185 &e->inline_failed))
1186 continue;
1187 if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
1188 {
1189 gimple_call_set_cannot_inline (e->call_stmt, true);
1190 continue;
1191 }
1192 if (cgraph_mark_inline_edge (e, true, NULL))
1193 redo_always_inline = true;
1194 if (dump_file)
1195 fprintf (dump_file,
1196 " Inlined into %s which now has size %i.\n",
1197 cgraph_node_name (e->caller),
1198 e->caller->global.size);
1199 }
1200 /* Inlining a self-recursive function might introduce new calls to
1201 itself that we didn't see in the loop above. Fill in the proper
1202 reason why inlining failed. */
1203 for (e = node->callers; e; e = e->next_caller)
1204 if (e->inline_failed)
1205 e->inline_failed = CIF_RECURSIVE_INLINING;
1206 if (dump_file)
1207 fprintf (dump_file,
1208 " Inlined for a net change of %+i size.\n",
1209 overall_size - old_size);
1210 }
1211 }
1212
1213 cgraph_decide_inlining_of_small_functions ();
1214
1215 if (flag_inline_functions_called_once)
1216 {
1217 if (dump_file)
1218 fprintf (dump_file, "\nDeciding on functions called once:\n");
1219
1220 /* And finally decide what functions are called once. */
1221 for (i = nnodes - 1; i >= 0; i--)
1222 {
1223 node = order[i];
1224
1225 if (node->callers
1226 && !node->callers->next_caller
1227 && !node->needed
1228 && node->local.inlinable
1229 && node->callers->inline_failed
1230 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1231 && !DECL_EXTERNAL (node->decl)
1232 && !DECL_COMDAT (node->decl))
1233 {
1234 old_size = overall_size;
1235 if (dump_file)
1236 {
1237 fprintf (dump_file,
1238 "\nConsidering %s size %i.\n",
1239 cgraph_node_name (node), node->global.size);
1240 fprintf (dump_file,
1241 " Called once from %s %i insns.\n",
1242 cgraph_node_name (node->callers->caller),
1243 node->callers->caller->global.size);
1244 }
1245
1246 if (cgraph_check_inline_limits (node->callers->caller, node,
1247 NULL, false))
1248 {
1249 cgraph_mark_inline (node->callers);
1250 if (dump_file)
1251 fprintf (dump_file,
1252 " Inlined into %s which now has %i size"
1253 " for a net change of %+i size.\n",
1254 cgraph_node_name (node->callers->caller),
1255 node->callers->caller->global.size,
1256 overall_size - old_size);
1257 }
1258 else
1259 {
1260 if (dump_file)
1261 fprintf (dump_file,
1262 " Inline limit reached, not inlined.\n");
1263 }
1264 }
1265 }
1266 }
1267
1268 /* Free ipa-prop structures if they are no longer needed. */
1269 if (flag_indirect_inlining)
1270 free_all_ipa_structures_after_iinln ();
1271
1272 if (dump_file)
1273 fprintf (dump_file,
1274 "\nInlined %i calls, eliminated %i functions, "
1275 "size %i turned to %i size.\n\n",
1276 ncalls_inlined, nfunctions_inlined, initial_size,
1277 overall_size);
1278 free (order);
1279 return 0;
1280 }
1281
1282 /* Try to inline edge E from the incremental inliner. MODE specifies the mode
1283 of the inliner.
1284
1285 We detect cycles by storing the mode of the inliner into the cgraph_node the
1286 last time we visited it in the recursion. In general, when a mode is set, we
1287 have recursive inlining, but as a special case we want to try harder to
1288 inline ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a
1289 being flattened and b being always inline. Flattening 'a' will collapse
1290 a->b->c before hitting the cycle. To accommodate always inline, we however
1291 need to inline a->b->c->b.
1292
1293 So after hitting a cycle the first time, we switch into ALWAYS_INLINE mode
1294 and stop inlining only after hitting a cycle again in ALWAYS_INLINE mode. */
1295 static bool
1296 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1297 {
1298 struct cgraph_node *callee = e->callee;
1299 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1300 bool always_inline = e->callee->local.disregard_inline_limits;
1301 bool inlined = false;
1302
1303 /* Have we hit a cycle? */
1304 if (callee_mode)
1305 {
1306 /* We are not in ALWAYS_INLINE only mode yet and the function in
1307 question is always_inline, so try harder before giving up. */
1308 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1309 {
1310 if (dump_file)
1311 {
1312 indent_to (dump_file, depth);
1313 fprintf (dump_file,
1314 "Hit cycle in %s, switching to always inline only.\n",
1315 cgraph_node_name (callee));
1316 }
1317 mode = INLINE_ALWAYS_INLINE;
1318 }
1319 /* Otherwise it is time to give up. */
1320 else
1321 {
1322 if (dump_file)
1323 {
1324 indent_to (dump_file, depth);
1325 fprintf (dump_file,
1326 "Not inlining %s into %s to avoid cycle.\n",
1327 cgraph_node_name (callee),
1328 cgraph_node_name (e->caller));
1329 }
1330 e->inline_failed = (e->callee->local.disregard_inline_limits
1331 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1332 return false;
1333 }
1334 }
1335
1336 callee->aux = (void *)(size_t) mode;
1337 if (dump_file)
1338 {
1339 indent_to (dump_file, depth);
1340 fprintf (dump_file, " Inlining %s into %s.\n",
1341 cgraph_node_name (e->callee),
1342 cgraph_node_name (e->caller));
1343 }
1344 if (e->inline_failed)
1345 {
1346 cgraph_mark_inline (e);
1347
1348 /* In order to fully inline always_inline functions, we need to
1349 recurse here, since the inlined functions might not be processed by
1350 incremental inlining at all yet.
1351
1352 Also flattening needs to be done recursively. */
1353
1354 if (mode == INLINE_ALL || always_inline)
1355 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1356 inlined = true;
1357 }
1358 callee->aux = (void *)(size_t) callee_mode;
1359 return inlined;
1360 }
1361
1362 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1363 in leaf functions. */
1364 static bool
1365 leaf_node_p (struct cgraph_node *n)
1366 {
1367 struct cgraph_edge *e;
1368 for (e = n->callees; e; e = e->next_callee)
1369 if (!DECL_BUILT_IN (e->callee->decl)
1370 || (!TREE_READONLY (e->callee->decl)
1371 || DECL_PURE_P (e->callee->decl)))
1372 return false;
1373 return true;
1374 }
1375
1376 /* Decide on inlining of calls in NODE according to MODE; this is the
1377 per-function driver used by the early inliner and for flattening.
1378 DEPTH is the depth of recursion, used only for debug output. */
1379
1380 static bool
1381 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1382 enum inlining_mode mode,
1383 int depth)
1384 {
1385 struct cgraph_edge *e;
1386 bool inlined = false;
1387 cgraph_inline_failed_t failed_reason;
1388 enum inlining_mode old_mode;
1389
1390 #ifdef ENABLE_CHECKING
1391 verify_cgraph_node (node);
1392 #endif
1393
1394 old_mode = (enum inlining_mode) (size_t)node->aux;
1395
1396 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1397 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1398 {
1399 if (dump_file)
1400 {
1401 indent_to (dump_file, depth);
1402 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1403 }
1404 mode = INLINE_ALL;
1405 }
1406
1407 node->aux = (void *)(size_t) mode;
1408
1409 /* First of all look for always inline functions. */
1410 if (mode != INLINE_SIZE_NORECURSIVE)
1411 for (e = node->callees; e; e = e->next_callee)
1412 {
1413 if (!e->callee->local.disregard_inline_limits
1414 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1415 continue;
1416 if (gimple_call_cannot_inline_p (e->call_stmt))
1417 continue;
1418 /* When the edge is already inlined, we just need to recurse into
1419 it in order to fully flatten the leaves. */
1420 if (!e->inline_failed && mode == INLINE_ALL)
1421 {
1422 inlined |= try_inline (e, mode, depth);
1423 continue;
1424 }
1425 if (dump_file)
1426 {
1427 indent_to (dump_file, depth);
1428 fprintf (dump_file,
1429 "Considering to always inline inline candidate %s.\n",
1430 cgraph_node_name (e->callee));
1431 }
1432 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1433 {
1434 if (dump_file)
1435 {
1436 indent_to (dump_file, depth);
1437 fprintf (dump_file, "Not inlining: recursive call.\n");
1438 }
1439 continue;
1440 }
1441 if (!tree_can_inline_p (node->decl, e->callee->decl))
1442 {
1443 gimple_call_set_cannot_inline (e->call_stmt, true);
1444 if (dump_file)
1445 {
1446 indent_to (dump_file, depth);
1447 fprintf (dump_file,
1448 "Not inlining: Target specific option mismatch.\n");
1449 }
1450 continue;
1451 }
1452 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1453 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1454 {
1455 if (dump_file)
1456 {
1457 indent_to (dump_file, depth);
1458 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1459 }
1460 continue;
1461 }
1462 if (!e->callee->analyzed)
1463 {
1464 if (dump_file)
1465 {
1466 indent_to (dump_file, depth);
1467 fprintf (dump_file,
1468 "Not inlining: Function body no longer available.\n");
1469 }
1470 continue;
1471 }
1472 inlined |= try_inline (e, mode, depth);
1473 }
1474
1475 /* Now do the automatic inlining. */
1476 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1477 for (e = node->callees; e; e = e->next_callee)
1478 {
1479 int allowed_growth = 0;
1480 if (!e->callee->local.inlinable
1481 || !e->inline_failed
1482 || e->callee->local.disregard_inline_limits)
1483 continue;
1484 if (dump_file)
1485 fprintf (dump_file, "Considering inline candidate %s.\n",
1486 cgraph_node_name (e->callee));
1487 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1488 {
1489 if (dump_file)
1490 {
1491 indent_to (dump_file, depth);
1492 fprintf (dump_file, "Not inlining: recursive call.\n");
1493 }
1494 continue;
1495 }
1496 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1497 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1498 {
1499 if (dump_file)
1500 {
1501 indent_to (dump_file, depth);
1502 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1503 }
1504 continue;
1505 }
1506
1507 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1508 && optimize_function_for_speed_p (cfun))
1509 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1510
1511 /* When the function body would grow and inlining the function won't
1512 eliminate the need for offline copy of the function, don't inline.
1513 */
1514 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1515 || (!flag_inline_functions
1516 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1517 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1518 > e->caller->global.size + allowed_growth)
1519 && cgraph_estimate_growth (e->callee) > allowed_growth)
1520 {
1521 if (dump_file)
1522 {
1523 indent_to (dump_file, depth);
1524 fprintf (dump_file,
1525 "Not inlining: code size would grow by %i.\n",
1526 cgraph_estimate_size_after_inlining (1, e->caller,
1527 e->callee)
1528 - e->caller->global.size);
1529 }
1530 continue;
1531 }
1532 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1533 false)
1534 || gimple_call_cannot_inline_p (e->call_stmt))
1535 {
1536 if (dump_file)
1537 {
1538 indent_to (dump_file, depth);
1539 fprintf (dump_file, "Not inlining: %s.\n",
1540 cgraph_inline_failed_string (e->inline_failed));
1541 }
1542 continue;
1543 }
1544 if (!e->callee->analyzed)
1545 {
1546 if (dump_file)
1547 {
1548 indent_to (dump_file, depth);
1549 fprintf (dump_file,
1550 "Not inlining: Function body no longer available.\n");
1551 }
1552 continue;
1553 }
1554 if (!tree_can_inline_p (node->decl, e->callee->decl))
1555 {
1556 gimple_call_set_cannot_inline (e->call_stmt, true);
1557 if (dump_file)
1558 {
1559 indent_to (dump_file, depth);
1560 fprintf (dump_file,
1561 "Not inlining: Target specific option mismatch.\n");
1562 }
1563 continue;
1564 }
1565 if (cgraph_default_inline_p (e->callee, &failed_reason))
1566 inlined |= try_inline (e, mode, depth);
1567 }
1568 node->aux = (void *)(size_t) old_mode;
1569 return inlined;
1570 }
1571
1572 /* Because inlining might remove no-longer reachable nodes, we need to
1573 keep the array visible to the garbage collector to avoid reading
1574 collected-out nodes. */
1575 static int nnodes;
1576 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1577
1578 /* Do inlining of small functions. Doing so early helps profiling and other
1579 passes to be somewhat more effective and avoids some code duplication in
1580 later real inlining pass for testcases with very many function calls. */
1581 static unsigned int
1582 cgraph_early_inlining (void)
1583 {
1584 struct cgraph_node *node = cgraph_node (current_function_decl);
1585 unsigned int todo = 0;
1586 int iterations = 0;
1587
1588 if (sorrycount || errorcount)
1589 return 0;
1590 while (cgraph_decide_inlining_incrementally (node,
1591 iterations
1592 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
1593 && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
1594 {
1595 timevar_push (TV_INTEGRATION);
1596 todo |= optimize_inline_calls (current_function_decl);
1597 iterations++;
1598 timevar_pop (TV_INTEGRATION);
1599 }
1600 if (dump_file)
1601 fprintf (dump_file, "Iterations: %i\n", iterations);
1602 cfun->always_inline_functions_inlined = true;
1603 return todo;
1604 }
1605
1606 /* When inlining shall be performed. */
1607 static bool
1608 cgraph_gate_early_inlining (void)
1609 {
1610 return flag_early_inlining;
1611 }
1612
1613 struct gimple_opt_pass pass_early_inline =
1614 {
1615 {
1616 GIMPLE_PASS,
1617 "einline", /* name */
1618 cgraph_gate_early_inlining, /* gate */
1619 cgraph_early_inlining, /* execute */
1620 NULL, /* sub */
1621 NULL, /* next */
1622 0, /* static_pass_number */
1623 TV_INLINE_HEURISTICS, /* tv_id */
1624 0, /* properties_required */
1625 0, /* properties_provided */
1626 0, /* properties_destroyed */
1627 0, /* todo_flags_start */
1628 TODO_dump_func /* todo_flags_finish */
1629 }
1630 };
1631
1632 /* When inlining shall be performed. */
1633 static bool
1634 cgraph_gate_ipa_early_inlining (void)
1635 {
1636 return (flag_early_inlining
1637 && (flag_branch_probabilities || flag_test_coverage
1638 || profile_arc_flag));
1639 }
1640
1641 /* IPA pass wrapper for the early inlining pass. We need to run early inlining
1642 before tree profiling, so we have a stand-alone IPA pass for doing so. */
1643 struct simple_ipa_opt_pass pass_ipa_early_inline =
1644 {
1645 {
1646 SIMPLE_IPA_PASS,
1647 "einline_ipa", /* name */
1648 cgraph_gate_ipa_early_inlining, /* gate */
1649 NULL, /* execute */
1650 NULL, /* sub */
1651 NULL, /* next */
1652 0, /* static_pass_number */
1653 TV_INLINE_HEURISTICS, /* tv_id */
1654 0, /* properties_required */
1655 0, /* properties_provided */
1656 0, /* properties_destroyed */
1657 0, /* todo_flags_start */
1658 TODO_dump_cgraph /* todo_flags_finish */
1659 }
1660 };
1661
1662 /* See if the statement might disappear after inlining. We are not terribly
1663 sophisticated, basically looking for simple abstraction penalty wrappers. */
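/* For example, a return statement, a cast of a parameter, a load through a
   parameter passed by reference, or a store of a parameter into the return
   value are all likely to become no-ops once the function is inlined. */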
1664
1665 static bool
1666 likely_eliminated_by_inlining_p (gimple stmt)
1667 {
1668 enum gimple_code code = gimple_code (stmt);
1669 switch (code)
1670 {
1671 case GIMPLE_RETURN:
1672 return true;
1673 case GIMPLE_ASSIGN:
1674 if (gimple_num_ops (stmt) != 2)
1675 return false;
1676
1677 /* Casts of parameters, loads from parameters passed by reference
1678 and stores to return value or parameters are probably free after
1679 inlining. */
1680 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1681 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1682 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1683 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1684 {
1685 tree rhs = gimple_assign_rhs1 (stmt);
1686 tree lhs = gimple_assign_lhs (stmt);
1687 tree inner_rhs = rhs;
1688 tree inner_lhs = lhs;
1689 bool rhs_free = false;
1690 bool lhs_free = false;
1691
1692 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1693 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1694 while (handled_component_p (inner_rhs)
1695 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1696 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1697
1698
1699 if (TREE_CODE (inner_rhs) == PARM_DECL
1700 || (TREE_CODE (inner_rhs) == SSA_NAME
1701 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1702 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1703 rhs_free = true;
1704 if (rhs_free && is_gimple_reg (lhs))
1705 lhs_free = true;
1706 if (((TREE_CODE (inner_lhs) == PARM_DECL
1707 || (TREE_CODE (inner_lhs) == SSA_NAME
1708 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1709 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1710 && inner_lhs != lhs)
1711 || TREE_CODE (inner_lhs) == RESULT_DECL
1712 || (TREE_CODE (inner_lhs) == SSA_NAME
1713 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1714 lhs_free = true;
1715 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1716 rhs_free = true;
1717 if (lhs_free && rhs_free)
1718 return true;
1719 }
1720 return false;
1721 default:
1722 return false;
1723 }
1724 }
1725
1726 /* Compute function body size parameters for NODE. */
1727
1728 static void
1729 estimate_function_body_sizes (struct cgraph_node *node)
1730 {
1731 gcov_type time = 0;
1732 gcov_type time_inlining_benefit = 0;
1733 int size = 0;
1734 int size_inlining_benefit = 0;
1735 basic_block bb;
1736 gimple_stmt_iterator bsi;
1737 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1738 tree arg;
1739 int freq;
1740 tree funtype = TREE_TYPE (node->decl);
1741 bitmap must_not_throw = must_not_throw_labels ();
1742
1743 if (dump_file)
1744 {
1745 fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));
1746 }
1747
1748 gcc_assert (my_function && my_function->cfg);
1749 FOR_EACH_BB_FN (bb, my_function)
1750 {
1751 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1752 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1753 {
1754 int this_size = estimate_num_insns (gsi_stmt (bsi), &eni_size_weights);
1755 int this_time = estimate_num_insns (gsi_stmt (bsi), &eni_time_weights);
1756
1757 /* MUST_NOT_THROW is usually handled by the runtime calling terminate and
1758 stopping stack unwinding. However, when there is a local cleanup that can
1759 resume to MUST_NOT_THROW, we generate an explicit handler containing a
1760 std::terminate () call.
1761
1762 Because inlining a function can introduce new cleanup regions, prior to
1763 inlining we keep a std::terminate () call for every MUST_NOT_THROW that
1764 contains a function call. The vast majority of these will be eliminated
1765 after inlining, and cross-jumping will unify possibly duplicated calls.
1766 So ignore these handlers for the function body estimates. */
1767 if (gimple_code (gsi_stmt (bsi)) == GIMPLE_LABEL
1768 && bitmap_bit_p (must_not_throw,
1769 LABEL_DECL_UID (gimple_label_label (gsi_stmt (bsi)))))
1770 {
1771 if (dump_file)
1772 fprintf (dump_file, " MUST_NOT_THROW landing pad. Ignoring whole BB.\n");
1773 }
1774 if (dump_file)
1775 {
1776 fprintf (dump_file, " freq:%6i size:%3i time:%3i ", freq, this_size, this_time);
1777 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
1778 }
1779 this_time *= freq;
1780 time += this_time;
1781 size += this_size;
1782 if (likely_eliminated_by_inlining_p (gsi_stmt (bsi)))
1783 {
1784 size_inlining_benefit += this_size;
1785 time_inlining_benefit += this_time;
1786 if (dump_file)
1787 fprintf (dump_file, " Likely eliminated\n");
1788 }
1789 gcc_assert (time >= 0);
1790 gcc_assert (size >= 0);
1791 }
1792 }
1793 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1794 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1795 / CGRAPH_FREQ_BASE);
1796 if (dump_file)
1797 {
1798 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1799 (int)time, (int)time_inlining_benefit,
1800 size, size_inlining_benefit);
1801 }
1802 time_inlining_benefit += eni_time_weights.call_cost;
1803 size_inlining_benefit += eni_size_weights.call_cost;
1804 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1805 {
1806 int cost = estimate_move_cost (TREE_TYPE (funtype));
1807 time_inlining_benefit += cost;
1808 size_inlining_benefit += cost;
1809 }
1810 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1811 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1812 {
1813 int cost = estimate_move_cost (TREE_TYPE (arg));
1814 time_inlining_benefit += cost;
1815 size_inlining_benefit += cost;
1816 }
1817 if (time_inlining_benefit > MAX_TIME)
1818 time_inlining_benefit = MAX_TIME;
1819 if (time > MAX_TIME)
1820 time = MAX_TIME;
1821 inline_summary (node)->self_time = time;
1822 inline_summary (node)->self_size = size;
1823 if (dump_file)
1824 {
1825 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1826 (int)time, (int)time_inlining_benefit,
1827 size, size_inlining_benefit);
1828 }
1829 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1830 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1831 BITMAP_FREE (must_not_throw);
1832 }
1833
1834 /* Compute parameters of functions used by the inliner. */
1835 unsigned int
1836 compute_inline_parameters (struct cgraph_node *node)
1837 {
1838 HOST_WIDE_INT self_stack_size;
1839
1840 gcc_assert (!node->global.inlined_to);
1841
1842 /* Estimate the stack size for the function. But not at -O0
1843 because estimated_stack_frame_size is a quadratic problem. */
1844 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1845 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1846 node->global.estimated_stack_size = self_stack_size;
1847 node->global.stack_frame_offset = 0;
1848
1849 /* Can this function be inlined at all? */
1850 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1851 if (node->local.inlinable && !node->local.disregard_inline_limits)
1852 node->local.disregard_inline_limits
1853 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1854 estimate_function_body_sizes (node);
1855 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1856 node->global.time = inline_summary (node)->self_time;
1857 node->global.size = inline_summary (node)->self_size;
1858 return 0;
1859 }
1860
1861
1862 /* Compute parameters used by the inliner for the function given by
1863 current_function_decl. */
1864 static unsigned int
1865 compute_inline_parameters_for_current (void)
1866 {
1867 compute_inline_parameters (cgraph_node (current_function_decl));
1868 return 0;
1869 }
1870
1871 struct gimple_opt_pass pass_inline_parameters =
1872 {
1873 {
1874 GIMPLE_PASS,
1875 "inline_param", /* name */
1876 NULL, /* gate */
1877 compute_inline_parameters_for_current,/* execute */
1878 NULL, /* sub */
1879 NULL, /* next */
1880 0, /* static_pass_number */
1881 TV_INLINE_HEURISTICS, /* tv_id */
1882 0, /* properties_required */
1883 0, /* properties_provided */
1884 0, /* properties_destroyed */
1885 0, /* todo_flags_start */
1886 0 /* todo_flags_finish */
1887 }
1888 };
1889
1890 /* This function performs the intraprocedural analysis of NODE that is required to
1891 inline indirect calls. */
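
/* Editorial note: the ipa-prop helpers used below build parameter descriptors
   for NODE, record how its parameters are used and modified, and compute jump
   functions for each callee edge describing how the actual arguments of the
   call relate to NODE's formal parameters.  */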
1892 static void
1893 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1894 {
1895 struct cgraph_edge *cs;
1896
1897 if (!flag_ipa_cp)
1898 {
1899 ipa_initialize_node_params (node);
1900 ipa_detect_param_modifications (node);
1901 }
1902 ipa_analyze_params_uses (node);
1903
1904 if (!flag_ipa_cp)
1905 for (cs = node->callees; cs; cs = cs->next_callee)
1906 {
1907 ipa_count_arguments (cs);
1908 ipa_compute_jump_functions (cs);
1909 }
1910
1911 if (dump_file)
1912 {
1913 ipa_print_node_params (dump_file, node);
1914 ipa_print_node_jump_functions (dump_file, node);
1915 }
1916 }
1917
1918 /* Compute inline parameters of NODE and, when enabled, the indirect inlining analysis. */
1919 static void
1920 analyze_function (struct cgraph_node *node)
1921 {
1922 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1923 current_function_decl = node->decl;
1924
1925 compute_inline_parameters (node);
1926 if (flag_indirect_inlining)
1927 inline_indirect_intraprocedural_analysis (node);
1928
1929 current_function_decl = NULL;
1930 pop_cfun ();
1931 }
1932
1933 /* Called when a new function is inserted into the callgraph late. */
1934 static void
1935 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1936 {
1937 analyze_function (node);
1938 }
1939
1940 /* Generate the inline summary for all analyzed functions in the callgraph. */
1941 static void
1942 inline_generate_summary (void)
1943 {
1944 struct cgraph_node *node;
1945
1946 function_insertion_hook_holder =
1947 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1948
1949 if (flag_indirect_inlining)
1950 {
1951 ipa_register_cgraph_hooks ();
1952 ipa_check_create_node_params ();
1953 ipa_check_create_edge_args ();
1954 }
1955
1956 for (node = cgraph_nodes; node; node = node->next)
1957 if (node->analyzed)
1958 analyze_function (node);
1959
1960 return;
1961 }
1962
1963 /* Apply inline plan to function. */
1964 static unsigned int
1965 inline_transform (struct cgraph_node *node)
1966 {
1967 unsigned int todo = 0;
1968 struct cgraph_edge *e;
1969
1970 /* We might need the body of this function so that we can expand
1971 it inline somewhere else. */
1972 if (cgraph_preserve_function_body_p (node->decl))
1973 save_inline_function_body (node);
1974
1975 for (e = node->callees; e; e = e->next_callee)
1976 if (!e->inline_failed || warn_inline)
1977 break;
1978
1979 if (e)
1980 {
1981 timevar_push (TV_INTEGRATION);
1982 todo = optimize_inline_calls (current_function_decl);
1983 timevar_pop (TV_INTEGRATION);
1984 }
1985 cfun->always_inline_functions_inlined = true;
1986 cfun->after_inlining = true;
1987 return todo | execute_fixup_cfg ();
1988 }
1989
1990 struct ipa_opt_pass_d pass_ipa_inline =
1991 {
1992 {
1993 IPA_PASS,
1994 "inline", /* name */
1995 NULL, /* gate */
1996 cgraph_decide_inlining, /* execute */
1997 NULL, /* sub */
1998 NULL, /* next */
1999 0, /* static_pass_number */
2000 TV_INLINE_HEURISTICS, /* tv_id */
2001 0, /* properties_required */
2002 0, /* properties_provided */
2003 0, /* properties_destroyed */
2004 TODO_remove_functions, /* todo_flags_start */
2005 TODO_dump_cgraph | TODO_dump_func
2006 | TODO_remove_functions /* todo_flags_finish */
2007 },
2008 inline_generate_summary, /* generate_summary */
2009 NULL, /* write_summary */
2010 NULL, /* read_summary */
2011 NULL, /* function_read_summary */
2012 0, /* TODOs */
2013 inline_transform, /* function_transform */
2014 NULL, /* variable_transform */
2015 };
2016
2017
2018 #include "gt-ipa-inline.h"