Squash commit of EH in gimple
[gcc.git] / gcc / ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 We separate inlining decisions from the inliner itself and store it
24 inside callgraph as so called inline plan. Refer to cgraph.c
25 documentation about particular representation of inline plans in the
26 callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32 This function allows to mark given call inline and performs necessary
33 modifications of cgraph (production of the clones and updating overall
34 statistics)
35
36 inlining heuristics limits
37
38 These functions allow to check that particular inlining is allowed
39 by the limits specified by user (allowed function growth, overall unit
40 growth and so on).
41
42 inlining heuristics
43
44 This is implementation of IPA pass aiming to get as much of benefit
45 from inlining obeying the limits checked above.
46
47 The implementation of particular heuristics is separated from
48 the rest of code to make it easier to replace it with more complicated
49 implementation in the future. The rest of inlining code acts as a
50 library aimed to modify the callgraph and verify that the parameters
51 on code size growth fits.
52
53 To mark given call inline, use cgraph_mark_inline function, the
54 verification is performed by cgraph_default_inline_p and
55 cgraph_check_inline_limits.
56
57 The heuristics implements simple knapsack style algorithm ordering
58 all functions by their "profitability" (estimated by code size growth)
59 and inlining them in priority order.
60
61 cgraph_decide_inlining implements heuristics taking whole callgraph
62 into account, while cgraph_decide_inlining_incrementally considers
63 only one function at a time and is used by early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69 This pass computes local properties of functions that are used by inliner:
70 estimated function body size, whether function is inlinable at all and
71 stack frame consumption.
72
73 Before executing any of inliner passes, this local pass has to be applied
74 to each function in the callgraph (ie run as subpass of some earlier
75 IPA pass). The results are made out of date by any optimization applied
76 on the function body.
77
78 pass_early_inlining
79
80 Simple local inlining pass inlining callees into current function. This
81 pass makes no global whole compilation unit analysis and this when allowed
82 to do inlining expanding code size it might result in unbounded growth of
83 whole unit.
84
85 The pass is run during conversion into SSA form. Only functions already
86 converted into SSA form are inlined, so the conversion must happen in
87 topological order on the callgraph (that is maintained by pass manager).
88 The functions after inlining are early optimized so the early inliner sees
89 unoptimized function itself, but all considered callees are already
90 optimized allowing it to unfold abstraction penalty on C++ effectively and
91 cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, the early inlining is also necessary to reduce
96 instrumentation costs on program with high abstraction penalty (doing
97 many redundant calls). This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting already instrumented function bodies we brought in via
100 inlining.
101
102 To avoid this, this pass is executed as IPA pass before profiling. It is
103 simple wrapper to pass_early_inlining and ensures first inlining.
104
105 pass_ipa_inline
106
107 This is the main pass implementing simple greedy algorithm to do inlining
108 of small functions that results in overall growth of compilation unit and
109 inlining of functions called once. The pass compute just so called inline
110 plan (representation of inlining to be done in callgraph) and unlike early
111 inlining it is not performing the inlining itself.
112
113 pass_apply_inline
114
115 This pass performs actual inlining according to pass_ipa_inline on given
116 function. Possible the function body before inlining is saved when it is
117 needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
143 #define MAX_TIME 1000000000
144
145 /* Mode incremental inliner operate on:
146
147 In ALWAYS_INLINE only functions marked
148 always_inline are inlined. This mode is used after detecting cycle during
149 flattening.
150
151 In SIZE mode, only functions that reduce function body size after inlining
152 are inlined, this is used during early inlining.
153
154 in ALL mode, everything is inlined. This is used during flattening. */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
198
199 /* Estimate self time of the function after inlining WHAT into TO. */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
209
210 /* E is expected to be an edge being inlined. Clone destination node of
211 the edge and redirect it to the new clone.
212 DUPLICATE is used for bookkeeping on whether we are actually creating new
213 clones or re-using node originally representing out-of-line function call.
214 */
215 void
216 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
217 bool update_original)
218 {
219 HOST_WIDE_INT peak;
220
221 if (duplicate)
222 {
223 /* We may eliminate the need for out-of-line copy to be output.
224 In that case just go ahead and re-use it. */
225 if (!e->callee->callers->next_caller
226 && !e->callee->needed
227 && !cgraph_new_nodes)
228 {
229 gcc_assert (!e->callee->global.inlined_to);
230 if (e->callee->analyzed)
231 {
232 overall_size -= e->callee->global.size;
233 nfunctions_inlined++;
234 }
235 duplicate = false;
236 }
237 else
238 {
239 struct cgraph_node *n;
240 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
241 update_original);
242 cgraph_redirect_edge_callee (e, n);
243 }
244 }
245
246 if (e->caller->global.inlined_to)
247 e->callee->global.inlined_to = e->caller->global.inlined_to;
248 else
249 e->callee->global.inlined_to = e->caller;
250 e->callee->global.stack_frame_offset
251 = e->caller->global.stack_frame_offset
252 + inline_summary (e->caller)->estimated_self_stack_size;
253 peak = e->callee->global.stack_frame_offset
254 + inline_summary (e->callee)->estimated_self_stack_size;
255 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
256 e->callee->global.inlined_to->global.estimated_stack_size = peak;
257
258 /* Recursively clone all bodies. */
259 for (e = e->callee->callees; e; e = e->next_callee)
260 if (!e->inline_failed)
261 cgraph_clone_inlined_nodes (e, duplicate, update_original);
262 }
263
264 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
265 specify whether profile of original function should be updated. If any new
266 indirect edges are discovered in the process, add them to NEW_EDGES, unless
267 it is NULL. Return true iff any new callgraph edges were discovered as a
268 result of inlining. */
269
270 static bool
271 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
272 VEC (cgraph_edge_p, heap) **new_edges)
273 {
274 int old_size = 0, new_size = 0;
275 struct cgraph_node *to = NULL, *what;
276 struct cgraph_edge *curr = e;
277 int freq;
278 bool duplicate = false;
279 int orig_size = e->callee->global.size;
280
281 gcc_assert (e->inline_failed);
282 e->inline_failed = CIF_OK;
283
284 if (!e->callee->global.inlined)
285 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
286 e->callee->global.inlined = true;
287
288 if (e->callee->callers->next_caller
289 || e->callee->needed)
290 duplicate = true;
291 cgraph_clone_inlined_nodes (e, true, update_original);
292
293 what = e->callee;
294
295 freq = e->frequency;
296 /* Now update size of caller and all functions caller is inlined into. */
297 for (;e && !e->inline_failed; e = e->caller->callers)
298 {
299 to = e->caller;
300 old_size = e->caller->global.size;
301 new_size = cgraph_estimate_size_after_inlining (1, to, what);
302 to->global.size = new_size;
303 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
304 }
305 gcc_assert (what->global.inlined_to == to);
306 if (new_size > old_size)
307 overall_size += new_size - old_size;
308 if (!duplicate)
309 overall_size -= orig_size;
310 ncalls_inlined++;
311
312 if (flag_indirect_inlining)
313 return ipa_propagate_indirect_call_infos (curr, new_edges);
314 else
315 return false;
316 }
317
318 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
319 Return following unredirected edge in the list of callers
320 of EDGE->CALLEE */
321
322 static struct cgraph_edge *
323 cgraph_mark_inline (struct cgraph_edge *edge)
324 {
325 struct cgraph_node *to = edge->caller;
326 struct cgraph_node *what = edge->callee;
327 struct cgraph_edge *e, *next;
328
329 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
330 /* Look for all calls, mark them inline and clone recursively
331 all inlined functions. */
332 for (e = what->callers; e; e = next)
333 {
334 next = e->next_caller;
335 if (e->caller == to && e->inline_failed)
336 {
337 cgraph_mark_inline_edge (e, true, NULL);
338 if (e == edge)
339 edge = next;
340 }
341 }
342
343 return edge;
344 }
345
346 /* Estimate the growth caused by inlining NODE into all callees. */
347
348 static int
349 cgraph_estimate_growth (struct cgraph_node *node)
350 {
351 int growth = 0;
352 struct cgraph_edge *e;
353 bool self_recursive = false;
354
355 if (node->global.estimated_growth != INT_MIN)
356 return node->global.estimated_growth;
357
358 for (e = node->callers; e; e = e->next_caller)
359 {
360 if (e->caller == node)
361 self_recursive = true;
362 if (e->inline_failed)
363 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
364 - e->caller->global.size);
365 }
366
367 /* ??? Wrong for non-trivially self recursive functions or cases where
368 we decide to not inline for different reasons, but it is not big deal
369 as in that case we will keep the body around, but we will also avoid
370 some inlining. */
371 if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
372 growth -= node->global.size;
373
374 node->global.estimated_growth = growth;
375 return growth;
376 }
377
378 /* Return false when inlining WHAT into TO is not good idea
379 as it would cause too large growth of function bodies.
380 When ONE_ONLY is true, assume that only one call site is going
381 to be inlined, otherwise figure out how many call sites in
382 TO calls WHAT and verify that all can be inlined.
383 */
384
385 static bool
386 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
387 cgraph_inline_failed_t *reason, bool one_only)
388 {
389 int times = 0;
390 struct cgraph_edge *e;
391 int newsize;
392 int limit;
393 HOST_WIDE_INT stack_size_limit, inlined_stack;
394
395 if (one_only)
396 times = 1;
397 else
398 for (e = to->callees; e; e = e->next_callee)
399 if (e->callee == what)
400 times++;
401
402 if (to->global.inlined_to)
403 to = to->global.inlined_to;
404
405 /* When inlining large function body called once into small function,
406 take the inlined function as base for limiting the growth. */
407 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
408 limit = inline_summary (to)->self_size;
409 else
410 limit = inline_summary (what)->self_size;
411
412 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
413
414 /* Check the size after inlining against the function limits. But allow
415 the function to shrink if it went over the limits by forced inlining. */
416 newsize = cgraph_estimate_size_after_inlining (times, to, what);
417 if (newsize >= to->global.size
418 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
419 && newsize > limit)
420 {
421 if (reason)
422 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
423 return false;
424 }
425
426 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
427
428 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
429
430 inlined_stack = (to->global.stack_frame_offset
431 + inline_summary (to)->estimated_self_stack_size
432 + what->global.estimated_stack_size);
433 if (inlined_stack > stack_size_limit
434 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
435 {
436 if (reason)
437 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
438 return false;
439 }
440 return true;
441 }
442
443 /* Return true when function N is small enough to be inlined. */
444
445 static bool
446 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
447 {
448 tree decl = n->decl;
449
450 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
451 {
452 if (reason)
453 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
454 return false;
455 }
456
457 if (!n->analyzed)
458 {
459 if (reason)
460 *reason = CIF_BODY_NOT_AVAILABLE;
461 return false;
462 }
463
464 if (DECL_DECLARED_INLINE_P (decl))
465 {
466 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
467 {
468 if (reason)
469 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
470 return false;
471 }
472 }
473 else
474 {
475 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
476 {
477 if (reason)
478 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
479 return false;
480 }
481 }
482
483 return true;
484 }
485
486 /* Return true when inlining WHAT would create recursive inlining.
487 We call recursive inlining all cases where same function appears more than
488 once in the single recursion nest path in the inline graph. */
489
490 static bool
491 cgraph_recursive_inlining_p (struct cgraph_node *to,
492 struct cgraph_node *what,
493 cgraph_inline_failed_t *reason)
494 {
495 bool recursive;
496 if (to->global.inlined_to)
497 recursive = what->decl == to->global.inlined_to->decl;
498 else
499 recursive = what->decl == to->decl;
500 /* Marking recursive function inline has sane semantic and thus we should
501 not warn on it. */
502 if (recursive && reason)
503 *reason = (what->local.disregard_inline_limits
504 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
505 return recursive;
506 }
507
508 /* A cost model driving the inlining heuristics in a way so the edges with
509 smallest badness are inlined first. After each inlining is performed
510 the costs of all caller edges of nodes affected are recomputed so the
511 metrics may accurately depend on values such as number of inlinable callers
512 of the function or function body size. */
513
514 static int
515 cgraph_edge_badness (struct cgraph_edge *edge)
516 {
517 gcov_type badness;
518 int growth =
519 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
520
521 growth -= edge->caller->global.size;
522
523 /* Always prefer inlining saving code size. */
524 if (growth <= 0)
525 badness = INT_MIN - growth;
526
527 /* When profiling is available, base priorities -(#calls / growth).
528 So we optimize for overall number of "executed" inlined calls. */
529 else if (max_count)
530 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
531 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
532
533 /* When function local profile is available, base priorities on
534 growth / frequency, so we optimize for overall frequency of inlined
535 calls. This is not too accurate since while the call might be frequent
536 within function, the function itself is infrequent.
537
538 Other objective to optimize for is number of different calls inlined.
539 We add the estimated growth after inlining all functions to bias the
540 priorities slightly in this direction (so fewer times called functions
541 of the same size gets priority). */
542 else if (flag_guess_branch_prob)
543 {
544 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
545 badness = growth * 10000;
546 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
547 / (edge->callee->global.time + 1) + 1, 100);
548
549
550 /* Decrease badness if call is nested. */
551 /* Compress the range so we don't overflow. */
552 if (div > 10000)
553 div = 10000 + ceil_log2 (div) - 8;
554 if (div < 1)
555 div = 1;
556 if (badness > 0)
557 badness /= div;
558 badness += cgraph_estimate_growth (edge->callee);
559 if (badness > INT_MAX)
560 badness = INT_MAX;
561 }
562 /* When function local profile is not available or it does not give
563 useful information (ie frequency is zero), base the cost on
564 loop nest and overall size growth, so we optimize for overall number
565 of functions fully inlined in program. */
566 else
567 {
568 int nest = MIN (edge->loop_nest, 8);
569 badness = cgraph_estimate_growth (edge->callee) * 256;
570
571 /* Decrease badness if call is nested. */
572 if (badness > 0)
573 badness >>= nest;
574 else
575 {
576 badness <<= nest;
577 }
578 }
579 /* Make recursive inlining happen always after other inlining is done. */
580 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
581 return badness + 1;
582 else
583 return badness;
584 }
585
586 /* Recompute heap nodes for each of caller edge. */
587
588 static void
589 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
590 bitmap updated_nodes)
591 {
592 struct cgraph_edge *edge;
593 cgraph_inline_failed_t failed_reason;
594
595 if (!node->local.inlinable || node->local.disregard_inline_limits
596 || node->global.inlined_to)
597 return;
598 if (bitmap_bit_p (updated_nodes, node->uid))
599 return;
600 bitmap_set_bit (updated_nodes, node->uid);
601 node->global.estimated_growth = INT_MIN;
602
603 if (!node->local.inlinable)
604 return;
605 /* Prune out edges we won't inline into anymore. */
606 if (!cgraph_default_inline_p (node, &failed_reason))
607 {
608 for (edge = node->callers; edge; edge = edge->next_caller)
609 if (edge->aux)
610 {
611 fibheap_delete_node (heap, (fibnode_t) edge->aux);
612 edge->aux = NULL;
613 if (edge->inline_failed)
614 edge->inline_failed = failed_reason;
615 }
616 return;
617 }
618
619 for (edge = node->callers; edge; edge = edge->next_caller)
620 if (edge->inline_failed)
621 {
622 int badness = cgraph_edge_badness (edge);
623 if (edge->aux)
624 {
625 fibnode_t n = (fibnode_t) edge->aux;
626 gcc_assert (n->data == edge);
627 if (n->key == badness)
628 continue;
629
630 /* fibheap_replace_key only increase the keys. */
631 if (fibheap_replace_key (heap, n, badness))
632 continue;
633 fibheap_delete_node (heap, (fibnode_t) edge->aux);
634 }
635 edge->aux = fibheap_insert (heap, badness, edge);
636 }
637 }
638
639 /* Recompute heap nodes for each of caller edges of each of callees. */
640
641 static void
642 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
643 bitmap updated_nodes)
644 {
645 struct cgraph_edge *e;
646 node->global.estimated_growth = INT_MIN;
647
648 for (e = node->callees; e; e = e->next_callee)
649 if (e->inline_failed)
650 update_caller_keys (heap, e->callee, updated_nodes);
651 else if (!e->inline_failed)
652 update_callee_keys (heap, e->callee, updated_nodes);
653 }
654
655 /* Enqueue all recursive calls from NODE into priority queue depending on
656 how likely we want to recursively inline the call. */
657
658 static void
659 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
660 fibheap_t heap)
661 {
662 static int priority;
663 struct cgraph_edge *e;
664 for (e = where->callees; e; e = e->next_callee)
665 if (e->callee == node)
666 {
667 /* When profile feedback is available, prioritize by expected number
668 of calls. Without profile feedback we maintain simple queue
669 to order candidates via recursive depths. */
670 fibheap_insert (heap,
671 !max_count ? priority++
672 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
673 e);
674 }
675 for (e = where->callees; e; e = e->next_callee)
676 if (!e->inline_failed)
677 lookup_recursive_calls (node, e->callee, heap);
678 }
679
680 /* Decide on recursive inlining: in the case function has recursive calls,
681 inline until body size reaches given argument. If any new indirect edges
682 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
683 is NULL. */
684
685 static bool
686 cgraph_decide_recursive_inlining (struct cgraph_node *node,
687 VEC (cgraph_edge_p, heap) **new_edges)
688 {
689 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
690 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
691 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
692 fibheap_t heap;
693 struct cgraph_edge *e;
694 struct cgraph_node *master_clone, *next;
695 int depth = 0;
696 int n = 0;
697
698 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
699 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
700 return false;
701
702 if (DECL_DECLARED_INLINE_P (node->decl))
703 {
704 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
705 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
706 }
707
708 /* Make sure that function is small enough to be considered for inlining. */
709 if (!max_depth
710 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
711 return false;
712 heap = fibheap_new ();
713 lookup_recursive_calls (node, node, heap);
714 if (fibheap_empty (heap))
715 {
716 fibheap_delete (heap);
717 return false;
718 }
719
720 if (dump_file)
721 fprintf (dump_file,
722 " Performing recursive inlining on %s\n",
723 cgraph_node_name (node));
724
725 /* We need original clone to copy around. */
726 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
727 master_clone->needed = true;
728 for (e = master_clone->callees; e; e = e->next_callee)
729 if (!e->inline_failed)
730 cgraph_clone_inlined_nodes (e, true, false);
731
732 /* Do the inlining and update list of recursive call during process. */
733 while (!fibheap_empty (heap)
734 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
735 <= limit))
736 {
737 struct cgraph_edge *curr
738 = (struct cgraph_edge *) fibheap_extract_min (heap);
739 struct cgraph_node *cnode;
740
741 depth = 1;
742 for (cnode = curr->caller;
743 cnode->global.inlined_to; cnode = cnode->callers->caller)
744 if (node->decl == curr->callee->decl)
745 depth++;
746 if (depth > max_depth)
747 {
748 if (dump_file)
749 fprintf (dump_file,
750 " maximal depth reached\n");
751 continue;
752 }
753
754 if (max_count)
755 {
756 if (!cgraph_maybe_hot_edge_p (curr))
757 {
758 if (dump_file)
759 fprintf (dump_file, " Not inlining cold call\n");
760 continue;
761 }
762 if (curr->count * 100 / node->count < probability)
763 {
764 if (dump_file)
765 fprintf (dump_file,
766 " Probability of edge is too small\n");
767 continue;
768 }
769 }
770
771 if (dump_file)
772 {
773 fprintf (dump_file,
774 " Inlining call of depth %i", depth);
775 if (node->count)
776 {
777 fprintf (dump_file, " called approx. %.2f times per call",
778 (double)curr->count / node->count);
779 }
780 fprintf (dump_file, "\n");
781 }
782 cgraph_redirect_edge_callee (curr, master_clone);
783 cgraph_mark_inline_edge (curr, false, new_edges);
784 lookup_recursive_calls (node, curr->callee, heap);
785 n++;
786 }
787 if (!fibheap_empty (heap) && dump_file)
788 fprintf (dump_file, " Recursive inlining growth limit met.\n");
789
790 fibheap_delete (heap);
791 if (dump_file)
792 fprintf (dump_file,
793 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
794 master_clone->global.size, node->global.size,
795 master_clone->global.time, node->global.time);
796
797 /* Remove master clone we used for inlining. We rely that clones inlined
798 into master clone gets queued just before master clone so we don't
799 need recursion. */
800 for (node = cgraph_nodes; node != master_clone;
801 node = next)
802 {
803 next = node->next;
804 if (node->global.inlined_to == master_clone)
805 cgraph_remove_node (node);
806 }
807 cgraph_remove_node (master_clone);
808 /* FIXME: Recursive inlining actually reduces number of calls of the
809 function. At this place we should probably walk the function and
810 inline clones and compensate the counts accordingly. This probably
811 doesn't matter much in practice. */
812 return n > 0;
813 }
814
815 /* Set inline_failed for all callers of given function to REASON. */
816
817 static void
818 cgraph_set_inline_failed (struct cgraph_node *node,
819 cgraph_inline_failed_t reason)
820 {
821 struct cgraph_edge *e;
822
823 if (dump_file)
824 fprintf (dump_file, "Inlining failed: %s\n",
825 cgraph_inline_failed_string (reason));
826 for (e = node->callers; e; e = e->next_caller)
827 if (e->inline_failed)
828 e->inline_failed = reason;
829 }
830
831 /* Given whole compilation unit estimate of INSNS, compute how large we can
832 allow the unit to grow. */
833 static int
834 compute_max_insns (int insns)
835 {
836 int max_insns = insns;
837 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
838 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
839
840 return ((HOST_WIDEST_INT) max_insns
841 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
842 }
843
844 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
845 static void
846 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
847 {
848 while (VEC_length (cgraph_edge_p, new_edges) > 0)
849 {
850 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
851
852 gcc_assert (!edge->aux);
853 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
854 }
855 }
856
857
858 /* We use greedy algorithm for inlining of small functions:
859 All inline candidates are put into prioritized heap based on estimated
860 growth of the overall number of instructions and then update the estimates.
861
862 INLINED and INLINED_CALEES are just pointers to arrays large enough
863 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
864
865 static void
866 cgraph_decide_inlining_of_small_functions (void)
867 {
868 struct cgraph_node *node;
869 struct cgraph_edge *edge;
870 cgraph_inline_failed_t failed_reason;
871 fibheap_t heap = fibheap_new ();
872 bitmap updated_nodes = BITMAP_ALLOC (NULL);
873 int min_size, max_size;
874 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
875
876 if (flag_indirect_inlining)
877 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
878
879 if (dump_file)
880 fprintf (dump_file, "\nDeciding on smaller functions:\n");
881
882 /* Put all inline candidates into the heap. */
883
884 for (node = cgraph_nodes; node; node = node->next)
885 {
886 if (!node->local.inlinable || !node->callers
887 || node->local.disregard_inline_limits)
888 continue;
889 if (dump_file)
890 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
891
892 node->global.estimated_growth = INT_MIN;
893 if (!cgraph_default_inline_p (node, &failed_reason))
894 {
895 cgraph_set_inline_failed (node, failed_reason);
896 continue;
897 }
898
899 for (edge = node->callers; edge; edge = edge->next_caller)
900 if (edge->inline_failed)
901 {
902 gcc_assert (!edge->aux);
903 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
904 }
905 }
906
907 max_size = compute_max_insns (overall_size);
908 min_size = overall_size;
909
910 while (overall_size <= max_size
911 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
912 {
913 int old_size = overall_size;
914 struct cgraph_node *where;
915 int growth =
916 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
917 cgraph_inline_failed_t not_good = CIF_OK;
918
919 growth -= edge->caller->global.size;
920
921 if (dump_file)
922 {
923 fprintf (dump_file,
924 "\nConsidering %s with %i size\n",
925 cgraph_node_name (edge->callee),
926 edge->callee->global.size);
927 fprintf (dump_file,
928 " to be inlined into %s in %s:%i\n"
929 " Estimated growth after inlined into all callees is %+i insns.\n"
930 " Estimated badness is %i, frequency %.2f.\n",
931 cgraph_node_name (edge->caller),
932 gimple_filename ((const_gimple) edge->call_stmt),
933 gimple_lineno ((const_gimple) edge->call_stmt),
934 cgraph_estimate_growth (edge->callee),
935 cgraph_edge_badness (edge),
936 edge->frequency / (double)CGRAPH_FREQ_BASE);
937 if (edge->count)
938 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
939 }
940 gcc_assert (edge->aux);
941 edge->aux = NULL;
942 if (!edge->inline_failed)
943 continue;
944
945 /* When not having profile info ready we don't weight by any way the
946 position of call in procedure itself. This means if call of
947 function A from function B seems profitable to inline, the recursive
948 call of function A in inline copy of A in B will look profitable too
949 and we end up inlining until reaching maximal function growth. This
950 is not good idea so prohibit the recursive inlining.
951
952 ??? When the frequencies are taken into account we might not need this
953 restriction.
954
955 We need to be cureful here, in some testcases, e.g. directivec.c in
956 libcpp, we can estimate self recursive function to have negative growth
957 for inlining completely.
958 */
959 if (!edge->count)
960 {
961 where = edge->caller;
962 while (where->global.inlined_to)
963 {
964 if (where->decl == edge->callee->decl)
965 break;
966 where = where->callers->caller;
967 }
968 if (where->global.inlined_to)
969 {
970 edge->inline_failed
971 = (edge->callee->local.disregard_inline_limits
972 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
973 if (dump_file)
974 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
975 continue;
976 }
977 }
978
979 if (!cgraph_maybe_hot_edge_p (edge))
980 not_good = CIF_UNLIKELY_CALL;
981 if (!flag_inline_functions
982 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
983 not_good = CIF_NOT_DECLARED_INLINED;
984 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
985 not_good = CIF_OPTIMIZING_FOR_SIZE;
986 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
987 {
988 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
989 &edge->inline_failed))
990 {
991 edge->inline_failed = not_good;
992 if (dump_file)
993 fprintf (dump_file, " inline_failed:%s.\n",
994 cgraph_inline_failed_string (edge->inline_failed));
995 }
996 continue;
997 }
998 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
999 {
1000 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1001 &edge->inline_failed))
1002 {
1003 if (dump_file)
1004 fprintf (dump_file, " inline_failed:%s.\n",
1005 cgraph_inline_failed_string (edge->inline_failed));
1006 }
1007 continue;
1008 }
1009 if (!tree_can_inline_p (edge))
1010 {
1011 if (dump_file)
1012 fprintf (dump_file, " inline_failed:%s.\n",
1013 cgraph_inline_failed_string (edge->inline_failed));
1014 continue;
1015 }
1016 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1017 &edge->inline_failed))
1018 {
1019 where = edge->caller;
1020 if (where->global.inlined_to)
1021 where = where->global.inlined_to;
1022 if (!cgraph_decide_recursive_inlining (where,
1023 flag_indirect_inlining
1024 ? &new_indirect_edges : NULL))
1025 continue;
1026 if (flag_indirect_inlining)
1027 add_new_edges_to_heap (heap, new_indirect_edges);
1028 update_callee_keys (heap, where, updated_nodes);
1029 }
1030 else
1031 {
1032 struct cgraph_node *callee;
1033 if (gimple_call_cannot_inline_p (edge->call_stmt)
1034 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1035 &edge->inline_failed, true))
1036 {
1037 if (dump_file)
1038 fprintf (dump_file, " Not inlining into %s:%s.\n",
1039 cgraph_node_name (edge->caller),
1040 cgraph_inline_failed_string (edge->inline_failed));
1041 continue;
1042 }
1043 callee = edge->callee;
1044 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1045 if (flag_indirect_inlining)
1046 add_new_edges_to_heap (heap, new_indirect_edges);
1047
1048 update_callee_keys (heap, callee, updated_nodes);
1049 }
1050 where = edge->caller;
1051 if (where->global.inlined_to)
1052 where = where->global.inlined_to;
1053
1054 /* Our profitability metric can depend on local properties
1055 such as number of inlinable calls and size of the function body.
1056 After inlining these properties might change for the function we
1057 inlined into (since it's body size changed) and for the functions
1058 called by function we inlined (since number of it inlinable callers
1059 might change). */
1060 update_caller_keys (heap, where, updated_nodes);
1061 bitmap_clear (updated_nodes);
1062
1063 if (dump_file)
1064 {
1065 fprintf (dump_file,
1066 " Inlined into %s which now has size %i and self time %i,"
1067 "net change of %+i.\n",
1068 cgraph_node_name (edge->caller),
1069 edge->caller->global.time,
1070 edge->caller->global.size,
1071 overall_size - old_size);
1072 }
1073 if (min_size > overall_size)
1074 {
1075 min_size = overall_size;
1076 max_size = compute_max_insns (min_size);
1077
1078 if (dump_file)
1079 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1080 }
1081 }
1082 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1083 {
1084 gcc_assert (edge->aux);
1085 edge->aux = NULL;
1086 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1087 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1088 &edge->inline_failed))
1089 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1090 }
1091
1092 if (new_indirect_edges)
1093 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1094 fibheap_delete (heap);
1095 BITMAP_FREE (updated_nodes);
1096 }
1097
1098 /* Decide on the inlining. We do so in the topological order to avoid
1099 expenses on updating data structures. */
1100
1101 static unsigned int
1102 cgraph_decide_inlining (void)
1103 {
1104 struct cgraph_node *node;
1105 int nnodes;
1106 struct cgraph_node **order =
1107 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1108 int old_size = 0;
1109 int i;
1110 bool redo_always_inline = true;
1111 int initial_size = 0;
1112
1113 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1114
1115 max_count = 0;
1116 max_benefit = 0;
1117 for (node = cgraph_nodes; node; node = node->next)
1118 if (node->analyzed)
1119 {
1120 struct cgraph_edge *e;
1121
1122 gcc_assert (inline_summary (node)->self_size == node->global.size);
1123 gcc_assert (node->needed || node->reachable);
1124 initial_size += node->global.size;
1125 for (e = node->callees; e; e = e->next_callee)
1126 if (max_count < e->count)
1127 max_count = e->count;
1128 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1129 max_benefit = inline_summary (node)->time_inlining_benefit;
1130 }
1131 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1132 overall_size = initial_size;
1133
1134 nnodes = cgraph_postorder (order);
1135
1136 if (dump_file)
1137 fprintf (dump_file,
1138 "\nDeciding on inlining. Starting with size %i.\n",
1139 initial_size);
1140
1141 for (node = cgraph_nodes; node; node = node->next)
1142 node->aux = 0;
1143
1144 if (dump_file)
1145 fprintf (dump_file, "\nInlining always_inline functions:\n");
1146
1147 /* In the first pass mark all always_inline edges. Do this with a priority
1148 so none of our later choices will make this impossible. */
1149 while (redo_always_inline)
1150 {
1151 redo_always_inline = false;
1152 for (i = nnodes - 1; i >= 0; i--)
1153 {
1154 struct cgraph_edge *e, *next;
1155
1156 node = order[i];
1157
1158 /* Handle nodes to be flattened, but don't update overall unit
1159 size. */
1160 if (lookup_attribute ("flatten",
1161 DECL_ATTRIBUTES (node->decl)) != NULL)
1162 {
1163 if (dump_file)
1164 fprintf (dump_file,
1165 "Flattening %s\n", cgraph_node_name (node));
1166 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1167 }
1168
1169 if (!node->local.disregard_inline_limits)
1170 continue;
1171 if (dump_file)
1172 fprintf (dump_file,
1173 "\nConsidering %s size:%i (always inline)\n",
1174 cgraph_node_name (node), node->global.size);
1175 old_size = overall_size;
1176 for (e = node->callers; e; e = next)
1177 {
1178 next = e->next_caller;
1179 if (!e->inline_failed
1180 || gimple_call_cannot_inline_p (e->call_stmt))
1181 continue;
1182 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1183 &e->inline_failed))
1184 continue;
1185 if (!tree_can_inline_p (e))
1186 continue;
1187 if (cgraph_mark_inline_edge (e, true, NULL))
1188 redo_always_inline = true;
1189 if (dump_file)
1190 fprintf (dump_file,
1191 " Inlined into %s which now has size %i.\n",
1192 cgraph_node_name (e->caller),
1193 e->caller->global.size);
1194 }
1195 /* Inlining self recursive function might introduce new calls to
1196 themselves we didn't see in the loop above. Fill in the proper
1197 reason why inline failed. */
1198 for (e = node->callers; e; e = e->next_caller)
1199 if (e->inline_failed)
1200 e->inline_failed = CIF_RECURSIVE_INLINING;
1201 if (dump_file)
1202 fprintf (dump_file,
1203 " Inlined for a net change of %+i size.\n",
1204 overall_size - old_size);
1205 }
1206 }
1207
1208 cgraph_decide_inlining_of_small_functions ();
1209
1210 if (flag_inline_functions_called_once)
1211 {
1212 if (dump_file)
1213 fprintf (dump_file, "\nDeciding on functions called once:\n");
1214
1215 /* And finally decide what functions are called once. */
1216 for (i = nnodes - 1; i >= 0; i--)
1217 {
1218 node = order[i];
1219
1220 if (node->callers
1221 && !node->callers->next_caller
1222 && !node->needed
1223 && node->local.inlinable
1224 && node->callers->inline_failed
1225 && node->callers->caller != node
1226 && node->callers->caller->global.inlined_to != node
1227 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1228 && !DECL_EXTERNAL (node->decl)
1229 && !DECL_COMDAT (node->decl))
1230 {
1231 old_size = overall_size;
1232 if (dump_file)
1233 {
1234 fprintf (dump_file,
1235 "\nConsidering %s size %i.\n",
1236 cgraph_node_name (node), node->global.size);
1237 fprintf (dump_file,
1238 " Called once from %s %i insns.\n",
1239 cgraph_node_name (node->callers->caller),
1240 node->callers->caller->global.size);
1241 }
1242
1243 if (cgraph_check_inline_limits (node->callers->caller, node,
1244 NULL, false))
1245 {
1246 cgraph_mark_inline (node->callers);
1247 if (dump_file)
1248 fprintf (dump_file,
1249 " Inlined into %s which now has %i size"
1250 " for a net change of %+i size.\n",
1251 cgraph_node_name (node->callers->caller),
1252 node->callers->caller->global.size,
1253 overall_size - old_size);
1254 }
1255 else
1256 {
1257 if (dump_file)
1258 fprintf (dump_file,
1259 " Inline limit reached, not inlined.\n");
1260 }
1261 }
1262 }
1263 }
1264
1265 /* Free ipa-prop structures if they are no longer needed. */
1266 if (flag_indirect_inlining)
1267 free_all_ipa_structures_after_iinln ();
1268
1269 if (dump_file)
1270 fprintf (dump_file,
1271 "\nInlined %i calls, eliminated %i functions, "
1272 "size %i turned to %i size.\n\n",
1273 ncalls_inlined, nfunctions_inlined, initial_size,
1274 overall_size);
1275 free (order);
1276 return 0;
1277 }
1278
1279 /* Try to inline edge E from incremental inliner. MODE specifies mode
1280 of inliner.
1281
1282 We are detecting cycles by storing mode of inliner into cgraph_node last
1283 time we visited it in the recursion. In general when mode is set, we have
1284 recursive inlining, but as an special case, we want to try harder inline
1285 ALWAYS_INLINE functions: consider callgraph a->b->c->b, with a being
1286 flatten, b being always inline. Flattening 'a' will collapse
1287 a->b->c before hitting cycle. To accommodate always inline, we however
1288 need to inline a->b->c->b.
1289
1290 So after hitting cycle first time, we switch into ALWAYS_INLINE mode and
1291 stop inlining only after hitting ALWAYS_INLINE in ALWAY_INLINE mode. */
1292 static bool
1293 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1294 {
1295 struct cgraph_node *callee = e->callee;
1296 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1297 bool always_inline = e->callee->local.disregard_inline_limits;
1298 bool inlined = false;
1299
1300 /* We've hit cycle? */
1301 if (callee_mode)
1302 {
1303 /* It is first time we see it and we are not in ALWAY_INLINE only
1304 mode yet. and the function in question is always_inline. */
1305 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1306 {
1307 if (dump_file)
1308 {
1309 indent_to (dump_file, depth);
1310 fprintf (dump_file,
1311 "Hit cycle in %s, switching to always inline only.\n",
1312 cgraph_node_name (callee));
1313 }
1314 mode = INLINE_ALWAYS_INLINE;
1315 }
1316 /* Otherwise it is time to give up. */
1317 else
1318 {
1319 if (dump_file)
1320 {
1321 indent_to (dump_file, depth);
1322 fprintf (dump_file,
1323 "Not inlining %s into %s to avoid cycle.\n",
1324 cgraph_node_name (callee),
1325 cgraph_node_name (e->caller));
1326 }
1327 e->inline_failed = (e->callee->local.disregard_inline_limits
1328 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1329 return false;
1330 }
1331 }
1332
1333 callee->aux = (void *)(size_t) mode;
1334 if (dump_file)
1335 {
1336 indent_to (dump_file, depth);
1337 fprintf (dump_file, " Inlining %s into %s.\n",
1338 cgraph_node_name (e->callee),
1339 cgraph_node_name (e->caller));
1340 }
1341 if (e->inline_failed)
1342 {
1343 cgraph_mark_inline (e);
1344
1345 /* In order to fully inline always_inline functions, we need to
1346 recurse here, since the inlined functions might not be processed by
1347 incremental inlining at all yet.
1348
1349 Also flattening needs to be done recursively. */
1350
1351 if (mode == INLINE_ALL || always_inline)
1352 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1353 inlined = true;
1354 }
1355 callee->aux = (void *)(size_t) callee_mode;
1356 return inlined;
1357 }
1358
1359 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1360 in leaf functions. */
1361 static bool
1362 leaf_node_p (struct cgraph_node *n)
1363 {
1364 struct cgraph_edge *e;
1365 for (e = n->callees; e; e = e->next_callee)
1366 if (!DECL_BUILT_IN (e->callee->decl)
1367 || (!TREE_READONLY (e->callee->decl)
1368 || DECL_PURE_P (e->callee->decl)))
1369 return false;
1370 return true;
1371 }
1372
1373 /* Decide on the inlining. We do so in the topological order to avoid
1374 expenses on updating data structures.
1375 DEPTH is depth of recursion, used only for debug output. */
1376
1377 static bool
1378 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1379 enum inlining_mode mode,
1380 int depth)
1381 {
1382 struct cgraph_edge *e;
1383 bool inlined = false;
1384 cgraph_inline_failed_t failed_reason;
1385 enum inlining_mode old_mode;
1386
1387 #ifdef ENABLE_CHECKING
1388 verify_cgraph_node (node);
1389 #endif
1390
1391 old_mode = (enum inlining_mode) (size_t)node->aux;
1392
1393 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1394 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1395 {
1396 if (dump_file)
1397 {
1398 indent_to (dump_file, depth);
1399 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1400 }
1401 mode = INLINE_ALL;
1402 }
1403
1404 node->aux = (void *)(size_t) mode;
1405
1406 /* First of all look for always inline functions. */
1407 if (mode != INLINE_SIZE_NORECURSIVE)
1408 for (e = node->callees; e; e = e->next_callee)
1409 {
1410 if (!e->callee->local.disregard_inline_limits
1411 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1412 continue;
1413 if (gimple_call_cannot_inline_p (e->call_stmt))
1414 continue;
1415 /* When the edge is already inlined, we just need to recurse into
1416 it in order to fully flatten the leaves. */
1417 if (!e->inline_failed && mode == INLINE_ALL)
1418 {
1419 inlined |= try_inline (e, mode, depth);
1420 continue;
1421 }
1422 if (dump_file)
1423 {
1424 indent_to (dump_file, depth);
1425 fprintf (dump_file,
1426 "Considering to always inline inline candidate %s.\n",
1427 cgraph_node_name (e->callee));
1428 }
1429 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1430 {
1431 if (dump_file)
1432 {
1433 indent_to (dump_file, depth);
1434 fprintf (dump_file, "Not inlining: recursive call.\n");
1435 }
1436 continue;
1437 }
1438 if (!tree_can_inline_p (e))
1439 {
1440 if (dump_file)
1441 {
1442 indent_to (dump_file, depth);
1443 fprintf (dump_file,
1444 "Not inlining: %s",
1445 cgraph_inline_failed_string (e->inline_failed));
1446 }
1447 continue;
1448 }
1449 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1450 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1451 {
1452 if (dump_file)
1453 {
1454 indent_to (dump_file, depth);
1455 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1456 }
1457 continue;
1458 }
1459 if (!e->callee->analyzed)
1460 {
1461 if (dump_file)
1462 {
1463 indent_to (dump_file, depth);
1464 fprintf (dump_file,
1465 "Not inlining: Function body no longer available.\n");
1466 }
1467 continue;
1468 }
1469 inlined |= try_inline (e, mode, depth);
1470 }
1471
1472 /* Now do the automatic inlining. */
1473 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1474 for (e = node->callees; e; e = e->next_callee)
1475 {
1476 int allowed_growth = 0;
1477 if (!e->callee->local.inlinable
1478 || !e->inline_failed
1479 || e->callee->local.disregard_inline_limits)
1480 continue;
1481 if (dump_file)
1482 fprintf (dump_file, "Considering inline candidate %s.\n",
1483 cgraph_node_name (e->callee));
1484 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1485 {
1486 if (dump_file)
1487 {
1488 indent_to (dump_file, depth);
1489 fprintf (dump_file, "Not inlining: recursive call.\n");
1490 }
1491 continue;
1492 }
1493 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1494 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1495 {
1496 if (dump_file)
1497 {
1498 indent_to (dump_file, depth);
1499 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1500 }
1501 continue;
1502 }
1503
1504 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1505 && optimize_function_for_speed_p (cfun))
1506 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1507
1508 /* When the function body would grow and inlining the function won't
1509 eliminate the need for offline copy of the function, don't inline.
1510 */
1511 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1512 || (!flag_inline_functions
1513 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1514 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1515 > e->caller->global.size + allowed_growth)
1516 && cgraph_estimate_growth (e->callee) > allowed_growth)
1517 {
1518 if (dump_file)
1519 {
1520 indent_to (dump_file, depth);
1521 fprintf (dump_file,
1522 "Not inlining: code size would grow by %i.\n",
1523 cgraph_estimate_size_after_inlining (1, e->caller,
1524 e->callee)
1525 - e->caller->global.size);
1526 }
1527 continue;
1528 }
1529 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1530 false)
1531 || gimple_call_cannot_inline_p (e->call_stmt))
1532 {
1533 if (dump_file)
1534 {
1535 indent_to (dump_file, depth);
1536 fprintf (dump_file, "Not inlining: %s.\n",
1537 cgraph_inline_failed_string (e->inline_failed));
1538 }
1539 continue;
1540 }
1541 if (!e->callee->analyzed)
1542 {
1543 if (dump_file)
1544 {
1545 indent_to (dump_file, depth);
1546 fprintf (dump_file,
1547 "Not inlining: Function body no longer available.\n");
1548 }
1549 continue;
1550 }
1551 if (!tree_can_inline_p (e))
1552 {
1553 if (dump_file)
1554 {
1555 indent_to (dump_file, depth);
1556 fprintf (dump_file,
1557 "Not inlining: %s.",
1558 cgraph_inline_failed_string (e->inline_failed));
1559 }
1560 continue;
1561 }
1562 if (cgraph_default_inline_p (e->callee, &failed_reason))
1563 inlined |= try_inline (e, mode, depth);
1564 }
1565 node->aux = (void *)(size_t) old_mode;
1566 return inlined;
1567 }
1568
1569 /* Because inlining might remove no-longer reachable nodes, we need to
1570 keep the array visible to garbage collector to avoid reading collected
1571 out nodes. */
1572 static int nnodes;
1573 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1574
1575 /* Do inlining of small functions. Doing so early helps profiling and other
1576 passes to be somewhat more effective and avoids some code duplication in
1577 later real inlining pass for testcases with very many function calls. */
1578 static unsigned int
1579 cgraph_early_inlining (void)
1580 {
1581 struct cgraph_node *node = cgraph_node (current_function_decl);
1582 unsigned int todo = 0;
1583 int iterations = 0;
1584
1585 if (sorrycount || errorcount)
1586 return 0;
1587 while (cgraph_decide_inlining_incrementally (node,
1588 iterations
1589 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
1590 && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
1591 {
1592 timevar_push (TV_INTEGRATION);
1593 todo |= optimize_inline_calls (current_function_decl);
1594 iterations++;
1595 timevar_pop (TV_INTEGRATION);
1596 }
1597 if (dump_file)
1598 fprintf (dump_file, "Iterations: %i\n", iterations);
1599 cfun->always_inline_functions_inlined = true;
1600 return todo;
1601 }
1602
1603 /* When inlining shall be performed. */
1604 static bool
1605 cgraph_gate_early_inlining (void)
1606 {
1607 return flag_early_inlining;
1608 }
1609
1610 struct gimple_opt_pass pass_early_inline =
1611 {
1612 {
1613 GIMPLE_PASS,
1614 "einline", /* name */
1615 cgraph_gate_early_inlining, /* gate */
1616 cgraph_early_inlining, /* execute */
1617 NULL, /* sub */
1618 NULL, /* next */
1619 0, /* static_pass_number */
1620 TV_INLINE_HEURISTICS, /* tv_id */
1621 0, /* properties_required */
1622 0, /* properties_provided */
1623 0, /* properties_destroyed */
1624 0, /* todo_flags_start */
1625 TODO_dump_func /* todo_flags_finish */
1626 }
1627 };
1628
1629 /* When inlining shall be performed. */
1630 static bool
1631 cgraph_gate_ipa_early_inlining (void)
1632 {
1633 return (flag_early_inlining
1634 && (flag_branch_probabilities || flag_test_coverage
1635 || profile_arc_flag));
1636 }
1637
1638 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1639 before tree profiling so we have stand alone IPA pass for doing so. */
1640 struct simple_ipa_opt_pass pass_ipa_early_inline =
1641 {
1642 {
1643 SIMPLE_IPA_PASS,
1644 "einline_ipa", /* name */
1645 cgraph_gate_ipa_early_inlining, /* gate */
1646 NULL, /* execute */
1647 NULL, /* sub */
1648 NULL, /* next */
1649 0, /* static_pass_number */
1650 TV_INLINE_HEURISTICS, /* tv_id */
1651 0, /* properties_required */
1652 0, /* properties_provided */
1653 0, /* properties_destroyed */
1654 0, /* todo_flags_start */
1655 TODO_dump_cgraph /* todo_flags_finish */
1656 }
1657 };
1658
1659 /* See if statement might disappear after inlining. We are not terribly
1660 sophisficated, basically looking for simple abstraction penalty wrappers. */
1661
1662 static bool
1663 likely_eliminated_by_inlining_p (gimple stmt)
1664 {
1665 enum gimple_code code = gimple_code (stmt);
1666 switch (code)
1667 {
1668 case GIMPLE_RETURN:
1669 return true;
1670 case GIMPLE_ASSIGN:
1671 if (gimple_num_ops (stmt) != 2)
1672 return false;
1673
1674 /* Casts of parameters, loads from parameters passed by reference
1675 and stores to return value or parameters are probably free after
1676 inlining. */
1677 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1678 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1679 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1680 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1681 {
1682 tree rhs = gimple_assign_rhs1 (stmt);
1683 tree lhs = gimple_assign_lhs (stmt);
1684 tree inner_rhs = rhs;
1685 tree inner_lhs = lhs;
1686 bool rhs_free = false;
1687 bool lhs_free = false;
1688
1689 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1690 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1691 while (handled_component_p (inner_rhs)
1692 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1693 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1694
1695
1696 if (TREE_CODE (inner_rhs) == PARM_DECL
1697 || (TREE_CODE (inner_rhs) == SSA_NAME
1698 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1699 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1700 rhs_free = true;
1701 if (rhs_free && is_gimple_reg (lhs))
1702 lhs_free = true;
1703 if (((TREE_CODE (inner_lhs) == PARM_DECL
1704 || (TREE_CODE (inner_lhs) == SSA_NAME
1705 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1706 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1707 && inner_lhs != lhs)
1708 || TREE_CODE (inner_lhs) == RESULT_DECL
1709 || (TREE_CODE (inner_lhs) == SSA_NAME
1710 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1711 lhs_free = true;
1712 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1713 rhs_free = true;
1714 if (lhs_free && rhs_free)
1715 return true;
1716 }
1717 return false;
1718 default:
1719 return false;
1720 }
1721 }
1722
1723 /* Compute function body size parameters for NODE. */
1724
1725 static void
1726 estimate_function_body_sizes (struct cgraph_node *node)
1727 {
1728 gcov_type time = 0;
1729 gcov_type time_inlining_benefit = 0;
1730 int size = 0;
1731 int size_inlining_benefit = 0;
1732 basic_block bb;
1733 gimple_stmt_iterator bsi;
1734 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1735 tree arg;
1736 int freq;
1737 tree funtype = TREE_TYPE (node->decl);
1738
1739 if (dump_file)
1740 {
1741 fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));
1742 }
1743
1744 gcc_assert (my_function && my_function->cfg);
1745 FOR_EACH_BB_FN (bb, my_function)
1746 {
1747 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1748 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1749 {
1750 gimple stmt = gsi_stmt (bsi);
1751 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1752 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1753
1754 if (dump_file)
1755 {
1756 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1757 freq, this_size, this_time);
1758 print_gimple_stmt (dump_file, stmt, 0, 0);
1759 }
1760 this_time *= freq;
1761 time += this_time;
1762 size += this_size;
1763 if (likely_eliminated_by_inlining_p (stmt))
1764 {
1765 size_inlining_benefit += this_size;
1766 time_inlining_benefit += this_time;
1767 if (dump_file)
1768 fprintf (dump_file, " Likely eliminated\n");
1769 }
1770 gcc_assert (time >= 0);
1771 gcc_assert (size >= 0);
1772 }
1773 }
1774 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1775 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1776 / CGRAPH_FREQ_BASE);
1777 if (dump_file)
1778 {
1779 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1780 (int)time, (int)time_inlining_benefit,
1781 size, size_inlining_benefit);
1782 }
1783 time_inlining_benefit += eni_time_weights.call_cost;
1784 size_inlining_benefit += eni_size_weights.call_cost;
1785 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1786 {
1787 int cost = estimate_move_cost (TREE_TYPE (funtype));
1788 time_inlining_benefit += cost;
1789 size_inlining_benefit += cost;
1790 }
1791 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1792 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1793 {
1794 int cost = estimate_move_cost (TREE_TYPE (arg));
1795 time_inlining_benefit += cost;
1796 size_inlining_benefit += cost;
1797 }
1798 if (time_inlining_benefit > MAX_TIME)
1799 time_inlining_benefit = MAX_TIME;
1800 if (time > MAX_TIME)
1801 time = MAX_TIME;
1802 inline_summary (node)->self_time = time;
1803 inline_summary (node)->self_size = size;
1804 if (dump_file)
1805 {
1806 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1807 (int)time, (int)time_inlining_benefit,
1808 size, size_inlining_benefit);
1809 }
1810 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1811 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1812 }
1813
1814 /* Compute parameters of functions used by inliner. */
1815 unsigned int
1816 compute_inline_parameters (struct cgraph_node *node)
1817 {
1818 HOST_WIDE_INT self_stack_size;
1819
1820 gcc_assert (!node->global.inlined_to);
1821
1822 /* Estimate the stack size for the function. But not at -O0
1823 because estimated_stack_frame_size is a quadratic problem. */
1824 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1825 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1826 node->global.estimated_stack_size = self_stack_size;
1827 node->global.stack_frame_offset = 0;
1828
1829 /* Can this function be inlined at all? */
1830 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1831 if (node->local.inlinable && !node->local.disregard_inline_limits)
1832 node->local.disregard_inline_limits
1833 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1834 estimate_function_body_sizes (node);
1835 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1836 node->global.time = inline_summary (node)->self_time;
1837 node->global.size = inline_summary (node)->self_size;
1838 return 0;
1839 }
1840
1841
1842 /* Compute parameters of functions used by inliner using
1843 current_function_decl. */
1844 static unsigned int
1845 compute_inline_parameters_for_current (void)
1846 {
1847 compute_inline_parameters (cgraph_node (current_function_decl));
1848 return 0;
1849 }
1850
1851 struct gimple_opt_pass pass_inline_parameters =
1852 {
1853 {
1854 GIMPLE_PASS,
1855 "inline_param", /* name */
1856 NULL, /* gate */
1857 compute_inline_parameters_for_current,/* execute */
1858 NULL, /* sub */
1859 NULL, /* next */
1860 0, /* static_pass_number */
1861 TV_INLINE_HEURISTICS, /* tv_id */
1862 0, /* properties_required */
1863 0, /* properties_provided */
1864 0, /* properties_destroyed */
1865 0, /* todo_flags_start */
1866 0 /* todo_flags_finish */
1867 }
1868 };
1869
1870 /* This function performs intraprocedural analyzis in NODE that is required to
1871 inline indirect calls. */
1872 static void
1873 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1874 {
1875 struct cgraph_edge *cs;
1876
1877 if (!flag_ipa_cp)
1878 {
1879 ipa_initialize_node_params (node);
1880 ipa_detect_param_modifications (node);
1881 }
1882 ipa_analyze_params_uses (node);
1883
1884 if (!flag_ipa_cp)
1885 for (cs = node->callees; cs; cs = cs->next_callee)
1886 {
1887 ipa_count_arguments (cs);
1888 ipa_compute_jump_functions (cs);
1889 }
1890
1891 if (dump_file)
1892 {
1893 ipa_print_node_params (dump_file, node);
1894 ipa_print_node_jump_functions (dump_file, node);
1895 }
1896 }
1897
1898 /* Note function body size. */
1899 static void
1900 analyze_function (struct cgraph_node *node)
1901 {
1902 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1903 current_function_decl = node->decl;
1904
1905 compute_inline_parameters (node);
1906 if (flag_indirect_inlining)
1907 inline_indirect_intraprocedural_analysis (node);
1908
1909 current_function_decl = NULL;
1910 pop_cfun ();
1911 }
1912
1913 /* Called when new function is inserted to callgraph late. */
1914 static void
1915 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1916 {
1917 analyze_function (node);
1918 }
1919
1920 /* Note function body size. */
1921 static void
1922 inline_generate_summary (void)
1923 {
1924 struct cgraph_node *node;
1925
1926 function_insertion_hook_holder =
1927 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1928
1929 if (flag_indirect_inlining)
1930 {
1931 ipa_register_cgraph_hooks ();
1932 ipa_check_create_node_params ();
1933 ipa_check_create_edge_args ();
1934 }
1935
1936 for (node = cgraph_nodes; node; node = node->next)
1937 if (node->analyzed)
1938 analyze_function (node);
1939
1940 return;
1941 }
1942
1943 /* Apply inline plan to function. */
1944 static unsigned int
1945 inline_transform (struct cgraph_node *node)
1946 {
1947 unsigned int todo = 0;
1948 struct cgraph_edge *e;
1949
1950 /* We might need the body of this function so that we can expand
1951 it inline somewhere else. */
1952 if (cgraph_preserve_function_body_p (node->decl))
1953 save_inline_function_body (node);
1954
1955 for (e = node->callees; e; e = e->next_callee)
1956 if (!e->inline_failed || warn_inline)
1957 break;
1958
1959 if (e)
1960 {
1961 timevar_push (TV_INTEGRATION);
1962 todo = optimize_inline_calls (current_function_decl);
1963 timevar_pop (TV_INTEGRATION);
1964 }
1965 cfun->always_inline_functions_inlined = true;
1966 cfun->after_inlining = true;
1967 return todo | execute_fixup_cfg ();
1968 }
1969
1970 struct ipa_opt_pass_d pass_ipa_inline =
1971 {
1972 {
1973 IPA_PASS,
1974 "inline", /* name */
1975 NULL, /* gate */
1976 cgraph_decide_inlining, /* execute */
1977 NULL, /* sub */
1978 NULL, /* next */
1979 0, /* static_pass_number */
1980 TV_INLINE_HEURISTICS, /* tv_id */
1981 0, /* properties_required */
1982 0, /* properties_provided */
1983 0, /* properties_destroyed */
1984 TODO_remove_functions, /* todo_flags_finish */
1985 TODO_dump_cgraph | TODO_dump_func
1986 | TODO_remove_functions /* todo_flags_finish */
1987 },
1988 inline_generate_summary, /* generate_summary */
1989 NULL, /* write_summary */
1990 NULL, /* read_summary */
1991 NULL, /* function_read_summary */
1992 0, /* TODOs */
1993 inline_transform, /* function_transform */
1994 NULL, /* variable_transform */
1995 };
1996
1997
1998 #include "gt-ipa-inline.h"