1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Inlining decision heuristics
23
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
28
29 There are three major parts of this file:
30
31 cgraph_mark_inline implementation
32
33 This function marks a given call as inlined and performs the necessary
34 modifications of the cgraph (production of the clones and updating of
35 overall statistics).
36
37 inlining heuristics limits
38
39 These functions check whether a particular inlining is allowed by the
40 limits specified by the user (allowed function growth, overall unit
41 growth and so on).
42
43 inlining heuristics
44
45 This is the implementation of the IPA pass aiming to get as much benefit
46 from inlining as possible while obeying the limits checked above.
47 
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace it with a more
50 complicated implementation in the future. The rest of the inlining code
51 acts as a library aimed at modifying the callgraph and verifying that
52 the limits on code size growth are respected.
53
54 To mark a given call inlined, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
57 
58 The heuristics implement a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
61
62 cgraph_decide_inlining implements the heuristics taking the whole
63 callgraph into account, while cgraph_decide_inlining_incrementally
64 considers only one function at a time and is used by the early inliner.
65
66 The inliner itself is split into several passes:
67
68 pass_inline_parameters
69
70 This pass computes the local properties of functions used by the inliner:
71 estimated function body size, whether the function is inlinable at all,
72 and stack frame consumption.
73 
74 Before executing any of the inliner passes, this local pass has to be
75 applied to each function in the callgraph (i.e. run as a subpass of some
76 earlier IPA pass). The results are made out of date by any optimization
77 applied to the function body.
78
79 pass_early_inlining
80
81 A simple local inlining pass that inlines callees into the current
82 function. This pass does no global whole-compilation-unit analysis, so
83 when it is allowed to do inlining that expands code size, it might result
84 in unbounded growth of the whole unit.
85 
86 The pass is run during conversion into SSA form. Only functions already
87 converted into SSA form are inlined, so the conversion must happen in
88 topological order on the callgraph (which is maintained by the pass
89 manager). Functions are early-optimized after inlining, so the early
90 inliner sees the unoptimized function itself, but all considered callees
91 are already optimized, allowing it to unfold the C++ abstraction penalty
92 effectively and cheaply.
93
94 pass_ipa_early_inlining
95
96 With profiling, early inlining is also necessary to reduce
97 instrumentation costs on programs with a high abstraction penalty (which
98 make many redundant calls). This can't happen in parallel with early
99 optimization and profile instrumentation, because we would end up
100 re-instrumenting already instrumented function bodies we brought in via
101 inlining.
102 
103 To avoid this, this pass is executed as an IPA pass before profiling. It
104 is a simple wrapper around pass_early_inlining and ensures inlining runs first.
105
106 pass_ipa_inline
107
108 This is the main pass, implementing a simple greedy algorithm for inlining
109 small functions (which results in overall growth of the compilation unit)
110 and for inlining functions called once. The pass computes just the so-called
111 inline plan (a representation of the inlining to be done in the callgraph)
112 and, unlike early inlining, it does not perform the inlining itself.
113
114 pass_apply_inline
115
116 This pass performs the actual inlining according to pass_ipa_inline on a
117 given function. Possibly the function body before inlining is saved when
118 it is needed for further inlining later.
119 */
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
144
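/* Upper bound used below to clamp the estimated self time of a function;
   see cgraph_estimate_time_after_inlining.  */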
145 #define MAX_TIME 1000000000
146
147 /* Modes the incremental inliner operates in:
148 
149 In ALWAYS_INLINE mode, only functions marked
150 always_inline are inlined. This mode is used after detecting a cycle
151 during flattening.
152 
153 In SIZE mode, only functions that reduce function body size after inlining
154 are inlined; this is used during early inlining.
155 
156 In ALL mode, everything is inlined. This is used during flattening. */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
163 };
164
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
168
169
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
175
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
178
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
181 {
182 return &node->local.inline_summary;
183 }
184
185 /* Estimate self time of the function after inlining WHAT into TO. */
186
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
190 {
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
200 }
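/* Illustrative example of the time estimate above (the numbers are made up):
   if WHAT has estimated time 100 with a time inlining benefit of 20 and the
   call frequency equals CGRAPH_FREQ_BASE, the inlined body contributes
   (100 - 20) * 1 = 80 units on top of TO's own time, clamped to
   [0, MAX_TIME].  */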
201
202 /* Estimate self size of the function after inlining WHAT into TO. */
203
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
207 {
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
212 }
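/* For example (illustrative numbers only): with WHAT's global size 40, a size
   inlining benefit of 10 and TIMES == 2, the estimate is (40 - 10) * 2 on top
   of TO's current size, i.e. 60 units of growth in TO.  */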
213
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
216
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
220 {
221 struct cgraph_edge *e;
222
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
227 {
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
234 }
235 }
236
237 /* E is expected to be an edge being inlined. Clone the destination node of
238 the edge and redirect it to the new clone.
239 DUPLICATE is used for bookkeeping on whether we are actually creating new
240 clones or re-using the node originally representing the out-of-line
241 function call. */
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
245 {
246 HOST_WIDE_INT peak;
247
248 if (duplicate)
249 {
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
254 /* Don't reuse if more than one function shares a comdat group.
255 If the other function(s) are needed, we need to emit even
256 this function out of line. */
257 && !e->callee->same_comdat_group
258 && !cgraph_new_nodes)
259 {
260 gcc_assert (!e->callee->global.inlined_to);
261 if (e->callee->analyzed)
262 {
263 overall_size -= e->callee->global.size;
264 nfunctions_inlined++;
265 }
266 duplicate = false;
267 e->callee->local.externally_visible = false;
268 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
269 }
270 else
271 {
272 struct cgraph_node *n;
273 n = cgraph_clone_node (e->callee, e->callee->decl,
274 e->count, e->frequency, e->loop_nest,
275 update_original, NULL);
276 cgraph_redirect_edge_callee (e, n);
277 }
278 }
279
280 if (e->caller->global.inlined_to)
281 e->callee->global.inlined_to = e->caller->global.inlined_to;
282 else
283 e->callee->global.inlined_to = e->caller;
284 e->callee->global.stack_frame_offset
285 = e->caller->global.stack_frame_offset
286 + inline_summary (e->caller)->estimated_self_stack_size;
287 peak = e->callee->global.stack_frame_offset
288 + inline_summary (e->callee)->estimated_self_stack_size;
289 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
290 e->callee->global.inlined_to->global.estimated_stack_size = peak;
291 cgraph_propagate_frequency (e->callee);
292
293 /* Recursively clone all bodies. */
294 for (e = e->callee->callees; e; e = e->next_callee)
295 if (!e->inline_failed)
296 cgraph_clone_inlined_nodes (e, duplicate, update_original);
297 }
298
299 /* Mark edge E as inlined and update the callgraph accordingly. UPDATE_ORIGINAL
300 specifies whether the profile of the original function should be updated. If
301 any new indirect edges are discovered in the process, add them to NEW_EDGES,
302 unless it is NULL. Return true iff any new callgraph edges were discovered
303 as a result of inlining. */
304
305 static bool
306 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
307 VEC (cgraph_edge_p, heap) **new_edges)
308 {
309 int old_size = 0, new_size = 0;
310 struct cgraph_node *to = NULL, *what;
311 struct cgraph_edge *curr = e;
312 int freq;
313
314 gcc_assert (e->inline_failed);
315 e->inline_failed = CIF_OK;
316 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
317
318 cgraph_clone_inlined_nodes (e, true, update_original);
319
320 what = e->callee;
321
322 freq = e->frequency;
323 /* Now update the size of the caller and of all functions the caller is inlined into. */
324 for (;e && !e->inline_failed; e = e->caller->callers)
325 {
326 to = e->caller;
327 old_size = e->caller->global.size;
328 new_size = cgraph_estimate_size_after_inlining (1, to, what);
329 to->global.size = new_size;
330 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
331 }
332 gcc_assert (what->global.inlined_to == to);
333 if (new_size > old_size)
334 overall_size += new_size - old_size;
335 ncalls_inlined++;
336
337 /* FIXME: We should remove the optimize check after we ensure we never run
338 IPA passes when not optimizing. */
339 if (flag_indirect_inlining && optimize)
340 return ipa_propagate_indirect_call_infos (curr, new_edges);
341 else
342 return false;
343 }
344
345 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
346
347 static void
348 cgraph_mark_inline (struct cgraph_edge *edge)
349 {
350 struct cgraph_node *to = edge->caller;
351 struct cgraph_node *what = edge->callee;
352 struct cgraph_edge *e, *next;
353
354 gcc_assert (!edge->call_stmt_cannot_inline_p);
355 /* Look for all calls, mark them inline and clone recursively
356 all inlined functions. */
357 for (e = what->callers; e; e = next)
358 {
359 next = e->next_caller;
360 if (e->caller == to && e->inline_failed)
361 {
362 cgraph_mark_inline_edge (e, true, NULL);
363 if (e == edge)
364 edge = next;
365 }
366 }
367 }
368
369 /* Estimate the growth caused by inlining NODE into all of its callers. */
370
371 static int
372 cgraph_estimate_growth (struct cgraph_node *node)
373 {
374 int growth = 0;
375 struct cgraph_edge *e;
376 bool self_recursive = false;
377
378 if (node->global.estimated_growth != INT_MIN)
379 return node->global.estimated_growth;
380
381 for (e = node->callers; e; e = e->next_caller)
382 {
383 if (e->caller == node)
384 self_recursive = true;
385 if (e->inline_failed)
386 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
387 - e->caller->global.size);
388 }
389
390 /* ??? Wrong for non-trivially self-recursive functions or cases where
391 we decide not to inline for different reasons, but it is not a big deal
392 as in that case we will keep the body around, but we will also avoid
393 some inlining. */
394 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
395 && !DECL_EXTERNAL (node->decl) && !self_recursive)
396 growth -= node->global.size;
397
398 node->global.estimated_growth = growth;
399 return growth;
400 }
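/* In other words, the cached estimate above is the sum, over all
   not-yet-inlined callers, of the size added to that caller, minus NODE's own
   size when inlining everywhere lets the offline copy disappear. A negative
   result therefore means inlining NODE at every call site is expected to
   shrink the unit overall.  */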
401
402 /* Return false when inlining WHAT into TO is not a good idea
403 as it would cause too large a growth of function bodies.
404 When ONE_ONLY is true, assume that only one call site is going
405 to be inlined; otherwise figure out how many call sites in
406 TO call WHAT and verify that all of them can be inlined.
407 */
408
409 static bool
410 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
411 cgraph_inline_failed_t *reason, bool one_only)
412 {
413 int times = 0;
414 struct cgraph_edge *e;
415 int newsize;
416 int limit;
417 HOST_WIDE_INT stack_size_limit, inlined_stack;
418
419 if (one_only)
420 times = 1;
421 else
422 for (e = to->callees; e; e = e->next_callee)
423 if (e->callee == what)
424 times++;
425
426 if (to->global.inlined_to)
427 to = to->global.inlined_to;
428
429 /* When inlining a large function body called once into a small function,
430 take the inlined function as the base for limiting the growth. */
431 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
432 limit = inline_summary (to)->self_size;
433 else
434 limit = inline_summary (what)->self_size;
435
436 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
437
438 /* Check the size after inlining against the function limits. But allow
439 the function to shrink if it went over the limits by forced inlining. */
440 newsize = cgraph_estimate_size_after_inlining (times, to, what);
441 if (newsize >= to->global.size
442 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
443 && newsize > limit)
444 {
445 if (reason)
446 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
447 return false;
448 }
449
450 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
451
452 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
453
454 inlined_stack = (to->global.stack_frame_offset
455 + inline_summary (to)->estimated_self_stack_size
456 + what->global.estimated_stack_size);
457 if (inlined_stack > stack_size_limit
458 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
459 {
460 if (reason)
461 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
462 return false;
463 }
464 return true;
465 }
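/* Illustrative example of the size check above (parameter values chosen only
   for the example): with self sizes 300 for TO and 50 for WHAT and
   --param large-function-growth=100, the limit is 300 + 300 * 100 / 100 = 600;
   the inlining is rejected only when the estimated new size reaches TO's
   current size and exceeds both --param large-function-insns and that limit.  */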
466
467 /* Return true when function N is small enough to be inlined. */
468
469 static bool
470 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
471 {
472 tree decl = n->decl;
473
474 if (n->local.disregard_inline_limits)
475 return true;
476
477 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
478 {
479 if (reason)
480 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
481 return false;
482 }
483
484 if (!n->analyzed)
485 {
486 if (reason)
487 *reason = CIF_BODY_NOT_AVAILABLE;
488 return false;
489 }
490
491 if (DECL_DECLARED_INLINE_P (decl))
492 {
493 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
494 {
495 if (reason)
496 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
497 return false;
498 }
499 }
500 else
501 {
502 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
503 {
504 if (reason)
505 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
506 return false;
507 }
508 }
509
510 return true;
511 }
512
513 /* Return true when inlining WHAT would create recursive inlining.
514 We call recursive inlining all cases where the same function appears more
515 than once in a single recursion nest path in the inline graph. */
516
517 static inline bool
518 cgraph_recursive_inlining_p (struct cgraph_node *to,
519 struct cgraph_node *what,
520 cgraph_inline_failed_t *reason)
521 {
522 bool recursive;
523 if (to->global.inlined_to)
524 recursive = what->decl == to->global.inlined_to->decl;
525 else
526 recursive = what->decl == to->decl;
527 /* Marking a recursive function inline has sane semantics and thus we should
528 not warn about it. */
529 if (recursive && reason)
530 *reason = (what->local.disregard_inline_limits
531 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
532 return recursive;
533 }
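/* Note that the check above compares WHAT's decl against the root of TO's
   inline clone tree (or against TO itself when TO is not an inline clone),
   i.e. only recursion back into the function we are ultimately inlining into
   is treated as recursive here.  */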
534
535 /* A cost model driving the inlining heuristics in such a way that edges with
536 the smallest badness are inlined first. After each inlining is performed,
537 the costs of all caller edges of the affected nodes are recomputed, so the
538 metrics may accurately depend on values such as the number of inlinable
539 callers of the function or the function body size. */
540
541 static int
542 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
543 {
544 gcov_type badness;
545 int growth =
546 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
547 - edge->caller->global.size);
548
549 if (edge->callee->local.disregard_inline_limits)
550 return INT_MIN;
551
552 if (dump)
553 {
554 fprintf (dump_file, " Badness calculation for %s -> %s\n",
555 cgraph_node_name (edge->caller),
556 cgraph_node_name (edge->callee));
557 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
558 growth,
559 edge->callee->global.time,
560 inline_summary (edge->callee)->time_inlining_benefit,
561 edge->callee->global.size,
562 inline_summary (edge->callee)->size_inlining_benefit);
563 }
564
565 /* Always prefer inlining that saves code size. */
566 if (growth <= 0)
567 {
568 badness = INT_MIN - growth;
569 if (dump)
570 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
571 growth);
572 }
573
574 /* When profiling is available, base priorities on -(#calls / growth),
575 so we optimize for the overall number of "executed" inlined calls. */
576 else if (max_count)
577 {
578 badness =
579 ((int)
580 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
581 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
582 if (dump)
583 {
584 fprintf (dump_file,
585 " %i (relative %f): profile info. Relative count %f"
586 " * Relative benefit %f\n",
587 (int) badness, (double) badness / INT_MIN,
588 (double) edge->count / max_count,
589 (double) (inline_summary (edge->callee)->
590 time_inlining_benefit + 1) / (max_benefit + 1));
591 }
592 }
593
594 /* When a function-local profile is available, base priorities on
595 growth / frequency, so we optimize for the overall frequency of inlined
596 calls. This is not too accurate since, while the call might be frequent
597 within the function, the function itself may be infrequent.
598 
599 Another objective to optimize for is the number of different calls inlined.
600 We add the estimated overall growth of inlining the callee everywhere to
601 bias the priorities slightly in this direction (so less frequently called
602 functions of the same size get priority). */
603 else if (flag_guess_branch_prob)
604 {
605 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
606 int benefitperc;
607 int growth_for_all;
608 badness = growth * 10000;
609 benefitperc =
610 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
611 (edge->callee->global.time + 1) +1, 100);
612 div *= benefitperc;
613
614
615 /* Decrease badness if call is nested. */
616 /* Compress the range so we don't overflow. */
617 if (div > 10000)
618 div = 10000 + ceil_log2 (div) - 8;
619 if (div < 1)
620 div = 1;
621 if (badness > 0)
622 badness /= div;
623 growth_for_all = cgraph_estimate_growth (edge->callee);
624 badness += growth_for_all;
625 if (badness > INT_MAX)
626 badness = INT_MAX;
627 if (dump)
628 {
629 fprintf (dump_file,
630 " %i: guessed profile. frequency %i, overall growth %i,"
631 " benefit %i%%, divisor %i\n",
632 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
633 }
634 }
635 /* When a function-local profile is not available or it does not give
636 useful information (i.e. the frequency is zero), base the cost on
637 loop nest and overall size growth, so we optimize for the overall number
638 of functions fully inlined in the program. */
639 else
640 {
641 int nest = MIN (edge->loop_nest, 8);
642 badness = cgraph_estimate_growth (edge->callee) * 256;
643
644 /* Decrease badness if call is nested. */
645 if (badness > 0)
646 badness >>= nest;
647 else
648 {
649 badness <<= nest;
650 }
651 if (dump)
652 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
653 nest);
654 }
655
656 /* Ensure that we did not overflow in all the fixed point math above. */
657 gcc_assert (badness >= INT_MIN);
658 gcc_assert (badness <= INT_MAX - 1);
659 /* Make recursive inlining happen always after other inlining is done. */
660 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
661 return badness + 1;
662 else
663 return badness;
664 }
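/* Recap of the badness computed above (lower is better, i.e. inlined sooner):
   shrinking edges get keys near INT_MIN; with real profile data the key is
   roughly -(relative call count * relative benefit) / growth; with guessed
   profiles it is growth * 10000 divided by the frequency and benefit
   percentage, plus the estimated overall growth; with no profile at all it is
   the overall growth scaled by 256 and shifted by the loop nest. As an
   illustration with made-up numbers for the guessed-profile case: growth 20,
   frequency equal to CGRAPH_FREQ_BASE and a 50% benefit give
   div = 101 * 50 = 5050 and badness 200000 / 5050 = 39 before the
   overall-growth term is added.  */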
665
666 /* Recompute badness of EDGE and update its key in HEAP if needed. */
667 static void
668 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
669 {
670 int badness = cgraph_edge_badness (edge, false);
671 if (edge->aux)
672 {
673 fibnode_t n = (fibnode_t) edge->aux;
674 gcc_checking_assert (n->data == edge);
675
676 /* fibheap_replace_key only decreases the keys.
677 When we increase the key we do not update the heap
678 and instead re-insert the element once it becomes
679 a minimum of the heap. */
680 if (badness < n->key)
681 {
682 fibheap_replace_key (heap, n, badness);
683 gcc_checking_assert (n->key == badness);
684 }
685 }
686 else
687 edge->aux = fibheap_insert (heap, badness, edge);
688 }
689
690 /* Recompute heap nodes for each caller edge of NODE. */
691
692 static void
693 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
694 bitmap updated_nodes)
695 {
696 struct cgraph_edge *edge;
697 cgraph_inline_failed_t failed_reason;
698
699 if (!node->local.inlinable
700 || node->global.inlined_to)
701 return;
702 if (!bitmap_set_bit (updated_nodes, node->uid))
703 return;
704 node->global.estimated_growth = INT_MIN;
705
706 /* See if there is something to do. */
707 for (edge = node->callers; edge; edge = edge->next_caller)
708 if (edge->inline_failed)
709 break;
710 if (!edge)
711 return;
712 /* Prune out edges we won't inline into anymore. */
713 if (!cgraph_default_inline_p (node, &failed_reason))
714 {
715 for (; edge; edge = edge->next_caller)
716 if (edge->aux)
717 {
718 fibheap_delete_node (heap, (fibnode_t) edge->aux);
719 edge->aux = NULL;
720 if (edge->inline_failed)
721 edge->inline_failed = failed_reason;
722 }
723 return;
724 }
725
726 for (; edge; edge = edge->next_caller)
727 if (edge->inline_failed)
728 update_edge_key (heap, edge);
729 }
730
731 /* Recompute heap nodes for each uninlined call.
732 This is used when we know that edge badnesses are only going to increase
733 (we introduced a new call site) and thus all we need to do is insert the
734 newly created edges into the heap. */
735
736 static void
737 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
738 bitmap updated_nodes)
739 {
740 struct cgraph_edge *e = node->callees;
741 node->global.estimated_growth = INT_MIN;
742
743 if (!e)
744 return;
745 while (true)
746 if (!e->inline_failed && e->callee->callees)
747 e = e->callee->callees;
748 else
749 {
750 if (e->inline_failed
751 && e->callee->local.inlinable
752 && !bitmap_bit_p (updated_nodes, e->callee->uid))
753 {
754 node->global.estimated_growth = INT_MIN;
755 /* If function becomes uninlinable, we need to remove it from the heap. */
756 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
757 update_caller_keys (heap, e->callee, updated_nodes);
758 else
759 /* Otherwise update just edge E. */
760 update_edge_key (heap, e);
761 }
762 if (e->next_callee)
763 e = e->next_callee;
764 else
765 {
766 do
767 {
768 if (e->caller == node)
769 return;
770 e = e->caller->callers;
771 }
772 while (!e->next_callee);
773 e = e->next_callee;
774 }
775 }
776 }
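/* The loop above is a non-recursive walk over the tree of inlined callees:
   it descends into e->callee->callees whenever the edge is already inlined,
   otherwise processes the edge, and backs up through the caller chain when a
   callee list is exhausted, stopping once it returns to NODE.  */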
777
778 /* Recompute heap nodes for the caller edges of each callee of NODE.
779 Walk recursively into all inline clones. */
780
781 static void
782 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
783 bitmap updated_nodes)
784 {
785 struct cgraph_edge *e = node->callees;
786 node->global.estimated_growth = INT_MIN;
787
788 if (!e)
789 return;
790 while (true)
791 if (!e->inline_failed && e->callee->callees)
792 e = e->callee->callees;
793 else
794 {
795 if (e->inline_failed)
796 update_caller_keys (heap, e->callee, updated_nodes);
797 if (e->next_callee)
798 e = e->next_callee;
799 else
800 {
801 do
802 {
803 if (e->caller == node)
804 return;
805 e = e->caller->callers;
806 }
807 while (!e->next_callee);
808 e = e->next_callee;
809 }
810 }
811 }
812
813 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
814 how much we want to recursively inline the call. */
815
816 static void
817 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
818 fibheap_t heap)
819 {
820 static int priority;
821 struct cgraph_edge *e;
822 for (e = where->callees; e; e = e->next_callee)
823 if (e->callee == node)
824 {
825 /* When profile feedback is available, prioritize by the expected number
826 of calls. Without profile feedback we maintain a simple queue
827 to order candidates by recursion depth. */
828 fibheap_insert (heap,
829 !max_count ? priority++
830 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
831 e);
832 }
833 for (e = where->callees; e; e = e->next_callee)
834 if (!e->inline_failed)
835 lookup_recursive_calls (node, e->callee, heap);
836 }
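/* With profile feedback the heap key above is the negated call count scaled
   down into roughly a 24-bit range, so the hottest recursive call sites are
   extracted first; without feedback the monotonically increasing PRIORITY
   counter simply yields first-in, first-out order.  */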
837
838 /* Decide on recursive inlining: in case the function has recursive calls,
839 inline until the body size reaches the given limit. If any new indirect
840 edges are discovered in the process, add them to *NEW_EDGES, unless
841 NEW_EDGES is NULL. */
842
843 static bool
844 cgraph_decide_recursive_inlining (struct cgraph_node *node,
845 VEC (cgraph_edge_p, heap) **new_edges)
846 {
847 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
848 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
849 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
850 fibheap_t heap;
851 struct cgraph_edge *e;
852 struct cgraph_node *master_clone, *next;
853 int depth = 0;
854 int n = 0;
855
856 /* It does not make sense to recursively inline always-inline functions
857 as we are going to sorry() on the remaining calls anyway. */
858 if (node->local.disregard_inline_limits
859 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
860 return false;
861
862 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
863 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
864 return false;
865
866 if (DECL_DECLARED_INLINE_P (node->decl))
867 {
868 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
869 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
870 }
871
872 /* Make sure that function is small enough to be considered for inlining. */
873 if (!max_depth
874 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
875 return false;
876 heap = fibheap_new ();
877 lookup_recursive_calls (node, node, heap);
878 if (fibheap_empty (heap))
879 {
880 fibheap_delete (heap);
881 return false;
882 }
883
884 if (dump_file)
885 fprintf (dump_file,
886 " Performing recursive inlining on %s\n",
887 cgraph_node_name (node));
888
889 /* We need original clone to copy around. */
890 master_clone = cgraph_clone_node (node, node->decl,
891 node->count, CGRAPH_FREQ_BASE, 1,
892 false, NULL);
893 master_clone->needed = true;
894 for (e = master_clone->callees; e; e = e->next_callee)
895 if (!e->inline_failed)
896 cgraph_clone_inlined_nodes (e, true, false);
897
898 /* Do the inlining and update the list of recursive calls during the process. */
899 while (!fibheap_empty (heap)
900 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
901 <= limit))
902 {
903 struct cgraph_edge *curr
904 = (struct cgraph_edge *) fibheap_extract_min (heap);
905 struct cgraph_node *cnode;
906
907 depth = 1;
908 for (cnode = curr->caller;
909 cnode->global.inlined_to; cnode = cnode->callers->caller)
910 if (node->decl == curr->callee->decl)
911 depth++;
912 if (depth > max_depth)
913 {
914 if (dump_file)
915 fprintf (dump_file,
916 " maximal depth reached\n");
917 continue;
918 }
919
920 if (max_count)
921 {
922 if (!cgraph_maybe_hot_edge_p (curr))
923 {
924 if (dump_file)
925 fprintf (dump_file, " Not inlining cold call\n");
926 continue;
927 }
928 if (curr->count * 100 / node->count < probability)
929 {
930 if (dump_file)
931 fprintf (dump_file,
932 " Probability of edge is too small\n");
933 continue;
934 }
935 }
936
937 if (dump_file)
938 {
939 fprintf (dump_file,
940 " Inlining call of depth %i", depth);
941 if (node->count)
942 {
943 fprintf (dump_file, " called approx. %.2f times per call",
944 (double)curr->count / node->count);
945 }
946 fprintf (dump_file, "\n");
947 }
948 cgraph_redirect_edge_callee (curr, master_clone);
949 cgraph_mark_inline_edge (curr, false, new_edges);
950 lookup_recursive_calls (node, curr->callee, heap);
951 n++;
952 }
953 if (!fibheap_empty (heap) && dump_file)
954 fprintf (dump_file, " Recursive inlining growth limit met.\n");
955
956 fibheap_delete (heap);
957 if (dump_file)
958 fprintf (dump_file,
959 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
960 master_clone->global.size, node->global.size,
961 master_clone->global.time, node->global.time);
962
963 /* Remove the master clone we used for inlining. We rely on the fact that
964 clones inlined into the master clone get queued just before the master
965 clone, so we don't need recursion. */
966 for (node = cgraph_nodes; node != master_clone;
967 node = next)
968 {
969 next = node->next;
970 if (node->global.inlined_to == master_clone)
971 cgraph_remove_node (node);
972 }
973 cgraph_remove_node (master_clone);
974 /* FIXME: Recursive inlining actually reduces the number of calls to the
975 function. At this point we should probably walk the function and its
976 inline clones and compensate the counts accordingly. This probably
977 doesn't matter much in practice. */
978 return n > 0;
979 }
980
981 /* Set inline_failed for all callers of given function to REASON. */
982
983 static void
984 cgraph_set_inline_failed (struct cgraph_node *node,
985 cgraph_inline_failed_t reason)
986 {
987 struct cgraph_edge *e;
988
989 if (dump_file)
990 fprintf (dump_file, "Inlining failed: %s\n",
991 cgraph_inline_failed_string (reason));
992 for (e = node->callers; e; e = e->next_caller)
993 if (e->inline_failed)
994 e->inline_failed = reason;
995 }
996
997 /* Given the whole-compilation-unit size estimate INSNS, compute how large we
998 can allow the unit to grow. */
999 static int
1000 compute_max_insns (int insns)
1001 {
1002 int max_insns = insns;
1003 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1004 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1005
1006 return ((HOST_WIDEST_INT) max_insns
1007 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1008 }
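/* For example (with an illustrative parameter value): if the unit currently
   has 20000 estimated instructions and --param inline-unit-growth is 30, the
   unit may grow to 20000 * 130 / 100 = 26000; units smaller than
   --param large-unit-insns use that parameter as the base instead.  */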
1009
1010 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1011 static void
1012 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1013 {
1014 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1015 {
1016 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1017
1018 gcc_assert (!edge->aux);
1019 if (edge->callee->local.inlinable
1020 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1021 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1022 }
1023 }
1024
1025
1026 /* We use a greedy algorithm for inlining small functions:
1027 all inline candidates are put into a prioritized heap based on the
1028 estimated growth of the overall number of instructions, and the estimates
1029 are updated after each inlining.
1030 
1031 The heap is processed in badness order until the unit growth limit is hit. */
1032
1033 static void
1034 cgraph_decide_inlining_of_small_functions (void)
1035 {
1036 struct cgraph_node *node;
1037 struct cgraph_edge *edge;
1038 cgraph_inline_failed_t failed_reason;
1039 fibheap_t heap = fibheap_new ();
1040 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1041 int min_size, max_size;
1042 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1043
1044 if (flag_indirect_inlining)
1045 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1046
1047 if (dump_file)
1048 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1049
1050 /* Put all inline candidates into the heap. */
1051
1052 for (node = cgraph_nodes; node; node = node->next)
1053 {
1054 if (!node->local.inlinable || !node->callers)
1055 continue;
1056 if (dump_file)
1057 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1058
1059 node->global.estimated_growth = INT_MIN;
1060 if (!cgraph_default_inline_p (node, &failed_reason))
1061 {
1062 cgraph_set_inline_failed (node, failed_reason);
1063 continue;
1064 }
1065
1066 for (edge = node->callers; edge; edge = edge->next_caller)
1067 if (edge->inline_failed)
1068 {
1069 gcc_assert (!edge->aux);
1070 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1071 }
1072 }
1073
1074 max_size = compute_max_insns (overall_size);
1075 min_size = overall_size;
1076
1077 while (overall_size <= max_size
1078 && !fibheap_empty (heap))
1079 {
1080 int old_size = overall_size;
1081 struct cgraph_node *where, *callee;
1082 int badness = fibheap_min_key (heap);
1083 int current_badness;
1084 int growth;
1085 cgraph_inline_failed_t not_good = CIF_OK;
1086
1087 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1088 gcc_assert (edge->aux);
1089 edge->aux = NULL;
1090 if (!edge->inline_failed)
1091 continue;
1092
1093 /* When updating the edge costs, we only decrease badness in the keys.
1094 When the badness increases, we keep the heap as it is and re-insert
1095 the element with the new key now. */
1096 current_badness = cgraph_edge_badness (edge, false);
1097 gcc_assert (current_badness >= badness);
1098 if (current_badness != badness)
1099 {
1100 edge->aux = fibheap_insert (heap, current_badness, edge);
1101 continue;
1102 }
1103
1104 callee = edge->callee;
1105
1106 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1107 - edge->caller->global.size);
1108
1109 if (dump_file)
1110 {
1111 fprintf (dump_file,
1112 "\nConsidering %s with %i size\n",
1113 cgraph_node_name (edge->callee),
1114 edge->callee->global.size);
1115 fprintf (dump_file,
1116 " to be inlined into %s in %s:%i\n"
1117 " Estimated growth after inlined into all callees is %+i insns.\n"
1118 " Estimated badness is %i, frequency %.2f.\n",
1119 cgraph_node_name (edge->caller),
1120 flag_wpa ? "unknown"
1121 : gimple_filename ((const_gimple) edge->call_stmt),
1122 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1123 cgraph_estimate_growth (edge->callee),
1124 badness,
1125 edge->frequency / (double)CGRAPH_FREQ_BASE);
1126 if (edge->count)
1127 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1128 if (dump_flags & TDF_DETAILS)
1129 cgraph_edge_badness (edge, true);
1130 }
1131
1132 /* When we do not have profile info ready, we do not weight in any way the
1133 position of the call in the procedure itself. This means that if a call
1134 of function A from function B seems profitable to inline, the recursive
1135 call of function A in the inline copy of A in B will look profitable too,
1136 and we end up inlining until reaching the maximal function growth. This
1137 is not a good idea, so prohibit the recursive inlining.
1138 
1139 ??? When the frequencies are taken into account we might not need this
1140 restriction.
1141 
1142 We need to be careful here; in some testcases, e.g. directives.c in
1143 libcpp, we can estimate a self-recursive function to have negative growth
1144 when inlined completely.
1145 */
1146 if (!edge->count)
1147 {
1148 where = edge->caller;
1149 while (where->global.inlined_to)
1150 {
1151 if (where->decl == edge->callee->decl)
1152 break;
1153 where = where->callers->caller;
1154 }
1155 if (where->global.inlined_to)
1156 {
1157 edge->inline_failed
1158 = (edge->callee->local.disregard_inline_limits
1159 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1160 if (dump_file)
1161 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1162 continue;
1163 }
1164 }
1165
1166 if (edge->callee->local.disregard_inline_limits)
1167 ;
1168 else if (!cgraph_maybe_hot_edge_p (edge))
1169 not_good = CIF_UNLIKELY_CALL;
1170 else if (!flag_inline_functions
1171 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1172 not_good = CIF_NOT_DECLARED_INLINED;
1173 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1174 not_good = CIF_OPTIMIZING_FOR_SIZE;
1175 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1176 {
1177 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1178 &edge->inline_failed))
1179 {
1180 edge->inline_failed = not_good;
1181 if (dump_file)
1182 fprintf (dump_file, " inline_failed:%s.\n",
1183 cgraph_inline_failed_string (edge->inline_failed));
1184 }
1185 continue;
1186 }
1187 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1188 {
1189 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1190 &edge->inline_failed))
1191 {
1192 if (dump_file)
1193 fprintf (dump_file, " inline_failed:%s.\n",
1194 cgraph_inline_failed_string (edge->inline_failed));
1195 }
1196 continue;
1197 }
1198 if (!tree_can_inline_p (edge))
1199 {
1200 if (dump_file)
1201 fprintf (dump_file, " inline_failed:%s.\n",
1202 cgraph_inline_failed_string (edge->inline_failed));
1203 continue;
1204 }
1205 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1206 &edge->inline_failed))
1207 {
1208 where = edge->caller;
1209 if (where->global.inlined_to)
1210 where = where->global.inlined_to;
1211 if (!cgraph_decide_recursive_inlining (where,
1212 flag_indirect_inlining
1213 ? &new_indirect_edges : NULL))
1214 continue;
1215 if (flag_indirect_inlining)
1216 add_new_edges_to_heap (heap, new_indirect_edges);
1217 update_all_callee_keys (heap, where, updated_nodes);
1218 }
1219 else
1220 {
1221 struct cgraph_node *callee;
1222 if (edge->call_stmt_cannot_inline_p
1223 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1224 &edge->inline_failed, true))
1225 {
1226 if (dump_file)
1227 fprintf (dump_file, " Not inlining into %s:%s.\n",
1228 cgraph_node_name (edge->caller),
1229 cgraph_inline_failed_string (edge->inline_failed));
1230 continue;
1231 }
1232 callee = edge->callee;
1233 gcc_checking_assert (!callee->global.inlined_to);
1234 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1235 if (flag_indirect_inlining)
1236 add_new_edges_to_heap (heap, new_indirect_edges);
1237
1238 /* We inlined the last offline copy into the body. This might lead
1239 to callees of the function having fewer call sites, and thus they
1240 may need updating. */
1241 if (callee->global.inlined_to)
1242 update_all_callee_keys (heap, callee, updated_nodes);
1243 else
1244 update_callee_keys (heap, edge->callee, updated_nodes);
1245 }
1246 where = edge->caller;
1247 if (where->global.inlined_to)
1248 where = where->global.inlined_to;
1249
1250 /* Our profitability metric can depend on local properties
1251 such as the number of inlinable calls and the size of the function body.
1252 After inlining these properties might change for the function we
1253 inlined into (since its body size changed) and for the functions
1254 called by the function we inlined (since the number of their inlinable
1255 callers might change). */
1256 update_caller_keys (heap, where, updated_nodes);
1257
1258 /* We removed one call of the function we just inlined. If the offline
1259 copy is still needed, be sure to update the keys. */
1260 if (callee != where && !callee->global.inlined_to)
1261 update_caller_keys (heap, callee, updated_nodes);
1262 bitmap_clear (updated_nodes);
1263
1264 if (dump_file)
1265 {
1266 fprintf (dump_file,
1267 " Inlined into %s which now has size %i and self time %i,"
1268 "net change of %+i.\n",
1269 cgraph_node_name (edge->caller),
1270 edge->caller->global.time,
1271 edge->caller->global.size,
1272 overall_size - old_size);
1273 }
1274 if (min_size > overall_size)
1275 {
1276 min_size = overall_size;
1277 max_size = compute_max_insns (min_size);
1278
1279 if (dump_file)
1280 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1281 }
1282 }
1283 while (!fibheap_empty (heap))
1284 {
1285 int badness = fibheap_min_key (heap);
1286
1287 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1288 gcc_assert (edge->aux);
1289 edge->aux = NULL;
1290 if (!edge->inline_failed)
1291 continue;
1292 #ifdef ENABLE_CHECKING
1293 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1294 #endif
1295 if (dump_file)
1296 {
1297 fprintf (dump_file,
1298 "\nSkipping %s with %i size\n",
1299 cgraph_node_name (edge->callee),
1300 edge->callee->global.size);
1301 fprintf (dump_file,
1302 " called by %s in %s:%i\n"
1303 " Estimated growth after inlined into all callees is %+i insns.\n"
1304 " Estimated badness is %i, frequency %.2f.\n",
1305 cgraph_node_name (edge->caller),
1306 flag_wpa ? "unknown"
1307 : gimple_filename ((const_gimple) edge->call_stmt),
1308 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1309 cgraph_estimate_growth (edge->callee),
1310 badness,
1311 edge->frequency / (double)CGRAPH_FREQ_BASE);
1312 if (edge->count)
1313 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1314 if (dump_flags & TDF_DETAILS)
1315 cgraph_edge_badness (edge, true);
1316 }
1317 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1318 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1319 &edge->inline_failed))
1320 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1321 }
1322
1323 if (new_indirect_edges)
1324 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1325 fibheap_delete (heap);
1326 BITMAP_FREE (updated_nodes);
1327 }
1328
1329 /* Flatten NODE from the IPA inliner. */
1330
1331 static void
1332 cgraph_flatten (struct cgraph_node *node)
1333 {
1334 struct cgraph_edge *e;
1335
1336 /* We shouldn't be called recursively when we are being processed. */
1337 gcc_assert (node->aux == NULL);
1338
1339 node->aux = (void *)(size_t) INLINE_ALL;
1340
1341 for (e = node->callees; e; e = e->next_callee)
1342 {
1343 struct cgraph_node *orig_callee;
1344
1345 if (e->call_stmt_cannot_inline_p)
1346 continue;
1347
1348 if (!e->callee->analyzed)
1349 {
1350 if (dump_file)
1351 fprintf (dump_file,
1352 "Not inlining: Function body not available.\n");
1353 continue;
1354 }
1355
1356 /* We've hit a cycle? It is time to give up. */
1357 if (e->callee->aux)
1358 {
1359 if (dump_file)
1360 fprintf (dump_file,
1361 "Not inlining %s into %s to avoid cycle.\n",
1362 cgraph_node_name (e->callee),
1363 cgraph_node_name (e->caller));
1364 e->inline_failed = CIF_RECURSIVE_INLINING;
1365 continue;
1366 }
1367
1368 /* When the edge is already inlined, we just need to recurse into
1369 it in order to fully flatten the leaves. */
1370 if (!e->inline_failed)
1371 {
1372 cgraph_flatten (e->callee);
1373 continue;
1374 }
1375
1376 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1377 {
1378 if (dump_file)
1379 fprintf (dump_file, "Not inlining: recursive call.\n");
1380 continue;
1381 }
1382
1383 if (!tree_can_inline_p (e))
1384 {
1385 if (dump_file)
1386 fprintf (dump_file, "Not inlining: %s",
1387 cgraph_inline_failed_string (e->inline_failed));
1388 continue;
1389 }
1390
1391 /* Inline the edge and flatten the inline clone. Avoid
1392 recursing through the original node if the node was cloned. */
1393 if (dump_file)
1394 fprintf (dump_file, " Inlining %s into %s.\n",
1395 cgraph_node_name (e->callee),
1396 cgraph_node_name (e->caller));
1397 orig_callee = e->callee;
1398 cgraph_mark_inline_edge (e, true, NULL);
1399 if (e->callee != orig_callee)
1400 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1401 cgraph_flatten (e->callee);
1402 if (e->callee != orig_callee)
1403 orig_callee->aux = NULL;
1404 }
1405
1406 node->aux = NULL;
1407 }
1408
1409 /* Decide on the inlining. We do so in topological order to avoid
1410 the expense of updating data structures. */
1411
1412 static unsigned int
1413 cgraph_decide_inlining (void)
1414 {
1415 struct cgraph_node *node;
1416 int nnodes;
1417 struct cgraph_node **order =
1418 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1419 int old_size = 0;
1420 int i;
1421 int initial_size = 0;
1422
1423 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1424 if (in_lto_p && flag_indirect_inlining)
1425 ipa_update_after_lto_read ();
1426 if (flag_indirect_inlining)
1427 ipa_create_all_structures_for_iinln ();
1428
1429 max_count = 0;
1430 max_benefit = 0;
1431 for (node = cgraph_nodes; node; node = node->next)
1432 if (node->analyzed)
1433 {
1434 struct cgraph_edge *e;
1435
1436 gcc_assert (inline_summary (node)->self_size == node->global.size);
1437 initial_size += node->global.size;
1438 for (e = node->callees; e; e = e->next_callee)
1439 if (max_count < e->count)
1440 max_count = e->count;
1441 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1442 max_benefit = inline_summary (node)->time_inlining_benefit;
1443 }
1444 gcc_assert (in_lto_p
1445 || !max_count
1446 || (profile_info && flag_branch_probabilities));
1447 overall_size = initial_size;
1448
1449 nnodes = cgraph_postorder (order);
1450
1451 if (dump_file)
1452 fprintf (dump_file,
1453 "\nDeciding on inlining. Starting with size %i.\n",
1454 initial_size);
1455
1456 for (node = cgraph_nodes; node; node = node->next)
1457 node->aux = 0;
1458
1459 if (dump_file)
1460 fprintf (dump_file, "\nFlattening functions:\n");
1461
1462 /* In the first pass handle functions to be flattened. Do this with
1463 a priority so none of our later choices will make this impossible. */
1464 for (i = nnodes - 1; i >= 0; i--)
1465 {
1466 node = order[i];
1467
1468 /* Handle nodes to be flattened, but don't update the overall unit
1469 size. Calling the incremental inliner here is lame;
1470 a simple worklist should be enough. What should be left
1471 here from the early inliner (if it runs) is cyclic cases.
1472 Ideally when processing callees we would stop inlining at the
1473 entry of cycles, possibly cloning that entry point and
1474 trying to flatten it, turning it into a self-recursive
1475 function. */
1476 if (lookup_attribute ("flatten",
1477 DECL_ATTRIBUTES (node->decl)) != NULL)
1478 {
1479 if (dump_file)
1480 fprintf (dump_file,
1481 "Flattening %s\n", cgraph_node_name (node));
1482 cgraph_flatten (node);
1483 }
1484 }
1485
1486 cgraph_decide_inlining_of_small_functions ();
1487
1488 if (flag_inline_functions_called_once)
1489 {
1490 if (dump_file)
1491 fprintf (dump_file, "\nDeciding on functions called once:\n");
1492
1493 /* And finally decide what functions are called once. */
1494 for (i = nnodes - 1; i >= 0; i--)
1495 {
1496 node = order[i];
1497
1498 if (node->callers
1499 && !node->callers->next_caller
1500 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1501 && node->local.inlinable
1502 && node->callers->inline_failed
1503 && node->callers->caller != node
1504 && node->callers->caller->global.inlined_to != node
1505 && !node->callers->call_stmt_cannot_inline_p
1506 && !DECL_EXTERNAL (node->decl))
1507 {
1508 cgraph_inline_failed_t reason;
1509 old_size = overall_size;
1510 if (dump_file)
1511 {
1512 fprintf (dump_file,
1513 "\nConsidering %s size %i.\n",
1514 cgraph_node_name (node), node->global.size);
1515 fprintf (dump_file,
1516 " Called once from %s %i insns.\n",
1517 cgraph_node_name (node->callers->caller),
1518 node->callers->caller->global.size);
1519 }
1520
1521 if (cgraph_check_inline_limits (node->callers->caller, node,
1522 &reason, false))
1523 {
1524 struct cgraph_node *caller = node->callers->caller;
1525 cgraph_mark_inline (node->callers);
1526 if (dump_file)
1527 fprintf (dump_file,
1528 " Inlined into %s which now has %i size"
1529 " for a net change of %+i size.\n",
1530 cgraph_node_name (caller),
1531 caller->global.size,
1532 overall_size - old_size);
1533 }
1534 else
1535 {
1536 if (dump_file)
1537 fprintf (dump_file,
1538 " Not inlining: %s.\n",
1539 cgraph_inline_failed_string (reason));
1540 }
1541 }
1542 }
1543 }
1544
1545 /* Free ipa-prop structures if they are no longer needed. */
1546 if (flag_indirect_inlining)
1547 ipa_free_all_structures_after_iinln ();
1548
1549 if (dump_file)
1550 fprintf (dump_file,
1551 "\nInlined %i calls, eliminated %i functions, "
1552 "size %i turned to %i size.\n\n",
1553 ncalls_inlined, nfunctions_inlined, initial_size,
1554 overall_size);
1555 free (order);
1556 return 0;
1557 }
1558
1559 /* Return true when N is a leaf function. Accept cheap (pure & const) builtins
1560 in leaf functions. */
1561 static bool
1562 leaf_node_p (struct cgraph_node *n)
1563 {
1564 struct cgraph_edge *e;
1565 for (e = n->callees; e; e = e->next_callee)
1566 if (!DECL_BUILT_IN (e->callee->decl)
1567 || (!TREE_READONLY (e->callee->decl)
1568 || DECL_PURE_P (e->callee->decl)))
1569 return false;
1570 return true;
1571 }
1572
1573 /* Decide on inlining of calls in NODE incrementally, according to MODE.
1574 Return true if anything was inlined. */
1575
1576 static bool
1577 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1578 enum inlining_mode mode)
1579 {
1580 struct cgraph_edge *e;
1581 bool inlined = false;
1582 cgraph_inline_failed_t failed_reason;
1583
1584 #ifdef ENABLE_CHECKING
1585 verify_cgraph_node (node);
1586 #endif
1587
1588 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1589 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1590 {
1591 if (dump_file)
1592 fprintf (dump_file, "Incrementally flattening %s\n",
1593 cgraph_node_name (node));
1594 mode = INLINE_ALL;
1595 }
1596
1597 /* First of all look for always inline functions. */
1598 if (mode != INLINE_SIZE_NORECURSIVE)
1599 for (e = node->callees; e; e = e->next_callee)
1600 {
1601 if (!e->callee->local.disregard_inline_limits
1602 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1603 continue;
1604 if (e->call_stmt_cannot_inline_p)
1605 continue;
1606 if (dump_file)
1607 fprintf (dump_file,
1608 "Considering to always inline inline candidate %s.\n",
1609 cgraph_node_name (e->callee));
1610 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1611 {
1612 if (dump_file)
1613 fprintf (dump_file, "Not inlining: recursive call.\n");
1614 continue;
1615 }
1616 if (!tree_can_inline_p (e))
1617 {
1618 if (dump_file)
1619 fprintf (dump_file,
1620 "Not inlining: %s",
1621 cgraph_inline_failed_string (e->inline_failed));
1622 continue;
1623 }
1624 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1625 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1626 {
1627 if (dump_file)
1628 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1629 continue;
1630 }
1631 if (!e->callee->analyzed)
1632 {
1633 if (dump_file)
1634 fprintf (dump_file,
1635 "Not inlining: Function body no longer available.\n");
1636 continue;
1637 }
1638
1639 if (dump_file)
1640 fprintf (dump_file, " Inlining %s into %s.\n",
1641 cgraph_node_name (e->callee),
1642 cgraph_node_name (e->caller));
1643 cgraph_mark_inline (e);
1644 inlined = true;
1645 }
1646
1647 /* Now do the automatic inlining. */
1648 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1649 /* Never inline regular functions into always-inline functions
1650 during incremental inlining. */
1651 && !node->local.disregard_inline_limits)
1652 {
1653 bitmap visited = BITMAP_ALLOC (NULL);
1654 for (e = node->callees; e; e = e->next_callee)
1655 {
1656 int allowed_growth = 0;
1657 if (!e->callee->local.inlinable
1658 || !e->inline_failed
1659 || e->callee->local.disregard_inline_limits)
1660 continue;
1661 /* We are inlining a function into all call sites in the node
1662 or into none, so visit each candidate only once. */
1663 if (!bitmap_set_bit (visited, e->callee->uid))
1664 continue;
1665 if (dump_file)
1666 fprintf (dump_file, "Considering inline candidate %s.\n",
1667 cgraph_node_name (e->callee));
1668 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1669 {
1670 if (dump_file)
1671 fprintf (dump_file, "Not inlining: recursive call.\n");
1672 continue;
1673 }
1674 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1675 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1676 {
1677 if (dump_file)
1678 fprintf (dump_file,
1679 "Not inlining: SSA form does not match.\n");
1680 continue;
1681 }
1682
1683 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1684 && optimize_function_for_speed_p (cfun))
1685 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1686
1687 /* When the function body would grow and inlining the function
1688 won't eliminate the need for an offline copy of the function,
1689 don't inline. */
1690 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1691 || (!flag_inline_functions
1692 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1693 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1694 > e->caller->global.size + allowed_growth)
1695 && cgraph_estimate_growth (e->callee) > allowed_growth)
1696 {
1697 if (dump_file)
1698 fprintf (dump_file,
1699 "Not inlining: code size would grow by %i.\n",
1700 cgraph_estimate_size_after_inlining (1, e->caller,
1701 e->callee)
1702 - e->caller->global.size);
1703 continue;
1704 }
1705 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1706 false)
1707 || e->call_stmt_cannot_inline_p)
1708 {
1709 if (dump_file)
1710 fprintf (dump_file, "Not inlining: %s.\n",
1711 cgraph_inline_failed_string (e->inline_failed));
1712 continue;
1713 }
1714 if (!e->callee->analyzed)
1715 {
1716 if (dump_file)
1717 fprintf (dump_file,
1718 "Not inlining: Function body no longer available.\n");
1719 continue;
1720 }
1721 if (!tree_can_inline_p (e))
1722 {
1723 if (dump_file)
1724 fprintf (dump_file,
1725 "Not inlining: %s.",
1726 cgraph_inline_failed_string (e->inline_failed));
1727 continue;
1728 }
1729 if (cgraph_default_inline_p (e->callee, &failed_reason))
1730 {
1731 if (dump_file)
1732 fprintf (dump_file, " Inlining %s into %s.\n",
1733 cgraph_node_name (e->callee),
1734 cgraph_node_name (e->caller));
1735 cgraph_mark_inline (e);
1736 inlined = true;
1737 }
1738 }
1739 BITMAP_FREE (visited);
1740 }
1741 return inlined;
1742 }
1743
1744 /* Because inlining might remove no-longer reachable nodes, we need to
1745 keep the array visible to the garbage collector to avoid reading
1746 collected-out nodes. */
1747 static int nnodes;
1748 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1749
1750 /* Do inlining of small functions. Doing so early helps profiling and other
1751 passes to be somewhat more effective and avoids some code duplication in
1752 the later real inlining pass for testcases with very many function calls. */
1753 static unsigned int
1754 cgraph_early_inlining (void)
1755 {
1756 struct cgraph_node *node = cgraph_node (current_function_decl);
1757 unsigned int todo = 0;
1758 int iterations = 0;
1759
1760 if (seen_error ())
1761 return 0;
1762
1763 if (!optimize
1764 || flag_no_inline
1765 || !flag_early_inlining)
1766 {
1767 /* When not optimizing or not inlining, inline only always-inline
1768 functions.  */
1769 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1770 timevar_push (TV_INTEGRATION);
1771 todo |= optimize_inline_calls (current_function_decl);
1772 timevar_pop (TV_INTEGRATION);
1773 }
1774 else
1775 {
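/* Functions with the "flatten" attribute have their calls inlined by
cgraph_flatten before the incremental inliner below is run.  */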
1776 if (lookup_attribute ("flatten",
1777 DECL_ATTRIBUTES (node->decl)) != NULL)
1778 {
1779 if (dump_file)
1780 fprintf (dump_file,
1781 "Flattening %s\n", cgraph_node_name (node));
1782 cgraph_flatten (node);
1783 timevar_push (TV_INTEGRATION);
1784 todo |= optimize_inline_calls (current_function_decl);
1785 timevar_pop (TV_INTEGRATION);
1786 }
1787 /* We iterate incremental inlining to get trivial cases of indirect
1788 inlining. */
1789 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1790 && cgraph_decide_inlining_incrementally (node,
1791 iterations
1792 ? INLINE_SIZE_NORECURSIVE
1793 : INLINE_SIZE))
1794 {
1795 timevar_push (TV_INTEGRATION);
1796 todo |= optimize_inline_calls (current_function_decl);
1797 iterations++;
1798 timevar_pop (TV_INTEGRATION);
1799 }
1800 if (dump_file)
1801 fprintf (dump_file, "Iterations: %i\n", iterations);
1802 }
1803
1804 cfun->always_inline_functions_inlined = true;
1805
1806 return todo;
1807 }
1808
1809 struct gimple_opt_pass pass_early_inline =
1810 {
1811 {
1812 GIMPLE_PASS,
1813 "einline", /* name */
1814 NULL, /* gate */
1815 cgraph_early_inlining, /* execute */
1816 NULL, /* sub */
1817 NULL, /* next */
1818 0, /* static_pass_number */
1819 TV_INLINE_HEURISTICS, /* tv_id */
1820 0, /* properties_required */
1821 0, /* properties_provided */
1822 0, /* properties_destroyed */
1823 0, /* todo_flags_start */
1824 TODO_dump_func /* todo_flags_finish */
1825 }
1826 };
1827
1828 /* Gate: run IPA early inlining only when early inlining is enabled, we are not in LTO, and profile instrumentation or feedback is in effect.  */
1829 static bool
1830 cgraph_gate_ipa_early_inlining (void)
1831 {
1832 return (flag_early_inlining
1833 && !in_lto_p
1834 && (flag_branch_probabilities || flag_test_coverage
1835 || profile_arc_flag));
1836 }
1837
1838 /* IPA pass wrapper for the early inlining pass.  We need to run early inlining
1839 before tree profiling, so we have a stand-alone IPA pass for doing so.  */
1840 struct simple_ipa_opt_pass pass_ipa_early_inline =
1841 {
1842 {
1843 SIMPLE_IPA_PASS,
1844 "einline_ipa", /* name */
1845 cgraph_gate_ipa_early_inlining, /* gate */
1846 NULL, /* execute */
1847 NULL, /* sub */
1848 NULL, /* next */
1849 0, /* static_pass_number */
1850 TV_INLINE_HEURISTICS, /* tv_id */
1851 0, /* properties_required */
1852 0, /* properties_provided */
1853 0, /* properties_destroyed */
1854 0, /* todo_flags_start */
1855 TODO_dump_cgraph /* todo_flags_finish */
1856 }
1857 };
1858
1859 /* See if the statement might disappear after inlining.  We are not terribly
1860 sophisticated here, basically looking for simple abstraction-penalty wrappers.  */
1861
1862 static bool
1863 likely_eliminated_by_inlining_p (gimple stmt)
1864 {
1865 enum gimple_code code = gimple_code (stmt);
1866 switch (code)
1867 {
1868 case GIMPLE_RETURN:
1869 return true;
1870 case GIMPLE_ASSIGN:
1871 if (gimple_num_ops (stmt) != 2)
1872 return false;
1873
1874 /* Casts of parameters, loads from parameters passed by reference
1875 and stores to return value or parameters are probably free after
1876 inlining. */
1877 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1878 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1879 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1880 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1881 {
1882 tree rhs = gimple_assign_rhs1 (stmt);
1883 tree lhs = gimple_assign_lhs (stmt);
1884 tree inner_rhs = rhs;
1885 tree inner_lhs = lhs;
1886 bool rhs_free = false;
1887 bool lhs_free = false;
1888
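/* Look through component references, MEM_REFs and ADDR_EXPRs to find
the base object on each side of the assignment.  */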
1889 while (handled_component_p (inner_lhs)
1890 || TREE_CODE (inner_lhs) == MEM_REF)
1891 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1892 while (handled_component_p (inner_rhs)
1893 || TREE_CODE (inner_rhs) == ADDR_EXPR
1894 || TREE_CODE (inner_rhs) == MEM_REF)
1895 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1896
1897
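/* The copy is likely free when the RHS reads a parameter (directly or via
its default-definition SSA name) and the LHS is a register, a parameter,
or the return value.  */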
1898 if (TREE_CODE (inner_rhs) == PARM_DECL
1899 || (TREE_CODE (inner_rhs) == SSA_NAME
1900 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1901 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1902 rhs_free = true;
1903 if (rhs_free && is_gimple_reg (lhs))
1904 lhs_free = true;
1905 if (((TREE_CODE (inner_lhs) == PARM_DECL
1906 || (TREE_CODE (inner_lhs) == SSA_NAME
1907 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1908 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1909 && inner_lhs != lhs)
1910 || TREE_CODE (inner_lhs) == RESULT_DECL
1911 || (TREE_CODE (inner_lhs) == SSA_NAME
1912 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1913 lhs_free = true;
1914 if (lhs_free
1915 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1916 rhs_free = true;
1917 if (lhs_free && rhs_free)
1918 return true;
1919 }
1920 return false;
1921 default:
1922 return false;
1923 }
1924 }
1925
1926 /* Compute function body size parameters for NODE. */
1927
1928 static void
1929 estimate_function_body_sizes (struct cgraph_node *node)
1930 {
1931 gcov_type time = 0;
1932 gcov_type time_inlining_benefit = 0;
1933 int size = 0;
1934 int size_inlining_benefit = 0;
1935 basic_block bb;
1936 gimple_stmt_iterator bsi;
1937 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1938 tree arg;
1939 int freq;
1940 tree funtype = TREE_TYPE (node->decl);
1941
1942 if (dump_file)
1943 fprintf (dump_file, "Analyzing function body size: %s\n",
1944 cgraph_node_name (node));
1945
1946 gcc_assert (my_function && my_function->cfg);
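/* Walk all statements, accumulating frequency-weighted time and raw size,
and credit statements likely to be optimized away after inlining to the
inlining benefit.  */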
1947 FOR_EACH_BB_FN (bb, my_function)
1948 {
1949 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1950 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1951 {
1952 gimple stmt = gsi_stmt (bsi);
1953 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1954 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1955
1956 if (dump_file && (dump_flags & TDF_DETAILS))
1957 {
1958 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1959 freq, this_size, this_time);
1960 print_gimple_stmt (dump_file, stmt, 0, 0);
1961 }
1962 this_time *= freq;
1963 time += this_time;
1964 size += this_size;
1965 if (likely_eliminated_by_inlining_p (stmt))
1966 {
1967 size_inlining_benefit += this_size;
1968 time_inlining_benefit += this_time;
1969 if (dump_file && (dump_flags & TDF_DETAILS))
1970 fprintf (dump_file, " Likely eliminated\n");
1971 }
1972 gcc_assert (time >= 0);
1973 gcc_assert (size >= 0);
1974 }
1975 }
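/* The times above are weighted by basic-block frequency; divide by
CGRAPH_FREQ_BASE (rounding to nearest) to get back to per-invocation units.  */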
1976 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1977 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1978 / CGRAPH_FREQ_BASE);
1979 if (dump_file)
1980 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1981 (int)time, (int)time_inlining_benefit,
1982 size, size_inlining_benefit);
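/* Besides the eliminated statements, inlining also saves the call itself
and the cost of moving the return value and each non-void argument.  */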
1983 time_inlining_benefit += eni_time_weights.call_cost;
1984 size_inlining_benefit += eni_size_weights.call_cost;
1985 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1986 {
1987 int cost = estimate_move_cost (TREE_TYPE (funtype));
1988 time_inlining_benefit += cost;
1989 size_inlining_benefit += cost;
1990 }
1991 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
1992 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1993 {
1994 int cost = estimate_move_cost (TREE_TYPE (arg));
1995 time_inlining_benefit += cost;
1996 size_inlining_benefit += cost;
1997 }
1998 if (time_inlining_benefit > MAX_TIME)
1999 time_inlining_benefit = MAX_TIME;
2000 if (time > MAX_TIME)
2001 time = MAX_TIME;
2002 inline_summary (node)->self_time = time;
2003 inline_summary (node)->self_size = size;
2004 if (dump_file)
2005 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
2006 (int)time, (int)time_inlining_benefit,
2007 size, size_inlining_benefit);
2008 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
2009 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
2010 }
2011
2012 /* Compute parameters of NODE used by the inliner.  */
2013 unsigned int
2014 compute_inline_parameters (struct cgraph_node *node)
2015 {
2016 HOST_WIDE_INT self_stack_size;
2017
2018 gcc_assert (!node->global.inlined_to);
2019
2020 /* Estimate the stack size for the function. But not at -O0
2021 because estimated_stack_frame_size is a quadratic problem. */
2022 self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
2023 inline_summary (node)->estimated_self_stack_size = self_stack_size;
2024 node->global.estimated_stack_size = self_stack_size;
2025 node->global.stack_frame_offset = 0;
2026
2027 /* Can this function be inlined at all? */
2028 node->local.inlinable = tree_inlinable_function_p (node->decl);
2029 if (node->local.inlinable && !node->local.disregard_inline_limits)
2030 node->local.disregard_inline_limits
2031 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
2032 estimate_function_body_sizes (node);
2033 /* Inlining characteristics are maintained by cgraph_mark_inline.  */
2034 node->global.time = inline_summary (node)->self_time;
2035 node->global.size = inline_summary (node)->self_size;
2036 return 0;
2037 }
2038
2039
2040 /* Compute parameters used by the inliner for the current function
2041 (current_function_decl).  */
2042 static unsigned int
2043 compute_inline_parameters_for_current (void)
2044 {
2045 compute_inline_parameters (cgraph_node (current_function_decl));
2046 return 0;
2047 }
2048
2049 struct gimple_opt_pass pass_inline_parameters =
2050 {
2051 {
2052 GIMPLE_PASS,
2053 "inline_param", /* name */
2054 NULL, /* gate */
2055 compute_inline_parameters_for_current,/* execute */
2056 NULL, /* sub */
2057 NULL, /* next */
2058 0, /* static_pass_number */
2059 TV_INLINE_HEURISTICS, /* tv_id */
2060 0, /* properties_required */
2061 0, /* properties_provided */
2062 0, /* properties_destroyed */
2063 0, /* todo_flags_start */
2064 0 /* todo_flags_finish */
2065 }
2066 };
2067
2068 /* This function performs the intraprocedural analysis on NODE that is required
2069 to inline indirect calls.  */
2070 static void
2071 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2072 {
2073 ipa_analyze_node (node);
2074 if (dump_file && (dump_flags & TDF_DETAILS))
2075 {
2076 ipa_print_node_params (dump_file, node);
2077 ipa_print_node_jump_functions (dump_file, node);
2078 }
2079 }
2080
2081 /* Analyze NODE: compute its inline parameters and, when indirect inlining is enabled, its indirect-call information.  */
2082 static void
2083 analyze_function (struct cgraph_node *node)
2084 {
2085 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2086 current_function_decl = node->decl;
2087
2088 compute_inline_parameters (node);
2089 /* FIXME: We should remove the optimize check after we ensure we never run
2090 IPA passes when not optimizing.  */
2091 if (flag_indirect_inlining && optimize)
2092 inline_indirect_intraprocedural_analysis (node);
2093
2094 current_function_decl = NULL;
2095 pop_cfun ();
2096 }
2097
2098 /* Called when a new function is inserted into the callgraph late.  */
2099 static void
2100 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2101 {
2102 analyze_function (node);
2103 }
2104
2105 /* Generate the inline summary by analyzing every function in the callgraph.  */
2106 static void
2107 inline_generate_summary (void)
2108 {
2109 struct cgraph_node *node;
2110
2111 function_insertion_hook_holder =
2112 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2113
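/* Indirect inlining uses the ipa-prop parameter and jump-function machinery;
register its hooks and create the per-node and per-edge summaries.  */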
2114 if (flag_indirect_inlining)
2115 {
2116 ipa_register_cgraph_hooks ();
2117 ipa_check_create_node_params ();
2118 ipa_check_create_edge_args ();
2119 }
2120
2121 for (node = cgraph_nodes; node; node = node->next)
2122 if (node->analyzed)
2123 analyze_function (node);
2124
2125 return;
2126 }
2127
2128 /* Apply inline plan to function. */
2129 static unsigned int
2130 inline_transform (struct cgraph_node *node)
2131 {
2132 unsigned int todo = 0;
2133 struct cgraph_edge *e;
2134 bool inline_p = false;
2135
2136 /* FIXME: Currently the pass manager adds the inline transform more than once
2137 to some clones.  This needs revisiting after WPA cleanups.  */
2138 if (cfun->after_inlining)
2139 return 0;
2140
2141 /* We might need the body of this function so that we can expand
2142 it inline somewhere else. */
2143 if (cgraph_preserve_function_body_p (node->decl))
2144 save_inline_function_body (node);
2145
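/* Redirect each call to the callee chosen by the inline plan, and note
whether optimize_inline_calls has anything to do (a call was inlined,
or -Winline is in effect).  */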
2146 for (e = node->callees; e; e = e->next_callee)
2147 {
2148 cgraph_redirect_edge_call_stmt_to_callee (e);
2149 if (!e->inline_failed || warn_inline)
2150 inline_p = true;
2151 }
2152
2153 if (inline_p)
2154 {
2155 timevar_push (TV_INTEGRATION);
2156 todo = optimize_inline_calls (current_function_decl);
2157 timevar_pop (TV_INTEGRATION);
2158 }
2159 cfun->always_inline_functions_inlined = true;
2160 cfun->after_inlining = true;
2161 return todo | execute_fixup_cfg ();
2162 }
2163
2164 /* Read the inline summary.  Jump functions are shared between ipa-cp
2165 and the inliner, so when ipa-cp is active we do not need to read them
2166 here a second time.  */
2167
2168 static void
2169 inline_read_summary (void)
2170 {
2171 if (flag_indirect_inlining)
2172 {
2173 ipa_register_cgraph_hooks ();
2174 if (!flag_ipa_cp)
2175 ipa_prop_read_jump_functions ();
2176 }
2177 function_insertion_hook_holder =
2178 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2179 }
2180
2181 /* Write the inline summary for the nodes in SET.
2182 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp is
2183 active, we don't need to write them twice.  */
2184
2185 static void
2186 inline_write_summary (cgraph_node_set set,
2187 varpool_node_set vset ATTRIBUTE_UNUSED)
2188 {
2189 if (flag_indirect_inlining && !flag_ipa_cp)
2190 ipa_prop_write_jump_functions (set);
2191 }
2192
2193 /* When to run IPA inlining. Inlining of always-inline functions
2194 happens during early inlining. */
2195
2196 static bool
2197 gate_cgraph_decide_inlining (void)
2198 {
2199 /* ??? We'd like to skip this if not optimizing or not inlining as
2200 all always-inline functions have been processed by early
2201 inlining already. But this at least breaks EH with C++ as
2202 we need to unconditionally run fixup_cfg even at -O0.
2203 So leave it on unconditionally for now. */
2204 return 1;
2205 }
2206
2207 struct ipa_opt_pass_d pass_ipa_inline =
2208 {
2209 {
2210 IPA_PASS,
2211 "inline", /* name */
2212 gate_cgraph_decide_inlining, /* gate */
2213 cgraph_decide_inlining, /* execute */
2214 NULL, /* sub */
2215 NULL, /* next */
2216 0, /* static_pass_number */
2217 TV_INLINE_HEURISTICS, /* tv_id */
2218 0, /* properties_required */
2219 0, /* properties_provided */
2220 0, /* properties_destroyed */
2221 TODO_remove_functions, /* todo_flags_start */
2222 TODO_dump_cgraph | TODO_dump_func
2223 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2224 },
2225 inline_generate_summary, /* generate_summary */
2226 inline_write_summary, /* write_summary */
2227 inline_read_summary, /* read_summary */
2228 NULL, /* write_optimization_summary */
2229 NULL, /* read_optimization_summary */
2230 NULL, /* stmt_fixup */
2231 0, /* TODOs */
2232 inline_transform, /* function_transform */
2233 NULL, /* variable_transform */
2234 };
2235
2236
2237 #include "gt-ipa-inline.h"