[gcc.git] / gcc / ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23    We separate inlining decisions from the inliner itself and store them
24    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
25    documentation for the particular representation of inline plans in the
26    callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32      This function marks a given call inline and performs the necessary
33      modifications of the cgraph (production of the clones and updating of
34      overall statistics).
35
36 inlining heuristics limits
37
38      These functions check that a particular inlining is allowed by the
39      limits specified by the user (allowed function growth, overall unit
40      growth and so on).
41
42 inlining heuristics
43
44     This is the implementation of the IPA pass that aims to get as much
45     benefit from inlining as possible while obeying the limits checked above.
46
47     The implementation of the particular heuristics is separated from
48     the rest of the code to make it easier to replace it with a more
49     complicated implementation in the future.  The rest of the inlining code
50     acts as a library aimed at modifying the callgraph and verifying that
51     the limits on code size growth are respected.
52
53    To mark a given call inline, use the cgraph_mark_inline function; the
54    verification is performed by cgraph_default_inline_p and
55    cgraph_check_inline_limits.
56
57    The heuristic implements a simple knapsack-style algorithm, ordering
58    all functions by their "profitability" (estimated by code size growth)
59    and inlining them in priority order.
60
61    cgraph_decide_inlining implements the heuristics taking the whole callgraph
62    into account, while cgraph_decide_inlining_incrementally considers
63    only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69      This pass computes the local properties of functions that are used by the
70      inliner: estimated function body size, whether the function is inlinable
71      at all, and stack frame consumption.
72
73      Before executing any of the inliner passes, this local pass has to be
74      applied to each function in the callgraph (i.e. run as a subpass of some
75      earlier IPA pass).  The results are made out of date by any optimization
76      applied to the function body.
77
78 pass_early_inlining
79
80      A simple local inlining pass that inlines callees into the current function.
81      This pass performs no global whole-compilation-unit analysis, so when it is
82      allowed to do inlining that expands code size, it might result in unbounded
83      growth of the whole unit.
84
85      The pass is run during conversion into SSA form.  Only functions already
86      converted into SSA form are inlined, so the conversion must happen in
87      topological order on the callgraph (which is maintained by the pass manager).
88      The functions are early optimized after inlining, so the early inliner sees
89      the unoptimized function itself, but all considered callees are already
90      optimized, allowing it to unfold the abstraction penalty of C++ effectively
91      and cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, the early inlining is also necessary to reduce
96      instrumentation costs on programs with a high abstraction penalty (doing
97 many redundant calls). This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting already instrumented function bodies we brought in via
100 inlining.
101
102      To avoid this, this pass is executed as an IPA pass before profiling.  It is
103      a simple wrapper around pass_early_inlining and ensures inlining happens first.
104
105 pass_ipa_inline
106
107      This is the main pass, implementing a simple greedy algorithm that inlines
108      small functions (which results in overall growth of the compilation unit) and
109      inlines functions called once.  The pass computes just the so-called inline
110      plan (a representation of the inlining to be done in the callgraph) and,
111      unlike early inlining, does not perform the inlining itself.
112
113 pass_apply_inline
114
115      This pass performs the actual inlining according to pass_ipa_inline on the
116      given function.  Possibly the function body before inlining is saved when it
117      is needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
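/* Cap applied below to estimated execution times so the scaled gcov_type
   values returned by cgraph_estimate_time_after_inlining still fit in an int.  */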
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates in:
146 
147    In ALWAYS_INLINE mode, only functions marked
148    always_inline are inlined.  This mode is used after detecting a cycle
149    during flattening.
150 
151    In SIZE mode, only functions that reduce the function body size after
152    inlining are inlined; this is used during early inlining.
153 
154    In ALL mode, everything is inlined.  This is used during flattening.  */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
198
199 /* Estimate self size of the function after inlining WHAT into TO.  */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
209
210 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
211 by NEST. */
212
213 static void
214 update_noncloned_frequencies (struct cgraph_node *node,
215 int freq_scale, int nest)
216 {
217 struct cgraph_edge *e;
218
219 /* We do not want to ignore high loop nest after freq drops to 0. */
220 if (!freq_scale)
221 freq_scale = 1;
222 for (e = node->callees; e; e = e->next_callee)
223 {
224 e->loop_nest += nest;
225 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
226 if (e->frequency > CGRAPH_FREQ_MAX)
227 e->frequency = CGRAPH_FREQ_MAX;
228 if (!e->inline_failed)
229 update_noncloned_frequencies (e->callee, freq_scale, nest);
230 }
231 }
232
233 /* E is expected to be an edge being inlined. Clone destination node of
234 the edge and redirect it to the new clone.
235 DUPLICATE is used for bookkeeping on whether we are actually creating new
236    clones or re-using the node originally representing the out-of-line
237    function call.  */
238 void
239 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
240 bool update_original)
241 {
242 HOST_WIDE_INT peak;
243
244 if (duplicate)
245 {
246 /* We may eliminate the need for out-of-line copy to be output.
247 In that case just go ahead and re-use it. */
248 if (!e->callee->callers->next_caller
249 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
250 && !cgraph_new_nodes)
251 {
252 gcc_assert (!e->callee->global.inlined_to);
253 if (e->callee->analyzed)
254 {
255 overall_size -= e->callee->global.size;
256 nfunctions_inlined++;
257 }
258 duplicate = false;
259 e->callee->local.externally_visible = false;
260 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
261 }
262 else
263 {
264 struct cgraph_node *n;
265 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
266 update_original, NULL);
267 cgraph_redirect_edge_callee (e, n);
268 }
269 }
270
271 if (e->caller->global.inlined_to)
272 e->callee->global.inlined_to = e->caller->global.inlined_to;
273 else
274 e->callee->global.inlined_to = e->caller;
275 e->callee->global.stack_frame_offset
276 = e->caller->global.stack_frame_offset
277 + inline_summary (e->caller)->estimated_self_stack_size;
278 peak = e->callee->global.stack_frame_offset
279 + inline_summary (e->callee)->estimated_self_stack_size;
280 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
281 e->callee->global.inlined_to->global.estimated_stack_size = peak;
282
283 /* Recursively clone all bodies. */
284 for (e = e->callee->callees; e; e = e->next_callee)
285 if (!e->inline_failed)
286 cgraph_clone_inlined_nodes (e, duplicate, update_original);
287 }
288
289 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
290 specify whether profile of original function should be updated. If any new
291 indirect edges are discovered in the process, add them to NEW_EDGES, unless
292 it is NULL. Return true iff any new callgraph edges were discovered as a
293 result of inlining. */
294
295 static bool
296 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
297 VEC (cgraph_edge_p, heap) **new_edges)
298 {
299 int old_size = 0, new_size = 0;
300 struct cgraph_node *to = NULL, *what;
301 struct cgraph_edge *curr = e;
302 int freq;
303 bool duplicate = false;
304 int orig_size = e->callee->global.size;
305
306 gcc_assert (e->inline_failed);
307 e->inline_failed = CIF_OK;
308
309 if (!e->callee->global.inlined)
310 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
311 e->callee->global.inlined = true;
312
313 if (e->callee->callers->next_caller
314 || !cgraph_can_remove_if_no_direct_calls_p (e->callee))
315 duplicate = true;
316 cgraph_clone_inlined_nodes (e, true, update_original);
317
318 what = e->callee;
319
320 freq = e->frequency;
321 /* Now update size of caller and all functions caller is inlined into. */
322 for (;e && !e->inline_failed; e = e->caller->callers)
323 {
324 to = e->caller;
325 old_size = e->caller->global.size;
326 new_size = cgraph_estimate_size_after_inlining (1, to, what);
327 to->global.size = new_size;
328 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
329 }
330 gcc_assert (what->global.inlined_to == to);
331 if (new_size > old_size)
332 overall_size += new_size - old_size;
333 if (!duplicate)
334 overall_size -= orig_size;
335 ncalls_inlined++;
336
337 if (flag_indirect_inlining)
338 return ipa_propagate_indirect_call_infos (curr, new_edges);
339 else
340 return false;
341 }
342
343 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
344    Return the following unredirected edge in the list of callers
345    of EDGE->CALLEE.  */
346
347 static struct cgraph_edge *
348 cgraph_mark_inline (struct cgraph_edge *edge)
349 {
350 struct cgraph_node *to = edge->caller;
351 struct cgraph_node *what = edge->callee;
352 struct cgraph_edge *e, *next;
353
354 gcc_assert (!edge->call_stmt_cannot_inline_p);
355 /* Look for all calls, mark them inline and clone recursively
356 all inlined functions. */
357 for (e = what->callers; e; e = next)
358 {
359 next = e->next_caller;
360 if (e->caller == to && e->inline_failed)
361 {
362 cgraph_mark_inline_edge (e, true, NULL);
363 if (e == edge)
364 edge = next;
365 }
366 }
367
368 return edge;
369 }
370
371 /* Estimate the growth caused by inlining NODE into all callees. */
372
373 static int
374 cgraph_estimate_growth (struct cgraph_node *node)
375 {
376 int growth = 0;
377 struct cgraph_edge *e;
378 bool self_recursive = false;
379
380 if (node->global.estimated_growth != INT_MIN)
381 return node->global.estimated_growth;
382
383 for (e = node->callers; e; e = e->next_caller)
384 {
385 if (e->caller == node)
386 self_recursive = true;
387 if (e->inline_failed)
388 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
389 - e->caller->global.size);
390 }
391
392   /* ??? Wrong for non-trivially self-recursive functions or cases where
393      we decide not to inline for different reasons, but it is not a big deal
394      as in that case we will keep the body around, but we will also avoid
395      some inlining.  */
396 if (cgraph_only_called_directly_p (node)
397 && !DECL_EXTERNAL (node->decl) && !self_recursive)
398 growth -= node->global.size;
399
400 node->global.estimated_growth = growth;
401 return growth;
402 }
403
404 /* Return false when inlining WHAT into TO is not a good idea
405    as it would cause too large a growth of function bodies.
406    When ONE_ONLY is true, assume that only one call site is going
407    to be inlined; otherwise figure out how many call sites in
408    TO call WHAT and verify that all can be inlined.
409    */
410
411 static bool
412 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
413 cgraph_inline_failed_t *reason, bool one_only)
414 {
415 int times = 0;
416 struct cgraph_edge *e;
417 int newsize;
418 int limit;
419 HOST_WIDE_INT stack_size_limit, inlined_stack;
420
421 if (one_only)
422 times = 1;
423 else
424 for (e = to->callees; e; e = e->next_callee)
425 if (e->callee == what)
426 times++;
427
428 if (to->global.inlined_to)
429 to = to->global.inlined_to;
430
431   /* When inlining a large function body called once into a small function,
432      take the inlined function as the base for limiting the growth.  */
433 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
434 limit = inline_summary (to)->self_size;
435 else
436 limit = inline_summary (what)->self_size;
437
438 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
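  /* LIMIT now allows the combined body to exceed the larger of the two
     function sizes by PARAM_LARGE_FUNCTION_GROWTH percent.  */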
439
440 /* Check the size after inlining against the function limits. But allow
441 the function to shrink if it went over the limits by forced inlining. */
442 newsize = cgraph_estimate_size_after_inlining (times, to, what);
443 if (newsize >= to->global.size
444 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
445 && newsize > limit)
446 {
447 if (reason)
448 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
449 return false;
450 }
451
452 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
453
454 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
455
456 inlined_stack = (to->global.stack_frame_offset
457 + inline_summary (to)->estimated_self_stack_size
458 + what->global.estimated_stack_size);
459 if (inlined_stack > stack_size_limit
460 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
461 {
462 if (reason)
463 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
464 return false;
465 }
466 return true;
467 }
468
469 /* Return true when function N is small enough to be inlined. */
470
471 static bool
472 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
473 {
474 tree decl = n->decl;
475
476 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
477 {
478 if (reason)
479 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
480 return false;
481 }
482
483 if (!n->analyzed)
484 {
485 if (reason)
486 *reason = CIF_BODY_NOT_AVAILABLE;
487 return false;
488 }
489
490 if (DECL_DECLARED_INLINE_P (decl))
491 {
492 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
493 {
494 if (reason)
495 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
496 return false;
497 }
498 }
499 else
500 {
501 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
502 {
503 if (reason)
504 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
505 return false;
506 }
507 }
508
509 return true;
510 }
511
512 /* Return true when inlining WHAT would create recursive inlining.
513    We call recursive inlining all cases where the same function appears more
514    than once in a single recursion nest path in the inline graph.  */
515
516 static bool
517 cgraph_recursive_inlining_p (struct cgraph_node *to,
518 struct cgraph_node *what,
519 cgraph_inline_failed_t *reason)
520 {
521 bool recursive;
522 if (to->global.inlined_to)
523 recursive = what->decl == to->global.inlined_to->decl;
524 else
525 recursive = what->decl == to->decl;
526   /* Marking a recursive function inline has sane semantics and thus we
527      should not warn about it.  */
528 if (recursive && reason)
529 *reason = (what->local.disregard_inline_limits
530 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
531 return recursive;
532 }
533
534 /* A cost model driving the inlining heuristics in such a way that edges with
535    the smallest badness are inlined first.  After each inlining is performed,
536    the costs of all caller edges of the nodes affected are recomputed so the
537    metrics may accurately depend on values such as the number of inlinable
538    callers of the function or the function body size.  */
539
540 static int
541 cgraph_edge_badness (struct cgraph_edge *edge)
542 {
543 gcov_type badness;
544 int growth =
545 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
546
547 growth -= edge->caller->global.size;
548
549   /* Always prefer inlining that saves code size.  */
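  /* Badness is used as a key in a min-ordered fibonacci heap, so a value near
     INT_MIN makes size-saving edges the very first candidates extracted.  */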
550 if (growth <= 0)
551 badness = INT_MIN - growth;
552
553   /* When profiling is available, base priorities on -(#calls / growth).
554      So we optimize for the overall number of "executed" inlined calls.  */
555 else if (max_count)
556 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
557 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
558
559   /* When a function-local profile is available, base priorities on
560      growth / frequency, so we optimize for the overall frequency of inlined
561      calls.  This is not too accurate since, while the call might be frequent
562      within the function, the function itself is infrequent.
563 
564      Another objective to optimize for is the number of different calls inlined.
565      We add the estimated growth after inlining all functions to bias the
566      priorities slightly in this direction (so less frequently called functions
567      of the same size get priority).  */
568 else if (flag_guess_branch_prob)
569 {
570 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
571 badness = growth * 10000;
572 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
573 / (edge->callee->global.time + 1) + 1, 100);
574
575
576 /* Decrease badness if call is nested. */
577 /* Compress the range so we don't overflow. */
578 if (div > 10000)
579 div = 10000 + ceil_log2 (div) - 8;
580 if (div < 1)
581 div = 1;
582 if (badness > 0)
583 badness /= div;
584 badness += cgraph_estimate_growth (edge->callee);
585 if (badness > INT_MAX)
586 badness = INT_MAX;
587 }
588   /* When a function-local profile is not available or it does not give
589      useful information (i.e. the frequency is zero), base the cost on
590      loop nest and overall size growth, so we optimize for the overall number
591      of functions fully inlined in the program.  */
592 else
593 {
594 int nest = MIN (edge->loop_nest, 8);
595 badness = cgraph_estimate_growth (edge->callee) * 256;
596
597 /* Decrease badness if call is nested. */
598 if (badness > 0)
599 badness >>= nest;
600 else
601 {
602 badness <<= nest;
603 }
604 }
605 /* Make recursive inlining happen always after other inlining is done. */
606 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
607 return badness + 1;
608 else
609 return badness;
610 }
611
612 /* Recompute heap nodes for each caller edge of NODE.  */
613
614 static void
615 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
616 bitmap updated_nodes)
617 {
618 struct cgraph_edge *edge;
619 cgraph_inline_failed_t failed_reason;
620
621 if (!node->local.inlinable || node->local.disregard_inline_limits
622 || node->global.inlined_to)
623 return;
624 if (bitmap_bit_p (updated_nodes, node->uid))
625 return;
626 bitmap_set_bit (updated_nodes, node->uid);
627 node->global.estimated_growth = INT_MIN;
628
629 if (!node->local.inlinable)
630 return;
631 /* Prune out edges we won't inline into anymore. */
632 if (!cgraph_default_inline_p (node, &failed_reason))
633 {
634 for (edge = node->callers; edge; edge = edge->next_caller)
635 if (edge->aux)
636 {
637 fibheap_delete_node (heap, (fibnode_t) edge->aux);
638 edge->aux = NULL;
639 if (edge->inline_failed)
640 edge->inline_failed = failed_reason;
641 }
642 return;
643 }
644
645 for (edge = node->callers; edge; edge = edge->next_caller)
646 if (edge->inline_failed)
647 {
648 int badness = cgraph_edge_badness (edge);
649 if (edge->aux)
650 {
651 fibnode_t n = (fibnode_t) edge->aux;
652 gcc_assert (n->data == edge);
653 if (n->key == badness)
654 continue;
655
656 	  /* fibheap_replace_key only decreases the keys.  */
657 if (fibheap_replace_key (heap, n, badness))
658 continue;
659 fibheap_delete_node (heap, (fibnode_t) edge->aux);
660 }
661 edge->aux = fibheap_insert (heap, badness, edge);
662 }
663 }
664
665 /* Recompute heap nodes for the caller edges of each of NODE's callees.  */
666
667 static void
668 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
669 bitmap updated_nodes)
670 {
671 struct cgraph_edge *e;
672 node->global.estimated_growth = INT_MIN;
673
674 for (e = node->callees; e; e = e->next_callee)
675 if (e->inline_failed)
676 update_caller_keys (heap, e->callee, updated_nodes);
677 else if (!e->inline_failed)
678 update_callee_keys (heap, e->callee, updated_nodes);
679 }
680
681 /* Enqueue all recursive calls from NODE into the priority queue HEAP,
682    ordered by how much we want to recursively inline the call.  */
683
684 static void
685 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
686 fibheap_t heap)
687 {
688 static int priority;
689 struct cgraph_edge *e;
690 for (e = where->callees; e; e = e->next_callee)
691 if (e->callee == node)
692 {
693 	/* When profile feedback is available, prioritize by the expected number
694 	   of calls.  Without profile feedback we maintain a simple queue
695 	   to order candidates by recursion depth.  */
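	/* Counts are scaled down into 24 bits so the negated value fits the
	   int-sized fibheap key; hotter edges get smaller keys and are
	   therefore extracted from the heap first.  */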
696 fibheap_insert (heap,
697 !max_count ? priority++
698 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
699 e);
700 }
701 for (e = where->callees; e; e = e->next_callee)
702 if (!e->inline_failed)
703 lookup_recursive_calls (node, e->callee, heap);
704 }
705
706 /* Decide on recursive inlining: in case the function has recursive calls,
707    inline until the body size reaches the given limit.  If any new indirect edges
708 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
709 is NULL. */
710
711 static bool
712 cgraph_decide_recursive_inlining (struct cgraph_node *node,
713 VEC (cgraph_edge_p, heap) **new_edges)
714 {
715 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
716 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
717 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
718 fibheap_t heap;
719 struct cgraph_edge *e;
720 struct cgraph_node *master_clone, *next;
721 int depth = 0;
722 int n = 0;
723
724 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
725 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
726 return false;
727
728 if (DECL_DECLARED_INLINE_P (node->decl))
729 {
730 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
731 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
732 }
733
734 /* Make sure that function is small enough to be considered for inlining. */
735 if (!max_depth
736 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
737 return false;
738 heap = fibheap_new ();
739 lookup_recursive_calls (node, node, heap);
740 if (fibheap_empty (heap))
741 {
742 fibheap_delete (heap);
743 return false;
744 }
745
746 if (dump_file)
747 fprintf (dump_file,
748 " Performing recursive inlining on %s\n",
749 cgraph_node_name (node));
750
751   /* We need the original clone to copy around.  */
752 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
753 false, NULL);
754 master_clone->needed = true;
755 for (e = master_clone->callees; e; e = e->next_callee)
756 if (!e->inline_failed)
757 cgraph_clone_inlined_nodes (e, true, false);
758
759   /* Do the inlining and update the list of recursive calls in the process.  */
760 while (!fibheap_empty (heap)
761 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
762 <= limit))
763 {
764 struct cgraph_edge *curr
765 = (struct cgraph_edge *) fibheap_extract_min (heap);
766 struct cgraph_node *cnode;
767
768 depth = 1;
769 for (cnode = curr->caller;
770 cnode->global.inlined_to; cnode = cnode->callers->caller)
771 if (node->decl == curr->callee->decl)
772 depth++;
773 if (depth > max_depth)
774 {
775 if (dump_file)
776 fprintf (dump_file,
777 " maximal depth reached\n");
778 continue;
779 }
780
781 if (max_count)
782 {
783 if (!cgraph_maybe_hot_edge_p (curr))
784 {
785 if (dump_file)
786 fprintf (dump_file, " Not inlining cold call\n");
787 continue;
788 }
789 if (curr->count * 100 / node->count < probability)
790 {
791 if (dump_file)
792 fprintf (dump_file,
793 " Probability of edge is too small\n");
794 continue;
795 }
796 }
797
798 if (dump_file)
799 {
800 fprintf (dump_file,
801 " Inlining call of depth %i", depth);
802 if (node->count)
803 {
804 fprintf (dump_file, " called approx. %.2f times per call",
805 (double)curr->count / node->count);
806 }
807 fprintf (dump_file, "\n");
808 }
809 cgraph_redirect_edge_callee (curr, master_clone);
810 cgraph_mark_inline_edge (curr, false, new_edges);
811 lookup_recursive_calls (node, curr->callee, heap);
812 n++;
813 }
814 if (!fibheap_empty (heap) && dump_file)
815 fprintf (dump_file, " Recursive inlining growth limit met.\n");
816
817 fibheap_delete (heap);
818 if (dump_file)
819 fprintf (dump_file,
820 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
821 master_clone->global.size, node->global.size,
822 master_clone->global.time, node->global.time);
823
824   /* Remove the master clone we used for inlining.  We rely on the fact that
825      clones inlined into the master clone get queued just before the master
826      clone, so we don't need recursion.  */
827 for (node = cgraph_nodes; node != master_clone;
828 node = next)
829 {
830 next = node->next;
831 if (node->global.inlined_to == master_clone)
832 cgraph_remove_node (node);
833 }
834 cgraph_remove_node (master_clone);
835   /* FIXME: Recursive inlining actually reduces the number of calls to the
836      function.  At this point we should probably walk the function and
837 inline clones and compensate the counts accordingly. This probably
838 doesn't matter much in practice. */
839 return n > 0;
840 }
841
842 /* Set inline_failed for all callers of given function to REASON. */
843
844 static void
845 cgraph_set_inline_failed (struct cgraph_node *node,
846 cgraph_inline_failed_t reason)
847 {
848 struct cgraph_edge *e;
849
850 if (dump_file)
851 fprintf (dump_file, "Inlining failed: %s\n",
852 cgraph_inline_failed_string (reason));
853 for (e = node->callers; e; e = e->next_caller)
854 if (e->inline_failed)
855 e->inline_failed = reason;
856 }
857
858 /* Given the whole compilation unit size estimate INSNS, compute how large
859    we can allow the unit to grow.  */
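/* The result allows the unit to grow by PARAM_INLINE_UNIT_GROWTH percent over
   the larger of the current unit size and PARAM_LARGE_UNIT_INSNS.  */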
860 static int
861 compute_max_insns (int insns)
862 {
863 int max_insns = insns;
864 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
865 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
866
867 return ((HOST_WIDEST_INT) max_insns
868 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
869 }
870
871 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
872 static void
873 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
874 {
875 while (VEC_length (cgraph_edge_p, new_edges) > 0)
876 {
877 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
878
879 gcc_assert (!edge->aux);
880 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
881 }
882 }
883
884
885 /* We use a greedy algorithm for inlining of small functions:
886    all inline candidates are put into a prioritized heap ordered by the
887    estimated growth of the overall number of instructions, and after each
888    inlining the estimates are updated.  */
891
892 static void
893 cgraph_decide_inlining_of_small_functions (void)
894 {
895 struct cgraph_node *node;
896 struct cgraph_edge *edge;
897 cgraph_inline_failed_t failed_reason;
898 fibheap_t heap = fibheap_new ();
899 bitmap updated_nodes = BITMAP_ALLOC (NULL);
900 int min_size, max_size;
901 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
902
903 if (flag_indirect_inlining)
904 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
905
906 if (dump_file)
907 fprintf (dump_file, "\nDeciding on smaller functions:\n");
908
909 /* Put all inline candidates into the heap. */
910
911 for (node = cgraph_nodes; node; node = node->next)
912 {
913 if (!node->local.inlinable || !node->callers
914 || node->local.disregard_inline_limits)
915 continue;
916 if (dump_file)
917 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
918
919 node->global.estimated_growth = INT_MIN;
920 if (!cgraph_default_inline_p (node, &failed_reason))
921 {
922 cgraph_set_inline_failed (node, failed_reason);
923 continue;
924 }
925
926 for (edge = node->callers; edge; edge = edge->next_caller)
927 if (edge->inline_failed)
928 {
929 gcc_assert (!edge->aux);
930 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
931 }
932 }
933
934 max_size = compute_max_insns (overall_size);
935 min_size = overall_size;
936
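  /* Repeatedly extract the edge with the smallest badness from the heap and
     inline it, until the heap is empty or the unit growth limit is reached.  */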
937 while (overall_size <= max_size
938 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
939 {
940 int old_size = overall_size;
941 struct cgraph_node *where;
942 int growth =
943 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
944 cgraph_inline_failed_t not_good = CIF_OK;
945
946 growth -= edge->caller->global.size;
947
948 if (dump_file)
949 {
950 fprintf (dump_file,
951 "\nConsidering %s with %i size\n",
952 cgraph_node_name (edge->callee),
953 edge->callee->global.size);
954 fprintf (dump_file,
955 " to be inlined into %s in %s:%i\n"
956 " Estimated growth after inlined into all callees is %+i insns.\n"
957 " Estimated badness is %i, frequency %.2f.\n",
958 cgraph_node_name (edge->caller),
959 gimple_filename ((const_gimple) edge->call_stmt),
960 gimple_lineno ((const_gimple) edge->call_stmt),
961 cgraph_estimate_growth (edge->callee),
962 cgraph_edge_badness (edge),
963 edge->frequency / (double)CGRAPH_FREQ_BASE);
964 if (edge->count)
965 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
966 }
967 gcc_assert (edge->aux);
968 edge->aux = NULL;
969 if (!edge->inline_failed)
970 continue;
971
972       /* When we do not have profile info ready, we don't weight in any way the
973          position of the call in the procedure itself.  This means that if a
974          call of function A from function B seems profitable to inline, the
975          recursive call of function A in the inline copy of A in B will look
976          profitable too, and we end up inlining until reaching the maximal
977          function growth.  This is not a good idea, so prohibit recursive
978          inlining here.
979 
980          ??? When the frequencies are taken into account we might not need this
981          restriction.
982 
983          We need to be careful here; in some testcases, e.g. directives.c in
984          libcpp, we can estimate a self-recursive function to have negative
985          growth for inlining completely.  */
986 if (!edge->count)
987 {
988 where = edge->caller;
989 while (where->global.inlined_to)
990 {
991 if (where->decl == edge->callee->decl)
992 break;
993 where = where->callers->caller;
994 }
995 if (where->global.inlined_to)
996 {
997 edge->inline_failed
998 = (edge->callee->local.disregard_inline_limits
999 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1000 if (dump_file)
1001 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1002 continue;
1003 }
1004 }
1005
1006 if (!cgraph_maybe_hot_edge_p (edge))
1007 not_good = CIF_UNLIKELY_CALL;
1008 if (!flag_inline_functions
1009 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1010 not_good = CIF_NOT_DECLARED_INLINED;
1011 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1012 not_good = CIF_OPTIMIZING_FOR_SIZE;
1013 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1014 {
1015 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1016 &edge->inline_failed))
1017 {
1018 edge->inline_failed = not_good;
1019 if (dump_file)
1020 fprintf (dump_file, " inline_failed:%s.\n",
1021 cgraph_inline_failed_string (edge->inline_failed));
1022 }
1023 continue;
1024 }
1025 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1026 {
1027 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1028 &edge->inline_failed))
1029 {
1030 if (dump_file)
1031 fprintf (dump_file, " inline_failed:%s.\n",
1032 cgraph_inline_failed_string (edge->inline_failed));
1033 }
1034 continue;
1035 }
1036 if (!tree_can_inline_p (edge))
1037 {
1038 if (dump_file)
1039 fprintf (dump_file, " inline_failed:%s.\n",
1040 cgraph_inline_failed_string (edge->inline_failed));
1041 continue;
1042 }
1043 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1044 &edge->inline_failed))
1045 {
1046 where = edge->caller;
1047 if (where->global.inlined_to)
1048 where = where->global.inlined_to;
1049 if (!cgraph_decide_recursive_inlining (where,
1050 flag_indirect_inlining
1051 ? &new_indirect_edges : NULL))
1052 continue;
1053 if (flag_indirect_inlining)
1054 add_new_edges_to_heap (heap, new_indirect_edges);
1055 update_callee_keys (heap, where, updated_nodes);
1056 }
1057 else
1058 {
1059 struct cgraph_node *callee;
1060 if (edge->call_stmt_cannot_inline_p
1061 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1062 &edge->inline_failed, true))
1063 {
1064 if (dump_file)
1065 fprintf (dump_file, " Not inlining into %s:%s.\n",
1066 cgraph_node_name (edge->caller),
1067 cgraph_inline_failed_string (edge->inline_failed));
1068 continue;
1069 }
1070 callee = edge->callee;
1071 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1072 if (flag_indirect_inlining)
1073 add_new_edges_to_heap (heap, new_indirect_edges);
1074
1075 update_callee_keys (heap, callee, updated_nodes);
1076 }
1077 where = edge->caller;
1078 if (where->global.inlined_to)
1079 where = where->global.inlined_to;
1080
1081       /* Our profitability metric can depend on local properties
1082 	 such as the number of inlinable calls and the size of the function body.
1083 	 After inlining these properties might change for the function we
1084 	 inlined into (since its body size changed) and for the functions
1085 	 called by the function we inlined (since the number of its inlinable
1086 	 callers might change).  */
1087 update_caller_keys (heap, where, updated_nodes);
1088 bitmap_clear (updated_nodes);
1089
1090 if (dump_file)
1091 {
1092 fprintf (dump_file,
1093 		   " Inlined into %s which now has time %i and size %i,"
1094 		   " net change of %+i.\n",
1095 cgraph_node_name (edge->caller),
1096 edge->caller->global.time,
1097 edge->caller->global.size,
1098 overall_size - old_size);
1099 }
1100 if (min_size > overall_size)
1101 {
1102 min_size = overall_size;
1103 max_size = compute_max_insns (min_size);
1104
1105 if (dump_file)
1106 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1107 }
1108 }
1109 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1110 {
1111 gcc_assert (edge->aux);
1112 edge->aux = NULL;
1113 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1114 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1115 &edge->inline_failed))
1116 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1117 }
1118
1119 if (new_indirect_edges)
1120 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1121 fibheap_delete (heap);
1122 BITMAP_FREE (updated_nodes);
1123 }
1124
1125 /* Decide on the inlining.  We do so in topological order to avoid the
1126    expense of updating data structures.  */
1127
1128 static unsigned int
1129 cgraph_decide_inlining (void)
1130 {
1131 struct cgraph_node *node;
1132 int nnodes;
1133 struct cgraph_node **order =
1134 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1135 int old_size = 0;
1136 int i;
1137 bool redo_always_inline = true;
1138 int initial_size = 0;
1139
1140 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1141 if (in_lto_p && flag_indirect_inlining)
1142 ipa_update_after_lto_read ();
1143
1144 max_count = 0;
1145 max_benefit = 0;
1146 for (node = cgraph_nodes; node; node = node->next)
1147 if (node->analyzed)
1148 {
1149 struct cgraph_edge *e;
1150
1151 gcc_assert (inline_summary (node)->self_size == node->global.size);
1152 initial_size += node->global.size;
1153 for (e = node->callees; e; e = e->next_callee)
1154 if (max_count < e->count)
1155 max_count = e->count;
1156 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1157 max_benefit = inline_summary (node)->time_inlining_benefit;
1158 }
1159 gcc_assert (in_lto_p
1160 || !max_count
1161 || (profile_info && flag_branch_probabilities));
1162 overall_size = initial_size;
1163
1164 nnodes = cgraph_postorder (order);
1165
1166 if (dump_file)
1167 fprintf (dump_file,
1168 "\nDeciding on inlining. Starting with size %i.\n",
1169 initial_size);
1170
1171 for (node = cgraph_nodes; node; node = node->next)
1172 node->aux = 0;
1173
1174 if (dump_file)
1175 fprintf (dump_file, "\nInlining always_inline functions:\n");
1176
1177 /* In the first pass mark all always_inline edges. Do this with a priority
1178 so none of our later choices will make this impossible. */
1179 while (redo_always_inline)
1180 {
1181 redo_always_inline = false;
1182 for (i = nnodes - 1; i >= 0; i--)
1183 {
1184 struct cgraph_edge *e, *next;
1185
1186 node = order[i];
1187
1188 /* Handle nodes to be flattened, but don't update overall unit
1189 size. */
1190 if (lookup_attribute ("flatten",
1191 DECL_ATTRIBUTES (node->decl)) != NULL)
1192 {
1193 if (dump_file)
1194 fprintf (dump_file,
1195 "Flattening %s\n", cgraph_node_name (node));
1196 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1197 }
1198
1199 if (!node->local.disregard_inline_limits)
1200 continue;
1201 if (dump_file)
1202 fprintf (dump_file,
1203 "\nConsidering %s size:%i (always inline)\n",
1204 cgraph_node_name (node), node->global.size);
1205 old_size = overall_size;
1206 for (e = node->callers; e; e = next)
1207 {
1208 next = e->next_caller;
1209 if (!e->inline_failed || e->call_stmt_cannot_inline_p)
1210 continue;
1211 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1212 &e->inline_failed))
1213 continue;
1214 if (!tree_can_inline_p (e))
1215 continue;
1216 if (cgraph_mark_inline_edge (e, true, NULL))
1217 redo_always_inline = true;
1218 if (dump_file)
1219 fprintf (dump_file,
1220 " Inlined into %s which now has size %i.\n",
1221 cgraph_node_name (e->caller),
1222 e->caller->global.size);
1223 }
1224 	  /* Inlining a self-recursive function might introduce new calls to
1225 	     itself that we didn't see in the loop above.  Fill in the proper
1226 	     reason why inlining failed.  */
1227 for (e = node->callers; e; e = e->next_caller)
1228 if (e->inline_failed)
1229 e->inline_failed = CIF_RECURSIVE_INLINING;
1230 if (dump_file)
1231 fprintf (dump_file,
1232 " Inlined for a net change of %+i size.\n",
1233 overall_size - old_size);
1234 }
1235 }
1236
1237 cgraph_decide_inlining_of_small_functions ();
1238
1239 if (flag_inline_functions_called_once)
1240 {
1241 if (dump_file)
1242 fprintf (dump_file, "\nDeciding on functions called once:\n");
1243
1244 /* And finally decide what functions are called once. */
1245 for (i = nnodes - 1; i >= 0; i--)
1246 {
1247 node = order[i];
1248
1249 if (node->callers
1250 && !node->callers->next_caller
1251 && cgraph_only_called_directly_p (node)
1252 && node->local.inlinable
1253 && node->callers->inline_failed
1254 && node->callers->caller != node
1255 && node->callers->caller->global.inlined_to != node
1256 && !node->callers->call_stmt_cannot_inline_p
1257 && !DECL_EXTERNAL (node->decl)
1258 && !DECL_COMDAT (node->decl))
1259 {
1260 cgraph_inline_failed_t reason;
1261 old_size = overall_size;
1262 if (dump_file)
1263 {
1264 fprintf (dump_file,
1265 "\nConsidering %s size %i.\n",
1266 cgraph_node_name (node), node->global.size);
1267 fprintf (dump_file,
1268 " Called once from %s %i insns.\n",
1269 cgraph_node_name (node->callers->caller),
1270 node->callers->caller->global.size);
1271 }
1272
1273 if (cgraph_check_inline_limits (node->callers->caller, node,
1274 &reason, false))
1275 {
1276 cgraph_mark_inline (node->callers);
1277 if (dump_file)
1278 fprintf (dump_file,
1279 " Inlined into %s which now has %i size"
1280 " for a net change of %+i size.\n",
1281 cgraph_node_name (node->callers->caller),
1282 node->callers->caller->global.size,
1283 overall_size - old_size);
1284 }
1285 else
1286 {
1287 if (dump_file)
1288 fprintf (dump_file,
1289 " Not inlining: %s.\n",
1290 cgraph_inline_failed_string (reason));
1291 }
1292 }
1293 }
1294 }
1295
1296 /* Free ipa-prop structures if they are no longer needed. */
1297 if (flag_indirect_inlining)
1298 free_all_ipa_structures_after_iinln ();
1299
1300 if (dump_file)
1301 fprintf (dump_file,
1302 "\nInlined %i calls, eliminated %i functions, "
1303 "size %i turned to %i size.\n\n",
1304 ncalls_inlined, nfunctions_inlined, initial_size,
1305 overall_size);
1306 free (order);
1307 return 0;
1308 }
1309
1310 /* Try to inline edge E from the incremental inliner.  MODE specifies the mode
1311    of the inliner.
1312 
1313    We detect cycles by storing the mode of the inliner into the cgraph_node the
1314    last time we visited it in the recursion.  In general when the mode is set,
1315    we have recursive inlining, but as a special case we want to try harder to
1316    inline ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a
1317    being flatten and b being always inline.  Flattening 'a' will collapse
1318    a->b->c before hitting the cycle.  To accommodate always inline, we however
1319    need to inline a->b->c->b.
1320 
1321    So after hitting a cycle the first time, we switch into ALWAYS_INLINE mode
1322    and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE mode.  */
1323 static bool
1324 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1325 {
1326 struct cgraph_node *callee = e->callee;
1327 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1328 bool always_inline = e->callee->local.disregard_inline_limits;
1329 bool inlined = false;
1330
1331   /* Have we hit a cycle?  */
1332 if (callee_mode)
1333 {
1334       /* We are not in ALWAYS_INLINE only mode yet, and the function in
1335 	 question is always_inline.  */
1336 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1337 {
1338 if (dump_file)
1339 {
1340 indent_to (dump_file, depth);
1341 fprintf (dump_file,
1342 "Hit cycle in %s, switching to always inline only.\n",
1343 cgraph_node_name (callee));
1344 }
1345 mode = INLINE_ALWAYS_INLINE;
1346 }
1347 /* Otherwise it is time to give up. */
1348 else
1349 {
1350 if (dump_file)
1351 {
1352 indent_to (dump_file, depth);
1353 fprintf (dump_file,
1354 "Not inlining %s into %s to avoid cycle.\n",
1355 cgraph_node_name (callee),
1356 cgraph_node_name (e->caller));
1357 }
1358 e->inline_failed = (e->callee->local.disregard_inline_limits
1359 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1360 return false;
1361 }
1362 }
1363
1364 callee->aux = (void *)(size_t) mode;
1365 if (dump_file)
1366 {
1367 indent_to (dump_file, depth);
1368 fprintf (dump_file, " Inlining %s into %s.\n",
1369 cgraph_node_name (e->callee),
1370 cgraph_node_name (e->caller));
1371 }
1372 if (e->inline_failed)
1373 {
1374 cgraph_mark_inline (e);
1375
1376 /* In order to fully inline always_inline functions, we need to
1377 recurse here, since the inlined functions might not be processed by
1378 incremental inlining at all yet.
1379
1380 Also flattening needs to be done recursively. */
1381
1382 if (mode == INLINE_ALL || always_inline)
1383 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1384 inlined = true;
1385 }
1386 callee->aux = (void *)(size_t) callee_mode;
1387 return inlined;
1388 }
1389
1390 /* Return true when N is a leaf function.  Accept cheap (pure & const)
1391    builtins in leaf functions.  */
1392 static bool
1393 leaf_node_p (struct cgraph_node *n)
1394 {
1395 struct cgraph_edge *e;
1396 for (e = n->callees; e; e = e->next_callee)
1397 if (!DECL_BUILT_IN (e->callee->decl)
1398 || (!TREE_READONLY (e->callee->decl)
1399 || DECL_PURE_P (e->callee->decl)))
1400 return false;
1401 return true;
1402 }
1403
1404 /* Decide on the inlining.  We do so in topological order to avoid the
1405    expense of updating data structures.
1406    DEPTH is the depth of recursion, used only for debug output.  */
1407
1408 static bool
1409 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1410 enum inlining_mode mode,
1411 int depth)
1412 {
1413 struct cgraph_edge *e;
1414 bool inlined = false;
1415 cgraph_inline_failed_t failed_reason;
1416 enum inlining_mode old_mode;
1417
1418 #ifdef ENABLE_CHECKING
1419 verify_cgraph_node (node);
1420 #endif
1421
1422 old_mode = (enum inlining_mode) (size_t)node->aux;
1423
1424 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1425 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1426 {
1427 if (dump_file)
1428 {
1429 indent_to (dump_file, depth);
1430 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1431 }
1432 mode = INLINE_ALL;
1433 }
1434
1435 node->aux = (void *)(size_t) mode;
1436
1437 /* First of all look for always inline functions. */
1438 if (mode != INLINE_SIZE_NORECURSIVE)
1439 for (e = node->callees; e; e = e->next_callee)
1440 {
1441 if (!e->callee->local.disregard_inline_limits
1442 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1443 continue;
1444 if (e->call_stmt_cannot_inline_p)
1445 continue;
1446 /* When the edge is already inlined, we just need to recurse into
1447 it in order to fully flatten the leaves. */
1448 if (!e->inline_failed && mode == INLINE_ALL)
1449 {
1450 inlined |= try_inline (e, mode, depth);
1451 continue;
1452 }
1453 if (dump_file)
1454 {
1455 indent_to (dump_file, depth);
1456 fprintf (dump_file,
1457 "Considering to always inline inline candidate %s.\n",
1458 cgraph_node_name (e->callee));
1459 }
1460 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1461 {
1462 if (dump_file)
1463 {
1464 indent_to (dump_file, depth);
1465 fprintf (dump_file, "Not inlining: recursive call.\n");
1466 }
1467 continue;
1468 }
1469 if (!tree_can_inline_p (e))
1470 {
1471 if (dump_file)
1472 {
1473 indent_to (dump_file, depth);
1474 fprintf (dump_file,
1475 "Not inlining: %s",
1476 cgraph_inline_failed_string (e->inline_failed));
1477 }
1478 continue;
1479 }
1480 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1481 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1482 {
1483 if (dump_file)
1484 {
1485 indent_to (dump_file, depth);
1486 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1487 }
1488 continue;
1489 }
1490 if (!e->callee->analyzed)
1491 {
1492 if (dump_file)
1493 {
1494 indent_to (dump_file, depth);
1495 fprintf (dump_file,
1496 "Not inlining: Function body no longer available.\n");
1497 }
1498 continue;
1499 }
1500 inlined |= try_inline (e, mode, depth);
1501 }
1502
1503 /* Now do the automatic inlining. */
1504 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1505 for (e = node->callees; e; e = e->next_callee)
1506 {
1507 int allowed_growth = 0;
1508 if (!e->callee->local.inlinable
1509 || !e->inline_failed
1510 || e->callee->local.disregard_inline_limits)
1511 continue;
1512 if (dump_file)
1513 fprintf (dump_file, "Considering inline candidate %s.\n",
1514 cgraph_node_name (e->callee));
1515 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1516 {
1517 if (dump_file)
1518 {
1519 indent_to (dump_file, depth);
1520 fprintf (dump_file, "Not inlining: recursive call.\n");
1521 }
1522 continue;
1523 }
1524 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1525 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1526 {
1527 if (dump_file)
1528 {
1529 indent_to (dump_file, depth);
1530 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1531 }
1532 continue;
1533 }
1534
1535 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1536 && optimize_function_for_speed_p (cfun))
1537 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1538
1539 /* When the function body would grow and inlining the function won't
1540 	 eliminate the need for an offline copy of the function, don't inline.
1541        */
1542 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1543 || (!flag_inline_functions
1544 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1545 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1546 > e->caller->global.size + allowed_growth)
1547 && cgraph_estimate_growth (e->callee) > allowed_growth)
1548 {
1549 if (dump_file)
1550 {
1551 indent_to (dump_file, depth);
1552 fprintf (dump_file,
1553 "Not inlining: code size would grow by %i.\n",
1554 cgraph_estimate_size_after_inlining (1, e->caller,
1555 e->callee)
1556 - e->caller->global.size);
1557 }
1558 continue;
1559 }
1560 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1561 false)
1562 || e->call_stmt_cannot_inline_p)
1563 {
1564 if (dump_file)
1565 {
1566 indent_to (dump_file, depth);
1567 fprintf (dump_file, "Not inlining: %s.\n",
1568 cgraph_inline_failed_string (e->inline_failed));
1569 }
1570 continue;
1571 }
1572 if (!e->callee->analyzed)
1573 {
1574 if (dump_file)
1575 {
1576 indent_to (dump_file, depth);
1577 fprintf (dump_file,
1578 "Not inlining: Function body no longer available.\n");
1579 }
1580 continue;
1581 }
1582 if (!tree_can_inline_p (e))
1583 {
1584 if (dump_file)
1585 {
1586 indent_to (dump_file, depth);
1587 fprintf (dump_file,
1588 "Not inlining: %s.",
1589 cgraph_inline_failed_string (e->inline_failed));
1590 }
1591 continue;
1592 }
1593 if (cgraph_default_inline_p (e->callee, &failed_reason))
1594 inlined |= try_inline (e, mode, depth);
1595 }
1596 node->aux = (void *)(size_t) old_mode;
1597 return inlined;
1598 }
1599
1600 /* Because inlining might remove no-longer reachable nodes, we need to
1601    keep the array visible to the garbage collector to avoid reading
1602    collected-out nodes.  */
1603 static int nnodes;
1604 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1605
1606 /* Do inlining of small functions. Doing so early helps profiling and other
1607 passes to be somewhat more effective and avoids some code duplication in
1608    the later real inlining pass for testcases with very many function calls.  */
1609 static unsigned int
1610 cgraph_early_inlining (void)
1611 {
1612 struct cgraph_node *node = cgraph_node (current_function_decl);
1613 unsigned int todo = 0;
1614 int iterations = 0;
1615
1616 if (sorrycount || errorcount)
1617 return 0;
1618 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1619 && cgraph_decide_inlining_incrementally (node,
1620 iterations
1621 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0))
1622 {
1623 timevar_push (TV_INTEGRATION);
1624 todo |= optimize_inline_calls (current_function_decl);
1625 iterations++;
1626 timevar_pop (TV_INTEGRATION);
1627 }
1628 if (dump_file)
1629 fprintf (dump_file, "Iterations: %i\n", iterations);
1630 cfun->always_inline_functions_inlined = true;
1631 return todo;
1632 }
1633
1634 /* When inlining shall be performed. */
1635 static bool
1636 cgraph_gate_early_inlining (void)
1637 {
1638 return flag_early_inlining;
1639 }
1640
1641 struct gimple_opt_pass pass_early_inline =
1642 {
1643 {
1644 GIMPLE_PASS,
1645 "einline", /* name */
1646 cgraph_gate_early_inlining, /* gate */
1647 cgraph_early_inlining, /* execute */
1648 NULL, /* sub */
1649 NULL, /* next */
1650 0, /* static_pass_number */
1651 TV_INLINE_HEURISTICS, /* tv_id */
1652 0, /* properties_required */
1653 0, /* properties_provided */
1654 0, /* properties_destroyed */
1655 0, /* todo_flags_start */
1656 TODO_dump_func /* todo_flags_finish */
1657 }
1658 };
1659
1660 /* When inlining shall be performed. */
1661 static bool
1662 cgraph_gate_ipa_early_inlining (void)
1663 {
1664 return (flag_early_inlining
1665 && !in_lto_p
1666 && (flag_branch_probabilities || flag_test_coverage
1667 || profile_arc_flag));
1668 }
1669
1670 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1671    before tree profiling, so we have a stand-alone IPA pass for doing so.  */
1672 struct simple_ipa_opt_pass pass_ipa_early_inline =
1673 {
1674 {
1675 SIMPLE_IPA_PASS,
1676 "einline_ipa", /* name */
1677 cgraph_gate_ipa_early_inlining, /* gate */
1678 NULL, /* execute */
1679 NULL, /* sub */
1680 NULL, /* next */
1681 0, /* static_pass_number */
1682 TV_INLINE_HEURISTICS, /* tv_id */
1683 0, /* properties_required */
1684 0, /* properties_provided */
1685 0, /* properties_destroyed */
1686 0, /* todo_flags_start */
1687 TODO_dump_cgraph /* todo_flags_finish */
1688 }
1689 };
1690
1691 /* See if the statement might disappear after inlining.  We are not terribly
1692    sophisticated, basically looking for simple abstraction penalty wrappers.  */
1693
1694 static bool
1695 likely_eliminated_by_inlining_p (gimple stmt)
1696 {
1697 enum gimple_code code = gimple_code (stmt);
1698 switch (code)
1699 {
1700 case GIMPLE_RETURN:
1701 return true;
1702 case GIMPLE_ASSIGN:
1703 if (gimple_num_ops (stmt) != 2)
1704 return false;
1705
1706 /* Casts of parameters, loads from parameters passed by reference
1707 and stores to return value or parameters are probably free after
1708 inlining. */
1709 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1710 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1711 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1712 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1713 {
1714 tree rhs = gimple_assign_rhs1 (stmt);
1715 tree lhs = gimple_assign_lhs (stmt);
1716 tree inner_rhs = rhs;
1717 tree inner_lhs = lhs;
1718 bool rhs_free = false;
1719 bool lhs_free = false;
1720
1721 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1722 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1723 while (handled_component_p (inner_rhs)
1724 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1725 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1726
1727
1728 if (TREE_CODE (inner_rhs) == PARM_DECL
1729 || (TREE_CODE (inner_rhs) == SSA_NAME
1730 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1731 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1732 rhs_free = true;
1733 if (rhs_free && is_gimple_reg (lhs))
1734 lhs_free = true;
1735 if (((TREE_CODE (inner_lhs) == PARM_DECL
1736 || (TREE_CODE (inner_lhs) == SSA_NAME
1737 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1738 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1739 && inner_lhs != lhs)
1740 || TREE_CODE (inner_lhs) == RESULT_DECL
1741 || (TREE_CODE (inner_lhs) == SSA_NAME
1742 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1743 lhs_free = true;
1744 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1745 rhs_free = true;
1746 if (lhs_free && rhs_free)
1747 return true;
1748 }
1749 return false;
1750 default:
1751 return false;
1752 }
1753 }
1754
1755 /* Compute function body size parameters for NODE. */
1756
1757 static void
1758 estimate_function_body_sizes (struct cgraph_node *node)
1759 {
1760 gcov_type time = 0;
1761 gcov_type time_inlining_benefit = 0;
1762 int size = 0;
1763 int size_inlining_benefit = 0;
1764 basic_block bb;
1765 gimple_stmt_iterator bsi;
1766 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1767 tree arg;
1768 int freq;
1769 tree funtype = TREE_TYPE (node->decl);
1770
1771 if (dump_file)
1772 fprintf (dump_file, "Analyzing function body size: %s\n",
1773 cgraph_node_name (node));
1774
1775 gcc_assert (my_function && my_function->cfg);
1776 FOR_EACH_BB_FN (bb, my_function)
1777 {
1778 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1779 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1780 {
1781 gimple stmt = gsi_stmt (bsi);
1782 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1783 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1784
1785 if (dump_file && (dump_flags & TDF_DETAILS))
1786 {
1787 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1788 freq, this_size, this_time);
1789 print_gimple_stmt (dump_file, stmt, 0, 0);
1790 }
1791 this_time *= freq;
1792 time += this_time;
1793 size += this_size;
1794 if (likely_eliminated_by_inlining_p (stmt))
1795 {
1796 size_inlining_benefit += this_size;
1797 time_inlining_benefit += this_time;
1798 if (dump_file && (dump_flags & TDF_DETAILS))
1799 fprintf (dump_file, " Likely eliminated\n");
1800 }
1801 gcc_assert (time >= 0);
1802 gcc_assert (size >= 0);
1803 }
1804 }
1805 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1806 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1807 / CGRAPH_FREQ_BASE);
1808 if (dump_file)
1809 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1810 (int)time, (int)time_inlining_benefit,
1811 size, size_inlining_benefit);
1812 time_inlining_benefit += eni_time_weights.call_cost;
1813 size_inlining_benefit += eni_size_weights.call_cost;
1814 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1815 {
1816 int cost = estimate_move_cost (TREE_TYPE (funtype));
1817 time_inlining_benefit += cost;
1818 size_inlining_benefit += cost;
1819 }
1820 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1821 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1822 {
1823 int cost = estimate_move_cost (TREE_TYPE (arg));
1824 time_inlining_benefit += cost;
1825 size_inlining_benefit += cost;
1826 }
1827 if (time_inlining_benefit > MAX_TIME)
1828 time_inlining_benefit = MAX_TIME;
1829 if (time > MAX_TIME)
1830 time = MAX_TIME;
1831 inline_summary (node)->self_time = time;
1832 inline_summary (node)->self_size = size;
1833 if (dump_file)
1834 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1835 (int)time, (int)time_inlining_benefit,
1836 size, size_inlining_benefit);
1837 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1838 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1839 }
1840
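/* A worked example of the accounting above, assuming CGRAPH_FREQ_BASE is
   1000 and purely made-up statement costs: a statement with size 1 and
   time 2 in a block executed 1.5 times per invocation (freq == 1500)
   adds 1 to SIZE and 2 * 1500 == 3000 to the raw TIME sum; the final
   scaling (TIME + 500) / 1000 turns that contribution back into 3 time
   units.  If the statement is also likely eliminated by inlining, the
   same amounts are added to the *_inlining_benefit counters, which are
   then topped up by the call overhead (call_cost) and by the move cost
   of the return value and of each non-VOID argument.  */
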
1841 /* Compute the parameters of NODE that are used by the inliner. */
1842 unsigned int
1843 compute_inline_parameters (struct cgraph_node *node)
1844 {
1845 HOST_WIDE_INT self_stack_size;
1846
1847 gcc_assert (!node->global.inlined_to);
1848
1849 /* Estimate the stack size for the function, but not at -O0,
1850 because estimated_stack_frame_size has quadratic complexity. */
1851 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1852 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1853 node->global.estimated_stack_size = self_stack_size;
1854 node->global.stack_frame_offset = 0;
1855
1856 /* Can this function be inlined at all? */
1857 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1858 if (node->local.inlinable && !node->local.disregard_inline_limits)
1859 node->local.disregard_inline_limits
1860 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1861 estimate_function_body_sizes (node);
1862 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1863 node->global.time = inline_summary (node)->self_time;
1864 node->global.size = inline_summary (node)->self_size;
1865 return 0;
1866 }
1867
1868
1869 /* Compute the inline parameters of the function given by
1870 current_function_decl. */
1871 static unsigned int
1872 compute_inline_parameters_for_current (void)
1873 {
1874 compute_inline_parameters (cgraph_node (current_function_decl));
1875 return 0;
1876 }
1877
1878 struct gimple_opt_pass pass_inline_parameters =
1879 {
1880 {
1881 GIMPLE_PASS,
1882 "inline_param", /* name */
1883 NULL, /* gate */
1884 compute_inline_parameters_for_current,/* execute */
1885 NULL, /* sub */
1886 NULL, /* next */
1887 0, /* static_pass_number */
1888 TV_INLINE_HEURISTICS, /* tv_id */
1889 0, /* properties_required */
1890 0, /* properties_provided */
1891 0, /* properties_destroyed */
1892 0, /* todo_flags_start */
1893 0 /* todo_flags_finish */
1894 }
1895 };
1896
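/* The per-function parameters computed above can be inspected in the pass
   dump, e.g. something like (assuming the usual mapping of pass names to
   dump switches):

     gcc -O2 -fdump-tree-inline_param-details foo.c

   which prints the per-statement size/time estimates and the
   "Likely eliminated" markers emitted by estimate_function_body_sizes.  */
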
1897 /* This function performs the intraprocedural analysis of NODE that is
1898 required to inline indirect calls. */
1899 static void
1900 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1901 {
1902 struct cgraph_edge *cs;
1903
1904 if (!flag_ipa_cp)
1905 {
1906 ipa_initialize_node_params (node);
1907 ipa_detect_param_modifications (node);
1908 }
1909 ipa_analyze_params_uses (node);
1910
1911 if (!flag_ipa_cp)
1912 for (cs = node->callees; cs; cs = cs->next_callee)
1913 {
1914 ipa_count_arguments (cs);
1915 ipa_compute_jump_functions (cs);
1916 }
1917
1918 if (dump_file)
1919 {
1920 ipa_print_node_params (dump_file, node);
1921 ipa_print_node_jump_functions (dump_file, node);
1922 }
1923 }
1924
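/* A hypothetical example (not taken from a real dump): for a call
   foo (bar_callback, x), where BAR_CALLBACK is a known function and X is
   an unmodified parameter of the caller, the jump functions computed here
   would typically record a constant for argument 0 and a pass-through for
   argument 1; with -findirect-inlining, a call made through foo's first
   parameter can then be turned into a direct call to bar_callback once
   foo gets inlined.  */
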
1925 /* Analyze function body: compute the inline parameters and, with -findirect-inlining, the jump functions needed to inline indirect calls. */
1926 static void
1927 analyze_function (struct cgraph_node *node)
1928 {
1929 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1930 current_function_decl = node->decl;
1931
1932 compute_inline_parameters (node);
1933 if (flag_indirect_inlining)
1934 inline_indirect_intraprocedural_analysis (node);
1935
1936 current_function_decl = NULL;
1937 pop_cfun ();
1938 }
1939
1940 /* Called when a new function is inserted into the callgraph late. */
1941 static void
1942 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1943 {
1944 analyze_function (node);
1945 }
1946
1947 /* Generate the inline summaries for all analyzed functions in the unit. */
1948 static void
1949 inline_generate_summary (void)
1950 {
1951 struct cgraph_node *node;
1952
1953 function_insertion_hook_holder =
1954 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1955
1956 if (flag_indirect_inlining)
1957 {
1958 ipa_register_cgraph_hooks ();
1959 ipa_check_create_node_params ();
1960 ipa_check_create_edge_args ();
1961 }
1962
1963 for (node = cgraph_nodes; node; node = node->next)
1964 if (node->analyzed)
1965 analyze_function (node);
1968 }
1969
1970 /* Apply inline plan to function. */
1971 static unsigned int
1972 inline_transform (struct cgraph_node *node)
1973 {
1974 unsigned int todo = 0;
1975 struct cgraph_edge *e;
1976
1977 /* We might need the body of this function so that we can expand
1978 it inline somewhere else. */
1979 if (cgraph_preserve_function_body_p (node->decl))
1980 save_inline_function_body (node);
1981
1982 for (e = node->callees; e; e = e->next_callee)
1983 if (!e->inline_failed || warn_inline)
1984 break;
1985
1986 if (e)
1987 {
1988 timevar_push (TV_INTEGRATION);
1989 todo = optimize_inline_calls (current_function_decl);
1990 timevar_pop (TV_INTEGRATION);
1991 }
1992 cfun->always_inline_functions_inlined = true;
1993 cfun->after_inlining = true;
1994 return todo | execute_fixup_cfg ();
1995 }
1996
1997 /* Read the inline summary. Jump functions are shared among ipa-cp
1998 and the inliner, so when ipa-cp is active, we don't need to read them
1999 twice. */
2000
2001 static void
2002 inline_read_summary (void)
2003 {
2004 if (flag_indirect_inlining)
2005 {
2006 ipa_register_cgraph_hooks ();
2007 if (!flag_ipa_cp)
2008 ipa_prop_read_jump_functions ();
2009 }
2010 function_insertion_hook_holder =
2011 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2012 }
2013
2014 /* Write the inline summary for the nodes in SET.
2015 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
2016 active, we don't need to write them twice. */
2017
2018 static void
2019 inline_write_summary (cgraph_node_set set)
2020 {
2021 if (flag_indirect_inlining && !flag_ipa_cp)
2022 ipa_prop_write_jump_functions (set);
2023 }
2024
2025 struct ipa_opt_pass_d pass_ipa_inline =
2026 {
2027 {
2028 IPA_PASS,
2029 "inline", /* name */
2030 NULL, /* gate */
2031 cgraph_decide_inlining, /* execute */
2032 NULL, /* sub */
2033 NULL, /* next */
2034 0, /* static_pass_number */
2035 TV_INLINE_HEURISTICS, /* tv_id */
2036 0, /* properties_required */
2037 0, /* properties_provided */
2038 0, /* properties_destroyed */
2039 TODO_remove_functions, /* todo_flags_start */
2040 TODO_dump_cgraph | TODO_dump_func
2041 | TODO_remove_functions /* todo_flags_finish */
2042 },
2043 inline_generate_summary, /* generate_summary */
2044 inline_write_summary, /* write_summary */
2045 inline_read_summary, /* read_summary */
2046 NULL, /* function_read_summary */
2047 lto_ipa_fixup_call_notes, /* stmt_fixup */
2048 0, /* function_transform_todo_flags_start */
2049 inline_transform, /* function_transform */
2050 NULL, /* variable_transform */
2051 };
2052
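/* Note (illustrative, not a complete description of the pass manager):
   the write/read summary hooks above are only exercised when inline
   summaries are streamed, e.g. under -flto; in a regular compilation
   inline_generate_summary runs shortly before cgraph_decide_inlining.
   The resulting inline plan can be inspected with -fdump-ipa-inline.  */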
2053
2054 #include "gt-ipa-inline.h"