1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Analysis used by the inliner and other passes limiting code size growth.
22
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
32
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
37
38 We provide accessors to the inline_summary data structure and
39 basic logic for updating the parameters when inlining is performed.
40
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether the function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. are known to be optimized away when the
46 context is known, either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple tests
50 of the form described above.
51
52 In order to make a predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make a clause (possibly) true, one of the conditions
54 it mentions must be (possibly) true. There are fixed bounds on the
55 number of clauses and conditions, and all the manipulation functions
56 are conservative in the positive direction. I.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
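   For example, the predicate

     (op0 == 0) && (not inlined || op1 changed)

   guards size and time that are known to be optimized away once we can
   prove either that the first argument is a nonzero constant, or that the
   function is inlined and its second argument is known not to change.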
58
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
62
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. The IPA
65 inliner performs its analysis via the analyze_function method. */
66
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "tm.h"
71 #include "hash-set.h"
72 #include "machmode.h"
73 #include "vec.h"
74 #include "double-int.h"
75 #include "input.h"
76 #include "alias.h"
77 #include "symtab.h"
78 #include "wide-int.h"
79 #include "inchash.h"
80 #include "real.h"
81 #include "tree.h"
82 #include "fold-const.h"
83 #include "stor-layout.h"
84 #include "stringpool.h"
85 #include "print-tree.h"
86 #include "tree-inline.h"
87 #include "langhooks.h"
88 #include "flags.h"
89 #include "diagnostic.h"
90 #include "gimple-pretty-print.h"
91 #include "params.h"
92 #include "tree-pass.h"
93 #include "coverage.h"
94 #include "predict.h"
95 #include "hard-reg-set.h"
96 #include "input.h"
97 #include "function.h"
98 #include "dominance.h"
99 #include "cfg.h"
100 #include "cfganal.h"
101 #include "basic-block.h"
102 #include "tree-ssa-alias.h"
103 #include "internal-fn.h"
104 #include "gimple-expr.h"
105 #include "is-a.h"
106 #include "gimple.h"
107 #include "gimple-iterator.h"
108 #include "gimple-ssa.h"
109 #include "tree-cfg.h"
110 #include "tree-phinodes.h"
111 #include "ssa-iterators.h"
112 #include "tree-ssanames.h"
113 #include "tree-ssa-loop-niter.h"
114 #include "tree-ssa-loop.h"
115 #include "hash-map.h"
116 #include "plugin-api.h"
117 #include "ipa-ref.h"
118 #include "cgraph.h"
119 #include "alloc-pool.h"
120 #include "symbol-summary.h"
121 #include "ipa-prop.h"
122 #include "lto-streamer.h"
123 #include "data-streamer.h"
124 #include "tree-streamer.h"
125 #include "ipa-inline.h"
126 #include "cfgloop.h"
127 #include "tree-scalar-evolution.h"
128 #include "ipa-utils.h"
129 #include "cilk.h"
130 #include "cfgexpand.h"
131
132 /* The estimated runtime of a function can easily run into huge numbers with
133 many nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
134 integer. For anything larger we use gcov_type. */
135 #define MAX_TIME 500000
136
137 /* Number of bits in an integer, but we really want to be stable across
138 different hosts. */
139 #define NUM_CONDITIONS 32
140
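/* Reserved condition slots: condition 0 always evaluates to false,
   condition 1 is true when the function is not inlined, and conditions
   describing actual parameter values start at
   predicate_first_dynamic_condition.  */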
141 enum predicate_conditions
142 {
143 predicate_false_condition = 0,
144 predicate_not_inlined_condition = 1,
145 predicate_first_dynamic_condition = 2
146 };
147
148 /* Special condition code we use to represent the test that an operand is a
149 compile time constant. */
150 #define IS_NOT_CONSTANT ERROR_MARK
151 /* Special condition code we use to represent the test that an operand is not
152 changed across an invocation of the function. When an operand is
153 IS_NOT_CONSTANT it is always CHANGED; however, e.g. loop invariants can be
154 NOT_CHANGED for a given percentage of executions even when they are not compile time constants. */
155 #define CHANGED IDENTIFIER_NODE
156
157 /* Holders of ipa cgraph hooks: */
158 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
159 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
160 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
161 static void inline_edge_duplication_hook (struct cgraph_edge *,
162 struct cgraph_edge *, void *);
163
164 /* VECtor holding inline summaries.
165 In GGC memory because conditions might point to constant trees. */
166 function_summary <inline_summary *> *inline_summaries;
167 vec<inline_edge_summary_t> inline_edge_summary_vec;
168
169 /* Cached node/edge growths. */
170 vec<edge_growth_cache_entry> edge_growth_cache;
171
172 /* Edge predicates go here. */
173 static alloc_pool edge_predicate_pool;
174
175 /* Return the true predicate (a tautology).
176 We represent it by an empty list of clauses. */
177
178 static inline struct predicate
179 true_predicate (void)
180 {
181 struct predicate p;
182 p.clause[0] = 0;
183 return p;
184 }
185
186
187 /* Return predicate testing single condition number COND. */
188
189 static inline struct predicate
190 single_cond_predicate (int cond)
191 {
192 struct predicate p;
193 p.clause[0] = 1 << cond;
194 p.clause[1] = 0;
195 return p;
196 }
197
198
199 /* Return the false predicate. Its first clause requires the false condition. */
200
201 static inline struct predicate
202 false_predicate (void)
203 {
204 return single_cond_predicate (predicate_false_condition);
205 }
206
207
208 /* Return true if P is (true). */
209
210 static inline bool
211 true_predicate_p (struct predicate *p)
212 {
213 return !p->clause[0];
214 }
215
216
217 /* Return true if P is (false). */
218
219 static inline bool
220 false_predicate_p (struct predicate *p)
221 {
222 if (p->clause[0] == (1 << predicate_false_condition))
223 {
224 gcc_checking_assert (!p->clause[1]
225 && p->clause[0] == 1 << predicate_false_condition);
226 return true;
227 }
228 return false;
229 }
230
231
232 /* Return predicate that is set true when function is not inlined. */
233
234 static inline struct predicate
235 not_inlined_predicate (void)
236 {
237 return single_cond_predicate (predicate_not_inlined_condition);
238 }
239
240 /* Simple description of whether a memory load or a condition refers to a load
241 from an aggregate and if so, how and where from in the aggregate.
242 Individual fields have the same meaning as fields with the same name in
243 struct condition. */
244
245 struct agg_position_info
246 {
247 HOST_WIDE_INT offset;
248 bool agg_contents;
249 bool by_ref;
250 };
251
252 /* Add condition to condition list CONDS. AGGPOS describes whether the used
253 operand is loaded from an aggregate and where in the aggregate it is. It can
254 be NULL, which means this is not a load from an aggregate. */
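/* For instance, add_condition (summary, 0, NULL, EQ_EXPR, integer_zero_node)
   returns a predicate testing whether the first parameter of the function
   equals zero.  */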
255
256 static struct predicate
257 add_condition (struct inline_summary *summary, int operand_num,
258 struct agg_position_info *aggpos,
259 enum tree_code code, tree val)
260 {
261 int i;
262 struct condition *c;
263 struct condition new_cond;
264 HOST_WIDE_INT offset;
265 bool agg_contents, by_ref;
266
267 if (aggpos)
268 {
269 offset = aggpos->offset;
270 agg_contents = aggpos->agg_contents;
271 by_ref = aggpos->by_ref;
272 }
273 else
274 {
275 offset = 0;
276 agg_contents = false;
277 by_ref = false;
278 }
279
280 gcc_checking_assert (operand_num >= 0);
281 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
282 {
283 if (c->operand_num == operand_num
284 && c->code == code
285 && c->val == val
286 && c->agg_contents == agg_contents
287 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
288 return single_cond_predicate (i + predicate_first_dynamic_condition);
289 }
290 /* Too many conditions. Give up and return constant true. */
291 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
292 return true_predicate ();
293
294 new_cond.operand_num = operand_num;
295 new_cond.code = code;
296 new_cond.val = val;
297 new_cond.agg_contents = agg_contents;
298 new_cond.by_ref = by_ref;
299 new_cond.offset = offset;
300 vec_safe_push (summary->conds, new_cond);
301 return single_cond_predicate (i + predicate_first_dynamic_condition);
302 }
303
304
305 /* Add clause CLAUSE into the predicate P. */
306
307 static inline void
308 add_clause (conditions conditions, struct predicate *p, clause_t clause)
309 {
310 int i;
311 int i2;
312 int insert_here = -1;
313 int c1, c2;
314
315 /* True clause. */
316 if (!clause)
317 return;
318
319 /* False clause makes the whole predicate false. Kill the other variants. */
320 if (clause == (1 << predicate_false_condition))
321 {
322 p->clause[0] = (1 << predicate_false_condition);
323 p->clause[1] = 0;
324 return;
325 }
326 if (false_predicate_p (p))
327 return;
328
329 /* No one should be silly enough to add false into nontrivial clauses. */
330 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
331
332 /* Look where to insert the clause. At the same time prune out
333 clauses of P that are implied by the new clause and thus
334 redundant. */
335 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
336 {
337 p->clause[i2] = p->clause[i];
338
339 if (!p->clause[i])
340 break;
341
342 /* If p->clause[i] implies clause, there is nothing to add. */
343 if ((p->clause[i] & clause) == p->clause[i])
344 {
345 /* We had nothing to add, so none of the clauses should have become
346 redundant. */
347 gcc_checking_assert (i == i2);
348 return;
349 }
350
351 if (p->clause[i] < clause && insert_here < 0)
352 insert_here = i2;
353
354 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
355 Otherwise the p->clause[i] has to stay. */
356 if ((p->clause[i] & clause) != clause)
357 i2++;
358 }
359
360 /* Look for clauses that are obviously true. I.e.
361 op0 == 5 || op0 != 5. */
362 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
363 {
364 condition *cc1;
365 if (!(clause & (1 << c1)))
366 continue;
367 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
368 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
369 and thus there is no point in looking for them. */
370 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
371 continue;
372 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
373 if (clause & (1 << c2))
374 {
375 condition *cc1 =
376 &(*conditions)[c1 - predicate_first_dynamic_condition];
377 condition *cc2 =
378 &(*conditions)[c2 - predicate_first_dynamic_condition];
379 if (cc1->operand_num == cc2->operand_num
380 && cc1->val == cc2->val
381 && cc2->code != IS_NOT_CONSTANT
382 && cc2->code != CHANGED
383 && cc1->code == invert_tree_comparison (cc2->code,
384 HONOR_NANS (cc1->val)))
385 return;
386 }
387 }
388
389
390 /* We ran out of variants. Be conservative in the positive direction. */
391 if (i2 == MAX_CLAUSES)
392 return;
393 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
394 p->clause[i2 + 1] = 0;
395 if (insert_here >= 0)
396 for (; i2 > insert_here; i2--)
397 p->clause[i2] = p->clause[i2 - 1];
398 else
399 insert_here = i2;
400 p->clause[insert_here] = clause;
401 }
402
403
404 /* Return P & P2. */
405
406 static struct predicate
407 and_predicates (conditions conditions,
408 struct predicate *p, struct predicate *p2)
409 {
410 struct predicate out = *p;
411 int i;
412
413 /* Avoid busy work. */
414 if (false_predicate_p (p2) || true_predicate_p (p))
415 return *p2;
416 if (false_predicate_p (p) || true_predicate_p (p2))
417 return *p;
418
419 /* See how far predicates match. */
420 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
421 {
422 gcc_checking_assert (i < MAX_CLAUSES);
423 }
424
425 /* Combine the rest of the predicates. */
426 for (; p2->clause[i]; i++)
427 {
428 gcc_checking_assert (i < MAX_CLAUSES);
429 add_clause (conditions, &out, p2->clause[i]);
430 }
431 return out;
432 }
433
434
435 /* Return true if predicates are obviously equal. */
436
437 static inline bool
438 predicates_equal_p (struct predicate *p, struct predicate *p2)
439 {
440 int i;
441 for (i = 0; p->clause[i]; i++)
442 {
443 gcc_checking_assert (i < MAX_CLAUSES);
444 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
445 gcc_checking_assert (!p2->clause[i]
446 || p2->clause[i] > p2->clause[i + 1]);
447 if (p->clause[i] != p2->clause[i])
448 return false;
449 }
450 return !p2->clause[i];
451 }
452
453
454 /* Return P | P2. */
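/* Because each predicate is a conjunction of clauses, the union is formed
   by OR-ing every clause of P with every clause of P2 (distributing the
   disjunction over the conjunctions).  */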
455
456 static struct predicate
457 or_predicates (conditions conditions,
458 struct predicate *p, struct predicate *p2)
459 {
460 struct predicate out = true_predicate ();
461 int i, j;
462
463 /* Avoid busy work. */
464 if (false_predicate_p (p2) || true_predicate_p (p))
465 return *p;
466 if (false_predicate_p (p) || true_predicate_p (p2))
467 return *p2;
468 if (predicates_equal_p (p, p2))
469 return *p;
470
471 /* OK, combine the predicates. */
472 for (i = 0; p->clause[i]; i++)
473 for (j = 0; p2->clause[j]; j++)
474 {
475 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
476 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
477 }
478 return out;
479 }
480
481
482 /* Given the partial truth assignment in POSSIBLE_TRUTHS, return false
483 if predicate P is known to be false. */
484
485 static bool
486 evaluate_predicate (struct predicate *p, clause_t possible_truths)
487 {
488 int i;
489
490 /* True remains true. */
491 if (true_predicate_p (p))
492 return true;
493
494 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
495
496 /* See if we can find a clause we can disprove. */
497 for (i = 0; p->clause[i]; i++)
498 {
499 gcc_checking_assert (i < MAX_CLAUSES);
500 if (!(p->clause[i] & possible_truths))
501 return false;
502 }
503 return true;
504 }
505
506 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
507 instruction will be recomputed per invocation of the inlined call. */
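/* A clause is as likely to require recomputation as its most volatile
   condition (MAX over the change probabilities of its conditions); the
   whole predicate is as likely as its least likely clause (MIN over
   clauses).  */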
508
509 static int
510 predicate_probability (conditions conds,
511 struct predicate *p, clause_t possible_truths,
512 vec<inline_param_summary> inline_param_summary)
513 {
514 int i;
515 int combined_prob = REG_BR_PROB_BASE;
516
517 /* True remains true. */
518 if (true_predicate_p (p))
519 return REG_BR_PROB_BASE;
520
521 if (false_predicate_p (p))
522 return 0;
523
524 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
525
526 /* See if we can find a clause we can disprove. */
527 for (i = 0; p->clause[i]; i++)
528 {
529 gcc_checking_assert (i < MAX_CLAUSES);
530 if (!(p->clause[i] & possible_truths))
531 return 0;
532 else
533 {
534 int this_prob = 0;
535 int i2;
536 if (!inline_param_summary.exists ())
537 return REG_BR_PROB_BASE;
538 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
539 if ((p->clause[i] & possible_truths) & (1 << i2))
540 {
541 if (i2 >= predicate_first_dynamic_condition)
542 {
543 condition *c =
544 &(*conds)[i2 - predicate_first_dynamic_condition];
545 if (c->code == CHANGED
546 && (c->operand_num <
547 (int) inline_param_summary.length ()))
548 {
549 int iprob =
550 inline_param_summary[c->operand_num].change_prob;
551 this_prob = MAX (this_prob, iprob);
552 }
553 else
554 this_prob = REG_BR_PROB_BASE;
555 }
556 else
557 this_prob = REG_BR_PROB_BASE;
558 }
559 combined_prob = MIN (this_prob, combined_prob);
560 if (!combined_prob)
561 return 0;
562 }
563 }
564 return combined_prob;
565 }
566
567
568 /* Dump conditional COND. */
569
570 static void
571 dump_condition (FILE *f, conditions conditions, int cond)
572 {
573 condition *c;
574 if (cond == predicate_false_condition)
575 fprintf (f, "false");
576 else if (cond == predicate_not_inlined_condition)
577 fprintf (f, "not inlined");
578 else
579 {
580 c = &(*conditions)[cond - predicate_first_dynamic_condition];
581 fprintf (f, "op%i", c->operand_num);
582 if (c->agg_contents)
583 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
584 c->by_ref ? "ref " : "", c->offset);
585 if (c->code == IS_NOT_CONSTANT)
586 {
587 fprintf (f, " not constant");
588 return;
589 }
590 if (c->code == CHANGED)
591 {
592 fprintf (f, " changed");
593 return;
594 }
595 fprintf (f, " %s ", op_symbol_code (c->code));
596 print_generic_expr (f, c->val, 1);
597 }
598 }
599
600
601 /* Dump clause CLAUSE. */
602
603 static void
604 dump_clause (FILE *f, conditions conds, clause_t clause)
605 {
606 int i;
607 bool found = false;
608 fprintf (f, "(");
609 if (!clause)
610 fprintf (f, "true");
611 for (i = 0; i < NUM_CONDITIONS; i++)
612 if (clause & (1 << i))
613 {
614 if (found)
615 fprintf (f, " || ");
616 found = true;
617 dump_condition (f, conds, i);
618 }
619 fprintf (f, ")");
620 }
621
622
623 /* Dump predicate PREDICATE. */
624
625 static void
626 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
627 {
628 int i;
629 if (true_predicate_p (pred))
630 dump_clause (f, conds, 0);
631 else
632 for (i = 0; pred->clause[i]; i++)
633 {
634 if (i)
635 fprintf (f, " && ");
636 dump_clause (f, conds, pred->clause[i]);
637 }
638 fprintf (f, "\n");
639 }
640
641
642 /* Dump inline hints. */
643 void
644 dump_inline_hints (FILE *f, inline_hints hints)
645 {
646 if (!hints)
647 return;
648 fprintf (f, "inline hints:");
649 if (hints & INLINE_HINT_indirect_call)
650 {
651 hints &= ~INLINE_HINT_indirect_call;
652 fprintf (f, " indirect_call");
653 }
654 if (hints & INLINE_HINT_loop_iterations)
655 {
656 hints &= ~INLINE_HINT_loop_iterations;
657 fprintf (f, " loop_iterations");
658 }
659 if (hints & INLINE_HINT_loop_stride)
660 {
661 hints &= ~INLINE_HINT_loop_stride;
662 fprintf (f, " loop_stride");
663 }
664 if (hints & INLINE_HINT_same_scc)
665 {
666 hints &= ~INLINE_HINT_same_scc;
667 fprintf (f, " same_scc");
668 }
669 if (hints & INLINE_HINT_in_scc)
670 {
671 hints &= ~INLINE_HINT_in_scc;
672 fprintf (f, " in_scc");
673 }
674 if (hints & INLINE_HINT_cross_module)
675 {
676 hints &= ~INLINE_HINT_cross_module;
677 fprintf (f, " cross_module");
678 }
679 if (hints & INLINE_HINT_declared_inline)
680 {
681 hints &= ~INLINE_HINT_declared_inline;
682 fprintf (f, " declared_inline");
683 }
684 if (hints & INLINE_HINT_array_index)
685 {
686 hints &= ~INLINE_HINT_array_index;
687 fprintf (f, " array_index");
688 }
689 if (hints & INLINE_HINT_known_hot)
690 {
691 hints &= ~INLINE_HINT_known_hot;
692 fprintf (f, " known_hot");
693 }
694 gcc_assert (!hints);
695 }
696
697
698 /* Record SIZE and TIME under condition PRED into the inline summary. */
699
700 static void
701 account_size_time (struct inline_summary *summary, int size, int time,
702 struct predicate *pred)
703 {
704 size_time_entry *e;
705 bool found = false;
706 int i;
707
708 if (false_predicate_p (pred))
709 return;
710
711 /* We need to create an initial empty unconditional clause, but otherwise
712 we don't need to account for empty times and sizes. */
713 if (!size && !time && summary->entry)
714 return;
715
716 /* Watch overflow that might result from insane profiles. */
717 if (time > MAX_TIME * INLINE_TIME_SCALE)
718 time = MAX_TIME * INLINE_TIME_SCALE;
719 gcc_assert (time >= 0);
720
721 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
722 if (predicates_equal_p (&e->predicate, pred))
723 {
724 found = true;
725 break;
726 }
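  /* Cap on the number of distinct size/time entries; once reached, fold
     further contributions into the unconditional entry at index 0, which
     always exists.  */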
727 if (i == 256)
728 {
729 i = 0;
730 found = true;
731 e = &(*summary->entry)[0];
732 gcc_assert (!e->predicate.clause[0]);
733 if (dump_file && (dump_flags & TDF_DETAILS))
734 fprintf (dump_file,
735 "\t\tReached limit on number of entries, "
736 "ignoring the predicate.");
737 }
738 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
739 {
740 fprintf (dump_file,
741 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
742 ((double) size) / INLINE_SIZE_SCALE,
743 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
744 dump_predicate (dump_file, summary->conds, pred);
745 }
746 if (!found)
747 {
748 struct size_time_entry new_entry;
749 new_entry.size = size;
750 new_entry.time = time;
751 new_entry.predicate = *pred;
752 vec_safe_push (summary->entry, new_entry);
753 }
754 else
755 {
756 e->size += size;
757 e->time += time;
758 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
759 e->time = MAX_TIME * INLINE_TIME_SCALE;
760 }
761 }
762
763 /* Set predicate for edge E. */
764
765 static void
766 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
767 {
768 struct inline_edge_summary *es = inline_edge_summary (e);
769
770 /* If the edge is determined to be never executed, redirect it
771 to BUILT_IN_UNREACHABLE to save the inliner from inlining into it. */
772 if (predicate && false_predicate_p (predicate) && e->callee)
773 {
774 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
775
776 e->redirect_callee (cgraph_node::get_create
777 (builtin_decl_implicit (BUILT_IN_UNREACHABLE)));
778 e->inline_failed = CIF_UNREACHABLE;
779 es->call_stmt_size = 0;
780 es->call_stmt_time = 0;
781 if (callee)
782 callee->remove_symbol_and_inline_clones ();
783 }
784 if (predicate && !true_predicate_p (predicate))
785 {
786 if (!es->predicate)
787 es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
788 *es->predicate = *predicate;
789 }
790 else
791 {
792 if (es->predicate)
793 pool_free (edge_predicate_pool, es->predicate);
794 es->predicate = NULL;
795 }
796 }
797
798 /* Set predicate for hint *P. */
799
800 static void
801 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
802 {
803 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
804 {
805 if (*p)
806 pool_free (edge_predicate_pool, *p);
807 *p = NULL;
808 }
809 else
810 {
811 if (!*p)
812 *p = (struct predicate *) pool_alloc (edge_predicate_pool);
813 **p = new_predicate;
814 }
815 }
816
817
818 /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values.
819 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
820 Return the clause of possible truths. When INLINE_P is true, assume that we are
821 inlining.
822
823 ERROR_MARK means compile time invariant. */
824
825 static clause_t
826 evaluate_conditions_for_known_args (struct cgraph_node *node,
827 bool inline_p,
828 vec<tree> known_vals,
829 vec<ipa_agg_jump_function_p>
830 known_aggs)
831 {
832 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
833 struct inline_summary *info = inline_summaries->get (node);
834 int i;
835 struct condition *c;
836
837 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
838 {
839 tree val;
840 tree res;
841
842 /* We allow the call stmt to have fewer arguments than the callee function
843 (especially for K&R style programs). So bounds check here (we assume the
844 known_aggs vector, if non-NULL, has the same length as
845 known_vals). */
846 gcc_checking_assert (!known_aggs.exists ()
847 || (known_vals.length () == known_aggs.length ()));
848 if (c->operand_num >= (int) known_vals.length ())
849 {
850 clause |= 1 << (i + predicate_first_dynamic_condition);
851 continue;
852 }
853
854 if (c->agg_contents)
855 {
856 struct ipa_agg_jump_function *agg;
857
858 if (c->code == CHANGED
859 && !c->by_ref
860 && (known_vals[c->operand_num] == error_mark_node))
861 continue;
862
863 if (known_aggs.exists ())
864 {
865 agg = known_aggs[c->operand_num];
866 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
867 }
868 else
869 val = NULL_TREE;
870 }
871 else
872 {
873 val = known_vals[c->operand_num];
874 if (val == error_mark_node && c->code != CHANGED)
875 val = NULL_TREE;
876 }
877
878 if (!val)
879 {
880 clause |= 1 << (i + predicate_first_dynamic_condition);
881 continue;
882 }
883 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
884 continue;
885
886 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
887 TYPE_SIZE (TREE_TYPE (val)), 0))
888 {
889 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
890
891 res = val
892 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
893 : NULL;
894
895 if (res && integer_zerop (res))
896 continue;
897 }
898 clause |= 1 << (i + predicate_first_dynamic_condition);
899 }
900 return clause;
901 }
902
903
904 /* Work out what conditions might be true at invocation of E. */
905
906 static void
907 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
908 clause_t *clause_ptr,
909 vec<tree> *known_vals_ptr,
910 vec<ipa_polymorphic_call_context>
911 *known_contexts_ptr,
912 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
913 {
914 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
915 struct inline_summary *info = inline_summaries->get (callee);
916 vec<tree> known_vals = vNULL;
917 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
918
919 if (clause_ptr)
920 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
921 if (known_vals_ptr)
922 known_vals_ptr->create (0);
923 if (known_contexts_ptr)
924 known_contexts_ptr->create (0);
925
926 if (ipa_node_params_sum
927 && !e->call_stmt_cannot_inline_p
928 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
929 {
930 struct ipa_node_params *parms_info;
931 struct ipa_edge_args *args = IPA_EDGE_REF (e);
932 struct inline_edge_summary *es = inline_edge_summary (e);
933 int i, count = ipa_get_cs_argument_count (args);
934
935 if (e->caller->global.inlined_to)
936 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
937 else
938 parms_info = IPA_NODE_REF (e->caller);
939
940 if (count && (info->conds || known_vals_ptr))
941 known_vals.safe_grow_cleared (count);
942 if (count && (info->conds || known_aggs_ptr))
943 known_aggs.safe_grow_cleared (count);
944 if (count && known_contexts_ptr)
945 known_contexts_ptr->safe_grow_cleared (count);
946
947 for (i = 0; i < count; i++)
948 {
949 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
950 tree cst = ipa_value_from_jfunc (parms_info, jf);
951
952 if (!cst && e->call_stmt
953 && i < (int)gimple_call_num_args (e->call_stmt))
954 {
955 cst = gimple_call_arg (e->call_stmt, i);
956 if (!is_gimple_min_invariant (cst))
957 cst = NULL;
958 }
959 if (cst)
960 {
961 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
962 if (known_vals.exists ())
963 known_vals[i] = cst;
964 }
965 else if (inline_p && !es->param[i].change_prob)
966 known_vals[i] = error_mark_node;
967
968 if (known_contexts_ptr)
969 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
970 i, jf);
971 /* TODO: When IPA-CP starts propagating and merging aggregate jump
972 functions, use its knowledge of the caller too, just like the
973 scalar case above. */
974 known_aggs[i] = &jf->agg;
975 }
976 }
977 else if (e->call_stmt && !e->call_stmt_cannot_inline_p
978 && ((clause_ptr && info->conds) || known_vals_ptr))
979 {
980 int i, count = (int)gimple_call_num_args (e->call_stmt);
981
982 if (count && (info->conds || known_vals_ptr))
983 known_vals.safe_grow_cleared (count);
984 for (i = 0; i < count; i++)
985 {
986 tree cst = gimple_call_arg (e->call_stmt, i);
987 if (!is_gimple_min_invariant (cst))
988 cst = NULL;
989 if (cst)
990 known_vals[i] = cst;
991 }
992 }
993
994 if (clause_ptr)
995 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
996 known_vals, known_aggs);
997
998 if (known_vals_ptr)
999 *known_vals_ptr = known_vals;
1000 else
1001 known_vals.release ();
1002
1003 if (known_aggs_ptr)
1004 *known_aggs_ptr = known_aggs;
1005 else
1006 known_aggs.release ();
1007 }
1008
1009
1010 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
1011
1012 static void
1013 inline_summary_alloc (void)
1014 {
1015 if (!edge_removal_hook_holder)
1016 edge_removal_hook_holder =
1017 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
1018 if (!edge_duplication_hook_holder)
1019 edge_duplication_hook_holder =
1020 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1021
1022 if (!inline_summaries)
1023 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
1024
1025 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1026 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1027 if (!edge_predicate_pool)
1028 edge_predicate_pool = create_alloc_pool ("edge predicates",
1029 sizeof (struct predicate), 10);
1030 }
1031
1032 /* We are called multiple times for a given function; clear
1033 data from the previous run so they do not accumulate. */
1034
1035 static void
1036 reset_inline_edge_summary (struct cgraph_edge *e)
1037 {
1038 if (e->uid < (int) inline_edge_summary_vec.length ())
1039 {
1040 struct inline_edge_summary *es = inline_edge_summary (e);
1041
1042 es->call_stmt_size = es->call_stmt_time = 0;
1043 if (es->predicate)
1044 pool_free (edge_predicate_pool, es->predicate);
1045 es->predicate = NULL;
1046 es->param.release ();
1047 }
1048 }
1049
1050 /* We are called multiple times for a given function; clear
1051 data from the previous run so they do not accumulate. */
1052
1053 static void
1054 reset_inline_summary (struct cgraph_node *node,
1055 inline_summary *info)
1056 {
1057 struct cgraph_edge *e;
1058
1059 info->self_size = info->self_time = 0;
1060 info->estimated_stack_size = 0;
1061 info->estimated_self_stack_size = 0;
1062 info->stack_frame_offset = 0;
1063 info->size = 0;
1064 info->time = 0;
1065 info->growth = 0;
1066 info->scc_no = 0;
1067 if (info->loop_iterations)
1068 {
1069 pool_free (edge_predicate_pool, info->loop_iterations);
1070 info->loop_iterations = NULL;
1071 }
1072 if (info->loop_stride)
1073 {
1074 pool_free (edge_predicate_pool, info->loop_stride);
1075 info->loop_stride = NULL;
1076 }
1077 if (info->array_index)
1078 {
1079 pool_free (edge_predicate_pool, info->array_index);
1080 info->array_index = NULL;
1081 }
1082 vec_free (info->conds);
1083 vec_free (info->entry);
1084 for (e = node->callees; e; e = e->next_callee)
1085 reset_inline_edge_summary (e);
1086 for (e = node->indirect_calls; e; e = e->next_callee)
1087 reset_inline_edge_summary (e);
1088 }
1089
1090 /* Hook that is called by cgraph.c when a node is removed. */
1091
1092 void
1093 inline_summary_t::remove (cgraph_node *node, inline_summary *info)
1094 {
1095 reset_inline_summary (node, info);
1096 }
1097
1098 /* Remap predicate P of the former function to be a predicate of the
1099 duplicated function. POSSIBLE_TRUTHS is the clause of possible truths in
1100 the duplicated node, INFO is the inline summary of the duplicated node. */
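/* Each clause is restricted to the truths that remain possible in the new
   node; a clause left with no possibly-true condition makes the whole
   predicate false.  */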
1101
1102 static struct predicate
1103 remap_predicate_after_duplication (struct predicate *p,
1104 clause_t possible_truths,
1105 struct inline_summary *info)
1106 {
1107 struct predicate new_predicate = true_predicate ();
1108 int j;
1109 for (j = 0; p->clause[j]; j++)
1110 if (!(possible_truths & p->clause[j]))
1111 {
1112 new_predicate = false_predicate ();
1113 break;
1114 }
1115 else
1116 add_clause (info->conds, &new_predicate,
1117 possible_truths & p->clause[j]);
1118 return new_predicate;
1119 }
1120
1121 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1122 Additionally take care of allocating a new memory slot for the updated
1123 predicate and set it to NULL when it becomes true or false (and thus
1124 uninteresting). */
1125
1126 static void
1127 remap_hint_predicate_after_duplication (struct predicate **p,
1128 clause_t possible_truths,
1129 struct inline_summary *info)
1130 {
1131 struct predicate new_predicate;
1132
1133 if (!*p)
1134 return;
1135
1136 new_predicate = remap_predicate_after_duplication (*p,
1137 possible_truths, info);
1138 /* We do not want to free previous predicate; it is used by node origin. */
1139 *p = NULL;
1140 set_hint_predicate (p, new_predicate);
1141 }
1142
1143
1144 /* Hook that is called by cgraph.c when a node is duplicated. */
1145 void
1146 inline_summary_t::duplicate (cgraph_node *src,
1147 cgraph_node *dst,
1148 inline_summary *,
1149 inline_summary *info)
1150 {
1151 inline_summary_alloc ();
1152 memcpy (info, inline_summaries->get (src), sizeof (inline_summary));
1153 /* TODO: as an optimization, we may avoid copying conditions
1154 that are known to be false or true. */
1155 info->conds = vec_safe_copy (info->conds);
1156
1157 /* When there are any replacements in the function body, see if we can figure
1158 out that something was optimized out. */
1159 if (ipa_node_params_sum && dst->clone.tree_map)
1160 {
1161 vec<size_time_entry, va_gc> *entry = info->entry;
1162 /* Use SRC parm info since it may not be copied yet. */
1163 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1164 vec<tree> known_vals = vNULL;
1165 int count = ipa_get_param_count (parms_info);
1166 int i, j;
1167 clause_t possible_truths;
1168 struct predicate true_pred = true_predicate ();
1169 size_time_entry *e;
1170 int optimized_out_size = 0;
1171 bool inlined_to_p = false;
1172 struct cgraph_edge *edge;
1173
1174 info->entry = 0;
1175 known_vals.safe_grow_cleared (count);
1176 for (i = 0; i < count; i++)
1177 {
1178 struct ipa_replace_map *r;
1179
1180 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1181 {
1182 if (((!r->old_tree && r->parm_num == i)
1183 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1184 && r->replace_p && !r->ref_p)
1185 {
1186 known_vals[i] = r->new_tree;
1187 break;
1188 }
1189 }
1190 }
1191 possible_truths = evaluate_conditions_for_known_args (dst, false,
1192 known_vals,
1193 vNULL);
1194 known_vals.release ();
1195
1196 account_size_time (info, 0, 0, &true_pred);
1197
1198 /* Remap size_time vectors.
1199 Simplify the predicate by pruning out alternatives that are known
1200 to be false.
1201 TODO: as an optimization, we can also eliminate conditions known
1202 to be true. */
1203 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1204 {
1205 struct predicate new_predicate;
1206 new_predicate = remap_predicate_after_duplication (&e->predicate,
1207 possible_truths,
1208 info);
1209 if (false_predicate_p (&new_predicate))
1210 optimized_out_size += e->size;
1211 else
1212 account_size_time (info, e->size, e->time, &new_predicate);
1213 }
1214
1215 /* Remap edge predicates with the same simplification as above.
1216 Also copy constantness arrays. */
1217 for (edge = dst->callees; edge; edge = edge->next_callee)
1218 {
1219 struct predicate new_predicate;
1220 struct inline_edge_summary *es = inline_edge_summary (edge);
1221
1222 if (!edge->inline_failed)
1223 inlined_to_p = true;
1224 if (!es->predicate)
1225 continue;
1226 new_predicate = remap_predicate_after_duplication (es->predicate,
1227 possible_truths,
1228 info);
1229 if (false_predicate_p (&new_predicate)
1230 && !false_predicate_p (es->predicate))
1231 {
1232 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1233 edge->frequency = 0;
1234 }
1235 edge_set_predicate (edge, &new_predicate);
1236 }
1237
1238 /* Remap indirect edge predicates with the same simplification as above.
1239 Also copy constantness arrays. */
1240 for (edge = dst->indirect_calls; edge; edge = edge->next_callee)
1241 {
1242 struct predicate new_predicate;
1243 struct inline_edge_summary *es = inline_edge_summary (edge);
1244
1245 gcc_checking_assert (edge->inline_failed);
1246 if (!es->predicate)
1247 continue;
1248 new_predicate = remap_predicate_after_duplication (es->predicate,
1249 possible_truths,
1250 info);
1251 if (false_predicate_p (&new_predicate)
1252 && !false_predicate_p (es->predicate))
1253 {
1254 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1255 edge->frequency = 0;
1256 }
1257 edge_set_predicate (edge, &new_predicate);
1258 }
1259 remap_hint_predicate_after_duplication (&info->loop_iterations,
1260 possible_truths, info);
1261 remap_hint_predicate_after_duplication (&info->loop_stride,
1262 possible_truths, info);
1263 remap_hint_predicate_after_duplication (&info->array_index,
1264 possible_truths, info);
1265
1266 /* If the inliner or someone after the inliner ever starts producing
1267 non-trivial clones, we will get into trouble because of a lack of
1268 information about updating self sizes, since the size vectors already
1269 contain the sizes of the callees. */
1270 gcc_assert (!inlined_to_p || !optimized_out_size);
1271 }
1272 else
1273 {
1274 info->entry = vec_safe_copy (info->entry);
1275 if (info->loop_iterations)
1276 {
1277 predicate p = *info->loop_iterations;
1278 info->loop_iterations = NULL;
1279 set_hint_predicate (&info->loop_iterations, p);
1280 }
1281 if (info->loop_stride)
1282 {
1283 predicate p = *info->loop_stride;
1284 info->loop_stride = NULL;
1285 set_hint_predicate (&info->loop_stride, p);
1286 }
1287 if (info->array_index)
1288 {
1289 predicate p = *info->array_index;
1290 info->array_index = NULL;
1291 set_hint_predicate (&info->array_index, p);
1292 }
1293 }
1294 if (!dst->global.inlined_to)
1295 inline_update_overall_summary (dst);
1296 }
1297
1298
1299 /* Hook that is called by cgraph.c when an edge is duplicated. */
1300
1301 static void
1302 inline_edge_duplication_hook (struct cgraph_edge *src,
1303 struct cgraph_edge *dst,
1304 ATTRIBUTE_UNUSED void *data)
1305 {
1306 struct inline_edge_summary *info;
1307 struct inline_edge_summary *srcinfo;
1308 inline_summary_alloc ();
1309 info = inline_edge_summary (dst);
1310 srcinfo = inline_edge_summary (src);
1311 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1312 info->predicate = NULL;
1313 edge_set_predicate (dst, srcinfo->predicate);
1314 info->param = srcinfo->param.copy ();
1315 if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
1316 {
1317 info->call_stmt_size -= (eni_size_weights.indirect_call_cost
1318 - eni_size_weights.call_cost);
1319 info->call_stmt_time -= (eni_time_weights.indirect_call_cost
1320 - eni_time_weights.call_cost);
1321 }
1322 }
1323
1324
1325 /* Keep edge cache consistent across edge removal. */
1326
1327 static void
1328 inline_edge_removal_hook (struct cgraph_edge *edge,
1329 void *data ATTRIBUTE_UNUSED)
1330 {
1331 if (edge_growth_cache.exists ())
1332 reset_edge_growth_cache (edge);
1333 reset_inline_edge_summary (edge);
1334 }
1335
1336
1337 /* Initialize growth caches. */
1338
1339 void
1340 initialize_growth_caches (void)
1341 {
1342 if (symtab->edges_max_uid)
1343 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1344 }
1345
1346
1347 /* Free growth caches. */
1348
1349 void
1350 free_growth_caches (void)
1351 {
1352 edge_growth_cache.release ();
1353 }
1354
1355
1356 /* Dump edge summaries associated to NODE and recursively to all clones.
1357 Indent by INDENT. */
1358
1359 static void
1360 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1361 struct inline_summary *info)
1362 {
1363 struct cgraph_edge *edge;
1364 for (edge = node->callees; edge; edge = edge->next_callee)
1365 {
1366 struct inline_edge_summary *es = inline_edge_summary (edge);
1367 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1368 int i;
1369
1370 fprintf (f,
1371 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1372 " time: %2i callee size:%2i stack:%2i",
1373 indent, "", callee->name (), callee->order,
1374 !edge->inline_failed
1375 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1376 indent, "", es->loop_depth, edge->frequency,
1377 es->call_stmt_size, es->call_stmt_time,
1378 (int) inline_summaries->get (callee)->size / INLINE_SIZE_SCALE,
1379 (int) inline_summaries->get (callee)->estimated_stack_size);
1380
1381 if (es->predicate)
1382 {
1383 fprintf (f, " predicate: ");
1384 dump_predicate (f, info->conds, es->predicate);
1385 }
1386 else
1387 fprintf (f, "\n");
1388 if (es->param.exists ())
1389 for (i = 0; i < (int) es->param.length (); i++)
1390 {
1391 int prob = es->param[i].change_prob;
1392
1393 if (!prob)
1394 fprintf (f, "%*s op%i is compile time invariant\n",
1395 indent + 2, "", i);
1396 else if (prob != REG_BR_PROB_BASE)
1397 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1398 prob * 100.0 / REG_BR_PROB_BASE);
1399 }
1400 if (!edge->inline_failed)
1401 {
1402 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1403 " callee size %i\n",
1404 indent + 2, "",
1405 (int) inline_summaries->get (callee)->stack_frame_offset,
1406 (int) inline_summaries->get (callee)->estimated_self_stack_size,
1407 (int) inline_summaries->get (callee)->estimated_stack_size);
1408 dump_inline_edge_summary (f, indent + 2, callee, info);
1409 }
1410 }
1411 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1412 {
1413 struct inline_edge_summary *es = inline_edge_summary (edge);
1414 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1415 " time: %2i",
1416 indent, "",
1417 es->loop_depth,
1418 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1419 if (es->predicate)
1420 {
1421 fprintf (f, "predicate: ");
1422 dump_predicate (f, info->conds, es->predicate);
1423 }
1424 else
1425 fprintf (f, "\n");
1426 }
1427 }
1428
1429
1430 void
1431 dump_inline_summary (FILE *f, struct cgraph_node *node)
1432 {
1433 if (node->definition)
1434 {
1435 struct inline_summary *s = inline_summaries->get (node);
1436 size_time_entry *e;
1437 int i;
1438 fprintf (f, "Inline summary for %s/%i", node->name (),
1439 node->order);
1440 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1441 fprintf (f, " always_inline");
1442 if (s->inlinable)
1443 fprintf (f, " inlinable");
1444 fprintf (f, "\n self time: %i\n", s->self_time);
1445 fprintf (f, " global time: %i\n", s->time);
1446 fprintf (f, " self size: %i\n", s->self_size);
1447 fprintf (f, " global size: %i\n", s->size);
1448 fprintf (f, " min size: %i\n", s->min_size);
1449 fprintf (f, " self stack: %i\n",
1450 (int) s->estimated_self_stack_size);
1451 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1452 if (s->growth)
1453 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1454 if (s->scc_no)
1455 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1456 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1457 {
1458 fprintf (f, " size:%f, time:%f, predicate:",
1459 (double) e->size / INLINE_SIZE_SCALE,
1460 (double) e->time / INLINE_TIME_SCALE);
1461 dump_predicate (f, s->conds, &e->predicate);
1462 }
1463 if (s->loop_iterations)
1464 {
1465 fprintf (f, " loop iterations:");
1466 dump_predicate (f, s->conds, s->loop_iterations);
1467 }
1468 if (s->loop_stride)
1469 {
1470 fprintf (f, " loop stride:");
1471 dump_predicate (f, s->conds, s->loop_stride);
1472 }
1473 if (s->array_index)
1474 {
1475 fprintf (f, " array index:");
1476 dump_predicate (f, s->conds, s->array_index);
1477 }
1478 fprintf (f, " calls:\n");
1479 dump_inline_edge_summary (f, 4, node, s);
1480 fprintf (f, "\n");
1481 }
1482 }
1483
1484 DEBUG_FUNCTION void
1485 debug_inline_summary (struct cgraph_node *node)
1486 {
1487 dump_inline_summary (stderr, node);
1488 }
1489
1490 void
1491 dump_inline_summaries (FILE *f)
1492 {
1493 struct cgraph_node *node;
1494
1495 FOR_EACH_DEFINED_FUNCTION (node)
1496 if (!node->global.inlined_to)
1497 dump_inline_summary (f, node);
1498 }
1499
1500 /* Give initial reasons why inlining would fail on EDGE. This gets either
1501 nullified or usually overwritten by more precise reasons later. */
1502
1503 void
1504 initialize_inline_failed (struct cgraph_edge *e)
1505 {
1506 struct cgraph_node *callee = e->callee;
1507
1508 if (e->indirect_unknown_callee)
1509 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1510 else if (!callee->definition)
1511 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1512 else if (callee->local.redefined_extern_inline)
1513 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1514 else if (e->call_stmt_cannot_inline_p)
1515 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1516 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1517 /* We can't inline if the function is spawning a function. */
1518 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1519 else
1520 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1521 }
1522
1523 /* Callback of walk_aliased_vdefs. Sets the boolean variable pointed to
1524 by DATA to record that it has been invoked. */
1525
1526 static bool
1527 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1528 void *data)
1529 {
1530 bool *b = (bool *) data;
1531 *b = true;
1532 return true;
1533 }
1534
1535 /* If OP refers to the value of a function parameter, return the
1536 corresponding parameter. */
1537
1538 static tree
1539 unmodified_parm_1 (gimple stmt, tree op)
1540 {
1541 /* SSA_NAME referring to parm default def? */
1542 if (TREE_CODE (op) == SSA_NAME
1543 && SSA_NAME_IS_DEFAULT_DEF (op)
1544 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1545 return SSA_NAME_VAR (op);
1546 /* Non-SSA parm reference? */
1547 if (TREE_CODE (op) == PARM_DECL)
1548 {
1549 bool modified = false;
1550
1551 ao_ref refd;
1552 ao_ref_init (&refd, op);
1553 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1554 NULL);
1555 if (!modified)
1556 return op;
1557 }
1558 return NULL_TREE;
1559 }
1560
1561 /* If OP refers to the value of a function parameter, return the
1562 corresponding parameter. Also traverse chains of SSA register assignments. */
1563
1564 static tree
1565 unmodified_parm (gimple stmt, tree op)
1566 {
1567 tree res = unmodified_parm_1 (stmt, op);
1568 if (res)
1569 return res;
1570
1571 if (TREE_CODE (op) == SSA_NAME
1572 && !SSA_NAME_IS_DEFAULT_DEF (op)
1573 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1574 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1575 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1576 return NULL_TREE;
1577 }
1578
1579 /* If OP refers to a value of a function parameter or value loaded from an
1580 aggregate passed to a parameter (either by value or reference), return TRUE
1581 and store the number of the parameter to *INDEX_P and information whether
1582 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1583 the function parameters, STMT is the statement in which OP is used or
1584 loaded. */
1585
1586 static bool
1587 unmodified_parm_or_parm_agg_item (struct ipa_node_params *info,
1588 gimple stmt, tree op, int *index_p,
1589 struct agg_position_info *aggpos)
1590 {
1591 tree res = unmodified_parm_1 (stmt, op);
1592
1593 gcc_checking_assert (aggpos);
1594 if (res)
1595 {
1596 *index_p = ipa_get_param_decl_index (info, res);
1597 if (*index_p < 0)
1598 return false;
1599 aggpos->agg_contents = false;
1600 aggpos->by_ref = false;
1601 return true;
1602 }
1603
1604 if (TREE_CODE (op) == SSA_NAME)
1605 {
1606 if (SSA_NAME_IS_DEFAULT_DEF (op)
1607 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1608 return false;
1609 stmt = SSA_NAME_DEF_STMT (op);
1610 op = gimple_assign_rhs1 (stmt);
1611 if (!REFERENCE_CLASS_P (op))
1612 return unmodified_parm_or_parm_agg_item (info, stmt, op, index_p,
1613 aggpos);
1614 }
1615
1616 aggpos->agg_contents = true;
1617 return ipa_load_from_parm_agg (info, stmt, op, index_p, &aggpos->offset,
1618 &aggpos->by_ref);
1619 }
1620
1621 /* See if a statement might disappear after inlining.
1622 0 - means it is not eliminated
1623 1 - half of the statements go away
1624 2 - it is eliminated for sure.
1625 We are not terribly sophisticated, basically looking for simple abstraction
1626 penalty wrappers. */
1627
1628 static int
1629 eliminated_by_inlining_prob (gimple stmt)
1630 {
1631 enum gimple_code code = gimple_code (stmt);
1632 enum tree_code rhs_code;
1633
1634 if (!optimize)
1635 return 0;
1636
1637 switch (code)
1638 {
1639 case GIMPLE_RETURN:
1640 return 2;
1641 case GIMPLE_ASSIGN:
1642 if (gimple_num_ops (stmt) != 2)
1643 return 0;
1644
1645 rhs_code = gimple_assign_rhs_code (stmt);
1646
1647 /* Casts of parameters, loads from parameters passed by reference
1648 and stores to the return value or parameters are often free after
1649 inlining due to SRA and further combining.
1650 Assume that half of the statements go away. */
1651 if (CONVERT_EXPR_CODE_P (rhs_code)
1652 || rhs_code == VIEW_CONVERT_EXPR
1653 || rhs_code == ADDR_EXPR
1654 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1655 {
1656 tree rhs = gimple_assign_rhs1 (stmt);
1657 tree lhs = gimple_assign_lhs (stmt);
1658 tree inner_rhs = get_base_address (rhs);
1659 tree inner_lhs = get_base_address (lhs);
1660 bool rhs_free = false;
1661 bool lhs_free = false;
1662
1663 if (!inner_rhs)
1664 inner_rhs = rhs;
1665 if (!inner_lhs)
1666 inner_lhs = lhs;
1667
1668 /* Reads of a parameter are expected to be free. */
1669 if (unmodified_parm (stmt, inner_rhs))
1670 rhs_free = true;
1671 /* Match expressions of form &this->field. Those will most likely
1672 combine with something upstream after inlining. */
1673 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1674 {
1675 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1676 if (TREE_CODE (op) == PARM_DECL)
1677 rhs_free = true;
1678 else if (TREE_CODE (op) == MEM_REF
1679 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1680 rhs_free = true;
1681 }
1682
1683 /* When a parameter is not an SSA register because its address is taken
1684 and it is just copied into one, the statement will be completely
1685 free after inlining (we will copy propagate backward). */
1686 if (rhs_free && is_gimple_reg (lhs))
1687 return 2;
1688
1689 /* Reads of parameters passed by reference
1690 are expected to be free (i.e. optimized out after inlining). */
1691 if (TREE_CODE (inner_rhs) == MEM_REF
1692 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1693 rhs_free = true;
1694
1695 /* Copying parameter passed by reference into gimple register is
1696 probably also going to copy propagate, but we can't be quite
1697 sure. */
1698 if (rhs_free && is_gimple_reg (lhs))
1699 lhs_free = true;
1700
1701 /* Writes to parameters, parameters passed by value and the return value
1702 (either directly or passed via invisible reference) are free.
1703
1704 TODO: We ought to handle testcases like
1705 struct a {int a,b;};
1706 struct a
1707 retrurnsturct (void)
1708 {
1709 struct a a ={1,2};
1710 return a;
1711 }
1712
1713 This translates into:
1714
1715 retrurnsturct ()
1716 {
1717 int a$b;
1718 int a$a;
1719 struct a a;
1720 struct a D.2739;
1721
1722 <bb 2>:
1723 D.2739.a = 1;
1724 D.2739.b = 2;
1725 return D.2739;
1726
1727 }
1728 For that we would need to copy the ipa-split logic detecting writes
1729 to the return value. */
1730 if (TREE_CODE (inner_lhs) == PARM_DECL
1731 || TREE_CODE (inner_lhs) == RESULT_DECL
1732 || (TREE_CODE (inner_lhs) == MEM_REF
1733 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1734 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1735 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1736 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1737 (inner_lhs,
1738 0))) == RESULT_DECL))))
1739 lhs_free = true;
1740 if (lhs_free
1741 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1742 rhs_free = true;
1743 if (lhs_free && rhs_free)
1744 return 1;
1745 }
1746 return 0;
1747 default:
1748 return 0;
1749 }
1750 }
1751
1752
1753 /* If BB ends with a conditional that we can turn into predicates, attach the
1754 corresponding predicates to the CFG edges. */
1755
1756 static void
1757 set_cond_stmt_execution_predicate (struct ipa_node_params *info,
1758 struct inline_summary *summary,
1759 basic_block bb)
1760 {
1761 gimple last;
1762 tree op;
1763 int index;
1764 struct agg_position_info aggpos;
1765 enum tree_code code, inverted_code;
1766 edge e;
1767 edge_iterator ei;
1768 gimple set_stmt;
1769 tree op2;
1770
1771 last = last_stmt (bb);
1772 if (!last || gimple_code (last) != GIMPLE_COND)
1773 return;
1774 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1775 return;
1776 op = gimple_cond_lhs (last);
1777 /* TODO: handle conditionals like
1778 var = op0 < 4;
1779 if (var != 0). */
1780 if (unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1781 {
1782 code = gimple_cond_code (last);
1783 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1784
1785 FOR_EACH_EDGE (e, ei, bb->succs)
1786 {
1787 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1788 ? code : inverted_code);
1789 /* invert_tree_comparison will return ERROR_MARK on FP
1790 comparisons that are not EQ/NE instead of returning the proper
1791 unordered one. Be sure it is not confused with NON_CONSTANT. */
1792 if (this_code != ERROR_MARK)
1793 {
1794 struct predicate p = add_condition (summary, index, &aggpos,
1795 this_code,
1796 gimple_cond_rhs (last));
1797 e->aux = pool_alloc (edge_predicate_pool);
1798 *(struct predicate *) e->aux = p;
1799 }
1800 }
1801 }
1802
1803 if (TREE_CODE (op) != SSA_NAME)
1804 return;
1805 /* Special case
1806 if (builtin_constant_p (op))
1807 constant_code
1808 else
1809 nonconstant_code.
1810 Here we can predicate nonconstant_code. We can't
1811 really handle constant_code since we have no predicate
1812 for this and also the constant code is not known to be
1813 optimized away when the inliner doesn't see that the operand is constant.
1814 Other optimizers might think otherwise. */
1815 if (gimple_cond_code (last) != NE_EXPR
1816 || !integer_zerop (gimple_cond_rhs (last)))
1817 return;
1818 set_stmt = SSA_NAME_DEF_STMT (op);
1819 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1820 || gimple_call_num_args (set_stmt) != 1)
1821 return;
1822 op2 = gimple_call_arg (set_stmt, 0);
1823 if (!unmodified_parm_or_parm_agg_item
1824 (info, set_stmt, op2, &index, &aggpos))
1825 return;
1826 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1827 {
1828 struct predicate p = add_condition (summary, index, &aggpos,
1829 IS_NOT_CONSTANT, NULL_TREE);
1830 e->aux = pool_alloc (edge_predicate_pool);
1831 *(struct predicate *) e->aux = p;
1832 }
1833 }
1834
1835
1836 /* If BB ends with a switch that we can turn into predicates, attach the
1837 corresponding predicates to the CFG edges. */
1838
1839 static void
1840 set_switch_stmt_execution_predicate (struct ipa_node_params *info,
1841 struct inline_summary *summary,
1842 basic_block bb)
1843 {
1844 gimple lastg;
1845 tree op;
1846 int index;
1847 struct agg_position_info aggpos;
1848 edge e;
1849 edge_iterator ei;
1850 size_t n;
1851 size_t case_idx;
1852
1853 lastg = last_stmt (bb);
1854 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1855 return;
1856 gswitch *last = as_a <gswitch *> (lastg);
1857 op = gimple_switch_index (last);
1858 if (!unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1859 return;
1860
1861 FOR_EACH_EDGE (e, ei, bb->succs)
1862 {
1863 e->aux = pool_alloc (edge_predicate_pool);
1864 *(struct predicate *) e->aux = false_predicate ();
1865 }
1866 n = gimple_switch_num_labels (last);
1867 for (case_idx = 0; case_idx < n; ++case_idx)
1868 {
1869 tree cl = gimple_switch_label (last, case_idx);
1870 tree min, max;
1871 struct predicate p;
1872
1873 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1874 min = CASE_LOW (cl);
1875 max = CASE_HIGH (cl);
1876
1877 /* For the default case we might want to construct a predicate that none
1878 of the cases is met, but it is a bit hard to do without having negations
1879 of the conditionals handy. */
1880 if (!min && !max)
1881 p = true_predicate ();
1882 else if (!max)
1883 p = add_condition (summary, index, &aggpos, EQ_EXPR, min);
1884 else
1885 {
1886 struct predicate p1, p2;
1887 p1 = add_condition (summary, index, &aggpos, GE_EXPR, min);
1888 p2 = add_condition (summary, index, &aggpos, LE_EXPR, max);
1889 p = and_predicates (summary->conds, &p1, &p2);
1890 }
1891 *(struct predicate *) e->aux
1892 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1893 }
1894 }
1895
1896
1897 /* For each BB in NODE attach to its AUX pointer the predicate under
1898 which it is executable. */
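/* A small worked example of the propagation below, for a single parameter P
   and a hypothetical diamond CFG:

     BB1 ends with "if (P != 0)"     BB1 predicate: true
     BB2 is the true arm             BB2 predicate: P != 0
     BB3 is the false arm            BB3 predicate: P == 0
     BB4 is the join                 BB4 predicate: (P != 0) || (P == 0), i.e. true

   Each block's predicate is the OR, over its incoming edges, of the
   predecessor's predicate ANDed with the edge predicate.  */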
1899
1900 static void
1901 compute_bb_predicates (struct cgraph_node *node,
1902 struct ipa_node_params *parms_info,
1903 struct inline_summary *summary)
1904 {
1905 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1906 bool done = false;
1907 basic_block bb;
1908
1909 FOR_EACH_BB_FN (bb, my_function)
1910 {
1911 set_cond_stmt_execution_predicate (parms_info, summary, bb);
1912 set_switch_stmt_execution_predicate (parms_info, summary, bb);
1913 }
1914
1915 /* Entry block is always executable. */
1916 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1917 = pool_alloc (edge_predicate_pool);
1918 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1919 = true_predicate ();
1920
1921 /* A simple dataflow propagation of predicates forward in the CFG.
1922 TODO: work in reverse postorder. */
1923 while (!done)
1924 {
1925 done = true;
1926 FOR_EACH_BB_FN (bb, my_function)
1927 {
1928 struct predicate p = false_predicate ();
1929 edge e;
1930 edge_iterator ei;
1931 FOR_EACH_EDGE (e, ei, bb->preds)
1932 {
1933 if (e->src->aux)
1934 {
1935 struct predicate this_bb_predicate
1936 = *(struct predicate *) e->src->aux;
1937 if (e->aux)
1938 this_bb_predicate
1939 = and_predicates (summary->conds, &this_bb_predicate,
1940 (struct predicate *) e->aux);
1941 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1942 if (true_predicate_p (&p))
1943 break;
1944 }
1945 }
1946 if (false_predicate_p (&p))
1947 gcc_assert (!bb->aux);
1948 else
1949 {
1950 if (!bb->aux)
1951 {
1952 done = false;
1953 bb->aux = pool_alloc (edge_predicate_pool);
1954 *((struct predicate *) bb->aux) = p;
1955 }
1956 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1957 {
1958 /* This OR operation is needed to ensure monotone data flow
1959 in case we hit the limit on the number of clauses and the
1960 and/or operations above give approximate answers. */
1961 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1962 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1963 {
1964 done = false;
1965 *((struct predicate *) bb->aux) = p;
1966 }
1967 }
1968 }
1969 }
1970 }
1971 }
1972
1973
1974 /* We keep info about constantness of SSA names. */
1975
1976 typedef struct predicate predicate_t;
1977 /* Return a predicate specifying when EXPR might have a result that is not
1978 a compile time constant. */
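/* For example (illustrative only): for EXPR = a + 4 where A is the first
   formal parameter, the result is the single condition "parameter 0 changed";
   for EXPR = a + b with both operands being tracked SSA names, it is the OR
   of their recorded nonconstant predicates.  */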
1979
1980 static struct predicate
1981 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1982 struct inline_summary *summary,
1983 tree expr,
1984 vec<predicate_t> nonconstant_names)
1985 {
1986 tree parm;
1987 int index;
1988
1989 while (UNARY_CLASS_P (expr))
1990 expr = TREE_OPERAND (expr, 0);
1991
1992 parm = unmodified_parm (NULL, expr);
1993 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1994 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1995 if (is_gimple_min_invariant (expr))
1996 return false_predicate ();
1997 if (TREE_CODE (expr) == SSA_NAME)
1998 return nonconstant_names[SSA_NAME_VERSION (expr)];
1999 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
2000 {
2001 struct predicate p1 = will_be_nonconstant_expr_predicate
2002 (info, summary, TREE_OPERAND (expr, 0),
2003 nonconstant_names);
2004 struct predicate p2;
2005 if (true_predicate_p (&p1))
2006 return p1;
2007 p2 = will_be_nonconstant_expr_predicate (info, summary,
2008 TREE_OPERAND (expr, 1),
2009 nonconstant_names);
2010 return or_predicates (summary->conds, &p1, &p2);
2011 }
2012 else if (TREE_CODE (expr) == COND_EXPR)
2013 {
2014 struct predicate p1 = will_be_nonconstant_expr_predicate
2015 (info, summary, TREE_OPERAND (expr, 0),
2016 nonconstant_names);
2017 struct predicate p2;
2018 if (true_predicate_p (&p1))
2019 return p1;
2020 p2 = will_be_nonconstant_expr_predicate (info, summary,
2021 TREE_OPERAND (expr, 1),
2022 nonconstant_names);
2023 if (true_predicate_p (&p2))
2024 return p2;
2025 p1 = or_predicates (summary->conds, &p1, &p2);
2026 p2 = will_be_nonconstant_expr_predicate (info, summary,
2027 TREE_OPERAND (expr, 2),
2028 nonconstant_names);
2029 return or_predicates (summary->conds, &p1, &p2);
2030 }
2031 else
2032 {
2033 debug_tree (expr);
2034 gcc_unreachable ();
2035 }
2036 return false_predicate ();
2037 }
2038
2039
2040 /* Return a predicate specifying when STMT might have a result that is not
2041 a compile time constant. */
2042
2043 static struct predicate
2044 will_be_nonconstant_predicate (struct ipa_node_params *info,
2045 struct inline_summary *summary,
2046 gimple stmt,
2047 vec<predicate_t> nonconstant_names)
2048 {
2049 struct predicate p = true_predicate ();
2050 ssa_op_iter iter;
2051 tree use;
2052 struct predicate op_non_const;
2053 bool is_load;
2054 int base_index;
2055 struct agg_position_info aggpos;
2056
2057 /* These are the only kinds of statements that might be optimized away
2058 when their arguments are constant. */
2059 if (gimple_code (stmt) != GIMPLE_ASSIGN
2060 && gimple_code (stmt) != GIMPLE_COND
2061 && gimple_code (stmt) != GIMPLE_SWITCH
2062 && (gimple_code (stmt) != GIMPLE_CALL
2063 || !(gimple_call_flags (stmt) & ECF_CONST)))
2064 return p;
2065
2066 /* Stores will stay anyway. */
2067 if (gimple_store_p (stmt))
2068 return p;
2069
2070 is_load = gimple_assign_load_p (stmt);
2071
2072 /* Loads can be optimized when the value is known. */
2073 if (is_load)
2074 {
2075 tree op;
2076 gcc_assert (gimple_assign_single_p (stmt));
2077 op = gimple_assign_rhs1 (stmt);
2078 if (!unmodified_parm_or_parm_agg_item (info, stmt, op, &base_index,
2079 &aggpos))
2080 return p;
2081 }
2082 else
2083 base_index = -1;
2084
2085 /* See if we understand all operands before we start
2086 adding conditionals. */
2087 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2088 {
2089 tree parm = unmodified_parm (stmt, use);
2090 /* For arguments we can build a condition. */
2091 if (parm && ipa_get_param_decl_index (info, parm) >= 0)
2092 continue;
2093 if (TREE_CODE (use) != SSA_NAME)
2094 return p;
2095 /* If we know when the operand is constant,
2096 we can still say something useful. */
2097 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2098 continue;
2099 return p;
2100 }
2101
2102 if (is_load)
2103 op_non_const =
2104 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2105 else
2106 op_non_const = false_predicate ();
2107 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2108 {
2109 tree parm = unmodified_parm (stmt, use);
2110 int index;
2111
2112 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2113 {
2114 if (index != base_index)
2115 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2116 else
2117 continue;
2118 }
2119 else
2120 p = nonconstant_names[SSA_NAME_VERSION (use)];
2121 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2122 }
2123 if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
2124 && gimple_op (stmt, 0)
2125 && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2126 nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
2127 = op_non_const;
2128 return op_non_const;
2129 }
2130
2131 struct record_modified_bb_info
2132 {
2133 bitmap bb_set;
2134 gimple stmt;
2135 };
2136
2137 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2138 set except for info->stmt. */
2139
2140 static bool
2141 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2142 {
2143 struct record_modified_bb_info *info =
2144 (struct record_modified_bb_info *) data;
2145 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2146 return false;
2147 bitmap_set_bit (info->bb_set,
2148 SSA_NAME_IS_DEFAULT_DEF (vdef)
2149 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2150 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2151 return false;
2152 }
2153
2154 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2155 of STMT will change since the last invocation of STMT.
2156
2157 Value 0 is reserved for compile time invariants.
2158 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2159 ought to be REG_BR_PROB_BASE / estimated_iters. */
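/* For instance (numbers purely illustrative, with REG_BR_PROB_BASE being
   10000): if the statement defining the argument executes with frequency 100
   while the call executes with frequency 1000, the estimate below is
   GCOV_COMPUTE_SCALE (100, 1000) == 1000, i.e. a 10% chance of change per
   call invocation.  */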
2160
2161 static int
2162 param_change_prob (gimple stmt, int i)
2163 {
2164 tree op = gimple_call_arg (stmt, i);
2165 basic_block bb = gimple_bb (stmt);
2166 tree base;
2167
2168 /* Global invariants never change. */
2169 if (is_gimple_min_invariant (op))
2170 return 0;
2171 /* We would have to do non-trivial analysis to really work out what
2172 the probability of the value changing is (i.e. when the init statement
2173 is in a sibling loop of the call).
2174
2175 We make a conservative estimate: when the call is executed N times more often
2176 than the statement defining the value, we take the frequency 1/N. */
2177 if (TREE_CODE (op) == SSA_NAME)
2178 {
2179 int init_freq;
2180
2181 if (!bb->frequency)
2182 return REG_BR_PROB_BASE;
2183
2184 if (SSA_NAME_IS_DEFAULT_DEF (op))
2185 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2186 else
2187 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2188
2189 if (!init_freq)
2190 init_freq = 1;
2191 if (init_freq < bb->frequency)
2192 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2193 else
2194 return REG_BR_PROB_BASE;
2195 }
2196
2197 base = get_base_address (op);
2198 if (base)
2199 {
2200 ao_ref refd;
2201 int max;
2202 struct record_modified_bb_info info;
2203 bitmap_iterator bi;
2204 unsigned index;
2205 tree init = ctor_for_folding (base);
2206
2207 if (init != error_mark_node)
2208 return 0;
2209 if (!bb->frequency)
2210 return REG_BR_PROB_BASE;
2211 ao_ref_init (&refd, op);
2212 info.stmt = stmt;
2213 info.bb_set = BITMAP_ALLOC (NULL);
2214 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2215 NULL);
2216 if (bitmap_bit_p (info.bb_set, bb->index))
2217 {
2218 BITMAP_FREE (info.bb_set);
2219 return REG_BR_PROB_BASE;
2220 }
2221
2222 /* Assume that all memory is initialized at entry.
2223 TODO: Can we easily determine if the value is always defined
2224 and thus skip the entry block? */
2225 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2226 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2227 else
2228 max = 1;
2229
2230 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2231 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2232
2233 BITMAP_FREE (info.bb_set);
2234 if (max < bb->frequency)
2235 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2236 else
2237 return REG_BR_PROB_BASE;
2238 }
2239 return REG_BR_PROB_BASE;
2240 }
2241
2242 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2243 sub-graph and whether the predicate the condition depends on is known. If so,
2244 return true and store the predicate in *P. */
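/* A sketch of the half diamond recognized here, on hypothetical source:

     first_bb:   if (param > 0) then set x = 1, else fall through
     bb (input): y = PHI <x, 0>

   The predicate stored into *P describes when the controlling condition
   "param > 0" is not a compile time constant.  */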
2245
2246 static bool
2247 phi_result_unknown_predicate (struct ipa_node_params *info,
2248 inline_summary *summary, basic_block bb,
2249 struct predicate *p,
2250 vec<predicate_t> nonconstant_names)
2251 {
2252 edge e;
2253 edge_iterator ei;
2254 basic_block first_bb = NULL;
2255 gimple stmt;
2256
2257 if (single_pred_p (bb))
2258 {
2259 *p = false_predicate ();
2260 return true;
2261 }
2262
2263 FOR_EACH_EDGE (e, ei, bb->preds)
2264 {
2265 if (single_succ_p (e->src))
2266 {
2267 if (!single_pred_p (e->src))
2268 return false;
2269 if (!first_bb)
2270 first_bb = single_pred (e->src);
2271 else if (single_pred (e->src) != first_bb)
2272 return false;
2273 }
2274 else
2275 {
2276 if (!first_bb)
2277 first_bb = e->src;
2278 else if (e->src != first_bb)
2279 return false;
2280 }
2281 }
2282
2283 if (!first_bb)
2284 return false;
2285
2286 stmt = last_stmt (first_bb);
2287 if (!stmt
2288 || gimple_code (stmt) != GIMPLE_COND
2289 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2290 return false;
2291
2292 *p = will_be_nonconstant_expr_predicate (info, summary,
2293 gimple_cond_lhs (stmt),
2294 nonconstant_names);
2295 if (true_predicate_p (p))
2296 return false;
2297 else
2298 return true;
2299 }
2300
2301 /* Given a PHI statement in a function described by inline properties SUMMARY
2302 and *P being the predicate describing whether the selected PHI argument is
2303 known, store a predicate for the result of the PHI statement into
2304 NONCONSTANT_NAMES, if possible. */
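/* E.g. (illustrative) for result = PHI <a_1, 5>, the stored predicate is the
   OR of *P with the nonconstant predicate already recorded for a_1; constant
   arguments such as 5 contribute nothing.  */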
2305
2306 static void
2307 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2308 struct predicate *p,
2309 vec<predicate_t> nonconstant_names)
2310 {
2311 unsigned i;
2312
2313 for (i = 0; i < gimple_phi_num_args (phi); i++)
2314 {
2315 tree arg = gimple_phi_arg (phi, i)->def;
2316 if (!is_gimple_min_invariant (arg))
2317 {
2318 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2319 *p = or_predicates (summary->conds, p,
2320 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2321 if (true_predicate_p (p))
2322 return;
2323 }
2324 }
2325
2326 if (dump_file && (dump_flags & TDF_DETAILS))
2327 {
2328 fprintf (dump_file, "\t\tphi predicate: ");
2329 dump_predicate (dump_file, summary->conds, p);
2330 }
2331 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2332 }
2333
2334 /* Return a predicate specifying when an array index in access OP becomes non-constant. */
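/* For example (illustrative): for OP = a.b[i].c with I an SSA name, the
   result is the nonconstant predicate recorded for I; indices of nested
   array references are ORed together.  */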
2335
2336 static struct predicate
2337 array_index_predicate (inline_summary *info,
2338 vec< predicate_t> nonconstant_names, tree op)
2339 {
2340 struct predicate p = false_predicate ();
2341 while (handled_component_p (op))
2342 {
2343 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2344 {
2345 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2346 p = or_predicates (info->conds, &p,
2347 &nonconstant_names[SSA_NAME_VERSION
2348 (TREE_OPERAND (op, 1))]);
2349 }
2350 op = TREE_OPERAND (op, 0);
2351 }
2352 return p;
2353 }
2354
2355 /* For a typical usage of __builtin_expect (a<b, 1), we
2356 may introduce an extra relation stmt:
2357 With the builtin, we have
2358 t1 = a <= b;
2359 t2 = (long int) t1;
2360 t3 = __builtin_expect (t2, 1);
2361 if (t3 != 0)
2362 goto ...
2363 Without the builtin, we have
2364 if (a<=b)
2365 goto...
2366 This affects the size/time estimation and may have
2367 an impact on the earlier inlining.
2368 Here we find this pattern so that it can be fixed up later. */
2369
2370 static gimple
2371 find_foldable_builtin_expect (basic_block bb)
2372 {
2373 gimple_stmt_iterator bsi;
2374
2375 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2376 {
2377 gimple stmt = gsi_stmt (bsi);
2378 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2379 || (is_gimple_call (stmt)
2380 && gimple_call_internal_p (stmt)
2381 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2382 {
2383 tree var = gimple_call_lhs (stmt);
2384 tree arg = gimple_call_arg (stmt, 0);
2385 use_operand_p use_p;
2386 gimple use_stmt;
2387 bool match = false;
2388 bool done = false;
2389
2390 if (!var || !arg)
2391 continue;
2392 gcc_assert (TREE_CODE (var) == SSA_NAME);
2393
2394 while (TREE_CODE (arg) == SSA_NAME)
2395 {
2396 gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
2397 if (!is_gimple_assign (stmt_tmp))
2398 break;
2399 switch (gimple_assign_rhs_code (stmt_tmp))
2400 {
2401 case LT_EXPR:
2402 case LE_EXPR:
2403 case GT_EXPR:
2404 case GE_EXPR:
2405 case EQ_EXPR:
2406 case NE_EXPR:
2407 match = true;
2408 done = true;
2409 break;
2410 CASE_CONVERT:
2411 break;
2412 default:
2413 done = true;
2414 break;
2415 }
2416 if (done)
2417 break;
2418 arg = gimple_assign_rhs1 (stmt_tmp);
2419 }
2420
2421 if (match && single_imm_use (var, &use_p, &use_stmt)
2422 && gimple_code (use_stmt) == GIMPLE_COND)
2423 return use_stmt;
2424 }
2425 }
2426 return NULL;
2427 }
2428
2429 /* Return true when the basic block contains only clobbers followed by RESX.
2430 Such BBs are kept around to make removal of dead stores possible in the
2431 presence of EH and will be optimized out by optimize_clobbers later in the
2432 game.
2433
2434 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2435 that can be clobber-only, too. When it is false, the RESX is not necessary
2436 at the end of the basic block. */
2437
2438 static bool
2439 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2440 {
2441 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2442 edge_iterator ei;
2443 edge e;
2444
2445 if (need_eh)
2446 {
2447 if (gsi_end_p (gsi))
2448 return false;
2449 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2450 return false;
2451 gsi_prev (&gsi);
2452 }
2453 else if (!single_succ_p (bb))
2454 return false;
2455
2456 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2457 {
2458 gimple stmt = gsi_stmt (gsi);
2459 if (is_gimple_debug (stmt))
2460 continue;
2461 if (gimple_clobber_p (stmt))
2462 continue;
2463 if (gimple_code (stmt) == GIMPLE_LABEL)
2464 break;
2465 return false;
2466 }
2467
2468 /* See if all predecessors are either throws or clobber-only BBs. */
2469 FOR_EACH_EDGE (e, ei, bb->preds)
2470 if (!(e->flags & EDGE_EH)
2471 && !clobber_only_eh_bb_p (e->src, false))
2472 return false;
2473
2474 return true;
2475 }
2476
2477 /* Compute function body size parameters for NODE.
2478 When EARLY is true, we compute only simple summaries without
2479 non-trivial predicates to drive the early inliner. */
2480
2481 static void
2482 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2483 {
2484 gcov_type time = 0;
2485 /* Estimate static overhead for function prologue/epilogue and alignment. */
2486 int size = 2;
2487 /* Benefits are scaled by probability of elimination that is in range
2488 <0,2>. */
2489 basic_block bb;
2490 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2491 int freq;
2492 struct inline_summary *info = inline_summaries->get (node);
2493 struct predicate bb_predicate;
2494 struct ipa_node_params *parms_info = NULL;
2495 vec<predicate_t> nonconstant_names = vNULL;
2496 int nblocks, n;
2497 int *order;
2498 predicate array_index = true_predicate ();
2499 gimple fix_builtin_expect_stmt;
2500
2501 info->conds = NULL;
2502 info->entry = NULL;
2503
2504 /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
2505 so we can produce proper inline hints.
2506
2507 When optimizing and analyzing for early inliner, initialize node params
2508 so we can produce correct BB predicates. */
2509
2510 if (opt_for_fn (node->decl, optimize))
2511 {
2512 calculate_dominance_info (CDI_DOMINATORS);
2513 if (!early)
2514 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2515 else
2516 {
2517 ipa_check_create_node_params ();
2518 ipa_initialize_node_params (node);
2519 }
2520
2521 if (ipa_node_params_sum)
2522 {
2523 parms_info = IPA_NODE_REF (node);
2524 nonconstant_names.safe_grow_cleared
2525 (SSANAMES (my_function)->length ());
2526 }
2527 }
2528
2529 if (dump_file)
2530 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2531 node->name ());
2532
2533 /* When we run into the maximal number of entries, we assign everything to the
2534 constant truth case. Be sure to have it in the list. */
2535 bb_predicate = true_predicate ();
2536 account_size_time (info, 0, 0, &bb_predicate);
2537
2538 bb_predicate = not_inlined_predicate ();
2539 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2540
2541 gcc_assert (my_function && my_function->cfg);
2542 if (parms_info)
2543 compute_bb_predicates (node, parms_info, info);
2544 gcc_assert (cfun == my_function);
2545 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2546 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2547 for (n = 0; n < nblocks; n++)
2548 {
2549 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2550 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2551 if (clobber_only_eh_bb_p (bb))
2552 {
2553 if (dump_file && (dump_flags & TDF_DETAILS))
2554 fprintf (dump_file, "\n Ignoring BB %i;"
2555 " it will be optimized away by cleanup_clobbers\n",
2556 bb->index);
2557 continue;
2558 }
2559
2560 /* TODO: Obviously predicates can be propagated down across CFG. */
2561 if (parms_info)
2562 {
2563 if (bb->aux)
2564 bb_predicate = *(struct predicate *) bb->aux;
2565 else
2566 bb_predicate = false_predicate ();
2567 }
2568 else
2569 bb_predicate = true_predicate ();
2570
2571 if (dump_file && (dump_flags & TDF_DETAILS))
2572 {
2573 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2574 dump_predicate (dump_file, info->conds, &bb_predicate);
2575 }
2576
2577 if (parms_info && nonconstant_names.exists ())
2578 {
2579 struct predicate phi_predicate;
2580 bool first_phi = true;
2581
2582 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2583 gsi_next (&bsi))
2584 {
2585 if (first_phi
2586 && !phi_result_unknown_predicate (parms_info, info, bb,
2587 &phi_predicate,
2588 nonconstant_names))
2589 break;
2590 first_phi = false;
2591 if (dump_file && (dump_flags & TDF_DETAILS))
2592 {
2593 fprintf (dump_file, " ");
2594 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2595 }
2596 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2597 nonconstant_names);
2598 }
2599 }
2600
2601 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2602
2603 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2604 gsi_next (&bsi))
2605 {
2606 gimple stmt = gsi_stmt (bsi);
2607 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2608 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2609 int prob;
2610 struct predicate will_be_nonconstant;
2611
2612 /* This relation stmt should be folded after we remove the
2613 builtin_expect call. Adjust the cost here. */
2614 if (stmt == fix_builtin_expect_stmt)
2615 {
2616 this_size--;
2617 this_time--;
2618 }
2619
2620 if (dump_file && (dump_flags & TDF_DETAILS))
2621 {
2622 fprintf (dump_file, " ");
2623 print_gimple_stmt (dump_file, stmt, 0, 0);
2624 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2625 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2626 this_time);
2627 }
2628
2629 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2630 {
2631 struct predicate this_array_index;
2632 this_array_index =
2633 array_index_predicate (info, nonconstant_names,
2634 gimple_assign_rhs1 (stmt));
2635 if (!false_predicate_p (&this_array_index))
2636 array_index =
2637 and_predicates (info->conds, &array_index,
2638 &this_array_index);
2639 }
2640 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2641 {
2642 struct predicate this_array_index;
2643 this_array_index =
2644 array_index_predicate (info, nonconstant_names,
2645 gimple_get_lhs (stmt));
2646 if (!false_predicate_p (&this_array_index))
2647 array_index =
2648 and_predicates (info->conds, &array_index,
2649 &this_array_index);
2650 }
2651
2652
2653 if (is_gimple_call (stmt)
2654 && !gimple_call_internal_p (stmt))
2655 {
2656 struct cgraph_edge *edge = node->get_edge (stmt);
2657 struct inline_edge_summary *es = inline_edge_summary (edge);
2658
2659 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2660 resolved as constant. We however don't want to optimize
2661 out the cgraph edges. */
2662 if (nonconstant_names.exists ()
2663 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2664 && gimple_call_lhs (stmt)
2665 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2666 {
2667 struct predicate false_p = false_predicate ();
2668 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2669 = false_p;
2670 }
2671 if (ipa_node_params_sum)
2672 {
2673 int count = gimple_call_num_args (stmt);
2674 int i;
2675
2676 if (count)
2677 es->param.safe_grow_cleared (count);
2678 for (i = 0; i < count; i++)
2679 {
2680 int prob = param_change_prob (stmt, i);
2681 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2682 es->param[i].change_prob = prob;
2683 }
2684 }
2685
2686 es->call_stmt_size = this_size;
2687 es->call_stmt_time = this_time;
2688 es->loop_depth = bb_loop_depth (bb);
2689 edge_set_predicate (edge, &bb_predicate);
2690 }
2691
2692 /* TODO: When a conditional jump or switch is known to be constant, but
2693 we did not translate it into the predicates, we really can account
2694 for just the maximum of the possible paths. */
2695 if (parms_info)
2696 will_be_nonconstant
2697 = will_be_nonconstant_predicate (parms_info, info,
2698 stmt, nonconstant_names);
2699 if (this_time || this_size)
2700 {
2701 struct predicate p;
2702
2703 this_time *= freq;
2704
2705 prob = eliminated_by_inlining_prob (stmt);
2706 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2707 fprintf (dump_file,
2708 "\t\t50%% will be eliminated by inlining\n");
2709 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2710 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2711
2712 if (parms_info)
2713 p = and_predicates (info->conds, &bb_predicate,
2714 &will_be_nonconstant);
2715 else
2716 p = true_predicate ();
2717
2718 if (!false_predicate_p (&p)
2719 || (is_gimple_call (stmt)
2720 && !false_predicate_p (&bb_predicate)))
2721 {
2722 time += this_time;
2723 size += this_size;
2724 if (time > MAX_TIME * INLINE_TIME_SCALE)
2725 time = MAX_TIME * INLINE_TIME_SCALE;
2726 }
2727
2728 /* We account everything but the calls. Calls have their own
2729 size/time info attached to cgraph edges. This is necessary
2730 in order to make the cost disappear after inlining. */
2731 if (!is_gimple_call (stmt))
2732 {
2733 if (prob)
2734 {
2735 struct predicate ip = not_inlined_predicate ();
2736 ip = and_predicates (info->conds, &ip, &p);
2737 account_size_time (info, this_size * prob,
2738 this_time * prob, &ip);
2739 }
2740 if (prob != 2)
2741 account_size_time (info, this_size * (2 - prob),
2742 this_time * (2 - prob), &p);
2743 }
2744
2745 gcc_assert (time >= 0);
2746 gcc_assert (size >= 0);
2747 }
2748 }
2749 }
2750 set_hint_predicate (&inline_summaries->get (node)->array_index, array_index);
2751 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2752 if (time > MAX_TIME)
2753 time = MAX_TIME;
2754 free (order);
2755
2756 if (nonconstant_names.exists () && !early)
2757 {
2758 struct loop *loop;
2759 predicate loop_iterations = true_predicate ();
2760 predicate loop_stride = true_predicate ();
2761
2762 if (dump_file && (dump_flags & TDF_DETAILS))
2763 flow_loops_dump (dump_file, NULL, 0);
2764 scev_initialize ();
2765 FOR_EACH_LOOP (loop, 0)
2766 {
2767 vec<edge> exits;
2768 edge ex;
2769 unsigned int j, i;
2770 struct tree_niter_desc niter_desc;
2771 basic_block *body = get_loop_body (loop);
2772 bb_predicate = *(struct predicate *) loop->header->aux;
2773
2774 exits = get_loop_exit_edges (loop);
2775 FOR_EACH_VEC_ELT (exits, j, ex)
2776 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2777 && !is_gimple_min_invariant (niter_desc.niter))
2778 {
2779 predicate will_be_nonconstant
2780 = will_be_nonconstant_expr_predicate (parms_info, info,
2781 niter_desc.niter,
2782 nonconstant_names);
2783 if (!true_predicate_p (&will_be_nonconstant))
2784 will_be_nonconstant = and_predicates (info->conds,
2785 &bb_predicate,
2786 &will_be_nonconstant);
2787 if (!true_predicate_p (&will_be_nonconstant)
2788 && !false_predicate_p (&will_be_nonconstant))
2789 /* This is slightly imprecise. We may want to represent each
2790 loop with an independent predicate. */
2791 loop_iterations =
2792 and_predicates (info->conds, &loop_iterations,
2793 &will_be_nonconstant);
2794 }
2795 exits.release ();
2796
2797 for (i = 0; i < loop->num_nodes; i++)
2798 {
2799 gimple_stmt_iterator gsi;
2800 bb_predicate = *(struct predicate *) body[i]->aux;
2801 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2802 gsi_next (&gsi))
2803 {
2804 gimple stmt = gsi_stmt (gsi);
2805 affine_iv iv;
2806 ssa_op_iter iter;
2807 tree use;
2808
2809 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2810 {
2811 predicate will_be_nonconstant;
2812
2813 if (!simple_iv
2814 (loop, loop_containing_stmt (stmt), use, &iv, true)
2815 || is_gimple_min_invariant (iv.step))
2816 continue;
2817 will_be_nonconstant
2818 = will_be_nonconstant_expr_predicate (parms_info, info,
2819 iv.step,
2820 nonconstant_names);
2821 if (!true_predicate_p (&will_be_nonconstant))
2822 will_be_nonconstant
2823 = and_predicates (info->conds,
2824 &bb_predicate,
2825 &will_be_nonconstant);
2826 if (!true_predicate_p (&will_be_nonconstant)
2827 && !false_predicate_p (&will_be_nonconstant))
2828 /* This is slightly imprecise. We may want to represent
2829 each loop with an independent predicate. */
2830 loop_stride =
2831 and_predicates (info->conds, &loop_stride,
2832 &will_be_nonconstant);
2833 }
2834 }
2835 }
2836 free (body);
2837 }
2838 set_hint_predicate (&inline_summaries->get (node)->loop_iterations,
2839 loop_iterations);
2840 set_hint_predicate (&inline_summaries->get (node)->loop_stride, loop_stride);
2841 scev_finalize ();
2842 }
2843 FOR_ALL_BB_FN (bb, my_function)
2844 {
2845 edge e;
2846 edge_iterator ei;
2847
2848 if (bb->aux)
2849 pool_free (edge_predicate_pool, bb->aux);
2850 bb->aux = NULL;
2851 FOR_EACH_EDGE (e, ei, bb->succs)
2852 {
2853 if (e->aux)
2854 pool_free (edge_predicate_pool, e->aux);
2855 e->aux = NULL;
2856 }
2857 }
2858 inline_summaries->get (node)->self_time = time;
2859 inline_summaries->get (node)->self_size = size;
2860 nonconstant_names.release ();
2861 if (opt_for_fn (node->decl, optimize))
2862 {
2863 if (!early)
2864 loop_optimizer_finalize ();
2865 else if (!ipa_edge_args_vector)
2866 ipa_free_all_node_params ();
2867 free_dominance_info (CDI_DOMINATORS);
2868 }
2869 if (dump_file)
2870 {
2871 fprintf (dump_file, "\n");
2872 dump_inline_summary (dump_file, node);
2873 }
2874 }
2875
2876
2877 /* Compute parameters of functions used by inliner.
2878 EARLY is true when we compute parameters for the early inliner */
2879
2880 void
2881 compute_inline_parameters (struct cgraph_node *node, bool early)
2882 {
2883 HOST_WIDE_INT self_stack_size;
2884 struct cgraph_edge *e;
2885 struct inline_summary *info;
2886
2887 gcc_assert (!node->global.inlined_to);
2888
2889 inline_summary_alloc ();
2890
2891 info = inline_summaries->get (node);
2892 reset_inline_summary (node, info);
2893
2894 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2895 Once this happens, we will need to more carefully predict the call
2896 statement size. */
2897 if (node->thunk.thunk_p)
2898 {
2899 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2900 struct predicate t = true_predicate ();
2901
2902 info->inlinable = 0;
2903 node->callees->call_stmt_cannot_inline_p = true;
2904 node->local.can_change_signature = false;
2905 es->call_stmt_time = 1;
2906 es->call_stmt_size = 1;
2907 account_size_time (info, 0, 0, &t);
2908 return;
2909 }
2910
2911 /* Even is_gimple_min_invariant relies on current_function_decl. */
2912 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2913
2914 /* Estimate the stack size for the function if we're optimizing. */
2915 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2916 info->estimated_self_stack_size = self_stack_size;
2917 info->estimated_stack_size = self_stack_size;
2918 info->stack_frame_offset = 0;
2919
2920 /* Can this function be inlined at all? */
2921 if (!opt_for_fn (node->decl, optimize)
2922 && !lookup_attribute ("always_inline",
2923 DECL_ATTRIBUTES (node->decl)))
2924 info->inlinable = false;
2925 else
2926 info->inlinable = tree_inlinable_function_p (node->decl);
2927
2928 /* Type attributes can use parameter indices to describe them. */
2929 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2930 node->local.can_change_signature = false;
2931 else
2932 {
2933 /* Otherwise, inlinable functions always can change signature. */
2934 if (info->inlinable)
2935 node->local.can_change_signature = true;
2936 else
2937 {
2938 /* Functions calling builtin_apply cannot change their signature. */
2939 for (e = node->callees; e; e = e->next_callee)
2940 {
2941 tree cdecl = e->callee->decl;
2942 if (DECL_BUILT_IN (cdecl)
2943 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2944 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2945 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2946 break;
2947 }
2948 node->local.can_change_signature = !e;
2949 }
2950 }
2951 estimate_function_body_sizes (node, early);
2952
2953 for (e = node->callees; e; e = e->next_callee)
2954 if (e->callee->comdat_local_p ())
2955 break;
2956 node->calls_comdat_local = (e != NULL);
2957
2958 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2959 info->time = info->self_time;
2960 info->size = info->self_size;
2961 info->stack_frame_offset = 0;
2962 info->estimated_stack_size = info->estimated_self_stack_size;
2963 #ifdef ENABLE_CHECKING
2964 inline_update_overall_summary (node);
2965 gcc_assert (info->time == info->self_time && info->size == info->self_size);
2966 #endif
2967
2968 pop_cfun ();
2969 }
2970
2971
2972 /* Compute parameters of functions used by inliner using
2973 current_function_decl. */
2974
2975 static unsigned int
2976 compute_inline_parameters_for_current (void)
2977 {
2978 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2979 return 0;
2980 }
2981
2982 namespace {
2983
2984 const pass_data pass_data_inline_parameters =
2985 {
2986 GIMPLE_PASS, /* type */
2987 "inline_param", /* name */
2988 OPTGROUP_INLINE, /* optinfo_flags */
2989 TV_INLINE_PARAMETERS, /* tv_id */
2990 0, /* properties_required */
2991 0, /* properties_provided */
2992 0, /* properties_destroyed */
2993 0, /* todo_flags_start */
2994 0, /* todo_flags_finish */
2995 };
2996
2997 class pass_inline_parameters : public gimple_opt_pass
2998 {
2999 public:
3000 pass_inline_parameters (gcc::context *ctxt)
3001 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
3002 {}
3003
3004 /* opt_pass methods: */
3005 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
3006 virtual unsigned int execute (function *)
3007 {
3008 return compute_inline_parameters_for_current ();
3009 }
3010
3011 }; // class pass_inline_parameters
3012
3013 } // anon namespace
3014
3015 gimple_opt_pass *
3016 make_pass_inline_parameters (gcc::context *ctxt)
3017 {
3018 return new pass_inline_parameters (ctxt);
3019 }
3020
3021
3022 /* Estimate the benefit of devirtualizing indirect edge IE, provided KNOWN_VALS,
3023 KNOWN_CONTEXTS and KNOWN_AGGS. */
3024
3025 static bool
3026 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
3027 int *size, int *time,
3028 vec<tree> known_vals,
3029 vec<ipa_polymorphic_call_context> known_contexts,
3030 vec<ipa_agg_jump_function_p> known_aggs)
3031 {
3032 tree target;
3033 struct cgraph_node *callee;
3034 struct inline_summary *isummary;
3035 enum availability avail;
3036 bool speculative;
3037
3038 if (!known_vals.exists () && !known_contexts.exists ())
3039 return false;
3040 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3041 return false;
3042
3043 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3044 known_aggs, &speculative);
3045 if (!target || speculative)
3046 return false;
3047
3048 /* Account for difference in cost between indirect and direct calls. */
3049 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3050 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3051 gcc_checking_assert (*time >= 0);
3052 gcc_checking_assert (*size >= 0);
3053
3054 callee = cgraph_node::get (target);
3055 if (!callee || !callee->definition)
3056 return false;
3057 callee = callee->function_symbol (&avail);
3058 if (avail < AVAIL_AVAILABLE)
3059 return false;
3060 isummary = inline_summaries->get (callee);
3061 return isummary->inlinable;
3062 }
3063
3064 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3065 handle edge E with probability PROB.
3066 Set HINTS if edge may be devirtualized.
3067 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3068 site. */
3069
3070 static inline void
3071 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3072 int *time,
3073 int prob,
3074 vec<tree> known_vals,
3075 vec<ipa_polymorphic_call_context> known_contexts,
3076 vec<ipa_agg_jump_function_p> known_aggs,
3077 inline_hints *hints)
3078 {
3079 struct inline_edge_summary *es = inline_edge_summary (e);
3080 int call_size = es->call_stmt_size;
3081 int call_time = es->call_stmt_time;
3082 int cur_size;
3083 if (!e->callee
3084 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3085 known_vals, known_contexts, known_aggs)
3086 && hints && e->maybe_hot_p ())
3087 *hints |= INLINE_HINT_indirect_call;
3088 cur_size = call_size * INLINE_SIZE_SCALE;
3089 *size += cur_size;
3090 if (min_size)
3091 *min_size += cur_size;
3092 *time += apply_probability ((gcov_type) call_time, prob)
3093 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3094 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3095 *time = MAX_TIME * INLINE_TIME_SCALE;
3096 }
3097
3098
3099
3100 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3101 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3102 describe context of the call site. */
3103
3104 static void
3105 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3106 int *min_size, int *time,
3107 inline_hints *hints,
3108 clause_t possible_truths,
3109 vec<tree> known_vals,
3110 vec<ipa_polymorphic_call_context> known_contexts,
3111 vec<ipa_agg_jump_function_p> known_aggs)
3112 {
3113 struct cgraph_edge *e;
3114 for (e = node->callees; e; e = e->next_callee)
3115 {
3116 struct inline_edge_summary *es = inline_edge_summary (e);
3117
3118 /* Do not care about zero sized builtins. */
3119 if (e->inline_failed && !es->call_stmt_size)
3120 {
3121 gcc_checking_assert (!es->call_stmt_time);
3122 continue;
3123 }
3124 if (!es->predicate
3125 || evaluate_predicate (es->predicate, possible_truths))
3126 {
3127 if (e->inline_failed)
3128 {
3129 /* Predicates of calls shall not use NOT_CHANGED codes,
3130 so we do not need to compute probabilities. */
3131 estimate_edge_size_and_time (e, size,
3132 es->predicate ? NULL : min_size,
3133 time, REG_BR_PROB_BASE,
3134 known_vals, known_contexts,
3135 known_aggs, hints);
3136 }
3137 else
3138 estimate_calls_size_and_time (e->callee, size, min_size, time,
3139 hints,
3140 possible_truths,
3141 known_vals, known_contexts,
3142 known_aggs);
3143 }
3144 }
3145 for (e = node->indirect_calls; e; e = e->next_callee)
3146 {
3147 struct inline_edge_summary *es = inline_edge_summary (e);
3148 if (!es->predicate
3149 || evaluate_predicate (es->predicate, possible_truths))
3150 estimate_edge_size_and_time (e, size,
3151 es->predicate ? NULL : min_size,
3152 time, REG_BR_PROB_BASE,
3153 known_vals, known_contexts, known_aggs,
3154 hints);
3155 }
3156 }
3157
3158
3159 /* Estimate size and time needed to execute NODE assuming
3160 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3161 information about NODE's arguments. If non-NULL, also use the probability
3162 information present in the INLINE_PARAM_SUMMARY vector.
3163 Additionally determine the hints implied by the context. Finally compute the
3164 minimal size needed for the call that is independent of the call context and
3165 can be used for fast estimates. Return the values in RET_SIZE,
3166 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3167
3168 static void
3169 estimate_node_size_and_time (struct cgraph_node *node,
3170 clause_t possible_truths,
3171 vec<tree> known_vals,
3172 vec<ipa_polymorphic_call_context> known_contexts,
3173 vec<ipa_agg_jump_function_p> known_aggs,
3174 int *ret_size, int *ret_min_size, int *ret_time,
3175 inline_hints *ret_hints,
3176 vec<inline_param_summary>
3177 inline_param_summary)
3178 {
3179 struct inline_summary *info = inline_summaries->get (node);
3180 size_time_entry *e;
3181 int size = 0;
3182 int time = 0;
3183 int min_size = 0;
3184 inline_hints hints = 0;
3185 int i;
3186
3187 if (dump_file && (dump_flags & TDF_DETAILS))
3188 {
3189 bool found = false;
3190 fprintf (dump_file, " Estimating body: %s/%i\n"
3191 " Known to be false: ", node->name (),
3192 node->order);
3193
3194 for (i = predicate_not_inlined_condition;
3195 i < (predicate_first_dynamic_condition
3196 + (int) vec_safe_length (info->conds)); i++)
3197 if (!(possible_truths & (1 << i)))
3198 {
3199 if (found)
3200 fprintf (dump_file, ", ");
3201 found = true;
3202 dump_condition (dump_file, info->conds, i);
3203 }
3204 }
3205
3206 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3207 if (evaluate_predicate (&e->predicate, possible_truths))
3208 {
3209 size += e->size;
3210 gcc_checking_assert (e->time >= 0);
3211 gcc_checking_assert (time >= 0);
3212 if (!inline_param_summary.exists ())
3213 time += e->time;
3214 else
3215 {
3216 int prob = predicate_probability (info->conds,
3217 &e->predicate,
3218 possible_truths,
3219 inline_param_summary);
3220 gcc_checking_assert (prob >= 0);
3221 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3222 time += apply_probability ((gcov_type) e->time, prob);
3223 }
3224 if (time > MAX_TIME * INLINE_TIME_SCALE)
3225 time = MAX_TIME * INLINE_TIME_SCALE;
3226 gcc_checking_assert (time >= 0);
3227
3228 }
3229 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3230 min_size = (*info->entry)[0].size;
3231 gcc_checking_assert (size >= 0);
3232 gcc_checking_assert (time >= 0);
3233
3234 if (info->loop_iterations
3235 && !evaluate_predicate (info->loop_iterations, possible_truths))
3236 hints |= INLINE_HINT_loop_iterations;
3237 if (info->loop_stride
3238 && !evaluate_predicate (info->loop_stride, possible_truths))
3239 hints |= INLINE_HINT_loop_stride;
3240 if (info->array_index
3241 && !evaluate_predicate (info->array_index, possible_truths))
3242 hints |= INLINE_HINT_array_index;
3243 if (info->scc_no)
3244 hints |= INLINE_HINT_in_scc;
3245 if (DECL_DECLARED_INLINE_P (node->decl))
3246 hints |= INLINE_HINT_declared_inline;
3247
3248 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3249 known_vals, known_contexts, known_aggs);
3250 gcc_checking_assert (size >= 0);
3251 gcc_checking_assert (time >= 0);
3252 time = RDIV (time, INLINE_TIME_SCALE);
3253 size = RDIV (size, INLINE_SIZE_SCALE);
3254 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3255
3256 if (dump_file && (dump_flags & TDF_DETAILS))
3257 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3258 if (ret_time)
3259 *ret_time = time;
3260 if (ret_size)
3261 *ret_size = size;
3262 if (ret_min_size)
3263 *ret_min_size = min_size;
3264 if (ret_hints)
3265 *ret_hints = hints;
3266 return;
3267 }
3268
3269
3270 /* Estimate size and time needed to execute callee of EDGE assuming that
3271 parameters known to be constant at caller of EDGE are propagated.
3272 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3273 and types for parameters. */
3274
3275 void
3276 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3277 vec<tree> known_vals,
3278 vec<ipa_polymorphic_call_context>
3279 known_contexts,
3280 vec<ipa_agg_jump_function_p> known_aggs,
3281 int *ret_size, int *ret_time,
3282 inline_hints *hints)
3283 {
3284 clause_t clause;
3285
3286 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3287 known_aggs);
3288 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3289 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3290 }
3291
3292 /* Translate all conditions from callee representation into caller
3293 representation and symbolically evaluate predicate P into a new predicate.
3294
3295 INFO is the inline_summary of the function we are adding the predicate to;
3296 CALLEE_INFO is the summary of the function predicate P comes from.
3297 OPERAND_MAP is an array mapping callee formal IDs to caller formal IDs.
3298 POSSIBLE_TRUTHS is a clause of all callee conditions that may be true in the
3299 caller context. TOPLEV_PREDICATE is the predicate under which the callee is
3300 executed. OFFSET_MAP is an array of offsets that need to be added to the
3301 conditions; a negative offset means that conditions relying on values passed
3302 by reference have to be discarded because they might not be preserved (and
3303 should be considered offset zero for other purposes). */
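/* A hypothetical illustration: if the callee condition tests its formal 1 and
   OPERAND_MAP[1] == 3, the remapped condition tests the caller's formal 3;
   with OFFSET_MAP[1] == 8, an aggregate condition at offset 16 in the callee
   becomes one at offset 24 in the caller.  */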
3304
3305 static struct predicate
3306 remap_predicate (struct inline_summary *info,
3307 struct inline_summary *callee_info,
3308 struct predicate *p,
3309 vec<int> operand_map,
3310 vec<int> offset_map,
3311 clause_t possible_truths, struct predicate *toplev_predicate)
3312 {
3313 int i;
3314 struct predicate out = true_predicate ();
3315
3316 /* True predicate is easy. */
3317 if (true_predicate_p (p))
3318 return *toplev_predicate;
3319 for (i = 0; p->clause[i]; i++)
3320 {
3321 clause_t clause = p->clause[i];
3322 int cond;
3323 struct predicate clause_predicate = false_predicate ();
3324
3325 gcc_assert (i < MAX_CLAUSES);
3326
3327 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3328 /* Do we have condition we can't disprove? */
3329 if (clause & possible_truths & (1 << cond))
3330 {
3331 struct predicate cond_predicate;
3332 /* Work out if the condition can translate to predicate in the
3333 inlined function. */
3334 if (cond >= predicate_first_dynamic_condition)
3335 {
3336 struct condition *c;
3337
3338 c = &(*callee_info->conds)[cond
3339 -
3340 predicate_first_dynamic_condition];
3341 /* See if we can remap condition operand to caller's operand.
3342 Otherwise give up. */
3343 if (!operand_map.exists ()
3344 || (int) operand_map.length () <= c->operand_num
3345 || operand_map[c->operand_num] == -1
3346 /* TODO: For non-aggregate conditions, adding an offset is
3347 basically an arithmetic jump function processing which
3348 we should support in future. */
3349 || ((!c->agg_contents || !c->by_ref)
3350 && offset_map[c->operand_num] > 0)
3351 || (c->agg_contents && c->by_ref
3352 && offset_map[c->operand_num] < 0))
3353 cond_predicate = true_predicate ();
3354 else
3355 {
3356 struct agg_position_info ap;
3357 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3358 if (offset_delta < 0)
3359 {
3360 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3361 offset_delta = 0;
3362 }
3363 gcc_assert (!c->agg_contents
3364 || c->by_ref || offset_delta == 0);
3365 ap.offset = c->offset + offset_delta;
3366 ap.agg_contents = c->agg_contents;
3367 ap.by_ref = c->by_ref;
3368 cond_predicate = add_condition (info,
3369 operand_map[c->operand_num],
3370 &ap, c->code, c->val);
3371 }
3372 }
3373 /* Fixed conditions remain the same; construct a single
3374 condition predicate. */
3375 else
3376 {
3377 cond_predicate.clause[0] = 1 << cond;
3378 cond_predicate.clause[1] = 0;
3379 }
3380 clause_predicate = or_predicates (info->conds, &clause_predicate,
3381 &cond_predicate);
3382 }
3383 out = and_predicates (info->conds, &out, &clause_predicate);
3384 }
3385 return and_predicates (info->conds, &out, toplev_predicate);
3386 }
3387
3388
3389 /* Update summary information of inline clones after inlining.
3390 Compute peak stack usage. */
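/* E.g. (illustrative numbers): if the caller sits at stack frame offset 0
   with a 32-byte self frame and the inlined callee needs 16 bytes, the callee
   is placed at offset 32 and the peak of 48 is propagated to the node the
   callee is ultimately inlined to.  */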
3391
3392 static void
3393 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3394 {
3395 struct cgraph_edge *e;
3396 struct inline_summary *callee_info = inline_summaries->get (node);
3397 struct inline_summary *caller_info = inline_summaries->get (node->callers->caller);
3398 HOST_WIDE_INT peak;
3399
3400 callee_info->stack_frame_offset
3401 = caller_info->stack_frame_offset
3402 + caller_info->estimated_self_stack_size;
3403 peak = callee_info->stack_frame_offset
3404 + callee_info->estimated_self_stack_size;
3405 if (inline_summaries->get (node->global.inlined_to)->estimated_stack_size < peak)
3406 inline_summaries->get (node->global.inlined_to)->estimated_stack_size = peak;
3407 ipa_propagate_frequency (node);
3408 for (e = node->callees; e; e = e->next_callee)
3409 {
3410 if (!e->inline_failed)
3411 inline_update_callee_summaries (e->callee, depth);
3412 inline_edge_summary (e)->loop_depth += depth;
3413 }
3414 for (e = node->indirect_calls; e; e = e->next_callee)
3415 inline_edge_summary (e)->loop_depth += depth;
3416 }
3417
3418 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3419 When function A is inlined in B and A calls C with a parameter that
3420 changes with probability PROB1, and that parameter is known to be a
3421 passthrough of an argument of B that changes with probability PROB2,
3422 the probability of change is now PROB1*PROB2. */
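/* For example (numbers illustrative, with REG_BR_PROB_BASE being 10000):
   PROB1 == 5000 and PROB2 == 2000, i.e. 50% and 20%, combine to
   combine_probabilities (5000, 2000) == 1000, i.e. a 10% chance of change.  */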
3423
3424 static void
3425 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3426 struct cgraph_edge *edge)
3427 {
3428 if (ipa_node_params_sum)
3429 {
3430 int i;
3431 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3432 struct inline_edge_summary *es = inline_edge_summary (edge);
3433 struct inline_edge_summary *inlined_es
3434 = inline_edge_summary (inlined_edge);
3435
3436 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3437 {
3438 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3439 if (jfunc->type == IPA_JF_PASS_THROUGH
3440 && (ipa_get_jf_pass_through_formal_id (jfunc)
3441 < (int) inlined_es->param.length ()))
3442 {
3443 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3444 int prob1 = es->param[i].change_prob;
3445 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3446 int prob = combine_probabilities (prob1, prob2);
3447
3448 if (prob1 && prob2 && !prob)
3449 prob = 1;
3450
3451 es->param[i].change_prob = prob;
3452 }
3453 }
3454 }
3455 }
3456
3457 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3458
3459 Remap predicates of callees of NODE. Rest of arguments match
3460 remap_predicate.
3461
3462 Also update change probabilities. */
3463
3464 static void
3465 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3466 struct cgraph_node *node,
3467 struct inline_summary *info,
3468 struct inline_summary *callee_info,
3469 vec<int> operand_map,
3470 vec<int> offset_map,
3471 clause_t possible_truths,
3472 struct predicate *toplev_predicate)
3473 {
3474 struct cgraph_edge *e;
3475 for (e = node->callees; e; e = e->next_callee)
3476 {
3477 struct inline_edge_summary *es = inline_edge_summary (e);
3478 struct predicate p;
3479
3480 if (e->inline_failed)
3481 {
3482 remap_edge_change_prob (inlined_edge, e);
3483
3484 if (es->predicate)
3485 {
3486 p = remap_predicate (info, callee_info,
3487 es->predicate, operand_map, offset_map,
3488 possible_truths, toplev_predicate);
3489 edge_set_predicate (e, &p);
3490 /* TODO: We should remove the edge for code that will be
3491 optimized out, but we need to keep verifiers and tree-inline
3492 happy. Make it cold for now. */
3493 if (false_predicate_p (&p))
3494 {
3495 e->count = 0;
3496 e->frequency = 0;
3497 }
3498 }
3499 else
3500 edge_set_predicate (e, toplev_predicate);
3501 }
3502 else
3503 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3504 operand_map, offset_map, possible_truths,
3505 toplev_predicate);
3506 }
3507 for (e = node->indirect_calls; e; e = e->next_callee)
3508 {
3509 struct inline_edge_summary *es = inline_edge_summary (e);
3510 struct predicate p;
3511
3512 remap_edge_change_prob (inlined_edge, e);
3513 if (es->predicate)
3514 {
3515 p = remap_predicate (info, callee_info,
3516 es->predicate, operand_map, offset_map,
3517 possible_truths, toplev_predicate);
3518 edge_set_predicate (e, &p);
3519 /* TODO: We should remove the edge for code that will be optimized
3520 out, but we need to keep verifiers and tree-inline happy.
3521 Make it cold for now. */
3522 if (false_predicate_p (&p))
3523 {
3524 e->count = 0;
3525 e->frequency = 0;
3526 }
3527 }
3528 else
3529 edge_set_predicate (e, toplev_predicate);
3530 }
3531 }
3532
3533 /* Same as remap_predicate, but set result into hint *HINT. */
3534
3535 static void
3536 remap_hint_predicate (struct inline_summary *info,
3537 struct inline_summary *callee_info,
3538 struct predicate **hint,
3539 vec<int> operand_map,
3540 vec<int> offset_map,
3541 clause_t possible_truths,
3542 struct predicate *toplev_predicate)
3543 {
3544 predicate p;
3545
3546 if (!*hint)
3547 return;
3548 p = remap_predicate (info, callee_info,
3549 *hint,
3550 operand_map, offset_map,
3551 possible_truths, toplev_predicate);
3552 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3553 {
3554 if (!*hint)
3555 set_hint_predicate (hint, p);
3556 else
3557 **hint = and_predicates (info->conds, *hint, &p);
3558 }
3559 }
3560
3561 /* We inlined EDGE. Update summary of the function we inlined into. */
3562
3563 void
3564 inline_merge_summary (struct cgraph_edge *edge)
3565 {
3566 struct inline_summary *callee_info = inline_summaries->get (edge->callee);
3567 struct cgraph_node *to = (edge->caller->global.inlined_to
3568 ? edge->caller->global.inlined_to : edge->caller);
3569 struct inline_summary *info = inline_summaries->get (to);
3570 clause_t clause = 0; /* not_inline is known to be false. */
3571 size_time_entry *e;
3572 vec<int> operand_map = vNULL;
3573 vec<int> offset_map = vNULL;
3574 int i;
3575 struct predicate toplev_predicate;
3576 struct predicate true_p = true_predicate ();
3577 struct inline_edge_summary *es = inline_edge_summary (edge);
3578
3579 if (es->predicate)
3580 toplev_predicate = *es->predicate;
3581 else
3582 toplev_predicate = true_predicate ();
3583
3584 if (callee_info->conds)
3585 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3586 if (ipa_node_params_sum && callee_info->conds)
3587 {
3588 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3589 int count = ipa_get_cs_argument_count (args);
3590 int i;
3591
3592 if (count)
3593 {
3594 operand_map.safe_grow_cleared (count);
3595 offset_map.safe_grow_cleared (count);
3596 }
3597 for (i = 0; i < count; i++)
3598 {
3599 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3600 int map = -1;
3601
3602 /* TODO: handle non-NOPs when merging. */
3603 if (jfunc->type == IPA_JF_PASS_THROUGH)
3604 {
3605 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3606 map = ipa_get_jf_pass_through_formal_id (jfunc);
3607 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3608 offset_map[i] = -1;
3609 }
3610 else if (jfunc->type == IPA_JF_ANCESTOR)
3611 {
3612 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3613 if (offset >= 0 && offset < INT_MAX)
3614 {
3615 map = ipa_get_jf_ancestor_formal_id (jfunc);
3616 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3617 offset = -1;
3618 offset_map[i] = offset;
3619 }
3620 }
3621 operand_map[i] = map;
3622 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3623 }
3624 }
3625 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3626 {
3627 struct predicate p = remap_predicate (info, callee_info,
3628 &e->predicate, operand_map,
3629 offset_map, clause,
3630 &toplev_predicate);
3631 if (!false_predicate_p (&p))
3632 {
3633 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3634 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3635 int prob = predicate_probability (callee_info->conds,
3636 &e->predicate,
3637 clause, es->param);
3638 add_time = apply_probability ((gcov_type) add_time, prob);
3639 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3640 add_time = MAX_TIME * INLINE_TIME_SCALE;
3641 if (prob != REG_BR_PROB_BASE
3642 && dump_file && (dump_flags & TDF_DETAILS))
3643 {
3644 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3645 (double) prob / REG_BR_PROB_BASE);
3646 }
3647 account_size_time (info, e->size, add_time, &p);
3648 }
3649 }
3650 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3651 offset_map, clause, &toplev_predicate);
3652 remap_hint_predicate (info, callee_info,
3653 &callee_info->loop_iterations,
3654 operand_map, offset_map, clause, &toplev_predicate);
3655 remap_hint_predicate (info, callee_info,
3656 &callee_info->loop_stride,
3657 operand_map, offset_map, clause, &toplev_predicate);
3658 remap_hint_predicate (info, callee_info,
3659 &callee_info->array_index,
3660 operand_map, offset_map, clause, &toplev_predicate);
3661
3662 inline_update_callee_summaries (edge->callee,
3663 inline_edge_summary (edge)->loop_depth);
3664
3665 /* We do not maintain predicates of inlined edges, so free it. */
3666 edge_set_predicate (edge, &true_p);
3667 /* Similarly remove param summaries. */
3668 es->param.release ();
3669 operand_map.release ();
3670 offset_map.release ();
3671 }
3672
3673 /* For performance reasons inline_merge_summary does not update the overall
3674 size and time. Recompute them here. */
3675
3676 void
3677 inline_update_overall_summary (struct cgraph_node *node)
3678 {
3679 struct inline_summary *info = inline_summaries->get (node);
3680 size_time_entry *e;
3681 int i;
3682
3683 info->size = 0;
3684 info->time = 0;
3685 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3686 {
3687 info->size += e->size, info->time += e->time;
3688 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3689 info->time = MAX_TIME * INLINE_TIME_SCALE;
3690 }
3691 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3692 &info->time, NULL,
3693 ~(clause_t) (1 << predicate_false_condition),
3694 vNULL, vNULL, vNULL);
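/* Sizes and times are accumulated in a fixed-point scale; convert back to the
   units used by the rest of the inliner, rounding to the nearest value. */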
3695 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3696 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3697 }
3698
3699 /* Return hints derived from EDGE. */
3700 int
3701 simple_edge_hints (struct cgraph_edge *edge)
3702 {
3703 int hints = 0;
3704 struct cgraph_node *to = (edge->caller->global.inlined_to
3705 ? edge->caller->global.inlined_to : edge->caller);
3706 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
3707 if (inline_summaries->get (to)->scc_no
3708 && inline_summaries->get (to)->scc_no
3709 == inline_summaries->get (callee)->scc_no
3710 && !edge->recursive_p ())
3711 hints |= INLINE_HINT_same_scc;
3712
3713 if (callee->lto_file_data && edge->caller->lto_file_data
3714 && edge->caller->lto_file_data != callee->lto_file_data
3715 && !callee->merged)
3716 hints |= INLINE_HINT_cross_module;
3717
3718 return hints;
3719 }
3720
3721 /* Estimate the time cost for the caller when inlining EDGE.
3722 Only to be called via estimate_edge_time, which handles the
3723 caching mechanism.
3724
3725 When caching, also update the cache entry. Compute both time and
3726 size, since we always need both metrics eventually. */
3727
3728 int
3729 do_estimate_edge_time (struct cgraph_edge *edge)
3730 {
3731 int time;
3732 int size;
3733 inline_hints hints;
3734 struct cgraph_node *callee;
3735 clause_t clause;
3736 vec<tree> known_vals;
3737 vec<ipa_polymorphic_call_context> known_contexts;
3738 vec<ipa_agg_jump_function_p> known_aggs;
3739 struct inline_edge_summary *es = inline_edge_summary (edge);
3740 int min_size;
3741
3742 callee = edge->callee->ultimate_alias_target ();
3743
3744 gcc_checking_assert (edge->inline_failed);
3745 evaluate_properties_for_edge (edge, true,
3746 &clause, &known_vals, &known_contexts,
3747 &known_aggs);
3748 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3749 known_aggs, &size, &min_size, &time, &hints, es->param);
3750
3751 /* When we have profile feedback, we can quite safely identify hot
3752 edges and for those we disable size limits. Don't do that when
3753 probability that caller will call the callee is low however, since it
3754 may hurt optimization of the caller's hot path. */
3755 if (edge->count && edge->maybe_hot_p ()
3756 && (edge->count * 2
3757 > (edge->caller->global.inlined_to
3758 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3759 hints |= INLINE_HINT_known_hot;
3760
3761 known_vals.release ();
3762 known_contexts.release ();
3763 known_aggs.release ();
3764 gcc_checking_assert (size >= 0);
3765 gcc_checking_assert (time >= 0);
3766
3767 /* When caching, update the cache entry. */
3768 if (edge_growth_cache.exists ())
3769 {
3770 inline_summaries->get (edge->callee)->min_size = min_size;
3771 if ((int) edge_growth_cache.length () <= edge->uid)
3772 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
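/* The cached time, size and hints are biased by one so that a zero entry
   means "not computed yet"; readers of the cache subtract the bias again
   (see do_estimate_edge_size and do_estimate_edge_hints below). */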
3773 edge_growth_cache[edge->uid].time = time + (time >= 0);
3774
3775 edge_growth_cache[edge->uid].size = size + (size >= 0);
3776 hints |= simple_edge_hints (edge);
3777 edge_growth_cache[edge->uid].hints = hints + 1;
3778 }
3779 return time;
3780 }
3781
3782
3783 /* Return estimated callee growth after inlining EDGE.
3784 Only to be called via estimate_edge_size. */
3785
3786 int
3787 do_estimate_edge_size (struct cgraph_edge *edge)
3788 {
3789 int size;
3790 struct cgraph_node *callee;
3791 clause_t clause;
3792 vec<tree> known_vals;
3793 vec<ipa_polymorphic_call_context> known_contexts;
3794 vec<ipa_agg_jump_function_p> known_aggs;
3795
3796 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3797
3798 if (edge_growth_cache.exists ())
3799 {
3800 do_estimate_edge_time (edge);
3801 size = edge_growth_cache[edge->uid].size;
3802 gcc_checking_assert (size);
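/* The cached size is biased by one (see do_estimate_edge_time); undo the bias. */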
3803 return size - (size > 0);
3804 }
3805
3806 callee = edge->callee->ultimate_alias_target ();
3807
3808 /* Early inliner runs without caching, go ahead and do the dirty work. */
3809 gcc_checking_assert (edge->inline_failed);
3810 evaluate_properties_for_edge (edge, true,
3811 &clause, &known_vals, &known_contexts,
3812 &known_aggs);
3813 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3814 known_aggs, &size, NULL, NULL, NULL, vNULL);
3815 known_vals.release ();
3816 known_contexts.release ();
3817 known_aggs.release ();
3818 return size;
3819 }
3820
3821
3822 /* Estimate the hints for the caller when inlining EDGE.
3823 Only to be called via estimate_edge_hints. */
3824
3825 inline_hints
3826 do_estimate_edge_hints (struct cgraph_edge *edge)
3827 {
3828 inline_hints hints;
3829 struct cgraph_node *callee;
3830 clause_t clause;
3831 vec<tree> known_vals;
3832 vec<ipa_polymorphic_call_context> known_contexts;
3833 vec<ipa_agg_jump_function_p> known_aggs;
3834
3835 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3836
3837 if (edge_growth_cache.exists ())
3838 {
3839 do_estimate_edge_time (edge);
3840 hints = edge_growth_cache[edge->uid].hints;
3841 gcc_checking_assert (hints);
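/* The cached hints are biased by one (see do_estimate_edge_time); undo the bias. */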
3842 return hints - 1;
3843 }
3844
3845 callee = edge->callee->ultimate_alias_target ();
3846
3847 /* Early inliner runs without caching, go ahead and do the dirty work. */
3848 gcc_checking_assert (edge->inline_failed);
3849 evaluate_properties_for_edge (edge, true,
3850 &clause, &known_vals, &known_contexts,
3851 &known_aggs);
3852 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3853 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3854 known_vals.release ();
3855 known_contexts.release ();
3856 known_aggs.release ();
3857 hints |= simple_edge_hints (edge);
3858 return hints;
3859 }
3860
3861
3862 /* Estimate self time of the function NODE after inlining EDGE. */
3863
3864 int
3865 estimate_time_after_inlining (struct cgraph_node *node,
3866 struct cgraph_edge *edge)
3867 {
3868 struct inline_edge_summary *es = inline_edge_summary (edge);
3869 if (!es->predicate || !false_predicate_p (es->predicate))
3870 {
3871 gcov_type time =
3872 inline_summaries->get (node)->time + estimate_edge_time (edge);
3873 if (time < 0)
3874 time = 0;
3875 if (time > MAX_TIME)
3876 time = MAX_TIME;
3877 return time;
3878 }
3879 return inline_summaries->get (node)->time;
3880 }
3881
3882
3883 /* Estimate the size of NODE after inlining EDGE which should be an
3884 edge to either NODE or a call inlined into NODE. */
3885
3886 int
3887 estimate_size_after_inlining (struct cgraph_node *node,
3888 struct cgraph_edge *edge)
3889 {
3890 struct inline_edge_summary *es = inline_edge_summary (edge);
3891 if (!es->predicate || !false_predicate_p (es->predicate))
3892 {
3893 int size = inline_summaries->get (node)->size + estimate_edge_growth (edge);
3894 gcc_assert (size >= 0);
3895 return size;
3896 }
3897 return inline_summaries->get (node)->size;
3898 }
3899
3900
3901 struct growth_data
3902 {
3903 struct cgraph_node *node;
3904 bool self_recursive;
3905 bool uninlinable;
3906 int growth;
3907 };
3908
3909
3910 /* Worker for estimate_growth. Collect growth for all callers of NODE. */
3911
3912 static bool
3913 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3914 {
3915 struct cgraph_edge *e;
3916 struct growth_data *d = (struct growth_data *) data;
3917
3918 for (e = node->callers; e; e = e->next_caller)
3919 {
3920 gcc_checking_assert (e->inline_failed);
3921
3922 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3923 {
3924 d->uninlinable = true;
3925 continue;
3926 }
3927
3928 if (e->recursive_p ())
3929 {
3930 d->self_recursive = true;
3931 continue;
3932 }
3933 d->growth += estimate_edge_growth (e);
3934 }
3935 return false;
3936 }
3937
3938
3939 /* Estimate the growth caused by inlining NODE into all callers. */
3940
3941 int
3942 estimate_growth (struct cgraph_node *node)
3943 {
3944 struct growth_data d = { node, false, false, 0 };
3945 struct inline_summary *info = inline_summaries->get (node);
3946
3947 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
3948
3949 /* For self-recursive functions the growth estimate really should be
3950 infinity. We do not want to return very large values because the growth
3951 plays various roles in badness computation fractions; be sure not to
3952 return zero or negative growth. */
3953 if (d.self_recursive)
3954 d.growth = d.growth < info->size ? info->size : d.growth;
3955 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
3956 ;
3957 else
3958 {
3959 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
3960 d.growth -= info->size;
3961 /* COMDAT functions are very often not shared across multiple units
3962 since they come from various template instantiations.
3963 Take this into account. */
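/* Only the fraction of the body expected to disappear,
   (100 - PARAM_COMDAT_SHARING_PROBABILITY) percent, is subtracted,
   rounded to the nearest unit. */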
3964 else if (DECL_COMDAT (node->decl)
3965 && node->can_remove_if_no_direct_calls_p ())
3966 d.growth -= (info->size
3967 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
3968 + 50) / 100;
3969 }
3970
3971 return d.growth;
3972 }
3973
3974 /* Return true if NODE is unlikely to disappear after inlining: it cannot be
removed, some call to it can never be inlined, or it has (counting callers of
its aliases too) more than *MAX_CALLERS callers; *MAX_CALLERS is decremented. */
3975
3976 static bool
3977 check_callers (cgraph_node *node, int *max_callers)
3978 {
3979 ipa_ref *ref;
3980
3981 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
3982 return true;
3983
3984 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
3985 {
3986 (*max_callers)--;
3987 if (!*max_callers
3988 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
3989 return true;
3990 }
3991
3992 FOR_EACH_ALIAS (node, ref)
3993 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
3994 return true;
3995
3996 return false;
3997 }
3998
3999
4000 /* Cheaply estimate whether the growth of NODE is likely positive, knowing
4001 EDGE_GROWTH of one particular edge.
4002 We assume that most other edges will have a similar growth and skip
4003 the full computation when there are too many callers. */
4004
4005 bool
4006 growth_likely_positive (struct cgraph_node *node,
4007 int edge_growth)
4008 {
4009 int max_callers;
4010 struct cgraph_edge *e;
4011 gcc_checking_assert (edge_growth > 0);
4012
4013 /* First quickly check if NODE is removable at all. */
4014 if (DECL_EXTERNAL (node->decl))
4015 return true;
4016 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
4017 || node->address_taken)
4018 return true;
4019
4020 max_callers = inline_summaries->get (node)->size * 4 / edge_growth + 2;
4021
4022 for (e = node->callers; e; e = e->next_caller)
4023 {
4024 max_callers--;
4025 if (!max_callers
4026 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
4027 return true;
4028 }
4029
4030 ipa_ref *ref;
4031 FOR_EACH_ALIAS (node, ref)
4032 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
4033 return true;
4034
4035 /* Unlike for functions called once, we play it unsafe with
4036 COMDATs. We can allow that because we know the functions under
4037 consideration are small (and thus the risk is small) and,
4038 moreover, the growth estimate already accounts for the fact
4039 that COMDAT functions may or may not disappear when eliminated
4040 from the current unit. With good probability, making the
4041 aggressive choice in all units is going to make the overall
4042 program smaller. */
4043 if (DECL_COMDAT (node->decl))
4044 {
4045 if (!node->can_remove_if_no_direct_calls_p ())
4046 return true;
4047 }
4048 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
4049 return true;
4050
4051 return estimate_growth (node) > 0;
4052 }
4053
4054
4055 /* Perform the intraprocedural analysis of NODE that is required to
4056 inline indirect calls. */
4057
4058 static void
4059 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
4060 {
4061 ipa_analyze_node (node);
4062 if (dump_file && (dump_flags & TDF_DETAILS))
4063 {
4064 ipa_print_node_params (dump_file, node);
4065 ipa_print_node_jump_functions (dump_file, node);
4066 }
4067 }
4068
4069
4070 /* Analyze NODE and compute its inline summary. */
4071
4072 void
4073 inline_analyze_function (struct cgraph_node *node)
4074 {
4075 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4076
4077 if (dump_file)
4078 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4079 node->name (), node->order);
4080 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4081 inline_indirect_intraprocedural_analysis (node);
4082 compute_inline_parameters (node, false);
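/* When not optimizing, inlining into this function is not performed; mark
   all of its call sites as impossible to inline. */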
4083 if (!optimize)
4084 {
4085 struct cgraph_edge *e;
4086 for (e = node->callees; e; e = e->next_callee)
4087 {
4088 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4089 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4090 e->call_stmt_cannot_inline_p = true;
4091 }
4092 for (e = node->indirect_calls; e; e = e->next_callee)
4093 {
4094 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4095 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4096 e->call_stmt_cannot_inline_p = true;
4097 }
4098 }
4099
4100 pop_cfun ();
4101 }
4102
4103
4104 /* Called when a new function is inserted into the callgraph late. */
4105
4106 void
4107 inline_summary_t::insert (struct cgraph_node *node, inline_summary *)
4108 {
4109 inline_analyze_function (node);
4110 }
4111
4112 /* Compute inline summaries of all functions in the unit. */
4113
4114 void
4115 inline_generate_summary (void)
4116 {
4117 struct cgraph_node *node;
4118
4119 /* When not optimizing, do not bother to analyze. Inlining is still done
4120 because edge redirection needs to happen there. */
4121 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4122 return;
4123
4124 if (!inline_summaries)
4125 inline_summaries = (inline_summary_t*) inline_summary_t::create_ggc (symtab);
4126
4127 inline_summaries->enable_insertion_hook ();
4128
4129 ipa_register_cgraph_hooks ();
4130 inline_free_summary ();
4131
4132 FOR_EACH_DEFINED_FUNCTION (node)
4133 if (!node->alias)
4134 inline_analyze_function (node);
4135 }
4136
4137
4138 /* Read predicate from IB. */
4139
4140 static struct predicate
4141 read_predicate (struct lto_input_block *ib)
4142 {
4143 struct predicate out;
4144 clause_t clause;
4145 int k = 0;
4146
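/* Clauses are streamed as a zero-terminated sequence; read until the
   terminating zero (see write_predicate below). */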
4147 do
4148 {
4149 gcc_assert (k <= MAX_CLAUSES);
4150 clause = out.clause[k++] = streamer_read_uhwi (ib);
4151 }
4152 while (clause);
4153
4154 /* Zero-initialize the remaining clauses in OUT. */
4155 while (k <= MAX_CLAUSES)
4156 out.clause[k++] = 0;
4157
4158 return out;
4159 }
4160
4161
4162 /* Read inline summary for edge E from IB. */
4163
4164 static void
4165 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4166 {
4167 struct inline_edge_summary *es = inline_edge_summary (e);
4168 struct predicate p;
4169 int length, i;
4170
4171 es->call_stmt_size = streamer_read_uhwi (ib);
4172 es->call_stmt_time = streamer_read_uhwi (ib);
4173 es->loop_depth = streamer_read_uhwi (ib);
4174 p = read_predicate (ib);
4175 edge_set_predicate (e, &p);
4176 length = streamer_read_uhwi (ib);
4177 if (length)
4178 {
4179 es->param.safe_grow_cleared (length);
4180 for (i = 0; i < length; i++)
4181 es->param[i].change_prob = streamer_read_uhwi (ib);
4182 }
4183 }
4184
4185
4186 /* Stream in inline summaries from the section. */
4187
4188 static void
4189 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4190 size_t len)
4191 {
4192 const struct lto_function_header *header =
4193 (const struct lto_function_header *) data;
4194 const int cfg_offset = sizeof (struct lto_function_header);
4195 const int main_offset = cfg_offset + header->cfg_size;
4196 const int string_offset = main_offset + header->main_size;
4197 struct data_in *data_in;
4198 unsigned int i, count2, j;
4199 unsigned int f_count;
4200
4201 lto_input_block ib ((const char *) data + main_offset, header->main_size,
4202 file_data->mode_table);
4203
4204 data_in =
4205 lto_data_in_create (file_data, (const char *) data + string_offset,
4206 header->string_size, vNULL);
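/* The section body starts with the number of function records, each laid out
   exactly as written by inline_write_summary. */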
4207 f_count = streamer_read_uhwi (&ib);
4208 for (i = 0; i < f_count; i++)
4209 {
4210 unsigned int index;
4211 struct cgraph_node *node;
4212 struct inline_summary *info;
4213 lto_symtab_encoder_t encoder;
4214 struct bitpack_d bp;
4215 struct cgraph_edge *e;
4216 predicate p;
4217
4218 index = streamer_read_uhwi (&ib);
4219 encoder = file_data->symtab_node_encoder;
4220 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4221 index));
4222 info = inline_summaries->get (node);
4223
4224 info->estimated_stack_size
4225 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4226 info->size = info->self_size = streamer_read_uhwi (&ib);
4227 info->time = info->self_time = streamer_read_uhwi (&ib);
4228
4229 bp = streamer_read_bitpack (&ib);
4230 info->inlinable = bp_unpack_value (&bp, 1);
4231
4232 count2 = streamer_read_uhwi (&ib);
4233 gcc_assert (!info->conds);
4234 for (j = 0; j < count2; j++)
4235 {
4236 struct condition c;
4237 c.operand_num = streamer_read_uhwi (&ib);
4238 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4239 c.val = stream_read_tree (&ib, data_in);
4240 bp = streamer_read_bitpack (&ib);
4241 c.agg_contents = bp_unpack_value (&bp, 1);
4242 c.by_ref = bp_unpack_value (&bp, 1);
4243 if (c.agg_contents)
4244 c.offset = streamer_read_uhwi (&ib);
4245 vec_safe_push (info->conds, c);
4246 }
4247 count2 = streamer_read_uhwi (&ib);
4248 gcc_assert (!info->entry);
4249 for (j = 0; j < count2; j++)
4250 {
4251 struct size_time_entry e;
4252
4253 e.size = streamer_read_uhwi (&ib);
4254 e.time = streamer_read_uhwi (&ib);
4255 e.predicate = read_predicate (&ib);
4256
4257 vec_safe_push (info->entry, e);
4258 }
4259
4260 p = read_predicate (&ib);
4261 set_hint_predicate (&info->loop_iterations, p);
4262 p = read_predicate (&ib);
4263 set_hint_predicate (&info->loop_stride, p);
4264 p = read_predicate (&ib);
4265 set_hint_predicate (&info->array_index, p);
4266 for (e = node->callees; e; e = e->next_callee)
4267 read_inline_edge_summary (&ib, e);
4268 for (e = node->indirect_calls; e; e = e->next_callee)
4269 read_inline_edge_summary (&ib, e);
4270 }
4271
4272 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4273 len);
4274 lto_data_in_delete (data_in);
4275 }
4276
4277
4278 /* Read inline summaries. Jump functions are shared between ipa-cp
4279 and the inliner, so when ipa-cp is active we do not need to read them
4280 here a second time. */
4281
4282 void
4283 inline_read_summary (void)
4284 {
4285 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4286 struct lto_file_decl_data *file_data;
4287 unsigned int j = 0;
4288
4289 inline_summary_alloc ();
4290
4291 while ((file_data = file_data_vec[j++]))
4292 {
4293 size_t len;
4294 const char *data = lto_get_section_data (file_data,
4295 LTO_section_inline_summary,
4296 NULL, &len);
4297 if (data)
4298 inline_read_section (file_data, data, len);
4299 else
4300 /* Fatal error here. We do not want to support compiling ltrans units
4301 with a different compiler version or different flags than the WPA
4302 unit, so this should never happen. */
4303 fatal_error (input_location,
4304 "ipa inline summary is missing in input file");
4305 }
4306 if (optimize)
4307 {
4308 ipa_register_cgraph_hooks ();
4309 if (!flag_ipa_cp)
4310 ipa_prop_read_jump_functions ();
4311 }
4312
4313 gcc_assert (inline_summaries);
4314 inline_summaries->enable_insertion_hook ();
4315 }
4316
4317
4318 /* Write predicate P to OB. */
4319
4320 static void
4321 write_predicate (struct output_block *ob, struct predicate *p)
4322 {
4323 int j;
4324 if (p)
4325 for (j = 0; p->clause[j]; j++)
4326 {
4327 gcc_assert (j < MAX_CLAUSES);
4328 streamer_write_uhwi (ob, p->clause[j]);
4329 }
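/* Terminate the clause list with a zero so read_predicate knows where to stop. */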
4330 streamer_write_uhwi (ob, 0);
4331 }
4332
4333
4334 /* Write inline summary for edge E to OB. */
4335
4336 static void
4337 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4338 {
4339 struct inline_edge_summary *es = inline_edge_summary (e);
4340 int i;
4341
4342 streamer_write_uhwi (ob, es->call_stmt_size);
4343 streamer_write_uhwi (ob, es->call_stmt_time);
4344 streamer_write_uhwi (ob, es->loop_depth);
4345 write_predicate (ob, es->predicate);
4346 streamer_write_uhwi (ob, es->param.length ());
4347 for (i = 0; i < (int) es->param.length (); i++)
4348 streamer_write_uhwi (ob, es->param[i].change_prob);
4349 }
4350
4351
4352 /* Write inline summaries of the functions being streamed out.
4353 Jump functions are shared between ipa-cp and the inliner, so when
4354 ipa-cp is active, we do not need to write them twice. */
4355
4356 void
4357 inline_write_summary (void)
4358 {
4359 struct cgraph_node *node;
4360 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4361 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4362 unsigned int count = 0;
4363 int i;
4364
4365 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4366 {
4367 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4368 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4369 if (cnode && cnode->definition && !cnode->alias)
4370 count++;
4371 }
4372 streamer_write_uhwi (ob, count);
4373
4374 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4375 {
4376 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4377 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4378 if (cnode && (node = cnode)->definition && !node->alias)
4379 {
4380 struct inline_summary *info = inline_summaries->get (node);
4381 struct bitpack_d bp;
4382 struct cgraph_edge *edge;
4383 int i;
4384 size_time_entry *e;
4385 struct condition *c;
4386
4387 streamer_write_uhwi (ob,
4388 lto_symtab_encoder_encode (encoder, node));
4391 streamer_write_hwi (ob, info->estimated_self_stack_size);
4392 streamer_write_hwi (ob, info->self_size);
4393 streamer_write_hwi (ob, info->self_time);
4394 bp = bitpack_create (ob->main_stream);
4395 bp_pack_value (&bp, info->inlinable, 1);
4396 streamer_write_bitpack (&bp);
4397 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4398 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4399 {
4400 streamer_write_uhwi (ob, c->operand_num);
4401 streamer_write_uhwi (ob, c->code);
4402 stream_write_tree (ob, c->val, true);
4403 bp = bitpack_create (ob->main_stream);
4404 bp_pack_value (&bp, c->agg_contents, 1);
4405 bp_pack_value (&bp, c->by_ref, 1);
4406 streamer_write_bitpack (&bp);
4407 if (c->agg_contents)
4408 streamer_write_uhwi (ob, c->offset);
4409 }
4410 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4411 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4412 {
4413 streamer_write_uhwi (ob, e->size);
4414 streamer_write_uhwi (ob, e->time);
4415 write_predicate (ob, &e->predicate);
4416 }
4417 write_predicate (ob, info->loop_iterations);
4418 write_predicate (ob, info->loop_stride);
4419 write_predicate (ob, info->array_index);
4420 for (edge = node->callees; edge; edge = edge->next_callee)
4421 write_inline_edge_summary (ob, edge);
4422 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4423 write_inline_edge_summary (ob, edge);
4424 }
4425 }
4426 streamer_write_char_stream (ob->main_stream, 0);
4427 produce_asm (ob, NULL);
4428 destroy_output_block (ob);
4429
4430 if (optimize && !flag_ipa_cp)
4431 ipa_prop_write_jump_functions ();
4432 }
4433
4434
4435 /* Release inline summaries and edge summaries. */
4436
4437 void
4438 inline_free_summary (void)
4439 {
4440 struct cgraph_node *node;
4441 if (edge_removal_hook_holder)
4442 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4443 edge_removal_hook_holder = NULL;
4444 if (edge_duplication_hook_holder)
4445 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4446 edge_duplication_hook_holder = NULL;
4447 if (!inline_edge_summary_vec.exists ())
4448 return;
4449 FOR_EACH_DEFINED_FUNCTION (node)
4450 if (!node->alias)
4451 reset_inline_summary (node, inline_summaries->get (node));
4452 inline_summaries->release ();
4453 inline_summaries = NULL;
4454 inline_edge_summary_vec.release ();
4455 if (edge_predicate_pool)
4456 free_alloc_pool (edge_predicate_pool);
4457 edge_predicate_pool = 0;
4458 }