gcc/predict.c
1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* References:
21
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
28
29
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "backend.h"
34 #include "rtl.h"
35 #include "tree.h"
36 #include "gimple.h"
37 #include "cfghooks.h"
38 #include "tree-pass.h"
39 #include "ssa.h"
40 #include "memmodel.h"
41 #include "emit-rtl.h"
42 #include "cgraph.h"
43 #include "coverage.h"
44 #include "diagnostic-core.h"
45 #include "gimple-predict.h"
46 #include "fold-const.h"
47 #include "calls.h"
48 #include "cfganal.h"
49 #include "profile.h"
50 #include "sreal.h"
51 #include "params.h"
52 #include "cfgloop.h"
53 #include "gimple-iterator.h"
54 #include "tree-cfg.h"
55 #include "tree-ssa-loop-niter.h"
56 #include "tree-ssa-loop.h"
57 #include "tree-scalar-evolution.h"
58 #include "ipa-utils.h"
59 #include "gimple-pretty-print.h"
60 #include "selftest.h"
61 #include "cfgrtl.h"
62 #include "stringpool.h"
63 #include "attribs.h"
64
65 /* Enum with reasons why a predictor is ignored. */
66
67 enum predictor_reason
68 {
69 REASON_NONE,
70 REASON_IGNORED,
71 REASON_SINGLE_EDGE_DUPLICATE,
72 REASON_EDGE_PAIR_DUPLICATE
73 };
74
75 /* String messages for the aforementioned enum. */
76
77 static const char *reason_messages[] = {"", " (ignored)",
78 " (single edge duplicate)", " (edge pair duplicate)"};
79
80 /* Real constants: 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
81 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */
82 static sreal real_almost_one, real_br_prob_base,
83 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
84
85 static void combine_predictions_for_insn (rtx_insn *, basic_block);
86 static void dump_prediction (FILE *, enum br_predictor, int, basic_block,
87 enum predictor_reason, edge);
88 static void predict_paths_leading_to (basic_block, enum br_predictor,
89 enum prediction,
90 struct loop *in_loop = NULL);
91 static void predict_paths_leading_to_edge (edge, enum br_predictor,
92 enum prediction,
93 struct loop *in_loop = NULL);
94 static bool can_predict_insn_p (const rtx_insn *);
95 static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
96 static void determine_unlikely_bbs ();
97
98 /* Information we hold about each branch predictor.
99 Filled using information from predict.def. */
100
101 struct predictor_info
102 {
103 const char *const name; /* Name used in the debugging dumps. */
104 const int hitrate; /* Expected hitrate used by
105 predict_insn_def call. */
106 const int flags;
107 };
108
109 /* Use given predictor without Dempster-Shafer theory if it matches
110 using first_match heuristics. */
111 #define PRED_FLAG_FIRST_MATCH 1
112
113 /* Convert a hitrate given in percent to our fixed-point representation. */
114
115 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
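/* For example, assuming REG_BR_PROB_BASE is 10000 (its usual value),
   HITRATE (95) evaluates to (95 * 10000 + 50) / 100 == 9500, i.e. a 95%
   hitrate in the fixed-point probability scale used throughout this file.  */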
116
117 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
118 static const struct predictor_info predictor_info[]= {
119 #include "predict.def"
120
121 /* Upper bound on predictors. */
122 {NULL, 0, 0}
123 };
124 #undef DEF_PREDICTOR
125
126 static gcov_type min_count = -1;
127
128 /* Determine the threshold for hot BB counts. */
129
130 gcov_type
131 get_hot_bb_threshold ()
132 {
133 if (min_count == -1)
134 {
135 min_count
136 = profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION);
137 if (dump_file)
138 fprintf (dump_file, "Setting hotness threshold to %" PRId64 ".\n",
139 min_count);
140 }
141 return min_count;
142 }
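/* As an illustration (assuming the usual default of the HOT_BB_COUNT_FRACTION
   parameter, 10000): a basic block whose count is at least
   profile_info->sum_max / 10000 is treated as hot by maybe_hot_count_p
   below.  */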
143
144 /* Set the threshold for hot BB counts. */
145
146 void
147 set_hot_bb_threshold (gcov_type min)
148 {
149 min_count = min;
150 }
151
152 /* Return TRUE if profile COUNT is considered to be hot in function FUN. */
153
154 bool
155 maybe_hot_count_p (struct function *fun, profile_count count)
156 {
157 if (!count.initialized_p ())
158 return true;
159 if (count.ipa () == profile_count::zero ())
160 return false;
161 if (!count.ipa_p ())
162 {
163 struct cgraph_node *node = cgraph_node::get (fun->decl);
164 if (!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
165 {
166 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
167 return false;
168 if (node->frequency == NODE_FREQUENCY_HOT)
169 return true;
170 }
171 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
172 return true;
173 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
174 && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
175 return false;
176 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
177 return false;
178 if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
179 < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
180 return false;
181 return true;
182 }
183 /* Code executed at most once is not hot. */
184 if (count <= MAX (profile_info ? profile_info->runs : 1, 1))
185 return false;
186 return (count.to_gcov_type () >= get_hot_bb_threshold ());
187 }
188
189 /* Return true in case BB can be CPU intensive and should be optimized
190 for maximal performance. */
191
192 bool
193 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
194 {
195 gcc_checking_assert (fun);
196 return maybe_hot_count_p (fun, bb->count);
197 }
198
199 /* Return true in case BB can be CPU intensive and should be optimized
200 for maximal performance. */
201
202 bool
203 maybe_hot_edge_p (edge e)
204 {
205 return maybe_hot_count_p (cfun, e->count ());
206 }
207
208 /* Return true if profile COUNT, or the static node frequency of function
209 FUN, reflects never being executed. */
210
211 static bool
212 probably_never_executed (struct function *fun,
213 profile_count count)
214 {
215 gcc_checking_assert (fun);
216 if (count.ipa () == profile_count::zero ())
217 return true;
218 /* Do not trust adjusted counts. Trusting them would make us drop code into
219 the cold section merely because its execution count became low as a result
220 of inlining. Such low counts are not reliable even with a read profile and
221 could cause code that actually gets executed to end up in the cold section
222 of the binary, which is not desirable. */
223 if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
224 {
225 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
226 if (count.apply_scale (unlikely_count_fraction, 1) >= profile_info->runs)
227 return false;
228 return true;
229 }
230 if ((!profile_info || profile_status_for_fn (fun) != PROFILE_READ)
231 && (cgraph_node::get (fun->decl)->frequency
232 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
233 return true;
234 return false;
235 }
236
237
238 /* Return true in case BB is probably never executed. */
239
240 bool
241 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
242 {
243 return probably_never_executed (fun, bb->count);
244 }
245
246
247 /* Return true if E is unlikely executed for obvious reasons. */
248
249 static bool
250 unlikely_executed_edge_p (edge e)
251 {
252 return (e->count () == profile_count::zero ()
253 || e->probability == profile_probability::never ())
254 || (e->flags & (EDGE_EH | EDGE_FAKE));
255 }
256
257 /* Return true in case edge E is probably never executed. */
258
259 bool
260 probably_never_executed_edge_p (struct function *fun, edge e)
261 {
262 if (unlikely_executed_edge_p (e))
263 return true;
264 return probably_never_executed (fun, e->count ());
265 }
266
267 /* Return true when function FUN should always be optimized for size. */
268
269 bool
270 optimize_function_for_size_p (struct function *fun)
271 {
272 if (!fun || !fun->decl)
273 return optimize_size;
274 cgraph_node *n = cgraph_node::get (fun->decl);
275 return n && n->optimize_for_size_p ();
276 }
277
278 /* Return true when function FUN should always be optimized for speed. */
279
280 bool
281 optimize_function_for_speed_p (struct function *fun)
282 {
283 return !optimize_function_for_size_p (fun);
284 }
285
286 /* Return the optimization type that should be used for the function FUN. */
287
288 optimization_type
289 function_optimization_type (struct function *fun)
290 {
291 return (optimize_function_for_speed_p (fun)
292 ? OPTIMIZE_FOR_SPEED
293 : OPTIMIZE_FOR_SIZE);
294 }
295
296 /* Return TRUE when BB should be optimized for size. */
297
298 bool
299 optimize_bb_for_size_p (const_basic_block bb)
300 {
301 return (optimize_function_for_size_p (cfun)
302 || (bb && !maybe_hot_bb_p (cfun, bb)));
303 }
304
305 /* Return TRUE when BB should be optimized for speed. */
306
307 bool
308 optimize_bb_for_speed_p (const_basic_block bb)
309 {
310 return !optimize_bb_for_size_p (bb);
311 }
312
313 /* Return the optimization type that should be used for block BB. */
314
315 optimization_type
316 bb_optimization_type (const_basic_block bb)
317 {
318 return (optimize_bb_for_speed_p (bb)
319 ? OPTIMIZE_FOR_SPEED
320 : OPTIMIZE_FOR_SIZE);
321 }
322
323 /* Return TRUE when edge E should be optimized for size. */
324
325 bool
326 optimize_edge_for_size_p (edge e)
327 {
328 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
329 }
330
331 /* Return TRUE when edge E should be optimized for speed. */
332
333 bool
334 optimize_edge_for_speed_p (edge e)
335 {
336 return !optimize_edge_for_size_p (e);
337 }
338
339 /* Return TRUE when the instructions being expanded should be optimized for size. */
340
341 bool
342 optimize_insn_for_size_p (void)
343 {
344 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
345 }
346
347 /* Return TRUE when the instructions being expanded should be optimized for speed. */
348
349 bool
350 optimize_insn_for_speed_p (void)
351 {
352 return !optimize_insn_for_size_p ();
353 }
354
355 /* Return TRUE when LOOP should be optimized for size. */
356
357 bool
358 optimize_loop_for_size_p (struct loop *loop)
359 {
360 return optimize_bb_for_size_p (loop->header);
361 }
362
363 /* Return TRUE when LOOP should be optimized for speed. */
364
365 bool
366 optimize_loop_for_speed_p (struct loop *loop)
367 {
368 return optimize_bb_for_speed_p (loop->header);
369 }
370
371 /* Return TRUE when LOOP nest should be optimized for speed. */
372
373 bool
374 optimize_loop_nest_for_speed_p (struct loop *loop)
375 {
376 struct loop *l = loop;
377 if (optimize_loop_for_speed_p (loop))
378 return true;
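/* Walk the tree of subloops of LOOP in preorder without recursion: descend
   into the first child when possible, otherwise advance to the next sibling,
   climbing back towards LOOP when a subtree is exhausted.  */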
379 l = loop->inner;
380 while (l && l != loop)
381 {
382 if (optimize_loop_for_speed_p (l))
383 return true;
384 if (l->inner)
385 l = l->inner;
386 else if (l->next)
387 l = l->next;
388 else
389 {
390 while (l != loop && !l->next)
391 l = loop_outer (l);
392 if (l != loop)
393 l = l->next;
394 }
395 }
396 return false;
397 }
398
399 /* Return TRUE when LOOP nest should be optimized for size. */
400
401 bool
402 optimize_loop_nest_for_size_p (struct loop *loop)
403 {
404 return !optimize_loop_nest_for_speed_p (loop);
405 }
406
407 /* Return true when edge E is likely to be well predicted by a branch
408 predictor. */
409
410 bool
411 predictable_edge_p (edge e)
412 {
413 if (!e->probability.initialized_p ())
414 return false;
415 if ((e->probability.to_reg_br_prob_base ()
416 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
417 || (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
418 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
419 return true;
420 return false;
421 }
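/* For instance, with the predictable-branch-outcome parameter at its usual
   default of 2 and REG_BR_PROB_BASE of 10000, predictable_edge_p above
   accepts only edges whose probability is at most 2% or at least 98%
   (i.e. <= 200 or >= 9800 in fixed-point form).  */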
422
423
424 /* Set the hotness used during RTL expansion according to the profile of BB. */
425
426 void
427 rtl_profile_for_bb (basic_block bb)
428 {
429 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
430 }
431
432 /* Set the hotness used during RTL expansion according to the profile of edge E. */
433
434 void
435 rtl_profile_for_edge (edge e)
436 {
437 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
438 }
439
440 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
441 void
442 default_rtl_profile (void)
443 {
444 crtl->maybe_hot_insn_p = true;
445 }
446
447 /* Return true if one of the outgoing edges of BB is already predicted by
448 PREDICTOR. */
449
450 bool
451 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
452 {
453 rtx note;
454 if (!INSN_P (BB_END (bb)))
455 return false;
456 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
457 if (REG_NOTE_KIND (note) == REG_BR_PRED
458 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
459 return true;
460 return false;
461 }
462
463 /* Structure representing predictions on the tree (GIMPLE) level. */
464
465 struct edge_prediction {
466 struct edge_prediction *ep_next;
467 edge ep_edge;
468 enum br_predictor ep_predictor;
469 int ep_probability;
470 };
471
472 /* This map contains for a basic block the list of predictions for the
473 outgoing edges. */
474
475 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
476
477 /* Return true if one of the outgoing edges of BB is already predicted by
478 PREDICTOR. */
479
480 bool
481 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
482 {
483 struct edge_prediction *i;
484 edge_prediction **preds = bb_predictions->get (bb);
485
486 if (!preds)
487 return false;
488
489 for (i = *preds; i; i = i->ep_next)
490 if (i->ep_predictor == predictor)
491 return true;
492 return false;
493 }
494
495 /* Return true if edge E is already predicted by PREDICTOR with the
496 probability that corresponds to TAKEN. */
497
498 bool
499 edge_predicted_by_p (edge e, enum br_predictor predictor, bool taken)
500 {
501 struct edge_prediction *i;
502 basic_block bb = e->src;
503 edge_prediction **preds = bb_predictions->get (bb);
504 if (!preds)
505 return false;
506
507 int probability = predictor_info[(int) predictor].hitrate;
508
509 if (taken != TAKEN)
510 probability = REG_BR_PROB_BASE - probability;
511
512 for (i = *preds; i; i = i->ep_next)
513 if (i->ep_predictor == predictor
514 && i->ep_edge == e
515 && i->ep_probability == probability)
516 return true;
517 return false;
518 }
519
520 /* Return true when the probability of edge E is reliable. */
521 bool
522 edge_probability_reliable_p (const_edge e)
523 {
524 return e->probability.probably_reliable_p ();
525 }
526
527 /* Same predicate as edge_probability_reliable_p, working on notes. */
528 bool
529 br_prob_note_reliable_p (const_rtx note)
530 {
531 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
532 return profile_probability::from_reg_br_prob_note
533 (XINT (note, 0)).probably_reliable_p ();
534 }
535
536 static void
537 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
538 {
539 gcc_assert (any_condjump_p (insn));
540 if (!flag_guess_branch_prob)
541 return;
542
543 add_reg_note (insn, REG_BR_PRED,
544 gen_rtx_CONCAT (VOIDmode,
545 GEN_INT ((int) predictor),
546 GEN_INT ((int) probability)));
547 }
548
549 /* Predict insn by given predictor. */
550
551 void
552 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
553 enum prediction taken)
554 {
555 int probability = predictor_info[(int) predictor].hitrate;
556 gcc_assert (probability != PROB_UNINITIALIZED);
557
558 if (taken != TAKEN)
559 probability = REG_BR_PROB_BASE - probability;
560
561 predict_insn (insn, predictor, probability);
562 }
563
564 /* Predict edge E with given probability if possible. */
565
566 void
567 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
568 {
569 rtx_insn *last_insn;
570 last_insn = BB_END (e->src);
571
572 /* We can store the branch prediction information only about
573 conditional jumps. */
574 if (!any_condjump_p (last_insn))
575 return;
576
577 /* We always store probability of branching. */
578 if (e->flags & EDGE_FALLTHRU)
579 probability = REG_BR_PROB_BASE - probability;
580
581 predict_insn (last_insn, predictor, probability);
582 }
583
584 /* Predict edge E with the given PROBABILITY. */
585 void
586 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
587 {
588 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
589 && EDGE_COUNT (e->src->succs) > 1
590 && flag_guess_branch_prob
591 && optimize)
592 {
593 struct edge_prediction *i = XNEW (struct edge_prediction);
594 edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
595
596 i->ep_next = preds;
597 preds = i;
598 i->ep_probability = probability;
599 i->ep_predictor = predictor;
600 i->ep_edge = e;
601 }
602 }
603
604 /* Filter edge predictions PREDS by a function FILTER. DATA is passed
605 to the filter function. */
606
607 void
608 filter_predictions (edge_prediction **preds,
609 bool (*filter) (edge_prediction *, void *), void *data)
610 {
611 if (!bb_predictions)
612 return;
613
614 if (preds)
615 {
616 struct edge_prediction **prediction = preds;
617 struct edge_prediction *next;
618
619 while (*prediction)
620 {
621 if ((*filter) (*prediction, data))
622 prediction = &((*prediction)->ep_next);
623 else
624 {
625 next = (*prediction)->ep_next;
626 free (*prediction);
627 *prediction = next;
628 }
629 }
630 }
631 }
632
633 /* Filter function predicate that returns true for an edge prediction P
634 whose edge is equal to DATA. */
635
636 bool
637 equal_edge_p (edge_prediction *p, void *data)
638 {
639 return p->ep_edge == (edge)data;
640 }
641
642 /* Remove all predictions on given basic block that are attached
643 to edge E. */
644 void
645 remove_predictions_associated_with_edge (edge e)
646 {
647 if (!bb_predictions)
648 return;
649
650 edge_prediction **preds = bb_predictions->get (e->src);
651 filter_predictions (preds, equal_edge_p, e);
652 }
653
654 /* Clears the list of predictions stored for BB. */
655
656 static void
657 clear_bb_predictions (basic_block bb)
658 {
659 edge_prediction **preds = bb_predictions->get (bb);
660 struct edge_prediction *pred, *next;
661
662 if (!preds)
663 return;
664
665 for (pred = *preds; pred; pred = next)
666 {
667 next = pred->ep_next;
668 free (pred);
669 }
670 *preds = NULL;
671 }
672
673 /* Return true when we can store a prediction on insn INSN.
674 At the moment we represent predictions only on conditional
675 jumps, not on computed jumps or other complicated cases. */
676 static bool
677 can_predict_insn_p (const rtx_insn *insn)
678 {
679 return (JUMP_P (insn)
680 && any_condjump_p (insn)
681 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
682 }
683
684 /* Predict edge E by given predictor if possible. */
685
686 void
687 predict_edge_def (edge e, enum br_predictor predictor,
688 enum prediction taken)
689 {
690 int probability = predictor_info[(int) predictor].hitrate;
691
692 if (taken != TAKEN)
693 probability = REG_BR_PROB_BASE - probability;
694
695 predict_edge (e, predictor, probability);
696 }
697
698 /* Invert all branch predictions or probability notes in the INSN. This needs
699 to be done each time we invert the condition used by the jump. */
700
701 void
702 invert_br_probabilities (rtx insn)
703 {
704 rtx note;
705
706 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
707 if (REG_NOTE_KIND (note) == REG_BR_PROB)
708 XINT (note, 0) = profile_probability::from_reg_br_prob_note
709 (XINT (note, 0)).invert ().to_reg_br_prob_note ();
710 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
711 XEXP (XEXP (note, 0), 1)
712 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
713 }
714
715 /* Dump information about the branch prediction to the output file. */
716
717 static void
718 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
719 basic_block bb, enum predictor_reason reason = REASON_NONE,
720 edge ep_edge = NULL)
721 {
722 edge e = ep_edge;
723 edge_iterator ei;
724
725 if (!file)
726 return;
727
728 if (e == NULL)
729 FOR_EACH_EDGE (e, ei, bb->succs)
730 if (! (e->flags & EDGE_FALLTHRU))
731 break;
732
733 char edge_info_str[128];
734 if (ep_edge)
735 sprintf (edge_info_str, " of edge %d->%d", ep_edge->src->index,
736 ep_edge->dest->index);
737 else
738 edge_info_str[0] = '\0';
739
740 fprintf (file, " %s heuristics%s%s: %.2f%%",
741 predictor_info[predictor].name,
742 edge_info_str, reason_messages[reason],
743 probability * 100.0 / REG_BR_PROB_BASE);
744
745 if (bb->count.initialized_p ())
746 {
747 fprintf (file, " exec ");
748 bb->count.dump (file);
749 if (e)
750 {
751 fprintf (file, " hit ");
752 e->count ().dump (file);
753 fprintf (file, " (%.1f%%)", e->count ().to_gcov_type() * 100.0
754 / bb->count.to_gcov_type ());
755 }
756 }
757
758 fprintf (file, "\n");
759
760 /* Print output that can be easily read by the analyze_brprob.py script. We
761 are interested only in counts that are read from GCDA files. */
762 if (dump_file && (dump_flags & TDF_DETAILS)
763 && bb->count.precise_p ()
764 && reason == REASON_NONE)
765 {
766 gcc_assert (e->count ().precise_p ());
767 fprintf (file, ";;heuristics;%s;%" PRId64 ";%" PRId64 ";%.1f;\n",
768 predictor_info[predictor].name,
769 bb->count.to_gcov_type (), e->count ().to_gcov_type (),
770 probability * 100.0 / REG_BR_PROB_BASE);
771 }
772 }
773
774 /* Return true if STMT is known to be unlikely executed. */
775
776 static bool
777 unlikely_executed_stmt_p (gimple *stmt)
778 {
779 if (!is_gimple_call (stmt))
780 return false;
781 /* NORETURN attribute alone is not strong enough: exit() may be quite
782 likely executed once during program run. */
783 if (gimple_call_fntype (stmt)
784 && lookup_attribute ("cold",
785 TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))
786 && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
787 return true;
788 tree decl = gimple_call_fndecl (stmt);
789 if (!decl)
790 return false;
791 if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl))
792 && !lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl)))
793 return true;
794
795 cgraph_node *n = cgraph_node::get (decl);
796 if (!n)
797 return false;
798
799 availability avail;
800 n = n->ultimate_alias_target (&avail);
801 if (avail < AVAIL_AVAILABLE)
802 return false;
803 if (!n->analyzed
804 || n->decl == current_function_decl)
805 return false;
806 return n->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED;
807 }
808
809 /* Return true if BB is unlikely executed. */
810
811 static bool
812 unlikely_executed_bb_p (basic_block bb)
813 {
814 if (bb->count == profile_count::zero ())
815 return true;
816 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
817 return false;
818 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
819 !gsi_end_p (gsi); gsi_next (&gsi))
820 {
821 if (unlikely_executed_stmt_p (gsi_stmt (gsi)))
822 return true;
823 if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
824 return false;
825 }
826 return false;
827 }
828
829 /* We cannot predict the probabilities of the outgoing edges of BB. Set them
830 evenly and hope for the best. If UNLIKELY_EDGES is not null, give the edges
831 it contains PROB_VERY_UNLIKELY probability and distribute the remaining
832 probability evenly among the other edges. Similarly for LIKELY_EDGES: if it
833 contains exactly one likely edge, use its predicted probability and make the
834 other edges correspondingly less probable. */
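/* For example, given a block with three uninitialized successor edges of
   which exactly one is in UNLIKELY_EDGES (and LIKELY_EDGES is empty), that
   edge receives the very-unlikely probability and each of the other two
   receives half of the remaining probability mass.  */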
835
836 static void
837 set_even_probabilities (basic_block bb,
838 hash_set<edge> *unlikely_edges = NULL,
839 hash_set<edge_prediction *> *likely_edges = NULL)
840 {
841 unsigned nedges = 0, unlikely_count = 0;
842 edge e = NULL;
843 edge_iterator ei;
844 profile_probability all = profile_probability::always ();
845
846 FOR_EACH_EDGE (e, ei, bb->succs)
847 if (e->probability.initialized_p ())
848 all -= e->probability;
849 else if (!unlikely_executed_edge_p (e))
850 {
851 nedges++;
852 if (unlikely_edges != NULL && unlikely_edges->contains (e))
853 {
854 all -= profile_probability::very_unlikely ();
855 unlikely_count++;
856 }
857 }
858
859 /* Make the distribution even if all edges are unlikely. */
860 unsigned likely_count = likely_edges ? likely_edges->elements () : 0;
861 if (unlikely_count == nedges)
862 {
863 unlikely_edges = NULL;
864 unlikely_count = 0;
865 }
866
867 /* If we have one likely edge, then use its probability and distribute
868 the remaining probability evenly among the other edges. */
869 if (likely_count == 1)
870 {
871 FOR_EACH_EDGE (e, ei, bb->succs)
872 if (e->probability.initialized_p ())
873 ;
874 else if (!unlikely_executed_edge_p (e))
875 {
876 edge_prediction *prediction = *likely_edges->begin ();
877 int p = prediction->ep_probability;
878 profile_probability prob
879 = profile_probability::from_reg_br_prob_base (p);
880 profile_probability remainder = prob.invert ();
881
882 if (prediction->ep_edge == e)
883 e->probability = prob;
884 else
885 e->probability = remainder.apply_scale (1, nedges - 1);
886 }
887 else
888 e->probability = profile_probability::never ();
889 }
890 else
891 {
892 /* Make all unlikely edges very unlikely; the rest share the remaining
893 probability evenly. */
894 unsigned scale = nedges - unlikely_count;
895 FOR_EACH_EDGE (e, ei, bb->succs)
896 if (e->probability.initialized_p ())
897 ;
898 else if (!unlikely_executed_edge_p (e))
899 {
900 if (unlikely_edges != NULL && unlikely_edges->contains (e))
901 e->probability = profile_probability::very_unlikely ();
902 else
903 e->probability = all.apply_scale (1, scale);
904 }
905 else
906 e->probability = profile_probability::never ();
907 }
908 }
909
910 /* Add REG_BR_PROB note to JUMP with PROB. */
911
912 void
913 add_reg_br_prob_note (rtx_insn *jump, profile_probability prob)
914 {
915 gcc_checking_assert (JUMP_P (jump) && !find_reg_note (jump, REG_BR_PROB, 0));
916 add_int_reg_note (jump, REG_BR_PROB, prob.to_reg_br_prob_note ());
917 }
918
919 /* Combine all REG_BR_PRED notes into a single probability and attach a
920 REG_BR_PROB note if not already present. Remove the now useless REG_BR_PRED notes. */
921
922 static void
923 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
924 {
925 rtx prob_note;
926 rtx *pnote;
927 rtx note;
928 int best_probability = PROB_EVEN;
929 enum br_predictor best_predictor = END_PREDICTORS;
930 int combined_probability = REG_BR_PROB_BASE / 2;
931 int d;
932 bool first_match = false;
933 bool found = false;
934
935 if (!can_predict_insn_p (insn))
936 {
937 set_even_probabilities (bb);
938 return;
939 }
940
941 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
942 pnote = &REG_NOTES (insn);
943 if (dump_file)
944 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
945 bb->index);
946
947 /* We implement the "first match" heuristics and use the probability guessed
948 by the predictor with the smallest index. */
949 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
950 if (REG_NOTE_KIND (note) == REG_BR_PRED)
951 {
952 enum br_predictor predictor = ((enum br_predictor)
953 INTVAL (XEXP (XEXP (note, 0), 0)));
954 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
955
956 found = true;
957 if (best_predictor > predictor
958 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
959 best_probability = probability, best_predictor = predictor;
960
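/* Dempster-Shafer combination of two branch probabilities p and q
   (both scaled by REG_BR_PROB_BASE) yields p*q / (p*q + (1-p)*(1-q));
   D below is the denominator of that expression scaled by
   REG_BR_PROB_BASE squared.  For instance, combining 90% with 60% gives
   0.54 / (0.54 + 0.04), roughly 93%.  */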
961 d = (combined_probability * probability
962 + (REG_BR_PROB_BASE - combined_probability)
963 * (REG_BR_PROB_BASE - probability));
964
965 /* Use FP math to avoid overflows of 32bit integers. */
966 if (d == 0)
967 /* If one probability is 0% and one 100%, avoid division by zero. */
968 combined_probability = REG_BR_PROB_BASE / 2;
969 else
970 combined_probability = (((double) combined_probability) * probability
971 * REG_BR_PROB_BASE / d + 0.5);
972 }
973
974 /* Decide which heuristic to use. If we didn't match anything,
975 use the no_prediction heuristic; if we did, use either
976 first match or Dempster-Shafer theory, depending on the flags. */
977
978 if (best_predictor != END_PREDICTORS)
979 first_match = true;
980
981 if (!found)
982 dump_prediction (dump_file, PRED_NO_PREDICTION,
983 combined_probability, bb);
984 else
985 {
986 if (!first_match)
987 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
988 bb, !first_match ? REASON_NONE : REASON_IGNORED);
989 else
990 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
991 bb, first_match ? REASON_NONE : REASON_IGNORED);
992 }
993
994 if (first_match)
995 combined_probability = best_probability;
996 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
997
998 while (*pnote)
999 {
1000 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
1001 {
1002 enum br_predictor predictor = ((enum br_predictor)
1003 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
1004 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
1005
1006 dump_prediction (dump_file, predictor, probability, bb,
1007 (!first_match || best_predictor == predictor)
1008 ? REASON_NONE : REASON_IGNORED);
1009 *pnote = XEXP (*pnote, 1);
1010 }
1011 else
1012 pnote = &XEXP (*pnote, 1);
1013 }
1014
1015 if (!prob_note)
1016 {
1017 profile_probability p
1018 = profile_probability::from_reg_br_prob_base (combined_probability);
1019 add_reg_br_prob_note (insn, p);
1020
1021 /* Save the prediction into the CFG in case we are looking at a
1022 non-degenerate conditional jump. */
1023 if (!single_succ_p (bb))
1024 {
1025 BRANCH_EDGE (bb)->probability = p;
1026 FALLTHRU_EDGE (bb)->probability
1027 = BRANCH_EDGE (bb)->probability.invert ();
1028 }
1029 }
1030 else if (!single_succ_p (bb))
1031 {
1032 profile_probability prob = profile_probability::from_reg_br_prob_note
1033 (XINT (prob_note, 0));
1034
1035 BRANCH_EDGE (bb)->probability = prob;
1036 FALLTHRU_EDGE (bb)->probability = prob.invert ();
1037 }
1038 else
1039 single_succ_edge (bb)->probability = profile_probability::always ();
1040 }
1041
1042 /* Edge prediction hash traits. */
1043
1044 struct predictor_hash: pointer_hash <edge_prediction>
1045 {
1046
1047 static inline hashval_t hash (const edge_prediction *);
1048 static inline bool equal (const edge_prediction *, const edge_prediction *);
1049 };
1050
1051 /* Calculate hash value of an edge prediction P based on predictor and
1052 normalized probability. */
1053
1054 inline hashval_t
1055 predictor_hash::hash (const edge_prediction *p)
1056 {
1057 inchash::hash hstate;
1058 hstate.add_int (p->ep_predictor);
1059
1060 int prob = p->ep_probability;
1061 if (prob > REG_BR_PROB_BASE / 2)
1062 prob = REG_BR_PROB_BASE - prob;
1063
1064 hstate.add_int (prob);
1065
1066 return hstate.end ();
1067 }
1068
1069 /* Return true if edge predictions P1 and P2 use the same predictor and
1070 have equal (or opposite) probabilities. */
1071
1072 inline bool
1073 predictor_hash::equal (const edge_prediction *p1, const edge_prediction *p2)
1074 {
1075 return (p1->ep_predictor == p2->ep_predictor
1076 && (p1->ep_probability == p2->ep_probability
1077 || p1->ep_probability == REG_BR_PROB_BASE - p2->ep_probability));
1078 }
1079
1080 struct predictor_hash_traits: predictor_hash,
1081 typed_noop_remove <edge_prediction *> {};
1082
1083 /* Return true if edge prediction P is not in DATA hash set. */
1084
1085 static bool
1086 not_removed_prediction_p (edge_prediction *p, void *data)
1087 {
1088 hash_set<edge_prediction *> *remove = (hash_set<edge_prediction *> *) data;
1089 return !remove->contains (p);
1090 }
1091
1092 /* Prune predictions for a basic block BB. Currently we perform the
1093 following clean-up steps:
1094 
1095 1) remove a pair of predictions from the same predictor that guess the
1096 same probability (different from 1/2) for both edges;
1097 2) remove duplicates of a prediction that attaches the same probability
1098 to a single edge.
1099 
1100 */
1101
1102 static void
1103 prune_predictions_for_bb (basic_block bb)
1104 {
1105 edge_prediction **preds = bb_predictions->get (bb);
1106
1107 if (preds)
1108 {
1109 hash_table <predictor_hash_traits> s (13);
1110 hash_set <edge_prediction *> remove;
1111
1112 /* Step 1: identify predictors that should be removed. */
1113 for (edge_prediction *pred = *preds; pred; pred = pred->ep_next)
1114 {
1115 edge_prediction *existing = s.find (pred);
1116 if (existing)
1117 {
1118 if (pred->ep_edge == existing->ep_edge
1119 && pred->ep_probability == existing->ep_probability)
1120 {
1121 /* Remove a duplicate predictor. */
1122 dump_prediction (dump_file, pred->ep_predictor,
1123 pred->ep_probability, bb,
1124 REASON_SINGLE_EDGE_DUPLICATE, pred->ep_edge);
1125
1126 remove.add (pred);
1127 }
1128 else if (pred->ep_edge != existing->ep_edge
1129 && pred->ep_probability == existing->ep_probability
1130 && pred->ep_probability != REG_BR_PROB_BASE / 2)
1131 {
1132 /* Remove both predictors as they predict the same
1133 for both edges. */
1134 dump_prediction (dump_file, existing->ep_predictor,
1135 pred->ep_probability, bb,
1136 REASON_EDGE_PAIR_DUPLICATE,
1137 existing->ep_edge);
1138 dump_prediction (dump_file, pred->ep_predictor,
1139 pred->ep_probability, bb,
1140 REASON_EDGE_PAIR_DUPLICATE,
1141 pred->ep_edge);
1142
1143 remove.add (existing);
1144 remove.add (pred);
1145 }
1146 }
1147
1148 edge_prediction **slot2 = s.find_slot (pred, INSERT);
1149 *slot2 = pred;
1150 }
1151
1152 /* Step 2: Remove predictors. */
1153 filter_predictions (preds, not_removed_prediction_p, &remove);
1154 }
1155 }
1156
1157 /* Combine predictions into a single probability and store it into the CFG.
1158 Remove the now useless prediction entries.
1159 If DRY_RUN is set, only produce dumps and do not modify the profile. */
1160
1161 static void
1162 combine_predictions_for_bb (basic_block bb, bool dry_run)
1163 {
1164 int best_probability = PROB_EVEN;
1165 enum br_predictor best_predictor = END_PREDICTORS;
1166 int combined_probability = REG_BR_PROB_BASE / 2;
1167 int d;
1168 bool first_match = false;
1169 bool found = false;
1170 struct edge_prediction *pred;
1171 int nedges = 0;
1172 edge e, first = NULL, second = NULL;
1173 edge_iterator ei;
1174 int nzero = 0;
1175 int nunknown = 0;
1176
1177 FOR_EACH_EDGE (e, ei, bb->succs)
1178 {
1179 if (!unlikely_executed_edge_p (e))
1180 {
1181 nedges ++;
1182 if (first && !second)
1183 second = e;
1184 if (!first)
1185 first = e;
1186 }
1187 else if (!e->probability.initialized_p ())
1188 e->probability = profile_probability::never ();
1189 if (!e->probability.initialized_p ())
1190 nunknown++;
1191 else if (e->probability == profile_probability::never ())
1192 nzero++;
1193 }
1194
1195 /* When there is no successor or only one choice, prediction is easy.
1196
1197 When we have a basic block with more than 2 successors, the situation
1198 is more complicated as DS theory cannot be used literally.
1199 More precisely, let's assume we predicted edge e1 with probability p1,
1200 thus: m1({b1}) = p1. As we're going to combine more than 2 edges, we
1201 need to find probability of e.g. m1({b2}), which we don't know.
1202 The only approximation is to equally distribute 1-p1 to all edges
1203 different from b1.
1204
1205 According to the numbers we got from the SPEC2006 benchmark, there is only
1206 one interesting reliable predictor (noreturn call), which can be
1207 handled with a somewhat simpler approach. */
1208 if (nedges != 2)
1209 {
1210 hash_set<edge> unlikely_edges (4);
1211 hash_set<edge_prediction *> likely_edges (4);
1212
1213 /* Identify all edges that have a probability close to very unlikely.
1214 Going further than this is not worth doing, as no such probability
1215 occurs in the SPEC2006 benchmark. */
1216 edge_prediction **preds = bb_predictions->get (bb);
1217 if (preds)
1218 for (pred = *preds; pred; pred = pred->ep_next)
1219 {
1220 if (pred->ep_probability <= PROB_VERY_UNLIKELY)
1221 unlikely_edges.add (pred->ep_edge);
1222 if (pred->ep_probability >= PROB_VERY_LIKELY
1223 || pred->ep_predictor == PRED_BUILTIN_EXPECT)
1224 likely_edges.add (pred);
1225 }
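/* PRED_BUILTIN_EXPECT is listed explicitly because its probability is
   tunable (e.g. via --param builtin-expect-probability) and therefore may
   fall below PROB_VERY_LIKELY even though the user's intent is "likely".  */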
1226
1227 if (!dry_run)
1228 set_even_probabilities (bb, &unlikely_edges, &likely_edges);
1229 clear_bb_predictions (bb);
1230 if (dump_file)
1231 {
1232 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1233 if (unlikely_edges.elements () == 0)
1234 fprintf (dump_file,
1235 "%i edges in bb %i predicted to even probabilities\n",
1236 nedges, bb->index);
1237 else
1238 {
1239 fprintf (dump_file,
1240 "%i edges in bb %i predicted with some unlikely edges\n",
1241 nedges, bb->index);
1242 FOR_EACH_EDGE (e, ei, bb->succs)
1243 if (!unlikely_executed_edge_p (e))
1244 dump_prediction (dump_file, PRED_COMBINED,
1245 e->probability.to_reg_br_prob_base (), bb, REASON_NONE, e);
1246 }
1247 }
1248 return;
1249 }
1250
1251 if (dump_file)
1252 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1253
1254 prune_predictions_for_bb (bb);
1255
1256 edge_prediction **preds = bb_predictions->get (bb);
1257
1258 if (preds)
1259 {
1260 /* We implement the "first match" heuristics and use the probability guessed
1261 by the predictor with the smallest index. */
1262 for (pred = *preds; pred; pred = pred->ep_next)
1263 {
1264 enum br_predictor predictor = pred->ep_predictor;
1265 int probability = pred->ep_probability;
1266
1267 if (pred->ep_edge != first)
1268 probability = REG_BR_PROB_BASE - probability;
1269
1270 found = true;
1271 /* First match heuristics would be wildly confused if we predicted
1272 both directions. */
1273 if (best_predictor > predictor
1274 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
1275 {
1276 struct edge_prediction *pred2;
1277 int prob = probability;
1278
1279 for (pred2 = (struct edge_prediction *) *preds;
1280 pred2; pred2 = pred2->ep_next)
1281 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
1282 {
1283 int probability2 = pred2->ep_probability;
1284
1285 if (pred2->ep_edge != first)
1286 probability2 = REG_BR_PROB_BASE - probability2;
1287
1288 if ((probability < REG_BR_PROB_BASE / 2) !=
1289 (probability2 < REG_BR_PROB_BASE / 2))
1290 break;
1291
1292 /* If the same predictor later gave better result, go for it! */
1293 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
1294 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
1295 prob = probability2;
1296 }
1297 if (!pred2)
1298 best_probability = prob, best_predictor = predictor;
1299 }
1300
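/* Combine with the running estimate using the same Dempster-Shafer
   formula as in combine_predictions_for_insn above.  */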
1301 d = (combined_probability * probability
1302 + (REG_BR_PROB_BASE - combined_probability)
1303 * (REG_BR_PROB_BASE - probability));
1304
1305 /* Use FP math to avoid overflows of 32bit integers. */
1306 if (d == 0)
1307 /* If one probability is 0% and one 100%, avoid division by zero. */
1308 combined_probability = REG_BR_PROB_BASE / 2;
1309 else
1310 combined_probability = (((double) combined_probability)
1311 * probability
1312 * REG_BR_PROB_BASE / d + 0.5);
1313 }
1314 }
1315
1316 /* Decide which heuristic to use. If we didn't match anything,
1317 use the no_prediction heuristic; if we did, use either
1318 first match or Dempster-Shafer theory, depending on the flags. */
1319
1320 if (best_predictor != END_PREDICTORS)
1321 first_match = true;
1322
1323 if (!found)
1324 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb);
1325 else
1326 {
1327 if (!first_match)
1328 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
1329 !first_match ? REASON_NONE : REASON_IGNORED);
1330 else
1331 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
1332 first_match ? REASON_NONE : REASON_IGNORED);
1333 }
1334
1335 if (first_match)
1336 combined_probability = best_probability;
1337 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1338
1339 if (preds)
1340 {
1341 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
1342 {
1343 enum br_predictor predictor = pred->ep_predictor;
1344 int probability = pred->ep_probability;
1345
1346 dump_prediction (dump_file, predictor, probability, bb,
1347 (!first_match || best_predictor == predictor)
1348 ? REASON_NONE : REASON_IGNORED, pred->ep_edge);
1349 }
1350 }
1351 clear_bb_predictions (bb);
1352
1353
1354 /* If only one successor has an unknown probability, we can compute the
1355 missing probability. */
1356 if (nunknown == 1)
1357 {
1358 profile_probability prob = profile_probability::always ();
1359 edge missing = NULL;
1360
1361 FOR_EACH_EDGE (e, ei, bb->succs)
1362 if (e->probability.initialized_p ())
1363 prob -= e->probability;
1364 else if (missing == NULL)
1365 missing = e;
1366 else
1367 gcc_unreachable ();
1368 missing->probability = prob;
1369 }
1370 /* If nothing is unknown, we have nothing to update. */
1371 else if (!nunknown && nzero != (int)EDGE_COUNT (bb->succs))
1372 ;
1373 else if (!dry_run)
1374 {
1375 first->probability
1376 = profile_probability::from_reg_br_prob_base (combined_probability);
1377 second->probability = first->probability.invert ();
1378 }
1379 }
1380
1381 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
1382 Return the SSA_NAME if the condition is satisfied, NULL otherwise.
1383
1384 T1 and T2 should be one of the following cases:
1385 1. T1 is SSA_NAME, T2 is NULL
1386 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
1387 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
1388
1389 static tree
1390 strips_small_constant (tree t1, tree t2)
1391 {
1392 tree ret = NULL;
1393 int value = 0;
1394
1395 if (!t1)
1396 return NULL;
1397 else if (TREE_CODE (t1) == SSA_NAME)
1398 ret = t1;
1399 else if (tree_fits_shwi_p (t1))
1400 value = tree_to_shwi (t1);
1401 else
1402 return NULL;
1403
1404 if (!t2)
1405 return ret;
1406 else if (tree_fits_shwi_p (t2))
1407 value = tree_to_shwi (t2);
1408 else if (TREE_CODE (t2) == SSA_NAME)
1409 {
1410 if (ret)
1411 return NULL;
1412 else
1413 ret = t2;
1414 }
1415
1416 if (value <= 4 && value >= -4)
1417 return ret;
1418 else
1419 return NULL;
1420 }
1421
1422 /* Return the SSA_NAME in T or T's operands.
1423 Return NULL if SSA_NAME cannot be found. */
1424
1425 static tree
1426 get_base_value (tree t)
1427 {
1428 if (TREE_CODE (t) == SSA_NAME)
1429 return t;
1430
1431 if (!BINARY_CLASS_P (t))
1432 return NULL;
1433
1434 switch (TREE_OPERAND_LENGTH (t))
1435 {
1436 case 1:
1437 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1438 case 2:
1439 return strips_small_constant (TREE_OPERAND (t, 0),
1440 TREE_OPERAND (t, 1));
1441 default:
1442 return NULL;
1443 }
1444 }
1445
1446 /* Check the compare STMT in LOOP. If it compares an induction
1447 variable to a loop invariant, return true, and save
1448 LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
1449 Otherwise return false and set LOOP_INVARIANT to NULL. */
1450
1451 static bool
1452 is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
1453 tree *loop_invariant,
1454 enum tree_code *compare_code,
1455 tree *loop_step,
1456 tree *loop_iv_base)
1457 {
1458 tree op0, op1, bound, base;
1459 affine_iv iv0, iv1;
1460 enum tree_code code;
1461 tree step;
1462
1463 code = gimple_cond_code (stmt);
1464 *loop_invariant = NULL;
1465
1466 switch (code)
1467 {
1468 case GT_EXPR:
1469 case GE_EXPR:
1470 case NE_EXPR:
1471 case LT_EXPR:
1472 case LE_EXPR:
1473 case EQ_EXPR:
1474 break;
1475
1476 default:
1477 return false;
1478 }
1479
1480 op0 = gimple_cond_lhs (stmt);
1481 op1 = gimple_cond_rhs (stmt);
1482
1483 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1484 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1485 return false;
1486 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1487 return false;
1488 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1489 return false;
1490 if (TREE_CODE (iv0.step) != INTEGER_CST
1491 || TREE_CODE (iv1.step) != INTEGER_CST)
1492 return false;
1493 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1494 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1495 return false;
1496
1497 if (integer_zerop (iv0.step))
1498 {
1499 if (code != NE_EXPR && code != EQ_EXPR)
1500 code = invert_tree_comparison (code, false);
1501 bound = iv0.base;
1502 base = iv1.base;
1503 if (tree_fits_shwi_p (iv1.step))
1504 step = iv1.step;
1505 else
1506 return false;
1507 }
1508 else
1509 {
1510 bound = iv1.base;
1511 base = iv0.base;
1512 if (tree_fits_shwi_p (iv0.step))
1513 step = iv0.step;
1514 else
1515 return false;
1516 }
1517
1518 if (TREE_CODE (bound) != INTEGER_CST)
1519 bound = get_base_value (bound);
1520 if (!bound)
1521 return false;
1522 if (TREE_CODE (base) != INTEGER_CST)
1523 base = get_base_value (base);
1524 if (!base)
1525 return false;
1526
1527 *loop_invariant = bound;
1528 *compare_code = code;
1529 *loop_step = step;
1530 *loop_iv_base = base;
1531 return true;
1532 }
1533
1534 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1535
1536 static bool
1537 expr_coherent_p (tree t1, tree t2)
1538 {
1539 gimple *stmt;
1540 tree ssa_name_1 = NULL;
1541 tree ssa_name_2 = NULL;
1542
1543 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1544 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1545
1546 if (t1 == t2)
1547 return true;
1548
1549 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1550 return true;
1551 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1552 return false;
1553
1554 /* Check to see if t1 is expressed/defined with t2. */
1555 stmt = SSA_NAME_DEF_STMT (t1);
1556 gcc_assert (stmt != NULL);
1557 if (is_gimple_assign (stmt))
1558 {
1559 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1560 if (ssa_name_1 && ssa_name_1 == t2)
1561 return true;
1562 }
1563
1564 /* Check to see if t2 is expressed/defined with t1. */
1565 stmt = SSA_NAME_DEF_STMT (t2);
1566 gcc_assert (stmt != NULL);
1567 if (is_gimple_assign (stmt))
1568 {
1569 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1570 if (ssa_name_2 && ssa_name_2 == t1)
1571 return true;
1572 }
1573
1574 /* Compare if t1 and t2's def_stmts are identical. */
1575 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1576 return true;
1577 else
1578 return false;
1579 }
1580
1581 /* Return true if BB is predicted by one of the loop heuristics. */
1582
1583 static bool
1584 predicted_by_loop_heuristics_p (basic_block bb)
1585 {
1586 struct edge_prediction *i;
1587 edge_prediction **preds = bb_predictions->get (bb);
1588
1589 if (!preds)
1590 return false;
1591
1592 for (i = *preds; i; i = i->ep_next)
1593 if (i->ep_predictor == PRED_LOOP_ITERATIONS_GUESSED
1594 || i->ep_predictor == PRED_LOOP_ITERATIONS_MAX
1595 || i->ep_predictor == PRED_LOOP_ITERATIONS
1596 || i->ep_predictor == PRED_LOOP_EXIT
1597 || i->ep_predictor == PRED_LOOP_EXIT_WITH_RECURSION
1598 || i->ep_predictor == PRED_LOOP_EXTRA_EXIT)
1599 return true;
1600 return false;
1601 }
1602
1603 /* Predict branch probability of BB when BB contains a branch that compares
1604 an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
1605 loop exit is compared using LOOP_BOUND_CODE, with step of LOOP_BOUND_STEP.
1606
1607 E.g.
1608 for (int i = 0; i < bound; i++) {
1609 if (i < bound - 2)
1610 computation_1();
1611 else
1612 computation_2();
1613 }
1614
1615 In this loop, we will predict the branch inside the loop to be taken. */
1616
1617 static void
1618 predict_iv_comparison (struct loop *loop, basic_block bb,
1619 tree loop_bound_var,
1620 tree loop_iv_base_var,
1621 enum tree_code loop_bound_code,
1622 int loop_bound_step)
1623 {
1624 gimple *stmt;
1625 tree compare_var, compare_base;
1626 enum tree_code compare_code;
1627 tree compare_step_var;
1628 edge then_edge;
1629 edge_iterator ei;
1630
1631 if (predicted_by_loop_heuristics_p (bb))
1632 return;
1633
1634 stmt = last_stmt (bb);
1635 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1636 return;
1637 if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
1638 loop, &compare_var,
1639 &compare_code,
1640 &compare_step_var,
1641 &compare_base))
1642 return;
1643
1644 /* Find the taken edge. */
1645 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1646 if (then_edge->flags & EDGE_TRUE_VALUE)
1647 break;
1648
1649 /* When comparing an IV to a loop invariant, NE is more likely to be
1650 taken while EQ is more likely to be not-taken. */
1651 if (compare_code == NE_EXPR)
1652 {
1653 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1654 return;
1655 }
1656 else if (compare_code == EQ_EXPR)
1657 {
1658 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1659 return;
1660 }
1661
1662 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1663 return;
1664
1665 /* If loop bound, base and compare bound are all constants, we can
1666 calculate the probability directly. */
1667 if (tree_fits_shwi_p (loop_bound_var)
1668 && tree_fits_shwi_p (compare_var)
1669 && tree_fits_shwi_p (compare_base))
1670 {
1671 int probability;
1672 wi::overflow_type overflow;
1673 bool overall_overflow = false;
1674 widest_int compare_count, tem;
1675
1676 /* (loop_bound - base) / compare_step */
1677 tem = wi::sub (wi::to_widest (loop_bound_var),
1678 wi::to_widest (compare_base), SIGNED, &overflow);
1679 overall_overflow |= overflow;
1680 widest_int loop_count = wi::div_trunc (tem,
1681 wi::to_widest (compare_step_var),
1682 SIGNED, &overflow);
1683 overall_overflow |= overflow;
1684
1685 if (!wi::neg_p (wi::to_widest (compare_step_var))
1686 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1687 {
1688 /* (loop_bound - compare_bound) / compare_step */
1689 tem = wi::sub (wi::to_widest (loop_bound_var),
1690 wi::to_widest (compare_var), SIGNED, &overflow);
1691 overall_overflow |= overflow;
1692 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1693 SIGNED, &overflow);
1694 overall_overflow |= overflow;
1695 }
1696 else
1697 {
1698 /* (compare_bound - base) / compare_step */
1699 tem = wi::sub (wi::to_widest (compare_var),
1700 wi::to_widest (compare_base), SIGNED, &overflow);
1701 overall_overflow |= overflow;
1702 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1703 SIGNED, &overflow);
1704 overall_overflow |= overflow;
1705 }
1706 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1707 ++compare_count;
1708 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1709 ++loop_count;
1710 if (wi::neg_p (compare_count))
1711 compare_count = 0;
1712 if (wi::neg_p (loop_count))
1713 loop_count = 0;
1714 if (loop_count == 0)
1715 probability = 0;
1716 else if (wi::cmps (compare_count, loop_count) == 1)
1717 probability = REG_BR_PROB_BASE;
1718 else
1719 {
1720 tem = compare_count * REG_BR_PROB_BASE;
1721 tem = wi::udiv_trunc (tem, loop_count);
1722 probability = tem.to_uhwi ();
1723 }
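/* As a concrete instance: for "for (i = 0; i < 100; i++) if (i < 30) ...",
   loop_count is 100 and compare_count is 30, so THEN_EDGE is predicted
   with a probability of 30% of REG_BR_PROB_BASE.  */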
1724
1725 /* FIXME: The branch prediction seems broken. It has only 20% hitrate. */
1726 if (!overall_overflow)
1727 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1728
1729 return;
1730 }
1731
1732 if (expr_coherent_p (loop_bound_var, compare_var))
1733 {
1734 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1735 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1736 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1737 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1738 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1739 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1740 else if (loop_bound_code == NE_EXPR)
1741 {
1742 /* If the loop backedge condition is "(i != bound)", we do
1743 the comparison based on the step of IV:
1744 * step < 0 : backedge condition is like (i > bound)
1745 * step > 0 : backedge condition is like (i < bound) */
1746 gcc_assert (loop_bound_step != 0);
1747 if (loop_bound_step > 0
1748 && (compare_code == LT_EXPR
1749 || compare_code == LE_EXPR))
1750 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1751 else if (loop_bound_step < 0
1752 && (compare_code == GT_EXPR
1753 || compare_code == GE_EXPR))
1754 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1755 else
1756 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1757 }
1758 else
1759 /* The branch is predicted not-taken if loop_bound_code is
1760 opposite to compare_code. */
1761 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1762 }
1763 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1764 {
1765 /* For cases like:
1766 for (i = s; i < h; i++)
1767 if (i > s + 2) ....
1768 The branch should be predicted taken. */
1769 if (loop_bound_step > 0
1770 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1771 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1772 else if (loop_bound_step < 0
1773 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1774 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1775 else
1776 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1777 }
1778 }
1779
1780 /* Predict for extra loop exits that will lead to EXIT_EDGE. The extra loop
1781 exits result from short-circuit conditions that will generate an
1782 if_tmp. E.g.:
1783
1784 if (foo() || global > 10)
1785 break;
1786
1787 This will be translated into:
1788
1789 BB3:
1790 loop header...
1791 BB4:
1792 if foo() goto BB6 else goto BB5
1793 BB5:
1794 if global > 10 goto BB6 else goto BB7
1795 BB6:
1796 goto BB7
1797 BB7:
1798 iftmp = (PHI 0(BB5), 1(BB6))
1799 if iftmp == 1 goto BB8 else goto BB3
1800 BB8:
1801 outside of the loop...
1802
1803 The edge BB7->BB8 is loop exit because BB8 is outside of the loop.
1804 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
1805 exits. This function takes BB7->BB8 as input, and finds out the extra loop
1806 exits to predict them using PRED_LOOP_EXTRA_EXIT. */
1807
1808 static void
1809 predict_extra_loop_exits (edge exit_edge)
1810 {
1811 unsigned i;
1812 bool check_value_one;
1813 gimple *lhs_def_stmt;
1814 gphi *phi_stmt;
1815 tree cmp_rhs, cmp_lhs;
1816 gimple *last;
1817 gcond *cmp_stmt;
1818
1819 last = last_stmt (exit_edge->src);
1820 if (!last)
1821 return;
1822 cmp_stmt = dyn_cast <gcond *> (last);
1823 if (!cmp_stmt)
1824 return;
1825
1826 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1827 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1828 if (!TREE_CONSTANT (cmp_rhs)
1829 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1830 return;
1831 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1832 return;
1833
1834 /* If check_value_one is true, only the phi_args with value '1' will lead
1835 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1836 loop exit. */
1837 check_value_one = (((integer_onep (cmp_rhs))
1838 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1839 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
1840
1841 lhs_def_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1842 if (!lhs_def_stmt)
1843 return;
1844
1845 phi_stmt = dyn_cast <gphi *> (lhs_def_stmt);
1846 if (!phi_stmt)
1847 return;
1848
1849 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1850 {
1851 edge e1;
1852 edge_iterator ei;
1853 tree val = gimple_phi_arg_def (phi_stmt, i);
1854 edge e = gimple_phi_arg_edge (phi_stmt, i);
1855
1856 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1857 continue;
1858 if ((check_value_one ^ integer_onep (val)) == 1)
1859 continue;
1860 if (EDGE_COUNT (e->src->succs) != 1)
1861 {
1862 predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1863 continue;
1864 }
1865
1866 FOR_EACH_EDGE (e1, ei, e->src->preds)
1867 predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1868 }
1869 }
1870
1871
1872 /* Predict edge probabilities by exploiting loop structure. */
1873
1874 static void
1875 predict_loops (void)
1876 {
1877 struct loop *loop;
1878 basic_block bb;
1879 hash_set <struct loop *> with_recursion(10);
1880
1881 FOR_EACH_BB_FN (bb, cfun)
1882 {
1883 gimple_stmt_iterator gsi;
1884 tree decl;
1885
1886 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1887 if (is_gimple_call (gsi_stmt (gsi))
1888 && (decl = gimple_call_fndecl (gsi_stmt (gsi))) != NULL
1889 && recursive_call_p (current_function_decl, decl))
1890 {
1891 loop = bb->loop_father;
1892 while (loop && !with_recursion.add (loop))
1893 loop = loop_outer (loop);
1894 }
1895 }
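/* WITH_RECURSION now contains every loop that encloses a self-recursive
   call; such loops use the recursion-aware predictors below
   (e.g. PRED_LOOP_EXIT_WITH_RECURSION).  */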
1896
1897 /* Predict the exit edges and iteration counts of each natural loop,
1898 processing loops from innermost to outermost. */
1899 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1900 {
1901 basic_block bb, *bbs;
1902 unsigned j, n_exits = 0;
1903 vec<edge> exits;
1904 struct tree_niter_desc niter_desc;
1905 edge ex;
1906 struct nb_iter_bound *nb_iter;
1907 enum tree_code loop_bound_code = ERROR_MARK;
1908 tree loop_bound_step = NULL;
1909 tree loop_bound_var = NULL;
1910 tree loop_iv_base = NULL;
1911 gcond *stmt = NULL;
1912 bool recursion = with_recursion.contains (loop);
1913
1914 exits = get_loop_exit_edges (loop);
1915 FOR_EACH_VEC_ELT (exits, j, ex)
1916 if (!unlikely_executed_edge_p (ex) && !(ex->flags & EDGE_ABNORMAL_CALL))
1917 n_exits ++;
1918 if (!n_exits)
1919 {
1920 exits.release ();
1921 continue;
1922 }
1923
1924 if (dump_file && (dump_flags & TDF_DETAILS))
1925 fprintf (dump_file, "Predicting loop %i%s with %i exits.\n",
1926 loop->num, recursion ? " (with recursion)":"", n_exits);
1927 if (dump_file && (dump_flags & TDF_DETAILS)
1928 && max_loop_iterations_int (loop) >= 0)
1929 {
1930 fprintf (dump_file,
1931 "Loop %d iterates at most %i times.\n", loop->num,
1932 (int)max_loop_iterations_int (loop));
1933 }
1934 if (dump_file && (dump_flags & TDF_DETAILS)
1935 && likely_max_loop_iterations_int (loop) >= 0)
1936 {
1937 fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
1938 loop->num, (int)likely_max_loop_iterations_int (loop));
1939 }
1940
1941 FOR_EACH_VEC_ELT (exits, j, ex)
1942 {
1943 tree niter = NULL;
1944 HOST_WIDE_INT nitercst;
1945 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1946 int probability;
1947 enum br_predictor predictor;
1948 widest_int nit;
1949
1950 if (unlikely_executed_edge_p (ex)
1951 || (ex->flags & EDGE_ABNORMAL_CALL))
1952 continue;
1953 /* Loop heuristics do not expect the exit conditional to be inside an
1954 inner loop. We predict from the innermost to the outermost loop. */
1955 if (predicted_by_loop_heuristics_p (ex->src))
1956 {
1957 if (dump_file && (dump_flags & TDF_DETAILS))
1958 fprintf (dump_file, "Skipping exit %i->%i because "
1959 "it is already predicted.\n",
1960 ex->src->index, ex->dest->index);
1961 continue;
1962 }
1963 predict_extra_loop_exits (ex);
1964
1965 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1966 niter = niter_desc.niter;
1967 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1968 niter = loop_niter_by_eval (loop, ex);
1969 if (dump_file && (dump_flags & TDF_DETAILS)
1970 && TREE_CODE (niter) == INTEGER_CST)
1971 {
1972 fprintf (dump_file, "Exit %i->%i %d iterates ",
1973 ex->src->index, ex->dest->index,
1974 loop->num);
1975 print_generic_expr (dump_file, niter, TDF_SLIM);
1976 fprintf (dump_file, " times.\n");
1977 }
1978
1979 if (TREE_CODE (niter) == INTEGER_CST)
1980 {
1981 if (tree_fits_uhwi_p (niter)
1982 && max
1983 && compare_tree_int (niter, max - 1) == -1)
1984 nitercst = tree_to_uhwi (niter) + 1;
1985 else
1986 nitercst = max;
1987 predictor = PRED_LOOP_ITERATIONS;
1988 }
1989 /* If we have just one exit and we can derive some information about
1990 the number of iterations of the loop from the statements inside
1991 the loop, use it to predict this exit. */
1992 else if (n_exits == 1
1993 && estimated_stmt_executions (loop, &nit))
1994 {
1995 if (wi::gtu_p (nit, max))
1996 nitercst = max;
1997 else
1998 nitercst = nit.to_shwi ();
1999 predictor = PRED_LOOP_ITERATIONS_GUESSED;
2000 }
2001 /* If we have a likely upper bound, trust it for very small iteration
2002 counts. Such loops would otherwise get mispredicted by the standard
2003 LOOP_EXIT heuristics. */
2004 else if (n_exits == 1
2005 && likely_max_stmt_executions (loop, &nit)
2006 && wi::ltu_p (nit,
2007 RDIV (REG_BR_PROB_BASE,
2008 REG_BR_PROB_BASE
2009 - predictor_info
2010 [recursion
2011 ? PRED_LOOP_EXIT_WITH_RECURSION
2012 : PRED_LOOP_EXIT].hitrate)))
2013 {
2014 nitercst = nit.to_shwi ();
2015 predictor = PRED_LOOP_ITERATIONS_MAX;
2016 }
2017 else
2018 {
2019 if (dump_file && (dump_flags & TDF_DETAILS))
2020 fprintf (dump_file, "Nothing known about exit %i->%i.\n",
2021 ex->src->index, ex->dest->index);
2022 continue;
2023 }
2024
2025 if (dump_file && (dump_flags & TDF_DETAILS))
2026 fprintf (dump_file, "Recording prediction to %i iterations by %s.\n",
2027 (int)nitercst, predictor_info[predictor].name);
2028 /* If the prediction for number of iterations is zero, do not
2029 predict the exit edges. */
2030 if (nitercst == 0)
2031 continue;
2032
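/* The exit is expected to be taken once per NITERCST iterations, so it is
   given probability REG_BR_PROB_BASE / nitercst; e.g. (illustrative,
   assuming the usual REG_BR_PROB_BASE of 10000) nitercst == 100 yields a
   probability of 100, i.e. about 1%. */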
2033 probability = RDIV (REG_BR_PROB_BASE, nitercst);
2034 predict_edge (ex, predictor, probability);
2035 }
2036 exits.release ();
2037
2038 /* Find information about loop bound variables. */
2039 for (nb_iter = loop->bounds; nb_iter;
2040 nb_iter = nb_iter->next)
2041 if (nb_iter->stmt
2042 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
2043 {
2044 stmt = as_a <gcond *> (nb_iter->stmt);
2045 break;
2046 }
2047 if (!stmt && last_stmt (loop->header)
2048 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
2049 stmt = as_a <gcond *> (last_stmt (loop->header));
2050 if (stmt)
2051 is_comparison_with_loop_invariant_p (stmt, loop,
2052 &loop_bound_var,
2053 &loop_bound_code,
2054 &loop_bound_step,
2055 &loop_iv_base);
2056
2057 bbs = get_loop_body (loop);
2058
2059 for (j = 0; j < loop->num_nodes; j++)
2060 {
2061 edge e;
2062 edge_iterator ei;
2063
2064 bb = bbs[j];
2065
2066 /* Bypass loop heuristics on continue statements. These
2067 statements construct loops via "non-loop" constructs
2068 in the source language and are better handled
2069 separately. */
2070 if (predicted_by_p (bb, PRED_CONTINUE))
2071 {
2072 if (dump_file && (dump_flags & TDF_DETAILS))
2073 fprintf (dump_file, "BB %i predicted by continue.\n",
2074 bb->index);
2075 continue;
2076 }
2077
2078 /* If we already used more reliable loop exit predictors, do not
2079 bother with PRED_LOOP_EXIT. */
2080 if (!predicted_by_loop_heuristics_p (bb))
2081 {
2082 /* For a loop with many exits we do not want to predict all exits
2083 with a fairly large probability, because if all exits are
2084 considered in a row, the loop would be predicted to iterate
2085 almost never. The code dividing the probability by the number of
2086 exits is very rough. It should compute the number of exits
2087 taken in each path through the function (not the overall number
2088 of exits, which might be a lot higher for loops with wide switch
2089 statements in them) and compute the n-th square root.
2090
2091 We limit the minimal probability to 2% to keep
2092 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
2093 as this was causing a regression in the perl benchmark containing
2094 such a wide loop. */
2095
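/* Each remaining exit edge gets an equal share of the per-iteration exit
   probability: (REG_BR_PROB_BASE - hitrate of the chosen LOOP_EXIT
   predictor) divided by the number of exits, never dropping below the
   2% floor described above. */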
2096 int probability = ((REG_BR_PROB_BASE
2097 - predictor_info
2098 [recursion
2099 ? PRED_LOOP_EXIT_WITH_RECURSION
2100 : PRED_LOOP_EXIT].hitrate)
2101 / n_exits);
2102 if (probability < HITRATE (2))
2103 probability = HITRATE (2);
2104 FOR_EACH_EDGE (e, ei, bb->succs)
2105 if (e->dest->index < NUM_FIXED_BLOCKS
2106 || !flow_bb_inside_loop_p (loop, e->dest))
2107 {
2108 if (dump_file && (dump_flags & TDF_DETAILS))
2109 fprintf (dump_file,
2110 "Predicting exit %i->%i with prob %i.\n",
2111 e->src->index, e->dest->index, probability);
2112 predict_edge (e,
2113 recursion ? PRED_LOOP_EXIT_WITH_RECURSION
2114 : PRED_LOOP_EXIT, probability);
2115 }
2116 }
2117 if (loop_bound_var)
2118 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
2119 loop_bound_code,
2120 tree_to_shwi (loop_bound_step));
2121 }
2122
2123 /* In the following code
2124 for (loop1)
2125 if (cond)
2126 for (loop2)
2127 body;
2128 guess that cond is unlikely. */
2129 if (loop_outer (loop)->num)
2130 {
2131 basic_block bb = NULL;
2132 edge preheader_edge = loop_preheader_edge (loop);
2133
2134 if (single_pred_p (preheader_edge->src)
2135 && single_succ_p (preheader_edge->src))
2136 preheader_edge = single_pred_edge (preheader_edge->src);
2137
2138 gimple *stmt = last_stmt (preheader_edge->src);
2139 /* Pattern match fortran loop preheader:
2140 _16 = BUILTIN_EXPECT (_15, 1, PRED_FORTRAN_LOOP_PREHEADER);
2141 _17 = (logical(kind=4)) _16;
2142 if (_17 != 0)
2143 goto <bb 11>;
2144 else
2145 goto <bb 13>;
2146
2147 Loop guard branch prediction says nothing about duplicated loop
2148 headers produced by the Fortran frontend, and in this case we want
2149 to predict the paths leading to this preheader. */
2150
2151 if (stmt
2152 && gimple_code (stmt) == GIMPLE_COND
2153 && gimple_cond_code (stmt) == NE_EXPR
2154 && TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
2155 && integer_zerop (gimple_cond_rhs (stmt)))
2156 {
2157 gimple *call_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2158 if (gimple_code (call_stmt) == GIMPLE_ASSIGN
2159 && gimple_expr_code (call_stmt) == NOP_EXPR
2160 && TREE_CODE (gimple_assign_rhs1 (call_stmt)) == SSA_NAME)
2161 call_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (call_stmt));
2162 if (gimple_call_internal_p (call_stmt, IFN_BUILTIN_EXPECT)
2163 && TREE_CODE (gimple_call_arg (call_stmt, 2)) == INTEGER_CST
2164 && tree_fits_uhwi_p (gimple_call_arg (call_stmt, 2))
2165 && tree_to_uhwi (gimple_call_arg (call_stmt, 2))
2166 == PRED_FORTRAN_LOOP_PREHEADER)
2167 bb = preheader_edge->src;
2168 }
2169 if (!bb)
2170 {
2171 if (!dominated_by_p (CDI_DOMINATORS,
2172 loop_outer (loop)->latch, loop->header))
2173 predict_paths_leading_to_edge (loop_preheader_edge (loop),
2174 recursion
2175 ? PRED_LOOP_GUARD_WITH_RECURSION
2176 : PRED_LOOP_GUARD,
2177 NOT_TAKEN,
2178 loop_outer (loop));
2179 }
2180 else
2181 {
2182 if (!dominated_by_p (CDI_DOMINATORS,
2183 loop_outer (loop)->latch, bb))
2184 predict_paths_leading_to (bb,
2185 recursion
2186 ? PRED_LOOP_GUARD_WITH_RECURSION
2187 : PRED_LOOP_GUARD,
2188 NOT_TAKEN,
2189 loop_outer (loop));
2190 }
2191 }
2192
2193 /* Free basic blocks from get_loop_body. */
2194 free (bbs);
2195 }
2196 }
2197
2198 /* Attempt to predict probabilities of BB outgoing edges using local
2199 properties. */
2200 static void
2201 bb_estimate_probability_locally (basic_block bb)
2202 {
2203 rtx_insn *last_insn = BB_END (bb);
2204 rtx cond;
2205
2206 if (! can_predict_insn_p (last_insn))
2207 return;
2208 cond = get_condition (last_insn, NULL, false, false);
2209 if (! cond)
2210 return;
2211
2212 /* Try "pointer heuristic."
2213 A comparison ptr == 0 is predicted as false.
2214 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
2215 if (COMPARISON_P (cond)
2216 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
2217 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
2218 {
2219 if (GET_CODE (cond) == EQ)
2220 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
2221 else if (GET_CODE (cond) == NE)
2222 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
2223 }
2224 else
2225
2226 /* Try "opcode heuristic."
2227 EQ tests are usually false and NE tests are usually true. Also,
2228 most quantities are positive, so we can make the appropriate guesses
2229 about signed comparisons against zero. */
2230 switch (GET_CODE (cond))
2231 {
2232 case CONST_INT:
2233 /* Unconditional branch. */
2234 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
2235 cond == const0_rtx ? NOT_TAKEN : TAKEN);
2236 break;
2237
2238 case EQ:
2239 case UNEQ:
2240 /* Floating point comparisons appear to behave in a very
2241 unpredictable way because of the special role of equality
2242 tests in FP code. */
2243 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
2244 ;
2245 /* Comparisons with 0 are often used for booleans and there is
2246 nothing useful to predict about them. */
2247 else if (XEXP (cond, 1) == const0_rtx
2248 || XEXP (cond, 0) == const0_rtx)
2249 ;
2250 else
2251 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
2252 break;
2253
2254 case NE:
2255 case LTGT:
2256 /* Floating point comparisons appear to behave in a very
2257 unpredictable way because of the special role of equality
2258 tests in FP code. */
2259 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
2260 ;
2261 /* Comparisons with 0 are often used for booleans and there is
2262 nothing useful to predict about them. */
2263 else if (XEXP (cond, 1) == const0_rtx
2264 || XEXP (cond, 0) == const0_rtx)
2265 ;
2266 else
2267 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
2268 break;
2269
2270 case ORDERED:
2271 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
2272 break;
2273
2274 case UNORDERED:
2275 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
2276 break;
2277
2278 case LE:
2279 case LT:
2280 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2281 || XEXP (cond, 1) == constm1_rtx)
2282 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
2283 break;
2284
2285 case GE:
2286 case GT:
2287 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2288 || XEXP (cond, 1) == constm1_rtx)
2289 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
2290 break;
2291
2292 default:
2293 break;
2294 }
2295 }
2296
2297 /* Set edge->probability for each successor edge of BB. */
2298 void
2299 guess_outgoing_edge_probabilities (basic_block bb)
2300 {
2301 bb_estimate_probability_locally (bb);
2302 combine_predictions_for_insn (BB_END (bb), bb);
2303 }
2304 \f
2305 static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor,
2306 HOST_WIDE_INT *probability);
2307
2308 /* Helper function for expr_expected_value. */
2309
2310 static tree
2311 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
2312 tree op1, bitmap visited, enum br_predictor *predictor,
2313 HOST_WIDE_INT *probability)
2314 {
2315 gimple *def;
2316
2317 /* Reset returned probability value. */
2318 *probability = -1;
2319 *predictor = PRED_UNCONDITIONAL;
2320
2321 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2322 {
2323 if (TREE_CONSTANT (op0))
2324 return op0;
2325
2326 if (code == IMAGPART_EXPR)
2327 {
2328 if (TREE_CODE (TREE_OPERAND (op0, 0)) == SSA_NAME)
2329 {
2330 def = SSA_NAME_DEF_STMT (TREE_OPERAND (op0, 0));
2331 if (is_gimple_call (def)
2332 && gimple_call_internal_p (def)
2333 && (gimple_call_internal_fn (def)
2334 == IFN_ATOMIC_COMPARE_EXCHANGE))
2335 {
2336 /* Assume that any given atomic operation has low contention,
2337 and thus the compare-and-swap operation succeeds. */
2338 *predictor = PRED_COMPARE_AND_SWAP;
2339 return build_one_cst (TREE_TYPE (op0));
2340 }
2341 }
2342 }
2343
2344 if (code != SSA_NAME)
2345 return NULL_TREE;
2346
2347 def = SSA_NAME_DEF_STMT (op0);
2348
2349 /* If we were already here, break the infinite cycle. */
2350 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
2351 return NULL;
2352
2353 if (gimple_code (def) == GIMPLE_PHI)
2354 {
2355 /* All the arguments of the PHI node must have the same expected
2356 constant value. */
2357 int i, n = gimple_phi_num_args (def);
2358 tree val = NULL, new_val;
2359
2360 for (i = 0; i < n; i++)
2361 {
2362 tree arg = PHI_ARG_DEF (def, i);
2363 enum br_predictor predictor2;
2364
2365 /* If this PHI has itself as an argument, we cannot
2366 determine the expected value of this argument. However,
2367 if we can find an expected constant value for the other
2368 PHI args then we can still be sure that this is
2369 likely a constant. So be optimistic and just
2370 continue with the next argument. */
2371 if (arg == PHI_RESULT (def))
2372 continue;
2373
2374 HOST_WIDE_INT probability2;
2375 new_val = expr_expected_value (arg, visited, &predictor2,
2376 &probability2);
2377
2378 /* It is difficult to combine value predictors. Simply assume
2379 that the later predictor is weaker and take its prediction. */
2380 if (*predictor < predictor2)
2381 {
2382 *predictor = predictor2;
2383 *probability = probability2;
2384 }
2385 if (!new_val)
2386 return NULL;
2387 if (!val)
2388 val = new_val;
2389 else if (!operand_equal_p (val, new_val, false))
2390 return NULL;
2391 }
2392 return val;
2393 }
2394 if (is_gimple_assign (def))
2395 {
2396 if (gimple_assign_lhs (def) != op0)
2397 return NULL;
2398
2399 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
2400 gimple_assign_rhs1 (def),
2401 gimple_assign_rhs_code (def),
2402 gimple_assign_rhs2 (def),
2403 visited, predictor, probability);
2404 }
2405
2406 if (is_gimple_call (def))
2407 {
2408 tree decl = gimple_call_fndecl (def);
2409 if (!decl)
2410 {
2411 if (gimple_call_internal_p (def)
2412 && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
2413 {
2414 gcc_assert (gimple_call_num_args (def) == 3);
2415 tree val = gimple_call_arg (def, 0);
2416 if (TREE_CONSTANT (val))
2417 return val;
2418 tree val2 = gimple_call_arg (def, 2);
2419 gcc_assert (TREE_CODE (val2) == INTEGER_CST
2420 && tree_fits_uhwi_p (val2)
2421 && tree_to_uhwi (val2) < END_PREDICTORS);
2422 *predictor = (enum br_predictor) tree_to_uhwi (val2);
2423 if (*predictor == PRED_BUILTIN_EXPECT)
2424 *probability
2425 = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
2426 return gimple_call_arg (def, 1);
2427 }
2428 return NULL;
2429 }
2430
2431 if (DECL_IS_MALLOC (decl) || DECL_IS_OPERATOR_NEW (decl))
2432 {
2433 if (predictor)
2434 *predictor = PRED_MALLOC_NONNULL;
2435 return boolean_true_node;
2436 }
2437
2438 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
2439 switch (DECL_FUNCTION_CODE (decl))
2440 {
2441 case BUILT_IN_EXPECT:
2442 {
2443 tree val;
2444 if (gimple_call_num_args (def) != 2)
2445 return NULL;
2446 val = gimple_call_arg (def, 0);
2447 if (TREE_CONSTANT (val))
2448 return val;
2449 *predictor = PRED_BUILTIN_EXPECT;
2450 *probability
2451 = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
2452 return gimple_call_arg (def, 1);
2453 }
2454 case BUILT_IN_EXPECT_WITH_PROBABILITY:
2455 {
2456 tree val;
2457 if (gimple_call_num_args (def) != 3)
2458 return NULL;
2459 val = gimple_call_arg (def, 0);
2460 if (TREE_CONSTANT (val))
2461 return val;
2462 /* Compute final probability as:
2463 probability * REG_BR_PROB_BASE. */
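/* Illustrative example (assuming the usual REG_BR_PROB_BASE of 10000):
   __builtin_expect_with_probability (x, 1, 0.9) sets *probability to 9000
   and returns the expected value 1. */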
2464 tree prob = gimple_call_arg (def, 2);
2465 tree t = TREE_TYPE (prob);
2466 tree base = build_int_cst (integer_type_node,
2467 REG_BR_PROB_BASE);
2468 base = build_real_from_int_cst (t, base);
2469 tree r = fold_build2_initializer_loc (UNKNOWN_LOCATION,
2470 MULT_EXPR, t, prob, base);
2471 if (TREE_CODE (r) != REAL_CST)
2472 {
2473 error_at (gimple_location (def),
2474 "probability %qE must be "
2475 "constant floating-point expression", prob);
2476 return NULL;
2477 }
2478 HOST_WIDE_INT probi
2479 = real_to_integer (TREE_REAL_CST_PTR (r));
2480 if (probi >= 0 && probi <= REG_BR_PROB_BASE)
2481 {
2482 *predictor = PRED_BUILTIN_EXPECT_WITH_PROBABILITY;
2483 *probability = probi;
2484 }
2485 else
2486 error_at (gimple_location (def),
2487 "probability %qE is outside "
2488 "the range [0.0, 1.0]", prob);
2489
2490 return gimple_call_arg (def, 1);
2491 }
2492
2493 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
2494 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
2495 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
2496 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
2497 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
2498 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
2499 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
2500 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
2501 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
2502 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
2503 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
2504 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
2505 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
2506 /* Assume that any given atomic operation has low contention,
2507 and thus the compare-and-swap operation succeeds. */
2508 *predictor = PRED_COMPARE_AND_SWAP;
2509 return boolean_true_node;
2510 case BUILT_IN_REALLOC:
2511 if (predictor)
2512 *predictor = PRED_MALLOC_NONNULL;
2513 return boolean_true_node;
2514 default:
2515 break;
2516 }
2517 }
2518
2519 return NULL;
2520 }
2521
2522 if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
2523 {
2524 tree res;
2525 enum br_predictor predictor2;
2526 HOST_WIDE_INT probability2;
2527 op0 = expr_expected_value (op0, visited, predictor, probability);
2528 if (!op0)
2529 return NULL;
2530 op1 = expr_expected_value (op1, visited, &predictor2, &probability2);
2531 if (!op1)
2532 return NULL;
2533 res = fold_build2 (code, type, op0, op1);
2534 if (TREE_CODE (res) == INTEGER_CST
2535 && TREE_CODE (op0) == INTEGER_CST
2536 && TREE_CODE (op1) == INTEGER_CST)
2537 {
2538 /* Combine binary predictions. */
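/* Illustrative example: two operands each predicted with confidence 9000
   out of REG_BR_PROB_BASE (10000) combine to RDIV (9000 * 9000, 10000),
   i.e. 8100. */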
2539 if (*probability != -1 || probability2 != -1)
2540 {
2541 HOST_WIDE_INT p1 = get_predictor_value (*predictor, *probability);
2542 HOST_WIDE_INT p2 = get_predictor_value (predictor2, probability2);
2543 *probability = RDIV (p1 * p2, REG_BR_PROB_BASE);
2544 }
2545
2546 if (*predictor < predictor2)
2547 *predictor = predictor2;
2548
2549 return res;
2550 }
2551 return NULL;
2552 }
2553 if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
2554 {
2555 tree res;
2556 op0 = expr_expected_value (op0, visited, predictor, probability);
2557 if (!op0)
2558 return NULL;
2559 res = fold_build1 (code, type, op0);
2560 if (TREE_CONSTANT (res))
2561 return res;
2562 return NULL;
2563 }
2564 return NULL;
2565 }
2566
2567 /* Return the constant that EXPR will likely have at execution time, or NULL
2568 if unknown. The function is used by the builtin_expect branch predictor,
2569 so the evidence must come from this construct and possible constant folding.
2570
2571 We may want to implement a more involved value guess (such as value range
2572 propagation based prediction), but such tricks shall go to a new
2573 implementation. */
2574
2575 static tree
2576 expr_expected_value (tree expr, bitmap visited,
2577 enum br_predictor *predictor,
2578 HOST_WIDE_INT *probability)
2579 {
2580 enum tree_code code;
2581 tree op0, op1;
2582
2583 if (TREE_CONSTANT (expr))
2584 {
2585 *predictor = PRED_UNCONDITIONAL;
2586 *probability = -1;
2587 return expr;
2588 }
2589
2590 extract_ops_from_tree (expr, &code, &op0, &op1);
2591 return expr_expected_value_1 (TREE_TYPE (expr),
2592 op0, code, op1, visited, predictor,
2593 probability);
2594 }
2595 \f
2596
2597 /* Return the probability of a PREDICTOR. If the predictor has a variable
2598 probability, return the passed PROBABILITY. */
2599
2600 static HOST_WIDE_INT
2601 get_predictor_value (br_predictor predictor, HOST_WIDE_INT probability)
2602 {
2603 switch (predictor)
2604 {
2605 case PRED_BUILTIN_EXPECT:
2606 case PRED_BUILTIN_EXPECT_WITH_PROBABILITY:
2607 gcc_assert (probability != -1);
2608 return probability;
2609 default:
2610 gcc_assert (probability == -1);
2611 return predictor_info[(int) predictor].hitrate;
2612 }
2613 }
2614
2615 /* Predict using opcode of the last statement in basic block. */
2616 static void
2617 tree_predict_by_opcode (basic_block bb)
2618 {
2619 gimple *stmt = last_stmt (bb);
2620 edge then_edge;
2621 tree op0, op1;
2622 tree type;
2623 tree val;
2624 enum tree_code cmp;
2625 edge_iterator ei;
2626 enum br_predictor predictor;
2627 HOST_WIDE_INT probability;
2628
2629 if (!stmt)
2630 return;
2631
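/* For a switch whose index has an expected constant value, e.g.
   switch (__builtin_expect (x, 3)) (illustrative example), the edge
   leading to the matching case label is predicted as taken. */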
2632 if (gswitch *sw = dyn_cast <gswitch *> (stmt))
2633 {
2634 tree index = gimple_switch_index (sw);
2635 tree val = expr_expected_value (index, auto_bitmap (),
2636 &predictor, &probability);
2637 if (val && TREE_CODE (val) == INTEGER_CST)
2638 {
2639 edge e = find_taken_edge_switch_expr (sw, val);
2640 if (predictor == PRED_BUILTIN_EXPECT)
2641 {
2642 int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
2643 gcc_assert (percent >= 0 && percent <= 100);
2644 predict_edge (e, PRED_BUILTIN_EXPECT,
2645 HITRATE (percent));
2646 }
2647 else
2648 predict_edge_def (e, predictor, TAKEN);
2649 }
2650 }
2651
2652 if (gimple_code (stmt) != GIMPLE_COND)
2653 return;
2654 FOR_EACH_EDGE (then_edge, ei, bb->succs)
2655 if (then_edge->flags & EDGE_TRUE_VALUE)
2656 break;
2657 op0 = gimple_cond_lhs (stmt);
2658 op1 = gimple_cond_rhs (stmt);
2659 cmp = gimple_cond_code (stmt);
2660 type = TREE_TYPE (op0);
2661 val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, auto_bitmap (),
2662 &predictor, &probability);
2663 if (val && TREE_CODE (val) == INTEGER_CST)
2664 {
2665 HOST_WIDE_INT prob = get_predictor_value (predictor, probability);
2666 if (integer_zerop (val))
2667 prob = REG_BR_PROB_BASE - prob;
2668 predict_edge (then_edge, predictor, prob);
2669 }
2670 /* Try "pointer heuristic."
2671 A comparison ptr == 0 is predicted as false.
2672 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
2673 if (POINTER_TYPE_P (type))
2674 {
2675 if (cmp == EQ_EXPR)
2676 predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
2677 else if (cmp == NE_EXPR)
2678 predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
2679 }
2680 else
2681
2682 /* Try "opcode heuristic."
2683 EQ tests are usually false and NE tests are usually true. Also,
2684 most quantities are positive, so we can make the appropriate guesses
2685 about signed comparisons against zero. */
2686 switch (cmp)
2687 {
2688 case EQ_EXPR:
2689 case UNEQ_EXPR:
2690 /* Floating point comparisons appear to behave in a very
2691 unpredictable way because of the special role of equality
2692 tests in FP code. */
2693 if (FLOAT_TYPE_P (type))
2694 ;
2695 /* Comparisons with 0 are often used for booleans and there is
2696 nothing useful to predict about them. */
2697 else if (integer_zerop (op0) || integer_zerop (op1))
2698 ;
2699 else
2700 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
2701 break;
2702
2703 case NE_EXPR:
2704 case LTGT_EXPR:
2705 /* Floating point comparisons appear to behave in a very
2706 unpredictable way because of the special role of equality
2707 tests in FP code. */
2708 if (FLOAT_TYPE_P (type))
2709 ;
2710 /* Comparisons with 0 are often used for booleans and there is
2711 nothing useful to predict about them. */
2712 else if (integer_zerop (op0)
2713 || integer_zerop (op1))
2714 ;
2715 else
2716 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
2717 break;
2718
2719 case ORDERED_EXPR:
2720 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
2721 break;
2722
2723 case UNORDERED_EXPR:
2724 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
2725 break;
2726
2727 case LE_EXPR:
2728 case LT_EXPR:
2729 if (integer_zerop (op1)
2730 || integer_onep (op1)
2731 || integer_all_onesp (op1)
2732 || real_zerop (op1)
2733 || real_onep (op1)
2734 || real_minus_onep (op1))
2735 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
2736 break;
2737
2738 case GE_EXPR:
2739 case GT_EXPR:
2740 if (integer_zerop (op1)
2741 || integer_onep (op1)
2742 || integer_all_onesp (op1)
2743 || real_zerop (op1)
2744 || real_onep (op1)
2745 || real_minus_onep (op1))
2746 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
2747 break;
2748
2749 default:
2750 break;
2751 }
2752 }
2753
2754 /* Returns TRUE if STMT is an exit(0)-like statement. */
2755
2756 static bool
2757 is_exit_with_zero_arg (const gimple *stmt)
2758 {
2759 /* This is not exit, _exit or _Exit. */
2760 if (!gimple_call_builtin_p (stmt, BUILT_IN_EXIT)
2761 && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT)
2762 && !gimple_call_builtin_p (stmt, BUILT_IN__EXIT2))
2763 return false;
2764
2765 /* The argument is an integer zero. */
2766 return integer_zerop (gimple_call_arg (stmt, 0));
2767 }
2768
2769 /* Try to guess whether the return value indicates an error code. */
2770
2771 static enum br_predictor
2772 return_prediction (tree val, enum prediction *prediction)
2773 {
2774 /* VOID. */
2775 if (!val)
2776 return PRED_NO_PREDICTION;
2777 /* Different heuristics for pointers and scalars. */
2778 if (POINTER_TYPE_P (TREE_TYPE (val)))
2779 {
2780 /* NULL is usually not returned. */
2781 if (integer_zerop (val))
2782 {
2783 *prediction = NOT_TAKEN;
2784 return PRED_NULL_RETURN;
2785 }
2786 }
2787 else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
2788 {
2789 /* Negative return values are often used to indicate
2790 errors. */
2791 if (TREE_CODE (val) == INTEGER_CST
2792 && tree_int_cst_sgn (val) < 0)
2793 {
2794 *prediction = NOT_TAKEN;
2795 return PRED_NEGATIVE_RETURN;
2796 }
2797 /* Constant return values seem to be commonly taken.
2798 Zero/one often represent booleans, so exclude them from the
2799 heuristics. */
2800 if (TREE_CONSTANT (val)
2801 && (!integer_zerop (val) && !integer_onep (val)))
2802 {
2803 *prediction = NOT_TAKEN;
2804 return PRED_CONST_RETURN;
2805 }
2806 }
2807 return PRED_NO_PREDICTION;
2808 }
2809
2810 /* Return zero if phi result could have values other than -1, 0 or 1,
2811 otherwise return a bitmask, with bits 0, 1 and 2 set if -1, 0 and 1
2812 values are used or likely. */
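/* Illustrative example: a PHI whose arguments are the constants -1 and 0
   yields 1 | 2 == 3, while an argument defined by a comparison or by a
   cast from a 1-bit unsigned value contributes 2 | 4. */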
2813
2814 static int
2815 zero_one_minusone (gphi *phi, int limit)
2816 {
2817 int phi_num_args = gimple_phi_num_args (phi);
2818 int ret = 0;
2819 for (int i = 0; i < phi_num_args; i++)
2820 {
2821 tree t = PHI_ARG_DEF (phi, i);
2822 if (TREE_CODE (t) != INTEGER_CST)
2823 continue;
2824 wide_int w = wi::to_wide (t);
2825 if (w == -1)
2826 ret |= 1;
2827 else if (w == 0)
2828 ret |= 2;
2829 else if (w == 1)
2830 ret |= 4;
2831 else
2832 return 0;
2833 }
2834 for (int i = 0; i < phi_num_args; i++)
2835 {
2836 tree t = PHI_ARG_DEF (phi, i);
2837 if (TREE_CODE (t) == INTEGER_CST)
2838 continue;
2839 if (TREE_CODE (t) != SSA_NAME)
2840 return 0;
2841 gimple *g = SSA_NAME_DEF_STMT (t);
2842 if (gimple_code (g) == GIMPLE_PHI && limit > 0)
2843 if (int r = zero_one_minusone (as_a <gphi *> (g), limit - 1))
2844 {
2845 ret |= r;
2846 continue;
2847 }
2848 if (!is_gimple_assign (g))
2849 return 0;
2850 if (gimple_assign_cast_p (g))
2851 {
2852 tree rhs1 = gimple_assign_rhs1 (g);
2853 if (TREE_CODE (rhs1) != SSA_NAME
2854 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2855 || TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
2856 || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2857 return 0;
2858 ret |= (2 | 4);
2859 continue;
2860 }
2861 if (TREE_CODE_CLASS (gimple_assign_rhs_code (g)) != tcc_comparison)
2862 return 0;
2863 ret |= (2 | 4);
2864 }
2865 return ret;
2866 }
2867
2868 /* Find the basic block with the return expression and look for a possible
2869 return value, trying to apply the RETURN_PREDICTION heuristics. */
2870 static void
2871 apply_return_prediction (void)
2872 {
2873 greturn *return_stmt = NULL;
2874 tree return_val;
2875 edge e;
2876 gphi *phi;
2877 int phi_num_args, i;
2878 enum br_predictor pred;
2879 enum prediction direction;
2880 edge_iterator ei;
2881
2882 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2883 {
2884 gimple *last = last_stmt (e->src);
2885 if (last
2886 && gimple_code (last) == GIMPLE_RETURN)
2887 {
2888 return_stmt = as_a <greturn *> (last);
2889 break;
2890 }
2891 }
2892 if (!e)
2893 return;
2894 return_val = gimple_return_retval (return_stmt);
2895 if (!return_val)
2896 return;
2897 if (TREE_CODE (return_val) != SSA_NAME
2898 || !SSA_NAME_DEF_STMT (return_val)
2899 || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
2900 return;
2901 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (return_val));
2902 phi_num_args = gimple_phi_num_args (phi);
2903 pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
2904
2905 /* Avoid the case where the function returns -1, 0 and 1 values and
2906 nothing else. Those could be qsort etc. comparison functions
2907 where the negative return isn't less probable than the positive one.
2908 For this, require that the function returns at least -1 and 1, or
2909 -1 and a boolean value or comparison result, so that functions
2910 returning just -1 and 0 are treated as if -1 represents an error value. */
2911 if (INTEGRAL_TYPE_P (TREE_TYPE (return_val))
2912 && !TYPE_UNSIGNED (TREE_TYPE (return_val))
2913 && TYPE_PRECISION (TREE_TYPE (return_val)) > 1)
2914 if (int r = zero_one_minusone (phi, 3))
2915 if ((r & (1 | 4)) == (1 | 4))
2916 return;
2917
2918 /* Avoid the degenerate case where all return values from the function
2919 belong to the same category (i.e. they are all positive constants),
2920 so we can hardly say anything about them. */
2921 for (i = 1; i < phi_num_args; i++)
2922 if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
2923 break;
2924 if (i != phi_num_args)
2925 for (i = 0; i < phi_num_args; i++)
2926 {
2927 pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
2928 if (pred != PRED_NO_PREDICTION)
2929 predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
2930 direction);
2931 }
2932 }
2933
2934 /* Look for basic blocks that contain unlikely-to-happen events
2935 (such as noreturn calls) and mark all paths leading to execution
2936 of these basic blocks as unlikely. */
2937
2938 static void
2939 tree_bb_level_predictions (void)
2940 {
2941 basic_block bb;
2942 bool has_return_edges = false;
2943 edge e;
2944 edge_iterator ei;
2945
2946 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2947 if (!unlikely_executed_edge_p (e) && !(e->flags & EDGE_ABNORMAL_CALL))
2948 {
2949 has_return_edges = true;
2950 break;
2951 }
2952
2953 apply_return_prediction ();
2954
2955 FOR_EACH_BB_FN (bb, cfun)
2956 {
2957 gimple_stmt_iterator gsi;
2958
2959 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2960 {
2961 gimple *stmt = gsi_stmt (gsi);
2962 tree decl;
2963
2964 if (is_gimple_call (stmt))
2965 {
2966 if (gimple_call_noreturn_p (stmt)
2967 && has_return_edges
2968 && !is_exit_with_zero_arg (stmt))
2969 predict_paths_leading_to (bb, PRED_NORETURN,
2970 NOT_TAKEN);
2971 decl = gimple_call_fndecl (stmt);
2972 if (decl
2973 && lookup_attribute ("cold",
2974 DECL_ATTRIBUTES (decl)))
2975 predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
2976 NOT_TAKEN);
2977 if (decl && recursive_call_p (current_function_decl, decl))
2978 predict_paths_leading_to (bb, PRED_RECURSIVE_CALL,
2979 NOT_TAKEN);
2980 }
2981 else if (gimple_code (stmt) == GIMPLE_PREDICT)
2982 {
2983 predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
2984 gimple_predict_outcome (stmt));
2985 /* Keep GIMPLE_PREDICT around so early inlining will propagate
2986 hints to callers. */
2987 }
2988 }
2989 }
2990 }
2991
2992 /* Callback for hash_map::traverse, asserts that the pointer map is
2993 empty. */
2994
2995 bool
2996 assert_is_empty (const_basic_block const &, edge_prediction *const &value,
2997 void *)
2998 {
2999 gcc_assert (!value);
3000 return false;
3001 }
3002
3003 /* Predict branch probabilities and estimate profile for basic block BB.
3004 When LOCAL_ONLY is set do not use any global properties of CFG. */
3005
3006 static void
3007 tree_estimate_probability_bb (basic_block bb, bool local_only)
3008 {
3009 edge e;
3010 edge_iterator ei;
3011
3012 FOR_EACH_EDGE (e, ei, bb->succs)
3013 {
3014 /* Look for a block we are guarding (i.e. we dominate it,
3015 but it doesn't postdominate us). */
3016 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
3017 && !local_only
3018 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
3019 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
3020 {
3021 gimple_stmt_iterator bi;
3022
3023 /* The call heuristic claims that a guarded function call
3024 is improbable. This is because such calls are often used
3025 to signal exceptional situations such as printing error
3026 messages. */
3027 for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
3028 gsi_next (&bi))
3029 {
3030 gimple *stmt = gsi_stmt (bi);
3031 if (is_gimple_call (stmt)
3032 && !gimple_inexpensive_call_p (as_a <gcall *> (stmt))
3033 /* Constant and pure calls are hardly used to signal
3034 something exceptional. */
3035 && gimple_has_side_effects (stmt))
3036 {
3037 if (gimple_call_fndecl (stmt))
3038 predict_edge_def (e, PRED_CALL, NOT_TAKEN);
3039 else if (virtual_method_call_p (gimple_call_fn (stmt)))
3040 predict_edge_def (e, PRED_POLYMORPHIC_CALL, NOT_TAKEN);
3041 else
3042 predict_edge_def (e, PRED_INDIR_CALL, TAKEN);
3043 break;
3044 }
3045 }
3046 }
3047 }
3048 tree_predict_by_opcode (bb);
3049 }
3050
3051 /* Predict branch probabilities and estimate profile of the tree CFG.
3052 This function can be called from the loop optimizers to recompute
3053 the profile information.
3054 If DRY_RUN is set, do not modify CFG and only produce dump files. */
3055
3056 void
3057 tree_estimate_probability (bool dry_run)
3058 {
3059 basic_block bb;
3060
3061 add_noreturn_fake_exit_edges ();
3062 connect_infinite_loops_to_exit ();
3063 /* We use loop_niter_by_eval, which requires that the loops have
3064 preheaders. */
3065 create_preheaders (CP_SIMPLE_PREHEADERS);
3066 calculate_dominance_info (CDI_POST_DOMINATORS);
3067 /* Decide which edges are known to be unlikely. This improves later
3068 branch prediction. */
3069 determine_unlikely_bbs ();
3070
3071 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
3072 tree_bb_level_predictions ();
3073 record_loop_exits ();
3074
3075 if (number_of_loops (cfun) > 1)
3076 predict_loops ();
3077
3078 FOR_EACH_BB_FN (bb, cfun)
3079 tree_estimate_probability_bb (bb, false);
3080
3081 FOR_EACH_BB_FN (bb, cfun)
3082 combine_predictions_for_bb (bb, dry_run);
3083
3084 if (flag_checking)
3085 bb_predictions->traverse<void *, assert_is_empty> (NULL);
3086
3087 delete bb_predictions;
3088 bb_predictions = NULL;
3089
3090 if (!dry_run)
3091 estimate_bb_frequencies (false);
3092 free_dominance_info (CDI_POST_DOMINATORS);
3093 remove_fake_exit_edges ();
3094 }
3095
3096 /* Set edge->probability for each successor edge of BB. */
3097 void
3098 tree_guess_outgoing_edge_probabilities (basic_block bb)
3099 {
3100 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
3101 tree_estimate_probability_bb (bb, true);
3102 combine_predictions_for_bb (bb, false);
3103 if (flag_checking)
3104 bb_predictions->traverse<void *, assert_is_empty> (NULL);
3105 delete bb_predictions;
3106 bb_predictions = NULL;
3107 }
3108 \f
3109 /* Predict edges to successors of CUR whose sources are not postdominated by
3110 BB, using PRED, and recurse to all postdominators. */
3111
3112 static void
3113 predict_paths_for_bb (basic_block cur, basic_block bb,
3114 enum br_predictor pred,
3115 enum prediction taken,
3116 bitmap visited, struct loop *in_loop = NULL)
3117 {
3118 edge e;
3119 edge_iterator ei;
3120 basic_block son;
3121
3122 /* If we exited the loop or CUR is unconditional in the loop, there is
3123 nothing to do. */
3124 if (in_loop
3125 && (!flow_bb_inside_loop_p (in_loop, cur)
3126 || dominated_by_p (CDI_DOMINATORS, in_loop->latch, cur)))
3127 return;
3128
3129 /* We are looking for all edges forming the edge cut induced by
3130 the set of all blocks postdominated by BB. */
3131 FOR_EACH_EDGE (e, ei, cur->preds)
3132 if (e->src->index >= NUM_FIXED_BLOCKS
3133 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
3134 {
3135 edge e2;
3136 edge_iterator ei2;
3137 bool found = false;
3138
3139 /* Ignore fake edges and EH edges; we predict them as not taken anyway. */
3140 if (unlikely_executed_edge_p (e))
3141 continue;
3142 gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));
3143
3144 /* See if there is an edge from e->src that is not abnormal
3145 and does not lead to BB and does not exit the loop. */
3146 FOR_EACH_EDGE (e2, ei2, e->src->succs)
3147 if (e2 != e
3148 && !unlikely_executed_edge_p (e2)
3149 && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb)
3150 && (!in_loop || !loop_exit_edge_p (in_loop, e2)))
3151 {
3152 found = true;
3153 break;
3154 }
3155
3156 /* If there is a non-abnormal path leaving e->src, predict the edge
3157 using the predictor. Otherwise we need to look for paths
3158 leading to e->src.
3159
3160 The second case may lead to an infinite loop when we are predicting
3161 regions that are only reachable by abnormal edges. We simply
3162 prevent visiting a given BB twice. */
3163 if (found)
3164 {
3165 if (!edge_predicted_by_p (e, pred, taken))
3166 predict_edge_def (e, pred, taken);
3167 }
3168 else if (bitmap_set_bit (visited, e->src->index))
3169 predict_paths_for_bb (e->src, e->src, pred, taken, visited, in_loop);
3170 }
3171 for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
3172 son;
3173 son = next_dom_son (CDI_POST_DOMINATORS, son))
3174 predict_paths_for_bb (son, bb, pred, taken, visited, in_loop);
3175 }
3176
3177 /* Set branch probabilities according to PREDictor and the TAKEN prediction
3178 for all paths leading to BB. */
3179
3180 static void
3181 predict_paths_leading_to (basic_block bb, enum br_predictor pred,
3182 enum prediction taken, struct loop *in_loop)
3183 {
3184 predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
3185 }
3186
3187 /* Like predict_paths_leading_to but takes an edge instead of a basic block. */
3188
3189 static void
3190 predict_paths_leading_to_edge (edge e, enum br_predictor pred,
3191 enum prediction taken, struct loop *in_loop)
3192 {
3193 bool has_nonloop_edge = false;
3194 edge_iterator ei;
3195 edge e2;
3196
3197 basic_block bb = e->src;
3198 FOR_EACH_EDGE (e2, ei, bb->succs)
3199 if (e2->dest != e->src && e2->dest != e->dest
3200 && !unlikely_executed_edge_p (e)
3201 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
3202 {
3203 has_nonloop_edge = true;
3204 break;
3205 }
3206 if (!has_nonloop_edge)
3207 {
3208 predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
3209 }
3210 else
3211 predict_edge_def (e, pred, taken);
3212 }
3213 \f
3214 /* This is used to carry information about basic blocks. It is
3215 attached to the AUX field of the standard CFG block. */
3216
3217 struct block_info
3218 {
3219 /* Estimated frequency of execution of basic_block. */
3220 sreal frequency;
3221
3222 /* To keep queue of basic blocks to process. */
3223 basic_block next;
3224
3225 /* Number of predecessors we need to visit first. */
3226 int npredecessors;
3227 };
3228
3229 /* Similar information for edges. */
3230 struct edge_prob_info
3231 {
3232 /* If the edge is a loopback edge, the probability that the edge will be
3233 reached provided the header is. The estimated number of iterations of the
3234 loop can then be computed as 1 / (1 - back_edge_prob). */
3235 sreal back_edge_prob;
3236 /* True if the edge is a loopback edge in the natural loop. */
3237 unsigned int back_edge:1;
3238 };
3239
3240 #define BLOCK_INFO(B) ((block_info *) (B)->aux)
3241 #undef EDGE_INFO
3242 #define EDGE_INFO(E) ((edge_prob_info *) (E)->aux)
3243
3244 /* Helper function for estimate_bb_frequencies.
3245 Propagate the frequencies in blocks marked in
3246 TOVISIT, starting in HEAD. */
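/* A small illustration (not from the original source): for a loop whose
   back edge probability works out to p, the header frequency below becomes
   the frequency of the entry edges divided by (1 - p); e.g. p == 0.75
   corresponds to an expected four iterations per entry into the loop. */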
3247
3248 static void
3249 propagate_freq (basic_block head, bitmap tovisit)
3250 {
3251 basic_block bb;
3252 basic_block last;
3253 unsigned i;
3254 edge e;
3255 basic_block nextbb;
3256 bitmap_iterator bi;
3257
3258 /* For each basic block we need to visit, count the number of its
3259 predecessors that we need to visit first. */
3260 EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
3261 {
3262 edge_iterator ei;
3263 int count = 0;
3264
3265 bb = BASIC_BLOCK_FOR_FN (cfun, i);
3266
3267 FOR_EACH_EDGE (e, ei, bb->preds)
3268 {
3269 bool visit = bitmap_bit_p (tovisit, e->src->index);
3270
3271 if (visit && !(e->flags & EDGE_DFS_BACK))
3272 count++;
3273 else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
3274 fprintf (dump_file,
3275 "Irreducible region hit, ignoring edge to %i->%i\n",
3276 e->src->index, bb->index);
3277 }
3278 BLOCK_INFO (bb)->npredecessors = count;
3279 /* When the function never returns, we will never process the exit block. */
3280 if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
3281 bb->count = profile_count::zero ();
3282 }
3283
3284 BLOCK_INFO (head)->frequency = 1;
3285 last = head;
3286 for (bb = head; bb; bb = nextbb)
3287 {
3288 edge_iterator ei;
3289 sreal cyclic_probability = 0;
3290 sreal frequency = 0;
3291
3292 nextbb = BLOCK_INFO (bb)->next;
3293 BLOCK_INFO (bb)->next = NULL;
3294
3295 /* Compute frequency of basic block. */
3296 if (bb != head)
3297 {
3298 if (flag_checking)
3299 FOR_EACH_EDGE (e, ei, bb->preds)
3300 gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
3301 || (e->flags & EDGE_DFS_BACK));
3302
3303 FOR_EACH_EDGE (e, ei, bb->preds)
3304 if (EDGE_INFO (e)->back_edge)
3305 {
3306 cyclic_probability += EDGE_INFO (e)->back_edge_prob;
3307 }
3308 else if (!(e->flags & EDGE_DFS_BACK))
3309 {
3310 /* frequency += (e->probability
3311 * BLOCK_INFO (e->src)->frequency /
3312 REG_BR_PROB_BASE); */
3313
3314 /* FIXME: Graphite is producing edges with no profile. Once
3315 this is fixed, drop this. */
3316 sreal tmp = e->probability.initialized_p () ?
3317 e->probability.to_reg_br_prob_base () : 0;
3318 tmp *= BLOCK_INFO (e->src)->frequency;
3319 tmp *= real_inv_br_prob_base;
3320 frequency += tmp;
3321 }
3322
3323 if (cyclic_probability == 0)
3324 {
3325 BLOCK_INFO (bb)->frequency = frequency;
3326 }
3327 else
3328 {
3329 if (cyclic_probability > real_almost_one)
3330 cyclic_probability = real_almost_one;
3331
3332 /* BLOCK_INFO (bb)->frequency = frequency
3333 / (1 - cyclic_probability) */
3334
3335 cyclic_probability = sreal (1) - cyclic_probability;
3336 BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
3337 }
3338 }
3339
3340 bitmap_clear_bit (tovisit, bb->index);
3341
3342 e = find_edge (bb, head);
3343 if (e)
3344 {
3345 /* EDGE_INFO (e)->back_edge_prob
3346 = ((e->probability * BLOCK_INFO (bb)->frequency)
3347 / REG_BR_PROB_BASE); */
3348
3349 /* FIXME: Graphite is producing edges with no profile. Once
3350 this is fixed, drop this. */
3351 sreal tmp = e->probability.initialized_p () ?
3352 e->probability.to_reg_br_prob_base () : 0;
3353 tmp *= BLOCK_INFO (bb)->frequency;
3354 EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
3355 }
3356
3357 /* Propagate to successor blocks. */
3358 FOR_EACH_EDGE (e, ei, bb->succs)
3359 if (!(e->flags & EDGE_DFS_BACK)
3360 && BLOCK_INFO (e->dest)->npredecessors)
3361 {
3362 BLOCK_INFO (e->dest)->npredecessors--;
3363 if (!BLOCK_INFO (e->dest)->npredecessors)
3364 {
3365 if (!nextbb)
3366 nextbb = e->dest;
3367 else
3368 BLOCK_INFO (last)->next = e->dest;
3369
3370 last = e->dest;
3371 }
3372 }
3373 }
3374 }
3375
3376 /* Estimate frequencies in loops at same nest level. */
3377
3378 static void
3379 estimate_loops_at_level (struct loop *first_loop)
3380 {
3381 struct loop *loop;
3382
3383 for (loop = first_loop; loop; loop = loop->next)
3384 {
3385 edge e;
3386 basic_block *bbs;
3387 unsigned i;
3388 auto_bitmap tovisit;
3389
3390 estimate_loops_at_level (loop->inner);
3391
3392 /* Find current loop back edge and mark it. */
3393 e = loop_latch_edge (loop);
3394 EDGE_INFO (e)->back_edge = 1;
3395
3396 bbs = get_loop_body (loop);
3397 for (i = 0; i < loop->num_nodes; i++)
3398 bitmap_set_bit (tovisit, bbs[i]->index);
3399 free (bbs);
3400 propagate_freq (loop->header, tovisit);
3401 }
3402 }
3403
3404 /* Propagates frequencies through structure of loops. */
3405
3406 static void
3407 estimate_loops (void)
3408 {
3409 auto_bitmap tovisit;
3410 basic_block bb;
3411
3412 /* Start by estimating the frequencies in the loops. */
3413 if (number_of_loops (cfun) > 1)
3414 estimate_loops_at_level (current_loops->tree_root->inner);
3415
3416 /* Now propagate the frequencies through all the blocks. */
3417 FOR_ALL_BB_FN (bb, cfun)
3418 {
3419 bitmap_set_bit (tovisit, bb->index);
3420 }
3421 propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
3422 }
3423
3424 /* Drop the profile for NODE to guessed, and update its frequency based on
3425 whether it is expected to be hot given the CALL_COUNT. */
3426
3427 static void
3428 drop_profile (struct cgraph_node *node, profile_count call_count)
3429 {
3430 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
3431 /* In the case where this was called by another function with a
3432 dropped profile, call_count will be 0. Since there are no
3433 non-zero call counts to this function, we don't know for sure
3434 whether it is hot, and therefore it will be marked normal below. */
3435 bool hot = maybe_hot_count_p (NULL, call_count);
3436
3437 if (dump_file)
3438 fprintf (dump_file,
3439 "Dropping 0 profile for %s. %s based on calls.\n",
3440 node->dump_name (),
3441 hot ? "Function is hot" : "Function is normal");
3442 /* We only expect to miss profiles for functions that are reached
3443 via non-zero call edges in cases where the function may have
3444 been linked from another module or library (COMDATs and extern
3445 templates). See the comments below for handle_missing_profiles.
3446 Also, only warn in cases where the missing counts exceed the
3447 number of training runs. In certain cases with an execv followed
3448 by a no-return call the profile for the no-return call is not
3449 dumped and there can be a mismatch. */
3450 if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
3451 && call_count > profile_info->runs)
3452 {
3453 if (flag_profile_correction)
3454 {
3455 if (dump_file)
3456 fprintf (dump_file,
3457 "Missing counts for called function %s\n",
3458 node->dump_name ());
3459 }
3460 else
3461 warning (0, "Missing counts for called function %s",
3462 node->dump_name ());
3463 }
3464
3465 basic_block bb;
3466 if (opt_for_fn (node->decl, flag_guess_branch_prob))
3467 {
3468 bool clear_zeros
3469 = !ENTRY_BLOCK_PTR_FOR_FN (fn)->count.nonzero_p ();
3470 FOR_ALL_BB_FN (bb, fn)
3471 if (clear_zeros || !(bb->count == profile_count::zero ()))
3472 bb->count = bb->count.guessed_local ();
3473 fn->cfg->count_max = fn->cfg->count_max.guessed_local ();
3474 }
3475 else
3476 {
3477 FOR_ALL_BB_FN (bb, fn)
3478 bb->count = profile_count::uninitialized ();
3479 fn->cfg->count_max = profile_count::uninitialized ();
3480 }
3481
3482 struct cgraph_edge *e;
3483 for (e = node->callees; e; e = e->next_callee)
3484 e->count = gimple_bb (e->call_stmt)->count;
3485 for (e = node->indirect_calls; e; e = e->next_callee)
3486 e->count = gimple_bb (e->call_stmt)->count;
3487 node->count = ENTRY_BLOCK_PTR_FOR_FN (fn)->count;
3488
3489 profile_status_for_fn (fn)
3490 = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
3491 node->frequency
3492 = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
3493 }
3494
3495 /* In the case of COMDAT routines, multiple object files will contain the same
3496 function and the linker will select one for the binary. In that case
3497 all the other copies in the profile-instrumented binary will be missing
3498 profile counts. Look for cases where this happened, due to non-zero
3499 call counts going to 0-count functions, and drop the profile to guessed
3500 so that we can use the estimated probabilities and avoid optimizing only
3501 for size.
3502
3503 The other case where the profile may be missing is when the routine
3504 is not going to be emitted to the object file, e.g. for "extern template"
3505 class methods. Those will be marked DECL_EXTERNAL. Emit a warning in
3506 all other cases of non-zero calls to 0-count functions. */
3507
3508 void
3509 handle_missing_profiles (void)
3510 {
3511 struct cgraph_node *node;
3512 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
3513 auto_vec<struct cgraph_node *, 64> worklist;
3514
3515 /* See if a 0-count function has non-0-count callers. In this case we
3516 lost some profile. Drop its function profile to PROFILE_GUESSED. */
3517 FOR_EACH_DEFINED_FUNCTION (node)
3518 {
3519 struct cgraph_edge *e;
3520 profile_count call_count = profile_count::zero ();
3521 gcov_type max_tp_first_run = 0;
3522 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
3523
3524 if (node->count.ipa ().nonzero_p ())
3525 continue;
3526 for (e = node->callers; e; e = e->next_caller)
3527 if (e->count.ipa ().initialized_p () && e->count.ipa () > 0)
3528 {
3529 call_count = call_count + e->count.ipa ();
3530
3531 if (e->caller->tp_first_run > max_tp_first_run)
3532 max_tp_first_run = e->caller->tp_first_run;
3533 }
3534
3535 /* If the time profile is missing, assign the maximum that comes from
3536 the caller functions. */
3537 if (!node->tp_first_run && max_tp_first_run)
3538 node->tp_first_run = max_tp_first_run + 1;
3539
3540 if (call_count > 0
3541 && fn && fn->cfg
3542 && (call_count.apply_scale (unlikely_count_fraction, 1)
3543 >= profile_info->runs))
3544 {
3545 drop_profile (node, call_count);
3546 worklist.safe_push (node);
3547 }
3548 }
3549
3550 /* Propagate the profile dropping to other 0-count COMDATs that are
3551 potentially called by COMDATs we already dropped the profile on. */
3552 while (worklist.length () > 0)
3553 {
3554 struct cgraph_edge *e;
3555
3556 node = worklist.pop ();
3557 for (e = node->callees; e; e = e->next_callee)
3558 {
3559 struct cgraph_node *callee = e->callee;
3560 struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);
3561
3562 if (!(e->count.ipa () == profile_count::zero ())
3563 && callee->count.ipa ().nonzero_p ())
3564 continue;
3565 if ((DECL_COMDAT (callee->decl) || DECL_EXTERNAL (callee->decl))
3566 && fn && fn->cfg
3567 && profile_status_for_fn (fn) == PROFILE_READ)
3568 {
3569 drop_profile (node, profile_count::zero ());
3570 worklist.safe_push (callee);
3571 }
3572 }
3573 }
3574 }
3575
3576 /* Recompute cfun->cfg->count_max as the maximum of the basic block counts.
3577 Return nonzero iff there was any nonzero IPA execution count. */
3578
3579 bool
3580 update_max_bb_count (void)
3581 {
3582 profile_count true_count_max = profile_count::uninitialized ();
3583 basic_block bb;
3584
3585 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3586 true_count_max = true_count_max.max (bb->count);
3587
3588 cfun->cfg->count_max = true_count_max;
3589
3590 return true_count_max.ipa ().nonzero_p ();
3591 }
3592
3593 /* Return true if the function is likely to be expensive, so there is no point
3594 in optimizing the performance of the prologue, the epilogue or doing inlining
3595 at the expense of code size growth. THRESHOLD is the limit on the number of
3596 instructions the function can execute on average to still be considered not expensive. */
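/* For instance (illustrative): with THRESHOLD == 1000, the function is
   reported expensive once the sum of basic block counts over all active
   insns exceeds 1000 times the entry block count. */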
3597
3598 bool
3599 expensive_function_p (int threshold)
3600 {
3601 basic_block bb;
3602
3603 /* If the profile was scaled in such a way that the entry block has count 0,
3604 then the function is definitely taking a lot of time. */
3605 if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.nonzero_p ())
3606 return true;
3607
3608 profile_count limit = ENTRY_BLOCK_PTR_FOR_FN
3609 (cfun)->count.apply_scale (threshold, 1);
3610 profile_count sum = profile_count::zero ();
3611 FOR_EACH_BB_FN (bb, cfun)
3612 {
3613 rtx_insn *insn;
3614
3615 if (!bb->count.initialized_p ())
3616 {
3617 if (dump_file)
3618 fprintf (dump_file, "Function is considered expensive because"
3619 " count of bb %i is not initialized\n", bb->index);
3620 return true;
3621 }
3622
3623 FOR_BB_INSNS (bb, insn)
3624 if (active_insn_p (insn))
3625 {
3626 sum += bb->count;
3627 if (sum > limit)
3628 return true;
3629 }
3630 }
3631
3632 return false;
3633 }
3634
3635 /* All basic blocks that are reachable only from unlikely basic blocks are
3636 unlikely. */
3637
3638 void
3639 propagate_unlikely_bbs_forward (void)
3640 {
3641 auto_vec<basic_block, 64> worklist;
3642 basic_block bb;
3643 edge_iterator ei;
3644 edge e;
3645
3646 if (!(ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ()))
3647 {
3648 ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = (void *)(size_t) 1;
3649 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
3650
3651 while (worklist.length () > 0)
3652 {
3653 bb = worklist.pop ();
3654 FOR_EACH_EDGE (e, ei, bb->succs)
3655 if (!(e->count () == profile_count::zero ())
3656 && !(e->dest->count == profile_count::zero ())
3657 && !e->dest->aux)
3658 {
3659 e->dest->aux = (void *)(size_t) 1;
3660 worklist.safe_push (e->dest);
3661 }
3662 }
3663 }
3664
3665 FOR_ALL_BB_FN (bb, cfun)
3666 {
3667 if (!bb->aux)
3668 {
3669 if (!(bb->count == profile_count::zero ())
3670 && (dump_file && (dump_flags & TDF_DETAILS)))
3671 fprintf (dump_file,
3672 "Basic block %i is marked unlikely by forward prop\n",
3673 bb->index);
3674 bb->count = profile_count::zero ();
3675 }
3676 else
3677 bb->aux = NULL;
3678 }
3679 }
3680
3681 /* Determine basic blocks/edges that are known to be unlikely executed and set
3682 their counters to zero.
3683 This is done by first identifying obviously unlikely BBs/edges and then
3684 propagating in both directions. */
3685
3686 static void
3687 determine_unlikely_bbs ()
3688 {
3689 basic_block bb;
3690 auto_vec<basic_block, 64> worklist;
3691 edge_iterator ei;
3692 edge e;
3693
3694 FOR_EACH_BB_FN (bb, cfun)
3695 {
3696 if (!(bb->count == profile_count::zero ())
3697 && unlikely_executed_bb_p (bb))
3698 {
3699 if (dump_file && (dump_flags & TDF_DETAILS))
3700 fprintf (dump_file, "Basic block %i is locally unlikely\n",
3701 bb->index);
3702 bb->count = profile_count::zero ();
3703 }
3704
3705 FOR_EACH_EDGE (e, ei, bb->succs)
3706 if (!(e->probability == profile_probability::never ())
3707 && unlikely_executed_edge_p (e))
3708 {
3709 if (dump_file && (dump_flags & TDF_DETAILS))
3710 fprintf (dump_file, "Edge %i->%i is locally unlikely\n",
3711 bb->index, e->dest->index);
3712 e->probability = profile_probability::never ();
3713 }
3714
3715 gcc_checking_assert (!bb->aux);
3716 }
3717 propagate_unlikely_bbs_forward ();
3718
3719 auto_vec<int, 64> nsuccs;
3720 nsuccs.safe_grow_cleared (last_basic_block_for_fn (cfun));
3721 FOR_ALL_BB_FN (bb, cfun)
3722 if (!(bb->count == profile_count::zero ())
3723 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3724 {
3725 nsuccs[bb->index] = 0;
3726 FOR_EACH_EDGE (e, ei, bb->succs)
3727 if (!(e->probability == profile_probability::never ())
3728 && !(e->dest->count == profile_count::zero ()))
3729 nsuccs[bb->index]++;
3730 if (!nsuccs[bb->index])
3731 worklist.safe_push (bb);
3732 }
3733 while (worklist.length () > 0)
3734 {
3735 bb = worklist.pop ();
3736 if (bb->count == profile_count::zero ())
3737 continue;
3738 if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
3739 {
3740 bool found = false;
3741 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
3742 !gsi_end_p (gsi); gsi_next (&gsi))
3743 if (stmt_can_terminate_bb_p (gsi_stmt (gsi))
3744 		  /* stmt_can_terminate_bb_p special cases noreturns because it
3745 		     assumes that fake edges are created.  We want to know that
3746 		     a noreturn call alone does not imply that the BB is unlikely.  */
3747 || (is_gimple_call (gsi_stmt (gsi))
3748 && (gimple_call_flags (gsi_stmt (gsi)) & ECF_NORETURN)))
3749 {
3750 found = true;
3751 break;
3752 }
3753 if (found)
3754 continue;
3755 }
3756 if (dump_file && (dump_flags & TDF_DETAILS))
3757 fprintf (dump_file,
3758 "Basic block %i is marked unlikely by backward prop\n",
3759 bb->index);
3760 bb->count = profile_count::zero ();
3761 FOR_EACH_EDGE (e, ei, bb->preds)
3762 if (!(e->probability == profile_probability::never ()))
3763 {
3764 if (!(e->src->count == profile_count::zero ()))
3765 {
3766 gcc_checking_assert (nsuccs[e->src->index] > 0);
3767 nsuccs[e->src->index]--;
3768 if (!nsuccs[e->src->index])
3769 worklist.safe_push (e->src);
3770 }
3771 }
3772 }
3773   /* Finally, all edges from non-zero count regions into zero count blocks are unlikely.  */
3774 FOR_ALL_BB_FN (bb, cfun)
3775 {
3776 if (!(bb->count == profile_count::zero ()))
3777 FOR_EACH_EDGE (e, ei, bb->succs)
3778 if (!(e->probability == profile_probability::never ())
3779 && e->dest->count == profile_count::zero ())
3780 {
3781 if (dump_file && (dump_flags & TDF_DETAILS))
3782 fprintf (dump_file, "Edge %i->%i is unlikely because "
3783 "it enters unlikely block\n",
3784 bb->index, e->dest->index);
3785 e->probability = profile_probability::never ();
3786 }
3787
3788 edge other = NULL;
3789
3790 FOR_EACH_EDGE (e, ei, bb->succs)
3791 if (e->probability == profile_probability::never ())
3792 ;
3793 else if (other)
3794 {
3795 other = NULL;
3796 break;
3797 }
3798 else
3799 other = e;
3800 if (other
3801 && !(other->probability == profile_probability::always ()))
3802 {
3803 if (dump_file && (dump_flags & TDF_DETAILS))
3804 fprintf (dump_file, "Edge %i->%i is locally likely\n",
3805 bb->index, other->dest->index);
3806 other->probability = profile_probability::always ();
3807 }
3808 }
3809 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count == profile_count::zero ())
3810 cgraph_node::get (current_function_decl)->count = profile_count::zero ();
3811 }
3812
3813 /* Estimate and propagate basic block frequencies using the given branch
3814 probabilities. If FORCE is true, the frequencies are used to estimate
3815 the counts even when there are already non-zero profile counts. */
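/* A small worked example (illustrative only): if the entry block has
   frequency 1 and ends in a conditional whose edges are predicted 50%/50%,
   each arm receives frequency 0.5 and the join block after the conditional
   receives 0.5 + 0.5 = 1 again; loop bodies are additionally scaled by the
   expected number of iterations computed in estimate_loops.  */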
3816
3817 void
3818 estimate_bb_frequencies (bool force)
3819 {
3820 basic_block bb;
3821 sreal freq_max;
3822
3823 determine_unlikely_bbs ();
3824
3825 if (force || profile_status_for_fn (cfun) != PROFILE_READ
3826 || !update_max_bb_count ())
3827 {
3828 static int real_values_initialized = 0;
3829
3830 if (!real_values_initialized)
3831 {
3832 real_values_initialized = 1;
3833 real_br_prob_base = REG_BR_PROB_BASE;
3834 /* Scaling frequencies up to maximal profile count may result in
3835 frequent overflows especially when inlining loops.
3836 	     Small scaling results in unnecessary precision loss.  Stay in
3837 	     the middle of the (exponential) range.  */
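	  /* As a rough illustration (numbers depend on the configured
	     profile_count representation): with an N-bit count this picks
	     2^(N/2), so estimated frequencies top out near the square root
	     of the largest representable count rather than at its maximum.  */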
3838 real_bb_freq_max = (uint64_t)1 << (profile_count::n_bits / 2);
3839 real_one_half = sreal (1, -1);
3840 real_inv_br_prob_base = sreal (1) / real_br_prob_base;
3841 real_almost_one = sreal (1) - real_inv_br_prob_base;
3842 }
3843
3844 mark_dfs_back_edges ();
3845
3846 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
3847 profile_probability::always ();
3848
3849 /* Set up block info for each basic block. */
3850 alloc_aux_for_blocks (sizeof (block_info));
3851 alloc_aux_for_edges (sizeof (edge_prob_info));
3852 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3853 {
3854 edge e;
3855 edge_iterator ei;
3856
3857 FOR_EACH_EDGE (e, ei, bb->succs)
3858 {
3859 /* FIXME: Graphite is producing edges with no profile. Once
3860 this is fixed, drop this. */
3861 if (e->probability.initialized_p ())
3862 EDGE_INFO (e)->back_edge_prob
3863 = e->probability.to_reg_br_prob_base ();
3864 else
3865 EDGE_INFO (e)->back_edge_prob = REG_BR_PROB_BASE / 2;
3866 EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
3867 }
3868 }
3869
3870 /* First compute frequencies locally for each loop from innermost
3871 to outermost to examine frequencies for back edges. */
3872 estimate_loops ();
3873
3874 freq_max = 0;
3875 FOR_EACH_BB_FN (bb, cfun)
3876 if (freq_max < BLOCK_INFO (bb)->frequency)
3877 freq_max = BLOCK_INFO (bb)->frequency;
3878
3879 freq_max = real_bb_freq_max / freq_max;
3880 if (freq_max < 16)
3881 freq_max = 16;
3882 profile_count ipa_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ();
3883 cfun->cfg->count_max = profile_count::uninitialized ();
3884 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3885 {
3886 sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
3887 profile_count count = profile_count::from_gcov_type (tmp.to_int ());
3888
3889 /* If we have profile feedback in which this function was never
3890 executed, then preserve this info. */
3891 if (!(bb->count == profile_count::zero ()))
3892 bb->count = count.guessed_local ().combine_with_ipa_count (ipa_count);
3893 cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
3894 }
3895
3896 free_aux_for_blocks ();
3897 free_aux_for_edges ();
3898 }
3899 compute_function_frequency ();
3900 }
3901
3902 /* Decide whether function is hot, cold or unlikely executed. */
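/* Illustrative examples (hypothetical declarations; assuming no profile
   feedback is available):

     __attribute__ ((cold)) void report_fatal (const char *);
	-> NODE_FREQUENCY_UNLIKELY_EXECUTED
     __attribute__ ((hot)) void inner_kernel (void);
	-> NODE_FREQUENCY_HOT
     int main (void);
	-> NODE_FREQUENCY_EXECUTED_ONCE  */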
3903 void
3904 compute_function_frequency (void)
3905 {
3906 basic_block bb;
3907 struct cgraph_node *node = cgraph_node::get (current_function_decl);
3908
3909 if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3910 || MAIN_NAME_P (DECL_NAME (current_function_decl)))
3911 node->only_called_at_startup = true;
3912 if (DECL_STATIC_DESTRUCTOR (current_function_decl))
3913 node->only_called_at_exit = true;
3914
3915 if (profile_status_for_fn (cfun) != PROFILE_READ)
3916 {
3917 int flags = flags_from_decl_or_type (current_function_decl);
3918 if ((ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa_p ()
3919 	  && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa () == profile_count::zero ())
3920 || lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
3921 != NULL)
3922 {
3923 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3924 warn_function_cold (current_function_decl);
3925 }
3926 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
3927 != NULL)
3928 node->frequency = NODE_FREQUENCY_HOT;
3929 else if (flags & ECF_NORETURN)
3930 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3931 else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
3932 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3933 else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3934 || DECL_STATIC_DESTRUCTOR (current_function_decl))
3935 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3936 return;
3937 }
3938
3939 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3940 warn_function_cold (current_function_decl);
3941   if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa () == profile_count::zero ())
3942 return;
3943 FOR_EACH_BB_FN (bb, cfun)
3944 {
3945 if (maybe_hot_bb_p (cfun, bb))
3946 {
3947 node->frequency = NODE_FREQUENCY_HOT;
3948 return;
3949 }
3950 if (!probably_never_executed_bb_p (cfun, bb))
3951 node->frequency = NODE_FREQUENCY_NORMAL;
3952 }
3953 }
3954
3955 /* Build PREDICT_EXPR. */
3956 tree
3957 build_predict_expr (enum br_predictor predictor, enum prediction taken)
3958 {
3959 tree t = build1 (PREDICT_EXPR, void_type_node,
3960 build_int_cst (integer_type_node, predictor));
3961 SET_PREDICT_EXPR_OUTCOME (t, taken);
3962 return t;
3963 }
3964
3965 const char *
3966 predictor_name (enum br_predictor predictor)
3967 {
3968 return predictor_info[predictor].name;
3969 }
3970
3971 /* Predict branch probabilities and estimate profile of the tree CFG. */
3972
3973 namespace {
3974
3975 const pass_data pass_data_profile =
3976 {
3977 GIMPLE_PASS, /* type */
3978 "profile_estimate", /* name */
3979 OPTGROUP_NONE, /* optinfo_flags */
3980 TV_BRANCH_PROB, /* tv_id */
3981 PROP_cfg, /* properties_required */
3982 0, /* properties_provided */
3983 0, /* properties_destroyed */
3984 0, /* todo_flags_start */
3985 0, /* todo_flags_finish */
3986 };
3987
3988 class pass_profile : public gimple_opt_pass
3989 {
3990 public:
3991 pass_profile (gcc::context *ctxt)
3992 : gimple_opt_pass (pass_data_profile, ctxt)
3993 {}
3994
3995 /* opt_pass methods: */
3996 virtual bool gate (function *) { return flag_guess_branch_prob; }
3997 virtual unsigned int execute (function *);
3998
3999 }; // class pass_profile
4000
4001 unsigned int
4002 pass_profile::execute (function *fun)
4003 {
4004 unsigned nb_loops;
4005
4006 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
4007 return 0;
4008
4009 loop_optimizer_init (LOOPS_NORMAL);
4010 if (dump_file && (dump_flags & TDF_DETAILS))
4011 flow_loops_dump (dump_file, NULL, 0);
4012
4013 mark_irreducible_loops ();
4014
4015 nb_loops = number_of_loops (fun);
4016 if (nb_loops > 1)
4017 scev_initialize ();
4018
4019 tree_estimate_probability (false);
4020
4021 if (nb_loops > 1)
4022 scev_finalize ();
4023
4024 loop_optimizer_finalize ();
4025 if (dump_file && (dump_flags & TDF_DETAILS))
4026 gimple_dump_cfg (dump_file, dump_flags);
4027 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
4028 profile_status_for_fn (fun) = PROFILE_GUESSED;
4029 if (dump_file && (dump_flags & TDF_DETAILS))
4030 {
4031 struct loop *loop;
4032 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
4033 if (loop->header->count.initialized_p ())
4034 fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
4035 loop->num,
4036 (int)expected_loop_iterations_unbounded (loop));
4037 }
4038 return 0;
4039 }
4040
4041 } // anon namespace
4042
4043 gimple_opt_pass *
4044 make_pass_profile (gcc::context *ctxt)
4045 {
4046 return new pass_profile (ctxt);
4047 }
4048
4049 /* Return true when PRED predictor should be removed after early
4050    tree passes.  Most of the predictors are beneficial to survive,
4051    as early inlining can also distribute them into callers' bodies.  */
4052
4053 static bool
4054 strip_predictor_early (enum br_predictor pred)
4055 {
4056 switch (pred)
4057 {
4058 case PRED_TREE_EARLY_RETURN:
4059 return true;
4060 default:
4061 return false;
4062 }
4063 }
4064
4065 /* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
4066 we no longer need. EARLY is set to true when called from early
4067 optimizations. */
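/* For instance, once its hint has been consumed, a call

     x_1 = __builtin_expect (cond_2, 1);

   is replaced below by the plain assignment

     x_1 = cond_2;

   and GIMPLE_PREDICT statements are simply removed.  (Illustrative sketch
   of the transformation this function performs; the SSA names are made up.)  */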
4068
4069 unsigned int
4070 strip_predict_hints (function *fun, bool early)
4071 {
4072 basic_block bb;
4073 gimple *ass_stmt;
4074 tree var;
4075 bool changed = false;
4076
4077 FOR_EACH_BB_FN (bb, fun)
4078 {
4079 gimple_stmt_iterator bi;
4080 for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
4081 {
4082 gimple *stmt = gsi_stmt (bi);
4083
4084 if (gimple_code (stmt) == GIMPLE_PREDICT)
4085 {
4086 if (!early
4087 || strip_predictor_early (gimple_predict_predictor (stmt)))
4088 {
4089 gsi_remove (&bi, true);
4090 changed = true;
4091 continue;
4092 }
4093 }
4094 else if (is_gimple_call (stmt))
4095 {
4096 tree fndecl = gimple_call_fndecl (stmt);
4097
4098 if (!early
4099 && ((fndecl != NULL_TREE
4100 && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
4101 && gimple_call_num_args (stmt) == 2)
4102 || (fndecl != NULL_TREE
4103 && fndecl_built_in_p (fndecl,
4104 BUILT_IN_EXPECT_WITH_PROBABILITY)
4105 && gimple_call_num_args (stmt) == 3)
4106 || (gimple_call_internal_p (stmt)
4107 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)))
4108 {
4109 var = gimple_call_lhs (stmt);
4110 changed = true;
4111 if (var)
4112 {
4113 ass_stmt
4114 = gimple_build_assign (var, gimple_call_arg (stmt, 0));
4115 gsi_replace (&bi, ass_stmt, true);
4116 }
4117 else
4118 {
4119 gsi_remove (&bi, true);
4120 continue;
4121 }
4122 }
4123 }
4124 gsi_next (&bi);
4125 }
4126 }
4127 return changed ? TODO_cleanup_cfg : 0;
4128 }
4129
4130 namespace {
4131
4132 const pass_data pass_data_strip_predict_hints =
4133 {
4134 GIMPLE_PASS, /* type */
4135 "*strip_predict_hints", /* name */
4136 OPTGROUP_NONE, /* optinfo_flags */
4137 TV_BRANCH_PROB, /* tv_id */
4138 PROP_cfg, /* properties_required */
4139 0, /* properties_provided */
4140 0, /* properties_destroyed */
4141 0, /* todo_flags_start */
4142 0, /* todo_flags_finish */
4143 };
4144
4145 class pass_strip_predict_hints : public gimple_opt_pass
4146 {
4147 public:
4148 pass_strip_predict_hints (gcc::context *ctxt)
4149 : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
4150 {}
4151
4152 /* opt_pass methods: */
4153 opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
4154 void set_pass_param (unsigned int n, bool param)
4155 {
4156 gcc_assert (n == 0);
4157 early_p = param;
4158 }
4159
4160 virtual unsigned int execute (function *);
4161
4162 private:
4163 bool early_p;
4164
4165 }; // class pass_strip_predict_hints
4166
4167 unsigned int
4168 pass_strip_predict_hints::execute (function *fun)
4169 {
4170 return strip_predict_hints (fun, early_p);
4171 }
4172
4173 } // anon namespace
4174
4175 gimple_opt_pass *
4176 make_pass_strip_predict_hints (gcc::context *ctxt)
4177 {
4178 return new pass_strip_predict_hints (ctxt);
4179 }
4180
4181 /* Rebuild function frequencies. Passes are in general expected to
4182 maintain profile by hand, however in some cases this is not possible:
4183    for example, when inlining several functions with loops, frequencies might
4184    run out of scale and thus need to be recomputed.  */
4185
4186 void
4187 rebuild_frequencies (void)
4188 {
4189 timevar_push (TV_REBUILD_FREQUENCIES);
4190
4191 /* When the max bb count in the function is small, there is a higher
4192 chance that there were truncation errors in the integer scaling
4193 of counts by inlining and other optimizations. This could lead
4194 to incorrect classification of code as being cold when it isn't.
4195 In that case, force the estimation of bb counts/frequencies from the
4196 branch probabilities, rather than computing frequencies from counts,
4197 which may also lead to frequencies incorrectly reduced to 0. There
4198 is less precision in the probabilities, so we only do this for small
4199 max counts. */
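  /* For example (purely illustrative arithmetic): a block executed 3 times
     whose count is scaled by 1/8 during inlining truncates to 0 in integer
     arithmetic, which would misclassify the block as never executed even
     though it is merely infrequent.  */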
4200 cfun->cfg->count_max = profile_count::uninitialized ();
4201 basic_block bb;
4202 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
4203 cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
4204
4205 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
4206 {
4207 loop_optimizer_init (0);
4208 add_noreturn_fake_exit_edges ();
4209 mark_irreducible_loops ();
4210 connect_infinite_loops_to_exit ();
4211 estimate_bb_frequencies (true);
4212 remove_fake_exit_edges ();
4213 loop_optimizer_finalize ();
4214 }
4215 else if (profile_status_for_fn (cfun) == PROFILE_READ)
4216 update_max_bb_count ();
4217 else if (profile_status_for_fn (cfun) == PROFILE_ABSENT
4218 && !flag_guess_branch_prob)
4219 ;
4220 else
4221 gcc_unreachable ();
4222 timevar_pop (TV_REBUILD_FREQUENCIES);
4223 }
4224
4225 /* Perform a dry run of the branch prediction pass and report comparison of
4226 the predicted and real profile into the dump file. */
4227
4228 void
4229 report_predictor_hitrates (void)
4230 {
4231 unsigned nb_loops;
4232
4233 loop_optimizer_init (LOOPS_NORMAL);
4234 if (dump_file && (dump_flags & TDF_DETAILS))
4235 flow_loops_dump (dump_file, NULL, 0);
4236
4237 mark_irreducible_loops ();
4238
4239 nb_loops = number_of_loops (cfun);
4240 if (nb_loops > 1)
4241 scev_initialize ();
4242
4243 tree_estimate_probability (true);
4244
4245 if (nb_loops > 1)
4246 scev_finalize ();
4247
4248 loop_optimizer_finalize ();
4249 }
4250
4251 /* Force edge E to be cold.
4252    If IMPOSSIBLE is true, force the edge to have count and probability 0;
4253    otherwise keep a low probability to represent a possible error in a guess.
4254    This is used e.g. when we predict a loop to likely iterate a given number
4255    of times but we are not 100% sure.
4256
4257    This function locally updates the profile without attempting to keep global
4258    consistency, which cannot be reached in full generality without a full
4259    profile rebuild from probabilities alone.  Doing so is not necessarily a good
4260    idea because frequencies and counts may be more realistic than probabilities.
4261
4262 In some cases (such as for elimination of early exits during full loop
4263 unrolling) the caller can ensure that profile will get consistent
4264 afterwards. */
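/* A small worked example (illustrative only): if E carried probability 30%
   and is forced impossible, its probability becomes 0% and the remaining
   out edges of E->src, which summed to 70%, are each divided by 0.7 so that
   the outgoing probabilities again sum to 100%.  */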
4265
4266 void
4267 force_edge_cold (edge e, bool impossible)
4268 {
4269 profile_count count_sum = profile_count::zero ();
4270 profile_probability prob_sum = profile_probability::never ();
4271 edge_iterator ei;
4272 edge e2;
4273 bool uninitialized_exit = false;
4274
4275 /* When branch probability guesses are not known, then do nothing. */
4276 if (!impossible && !e->count ().initialized_p ())
4277 return;
4278
4279 profile_probability goal = (impossible ? profile_probability::never ()
4280 : profile_probability::very_unlikely ());
4281
4282   /* If the edge is already improbable or cold, just return.  */
4283 if (e->probability <= goal
4284 && (!impossible || e->count () == profile_count::zero ()))
4285 return;
4286 FOR_EACH_EDGE (e2, ei, e->src->succs)
4287 if (e2 != e)
4288 {
4289 	if (e2->flags & EDGE_FAKE)
4290 continue;
4291 if (e2->count ().initialized_p ())
4292 count_sum += e2->count ();
4293 if (e2->probability.initialized_p ())
4294 prob_sum += e2->probability;
4295 else
4296 uninitialized_exit = true;
4297 }
4298
4299 /* If we are not guessing profiles but have some other edges out,
4300 just assume the control flow goes elsewhere. */
4301 if (uninitialized_exit)
4302 e->probability = goal;
4303   /* If there are other edges out of e->src, redistribute probability
4304 there. */
4305 else if (prob_sum > profile_probability::never ())
4306 {
4307 if (!(e->probability < goal))
4308 e->probability = goal;
4309
4310 profile_probability prob_comp = prob_sum / e->probability.invert ();
4311
4312 if (dump_file && (dump_flags & TDF_DETAILS))
4313 fprintf (dump_file, "Making edge %i->%i %s by redistributing "
4314 "probability to other edges.\n",
4315 e->src->index, e->dest->index,
4316 impossible ? "impossible" : "cold");
4317 FOR_EACH_EDGE (e2, ei, e->src->succs)
4318 if (e2 != e)
4319 {
4320 e2->probability /= prob_comp;
4321 }
4322 if (current_ir_type () != IR_GIMPLE
4323 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
4324 update_br_prob_note (e->src);
4325 }
4326 /* If all edges out of e->src are unlikely, the basic block itself
4327 is unlikely. */
4328 else
4329 {
4330 if (prob_sum == profile_probability::never ())
4331 e->probability = profile_probability::always ();
4332 else
4333 {
4334 if (impossible)
4335 e->probability = profile_probability::never ();
4336 	  /* If BB has some edges out that are not impossible, we cannot
4337 	     assume that BB itself is impossible.  */
4338 impossible = false;
4339 }
4340 if (current_ir_type () != IR_GIMPLE
4341 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
4342 update_br_prob_note (e->src);
4343 if (e->src->count == profile_count::zero ())
4344 return;
4345 if (count_sum == profile_count::zero () && impossible)
4346 {
4347 bool found = false;
4348 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
4349 ;
4350 else if (current_ir_type () == IR_GIMPLE)
4351 for (gimple_stmt_iterator gsi = gsi_start_bb (e->src);
4352 !gsi_end_p (gsi); gsi_next (&gsi))
4353 {
4354 if (stmt_can_terminate_bb_p (gsi_stmt (gsi)))
4355 {
4356 found = true;
4357 break;
4358 }
4359 }
4360 /* FIXME: Implement RTL path. */
4361 else
4362 found = true;
4363 if (!found)
4364 {
4365 if (dump_file && (dump_flags & TDF_DETAILS))
4366 fprintf (dump_file,
4367 "Making bb %i impossible and dropping count to 0.\n",
4368 e->src->index);
4369 e->src->count = profile_count::zero ();
4370 FOR_EACH_EDGE (e2, ei, e->src->preds)
4371 force_edge_cold (e2, impossible);
4372 return;
4373 }
4374 }
4375
4376       /* If we did not adjust anything, the source basic block has no likely
4377	   edges leaving in the other direction.  In that case force that bb
4378	   cold, too.  This is in general a difficult task to do, but handle the
4379	   special case when BB has only one predecessor.  This is the common
4380	   case when we are updating the profile after loop transforms.  */
4381 if (!(prob_sum > profile_probability::never ())
4382 && count_sum == profile_count::zero ()
4383 && single_pred_p (e->src) && e->src->count.to_frequency (cfun)
4384 > (impossible ? 0 : 1))
4385 {
4386 int old_frequency = e->src->count.to_frequency (cfun);
4387 if (dump_file && (dump_flags & TDF_DETAILS))
4388 fprintf (dump_file, "Making bb %i %s.\n", e->src->index,
4389 impossible ? "impossible" : "cold");
4390 int new_frequency = MIN (e->src->count.to_frequency (cfun),
4391 impossible ? 0 : 1);
4392 if (impossible)
4393 e->src->count = profile_count::zero ();
4394 else
4395 e->src->count = e->count ().apply_scale (new_frequency,
4396 old_frequency);
4397 force_edge_cold (single_pred_edge (e->src), impossible);
4398 }
4399 else if (dump_file && (dump_flags & TDF_DETAILS)
4400 && maybe_hot_bb_p (cfun, e->src))
4401 fprintf (dump_file, "Giving up on making bb %i %s.\n", e->src->index,
4402 impossible ? "impossible" : "cold");
4403 }
4404 }
4405
4406 #if CHECKING_P
4407
4408 namespace selftest {
4409
4410 /* Test that the hitrate values of the predictors defined in predict.def are
4411    within the range [50, 100].  */
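/* For instance, a predictor whose HITRATE is 9000 out of REG_BR_PROB_BASE
   (10000) maps to p = 100 * 9000 / 10000 = 90, which satisfies the
   assertion below.  */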
4412
4413 struct branch_predictor
4414 {
4415 const char *name;
4416 int probability;
4417 };
4418
4419 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) { NAME, HITRATE },
4420
4421 static void
4422 test_prediction_value_range ()
4423 {
4424 branch_predictor predictors[] = {
4425 #include "predict.def"
4426 { NULL, PROB_UNINITIALIZED }
4427 };
4428
4429 for (unsigned i = 0; predictors[i].name != NULL; i++)
4430 {
4431 if (predictors[i].probability == PROB_UNINITIALIZED)
4432 continue;
4433
4434 unsigned p = 100 * predictors[i].probability / REG_BR_PROB_BASE;
4435 ASSERT_TRUE (p >= 50 && p <= 100);
4436 }
4437 }
4438
4439 #undef DEF_PREDICTOR
4440
4441 /* Run all of the selftests within this file.  */
4442
4443 void
4444 predict_c_tests ()
4445 {
4446 test_prediction_value_range ();
4447 }
4448
4449 } // namespace selftest
4450 #endif /* CHECKING_P. */