predict.c (predict_paths_leading_to, [...]): Add in_loop parameter.
1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* References:
21
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
28
29
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "backend.h"
34 #include "rtl.h"
35 #include "tree.h"
36 #include "gimple.h"
37 #include "cfghooks.h"
38 #include "tree-pass.h"
39 #include "ssa.h"
40 #include "emit-rtl.h"
41 #include "cgraph.h"
42 #include "coverage.h"
43 #include "diagnostic-core.h"
44 #include "gimple-predict.h"
45 #include "fold-const.h"
46 #include "calls.h"
47 #include "cfganal.h"
48 #include "profile.h"
49 #include "sreal.h"
50 #include "params.h"
51 #include "cfgloop.h"
52 #include "gimple-iterator.h"
53 #include "tree-cfg.h"
54 #include "tree-ssa-loop-niter.h"
55 #include "tree-ssa-loop.h"
56 #include "tree-scalar-evolution.h"
57 #include "ipa-utils.h"
58
59 /* Enum with reasons why a predictor is ignored. */
60
61 enum predictor_reason
62 {
63 REASON_NONE,
64 REASON_IGNORED,
65 REASON_SINGLE_EDGE_DUPLICATE,
66 REASON_EDGE_PAIR_DUPLICATE
67 };
68
69 /* String messages for the aforementioned enum. */
70
71 static const char *reason_messages[] = {"", " (ignored)",
72 " (single edge duplicate)", " (edge pair duplicate)"};
73
74 /* real constants: 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
75 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */
76 static sreal real_almost_one, real_br_prob_base,
77 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
78
79 static void combine_predictions_for_insn (rtx_insn *, basic_block);
80 static void dump_prediction (FILE *, enum br_predictor, int, basic_block,
81 enum predictor_reason, edge);
82 static void predict_paths_leading_to (basic_block, enum br_predictor,
83 enum prediction,
84 struct loop *in_loop = NULL);
85 static void predict_paths_leading_to_edge (edge, enum br_predictor,
86 enum prediction,
87 struct loop *in_loop = NULL);
88 static bool can_predict_insn_p (const rtx_insn *);
89
90 /* Information we hold about each branch predictor.
91 Filled using information from predict.def. */
92
93 struct predictor_info
94 {
95 const char *const name; /* Name used in the debugging dumps. */
96 const int hitrate; /* Expected hitrate used by
97 predict_insn_def call. */
98 const int flags;
99 };
100
101 /* Use given predictor without Dempster-Shafer theory if it matches
102 using first_match heuristics. */
103 #define PRED_FLAG_FIRST_MATCH 1
104
105 /* Convert a hitrate given in percent into our internal representation. */
106
107 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
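/* For example, assuming REG_BR_PROB_BASE is 10000 (its usual value),
   HITRATE (50) yields 5000 and HITRATE (99) yields 9900; the "+ 50"
   term rounds the division by 100 to the nearest unit of the internal
   scale.  */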
108
109 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
110 static const struct predictor_info predictor_info[]= {
111 #include "predict.def"
112
113 /* Upper bound on predictors. */
114 {NULL, 0, 0}
115 };
116 #undef DEF_PREDICTOR
117
118 /* Return TRUE if frequency FREQ is considered to be hot. */
119
120 static inline bool
121 maybe_hot_frequency_p (struct function *fun, int freq)
122 {
123 struct cgraph_node *node = cgraph_node::get (fun->decl);
124 if (!profile_info
125 || !opt_for_fn (fun->decl, flag_branch_probabilities))
126 {
127 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
128 return false;
129 if (node->frequency == NODE_FREQUENCY_HOT)
130 return true;
131 }
132 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
133 return true;
134 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
135 && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
136 return false;
137 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
138 return false;
139 if (freq * PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)
140 < ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency)
141 return false;
142 return true;
143 }
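/* For illustration: assuming the default --param hot-bb-frequency-fraction
   of 1000, the final test above considers a block hot only when its
   frequency is at least 1/1000 of the entry block frequency.  */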
144
145 static gcov_type min_count = -1;
146
147 /* Determine the threshold for hot BB counts. */
148
149 gcov_type
150 get_hot_bb_threshold ()
151 {
152 gcov_working_set_t *ws;
153 if (min_count == -1)
154 {
155 ws = find_working_set (PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE));
156 gcc_assert (ws);
157 min_count = ws->min_counter;
158 }
159 return min_count;
160 }
161
162 /* Set the threshold for hot BB counts. */
163
164 void
165 set_hot_bb_threshold (gcov_type min)
166 {
167 min_count = min;
168 }
169
170 /* Return TRUE if profile count COUNT is considered to be hot. */
171
172 bool
173 maybe_hot_count_p (struct function *fun, gcov_type count)
174 {
175 if (fun && profile_status_for_fn (fun) != PROFILE_READ)
176 return true;
177 /* Code executed at most once is not hot. */
178 if (profile_info->runs >= count)
179 return false;
180 return (count >= get_hot_bb_threshold ());
181 }
182
183 /* Return true in case BB can be CPU intensive and should be optimized
184 for maximal performance. */
185
186 bool
187 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
188 {
189 gcc_checking_assert (fun);
190 if (profile_status_for_fn (fun) == PROFILE_READ)
191 return maybe_hot_count_p (fun, bb->count);
192 return maybe_hot_frequency_p (fun, bb->frequency);
193 }
194
195 /* Return true in case edge E can be CPU intensive and should be optimized
196 for maximal performance. */
197
198 bool
199 maybe_hot_edge_p (edge e)
200 {
201 if (profile_status_for_fn (cfun) == PROFILE_READ)
202 return maybe_hot_count_p (cfun, e->count);
203 return maybe_hot_frequency_p (cfun, EDGE_FREQUENCY (e));
204 }
205
206 /* Return true if profile COUNT and FREQUENCY, or function FUN's static
207 node frequency, reflect never being executed. */
208
209 static bool
210 probably_never_executed (struct function *fun,
211 gcov_type count, int frequency)
212 {
213 gcc_checking_assert (fun);
214 if (profile_status_for_fn (fun) == PROFILE_READ)
215 {
216 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
217 if (count * unlikely_count_fraction >= profile_info->runs)
218 return false;
219 if (!frequency)
220 return true;
221 if (!ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency)
222 return false;
223 if (ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
224 {
225 gcov_type computed_count;
226 /* Check for possibility of overflow, in which case entry bb count
227 is large enough to do the division first without losing much
228 precision. */
229 if (ENTRY_BLOCK_PTR_FOR_FN (fun)->count < REG_BR_PROB_BASE *
230 REG_BR_PROB_BASE)
231 {
232 gcov_type scaled_count
233 = frequency * ENTRY_BLOCK_PTR_FOR_FN (fun)->count *
234 unlikely_count_fraction;
235 computed_count = RDIV (scaled_count,
236 ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency);
237 }
238 else
239 {
240 computed_count = RDIV (ENTRY_BLOCK_PTR_FOR_FN (fun)->count,
241 ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency);
242 computed_count *= frequency * unlikely_count_fraction;
243 }
244 if (computed_count >= profile_info->runs)
245 return false;
246 }
247 return true;
248 }
249 if ((!profile_info || !(opt_for_fn (fun->decl, flag_branch_probabilities)))
250 && (cgraph_node::get (fun->decl)->frequency
251 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
252 return true;
253 return false;
254 }
255
256
257 /* Return true in case BB is probably never executed. */
258
259 bool
260 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
261 {
262 return probably_never_executed (fun, bb->count, bb->frequency);
263 }
264
265
266 /* Return true in case edge E is probably never executed. */
267
268 bool
269 probably_never_executed_edge_p (struct function *fun, edge e)
270 {
271 return probably_never_executed (fun, e->count, EDGE_FREQUENCY (e));
272 }
273
274 /* Return true when function FUN should always be optimized for size. */
275
276 bool
277 optimize_function_for_size_p (struct function *fun)
278 {
279 if (!fun || !fun->decl)
280 return optimize_size;
281 cgraph_node *n = cgraph_node::get (fun->decl);
282 return n && n->optimize_for_size_p ();
283 }
284
285 /* Return true when function FUN should always be optimized for speed. */
286
287 bool
288 optimize_function_for_speed_p (struct function *fun)
289 {
290 return !optimize_function_for_size_p (fun);
291 }
292
293 /* Return the optimization type that should be used for the function FUN. */
294
295 optimization_type
296 function_optimization_type (struct function *fun)
297 {
298 return (optimize_function_for_speed_p (fun)
299 ? OPTIMIZE_FOR_SPEED
300 : OPTIMIZE_FOR_SIZE);
301 }
302
303 /* Return TRUE when BB should be optimized for size. */
304
305 bool
306 optimize_bb_for_size_p (const_basic_block bb)
307 {
308 return (optimize_function_for_size_p (cfun)
309 || (bb && !maybe_hot_bb_p (cfun, bb)));
310 }
311
312 /* Return TRUE when BB should be optimized for speed. */
313
314 bool
315 optimize_bb_for_speed_p (const_basic_block bb)
316 {
317 return !optimize_bb_for_size_p (bb);
318 }
319
320 /* Return the optimization type that should be used for block BB. */
321
322 optimization_type
323 bb_optimization_type (const_basic_block bb)
324 {
325 return (optimize_bb_for_speed_p (bb)
326 ? OPTIMIZE_FOR_SPEED
327 : OPTIMIZE_FOR_SIZE);
328 }
329
330 /* Return TRUE when edge E should be optimized for size. */
331
332 bool
333 optimize_edge_for_size_p (edge e)
334 {
335 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
336 }
337
338 /* Return TRUE when edge E should be optimized for speed. */
339
340 bool
341 optimize_edge_for_speed_p (edge e)
342 {
343 return !optimize_edge_for_size_p (e);
344 }
345
346 /* Return TRUE when the current instruction should be optimized for size. */
347
348 bool
349 optimize_insn_for_size_p (void)
350 {
351 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
352 }
353
354 /* Return TRUE when the current instruction should be optimized for speed. */
355
356 bool
357 optimize_insn_for_speed_p (void)
358 {
359 return !optimize_insn_for_size_p ();
360 }
361
362 /* Return TRUE when LOOP should be optimized for size. */
363
364 bool
365 optimize_loop_for_size_p (struct loop *loop)
366 {
367 return optimize_bb_for_size_p (loop->header);
368 }
369
370 /* Return TRUE when LOOP should be optimized for speed. */
371
372 bool
373 optimize_loop_for_speed_p (struct loop *loop)
374 {
375 return optimize_bb_for_speed_p (loop->header);
376 }
377
378 /* Return TRUE when LOOP nest should be optimized for speed. */
379
380 bool
381 optimize_loop_nest_for_speed_p (struct loop *loop)
382 {
383 struct loop *l = loop;
384 if (optimize_loop_for_speed_p (loop))
385 return true;
386 l = loop->inner;
387 while (l && l != loop)
388 {
389 if (optimize_loop_for_speed_p (l))
390 return true;
391 if (l->inner)
392 l = l->inner;
393 else if (l->next)
394 l = l->next;
395 else
396 {
397 while (l != loop && !l->next)
398 l = loop_outer (l);
399 if (l != loop)
400 l = l->next;
401 }
402 }
403 return false;
404 }
405
406 /* Return TRUE when LOOP nest should be optimized for size. */
407
408 bool
409 optimize_loop_nest_for_size_p (struct loop *loop)
410 {
411 return !optimize_loop_nest_for_speed_p (loop);
412 }
413
414 /* Return true when edge E is likely to be predicted well by a branch
415 predictor. */
416
417 bool
418 predictable_edge_p (edge e)
419 {
420 if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
421 return false;
422 if ((e->probability
423 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
424 || (REG_BR_PROB_BASE - e->probability
425 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
426 return true;
427 return false;
428 }
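/* For illustration: assuming the default --param predictable-branch-outcome
   of 2, an edge counts as well predictable when its probability is at most
   2% or at least 98%.  */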
429
430
431 /* Set RTL expansion for BB profile. */
432
433 void
434 rtl_profile_for_bb (basic_block bb)
435 {
436 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
437 }
438
439 /* Set RTL expansion for edge profile. */
440
441 void
442 rtl_profile_for_edge (edge e)
443 {
444 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
445 }
446
447 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
448 void
449 default_rtl_profile (void)
450 {
451 crtl->maybe_hot_insn_p = true;
452 }
453
454 /* Return true if one of the outgoing edges of BB is already predicted by
455 PREDICTOR. */
456
457 bool
458 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
459 {
460 rtx note;
461 if (!INSN_P (BB_END (bb)))
462 return false;
463 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
464 if (REG_NOTE_KIND (note) == REG_BR_PRED
465 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
466 return true;
467 return false;
468 }
469
470 /* Structure representing predictions on the tree level. */
471
472 struct edge_prediction {
473 struct edge_prediction *ep_next;
474 edge ep_edge;
475 enum br_predictor ep_predictor;
476 int ep_probability;
477 };
478
479 /* This map contains, for each basic block, the list of predictions for its
480 outgoing edges. */
481
482 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
483
484 /* Return true if one of the outgoing edges of BB is already predicted by
485 PREDICTOR. */
486
487 bool
488 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
489 {
490 struct edge_prediction *i;
491 edge_prediction **preds = bb_predictions->get (bb);
492
493 if (!preds)
494 return false;
495
496 for (i = *preds; i; i = i->ep_next)
497 if (i->ep_predictor == predictor)
498 return true;
499 return false;
500 }
501
502 /* Return true if edge E is already predicted by PREDICTOR
503 with outcome TAKEN. */
504
505 bool
506 edge_predicted_by_p (edge e, enum br_predictor predictor, bool taken)
507 {
508 struct edge_prediction *i;
509 basic_block bb = e->src;
510 edge_prediction **preds = bb_predictions->get (bb);
511 if (!preds)
512 return false;
513
514 int probability = predictor_info[(int) predictor].hitrate;
515
516 if (taken != TAKEN)
517 probability = REG_BR_PROB_BASE - probability;
518
519 for (i = *preds; i; i = i->ep_next)
520 if (i->ep_predictor == predictor
521 && i->ep_edge == e
522 && i->ep_probability == probability)
523 return true;
524 return false;
525 }
526
527 /* Return true when the probability of an edge is reliable.
528
529 The profile guessing code is good at predicting the branch outcome (i.e.
530 taken/not taken); it gets that right slightly over 75% of the time.
531 It is, however, notoriously poor at predicting the probability itself.
532 In general the guessed profile appears a lot flatter (with probabilities
533 closer to 50%) than reality, so it is a bad idea to use it to drive
534 optimizations such as disabling dynamic branch prediction for well
535 predictable branches.
536
537 There are two exceptions - edges leading to noreturn calls and edges
538 predicted by the number-of-iterations heuristics are predicted well. This
539 predicate should be able to distinguish those, but at the moment it simply
540 checks for the noreturn heuristic, which is the only one giving a probability
541 over 99% or below 1%. In the future we might want to propagate reliability
542 information across the CFG if we find it useful in multiple places. */
543 static bool
544 probability_reliable_p (int prob)
545 {
546 return (profile_status_for_fn (cfun) == PROFILE_READ
547 || (profile_status_for_fn (cfun) == PROFILE_GUESSED
548 && (prob <= HITRATE (1) || prob >= HITRATE (99))));
549 }
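/* With REG_BR_PROB_BASE of 10000 the guessed-profile thresholds above are
   HITRATE (1) == 100 and HITRATE (99) == 9900, so only probabilities outside
   the 1%-99% range are trusted for a guessed profile.  */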
550
551 /* Same predicate as above, working on edges. */
552 bool
553 edge_probability_reliable_p (const_edge e)
554 {
555 return probability_reliable_p (e->probability);
556 }
557
558 /* Same predicate as edge_probability_reliable_p, working on notes. */
559 bool
560 br_prob_note_reliable_p (const_rtx note)
561 {
562 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
563 return probability_reliable_p (XINT (note, 0));
564 }
565
566 static void
567 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
568 {
569 gcc_assert (any_condjump_p (insn));
570 if (!flag_guess_branch_prob)
571 return;
572
573 add_reg_note (insn, REG_BR_PRED,
574 gen_rtx_CONCAT (VOIDmode,
575 GEN_INT ((int) predictor),
576 GEN_INT ((int) probability)));
577 }
578
579 /* Predict insn by given predictor. */
580
581 void
582 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
583 enum prediction taken)
584 {
585 int probability = predictor_info[(int) predictor].hitrate;
586
587 if (taken != TAKEN)
588 probability = REG_BR_PROB_BASE - probability;
589
590 predict_insn (insn, predictor, probability);
591 }
592
593 /* Predict edge E with given probability if possible. */
594
595 void
596 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
597 {
598 rtx_insn *last_insn;
599 last_insn = BB_END (e->src);
600
601 /* We can store the branch prediction information only about
602 conditional jumps. */
603 if (!any_condjump_p (last_insn))
604 return;
605
606 /* We always store probability of branching. */
607 if (e->flags & EDGE_FALLTHRU)
608 probability = REG_BR_PROB_BASE - probability;
609
610 predict_insn (last_insn, predictor, probability);
611 }
612
613 /* Predict edge E with the given PROBABILITY. */
614 void
615 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
616 {
617 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
618 && EDGE_COUNT (e->src->succs) > 1
619 && flag_guess_branch_prob
620 && optimize)
621 {
622 struct edge_prediction *i = XNEW (struct edge_prediction);
623 edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
624
625 i->ep_next = preds;
626 preds = i;
627 i->ep_probability = probability;
628 i->ep_predictor = predictor;
629 i->ep_edge = e;
630 }
631 }
632
633 /* Filter edge predictions PREDS by a function FILTER. DATA is passed
634 to the filter function. */
635
636 void
637 filter_predictions (edge_prediction **preds,
638 bool (*filter) (edge_prediction *, void *), void *data)
639 {
640 if (!bb_predictions)
641 return;
642
643 if (preds)
644 {
645 struct edge_prediction **prediction = preds;
646 struct edge_prediction *next;
647
648 while (*prediction)
649 {
650 if ((*filter) (*prediction, data))
651 prediction = &((*prediction)->ep_next);
652 else
653 {
654 next = (*prediction)->ep_next;
655 free (*prediction);
656 *prediction = next;
657 }
658 }
659 }
660 }
661
662 /* Filter function predicate that returns true for an edge prediction P
663 if its edge is equal to DATA. */
664
665 bool
666 equal_edge_p (edge_prediction *p, void *data)
667 {
668 return p->ep_edge == (edge)data;
669 }
670
671 /* Remove all predictions on given basic block that are attached
672 to edge E. */
673 void
674 remove_predictions_associated_with_edge (edge e)
675 {
676 if (!bb_predictions)
677 return;
678
679 edge_prediction **preds = bb_predictions->get (e->src);
680 filter_predictions (preds, equal_edge_p, e);
681 }
682
683 /* Clears the list of predictions stored for BB. */
684
685 static void
686 clear_bb_predictions (basic_block bb)
687 {
688 edge_prediction **preds = bb_predictions->get (bb);
689 struct edge_prediction *pred, *next;
690
691 if (!preds)
692 return;
693
694 for (pred = *preds; pred; pred = next)
695 {
696 next = pred->ep_next;
697 free (pred);
698 }
699 *preds = NULL;
700 }
701
702 /* Return true when we can store a prediction on insn INSN.
703 At the moment we represent predictions only on conditional
704 jumps, not on computed jumps or other complicated cases. */
705 static bool
706 can_predict_insn_p (const rtx_insn *insn)
707 {
708 return (JUMP_P (insn)
709 && any_condjump_p (insn)
710 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
711 }
712
713 /* Predict edge E by given predictor if possible. */
714
715 void
716 predict_edge_def (edge e, enum br_predictor predictor,
717 enum prediction taken)
718 {
719 int probability = predictor_info[(int) predictor].hitrate;
720
721 if (taken != TAKEN)
722 probability = REG_BR_PROB_BASE - probability;
723
724 predict_edge (e, predictor, probability);
725 }
726
727 /* Invert all branch predictions or probability notes in the INSN. This needs
728 to be done each time we invert the condition used by the jump. */
729
730 void
731 invert_br_probabilities (rtx insn)
732 {
733 rtx note;
734
735 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
736 if (REG_NOTE_KIND (note) == REG_BR_PROB)
737 XINT (note, 0) = REG_BR_PROB_BASE - XINT (note, 0);
738 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
739 XEXP (XEXP (note, 0), 1)
740 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
741 }
742
743 /* Dump information about the branch prediction to the output file. */
744
745 static void
746 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
747 basic_block bb, enum predictor_reason reason = REASON_NONE,
748 edge ep_edge = NULL)
749 {
750 edge e = ep_edge;
751 edge_iterator ei;
752
753 if (!file)
754 return;
755
756 if (e == NULL)
757 FOR_EACH_EDGE (e, ei, bb->succs)
758 if (! (e->flags & EDGE_FALLTHRU))
759 break;
760
761 char edge_info_str[128];
762 if (ep_edge)
763 sprintf (edge_info_str, " of edge %d->%d", ep_edge->src->index,
764 ep_edge->dest->index);
765 else
766 edge_info_str[0] = '\0';
767
768 fprintf (file, " %s heuristics%s%s: %.1f%%",
769 predictor_info[predictor].name,
770 edge_info_str, reason_messages[reason],
771 probability * 100.0 / REG_BR_PROB_BASE);
772
773 if (bb->count)
774 {
775 fprintf (file, " exec %" PRId64, bb->count);
776 if (e)
777 {
778 fprintf (file, " hit %" PRId64, e->count);
779 fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count);
780 }
781 }
782
783 fprintf (file, "\n");
784 }
785
786 /* We cannot predict the probabilities of the outgoing edges of BB. Set them
787 evenly and hope for the best. */
788 static void
789 set_even_probabilities (basic_block bb)
790 {
791 int nedges = 0;
792 edge e;
793 edge_iterator ei;
794
795 FOR_EACH_EDGE (e, ei, bb->succs)
796 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
797 nedges ++;
798 FOR_EACH_EDGE (e, ei, bb->succs)
799 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
800 e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
801 else
802 e->probability = 0;
803 }
804
805 /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB
806 note if not already present. Remove now useless REG_BR_PRED notes. */
807
808 static void
809 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
810 {
811 rtx prob_note;
812 rtx *pnote;
813 rtx note;
814 int best_probability = PROB_EVEN;
815 enum br_predictor best_predictor = END_PREDICTORS;
816 int combined_probability = REG_BR_PROB_BASE / 2;
817 int d;
818 bool first_match = false;
819 bool found = false;
820
821 if (!can_predict_insn_p (insn))
822 {
823 set_even_probabilities (bb);
824 return;
825 }
826
827 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
828 pnote = &REG_NOTES (insn);
829 if (dump_file)
830 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
831 bb->index);
832
833 /* We implement "first match" heuristics and use the probability guessed
834 by the predictor with the smallest index. */
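/* The Dempster-Shafer combination below starts from 1/2 and folds in each
   prediction P as
     combined = combined * P / (combined * P + (1 - combined) * (1 - P)).
   For instance, combining guesses of 70% and 80% gives
   0.56 / (0.56 + 0.06), roughly 90%, so agreeing predictors reinforce each
   other while disagreeing ones pull the result back towards 1/2.  */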
835 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
836 if (REG_NOTE_KIND (note) == REG_BR_PRED)
837 {
838 enum br_predictor predictor = ((enum br_predictor)
839 INTVAL (XEXP (XEXP (note, 0), 0)));
840 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
841
842 found = true;
843 if (best_predictor > predictor
844 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
845 best_probability = probability, best_predictor = predictor;
846
847 d = (combined_probability * probability
848 + (REG_BR_PROB_BASE - combined_probability)
849 * (REG_BR_PROB_BASE - probability));
850
851 /* Use FP math to avoid overflows of 32-bit integers. */
852 if (d == 0)
853 /* If one probability is 0% and one 100%, avoid division by zero. */
854 combined_probability = REG_BR_PROB_BASE / 2;
855 else
856 combined_probability = (((double) combined_probability) * probability
857 * REG_BR_PROB_BASE / d + 0.5);
858 }
859
860 /* Decide which heuristic to use. In case we didn't match anything,
861 use the no_prediction heuristic; in case we did match, use either
862 first match or Dempster-Shafer theory depending on the flags. */
863
864 if (best_predictor != END_PREDICTORS)
865 first_match = true;
866
867 if (!found)
868 dump_prediction (dump_file, PRED_NO_PREDICTION,
869 combined_probability, bb);
870 else
871 {
872 if (!first_match)
873 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
874 bb, !first_match ? REASON_NONE : REASON_IGNORED);
875 else
876 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
877 bb, first_match ? REASON_NONE : REASON_IGNORED);
878 }
879
880 if (first_match)
881 combined_probability = best_probability;
882 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
883
884 while (*pnote)
885 {
886 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
887 {
888 enum br_predictor predictor = ((enum br_predictor)
889 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
890 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
891
892 dump_prediction (dump_file, predictor, probability, bb,
893 (!first_match || best_predictor == predictor)
894 ? REASON_NONE : REASON_IGNORED);
895 *pnote = XEXP (*pnote, 1);
896 }
897 else
898 pnote = &XEXP (*pnote, 1);
899 }
900
901 if (!prob_note)
902 {
903 add_int_reg_note (insn, REG_BR_PROB, combined_probability);
904
905 /* Save the prediction into the CFG in case we are seeing a non-degenerate
906 conditional jump. */
907 if (!single_succ_p (bb))
908 {
909 BRANCH_EDGE (bb)->probability = combined_probability;
910 FALLTHRU_EDGE (bb)->probability
911 = REG_BR_PROB_BASE - combined_probability;
912 }
913 }
914 else if (!single_succ_p (bb))
915 {
916 int prob = XINT (prob_note, 0);
917
918 BRANCH_EDGE (bb)->probability = prob;
919 FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
920 }
921 else
922 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
923 }
924
925 /* Edge prediction hash traits. */
926
927 struct predictor_hash: pointer_hash <edge_prediction>
928 {
929
930 static inline hashval_t hash (const edge_prediction *);
931 static inline bool equal (const edge_prediction *, const edge_prediction *);
932 };
933
934 /* Calculate hash value of an edge prediction P based on predictor and
935 normalized probability. */
936
937 inline hashval_t
938 predictor_hash::hash (const edge_prediction *p)
939 {
940 inchash::hash hstate;
941 hstate.add_int (p->ep_predictor);
942
943 int prob = p->ep_probability;
944 if (prob > REG_BR_PROB_BASE / 2)
945 prob = REG_BR_PROB_BASE - prob;
946
947 hstate.add_int (prob);
948
949 return hstate.end ();
950 }
951
952 /* Return true when edge predictions P1 and P2 use the same predictor and
953 have equal (or opposite) probability. */
954
955 inline bool
956 predictor_hash::equal (const edge_prediction *p1, const edge_prediction *p2)
957 {
958 return (p1->ep_predictor == p2->ep_predictor
959 && (p1->ep_probability == p2->ep_probability
960 || p1->ep_probability == REG_BR_PROB_BASE - p2->ep_probability));
961 }
962
963 struct predictor_hash_traits: predictor_hash,
964 typed_noop_remove <edge_prediction *> {};
965
966 /* Return true if edge prediction P is not in DATA hash set. */
967
968 static bool
969 not_removed_prediction_p (edge_prediction *p, void *data)
970 {
971 hash_set<edge_prediction *> *remove = (hash_set<edge_prediction *> *) data;
972 return !remove->contains (p);
973 }
974
975 /* Prune predictions for a basic block BB. Currently we do the following
976 clean-up steps:
977
978 1) remove a pair of predictions that guess the same probability
979 (other than 1/2) for both outgoing edges
980 2) remove duplicates of a prediction that assigns the same probability
981 to a single edge
982
983 */
984
985 static void
986 prune_predictions_for_bb (basic_block bb)
987 {
988 edge_prediction **preds = bb_predictions->get (bb);
989
990 if (preds)
991 {
992 hash_table <predictor_hash_traits> s (13);
993 hash_set <edge_prediction *> remove;
994
995 /* Step 1: identify predictors that should be removed. */
996 for (edge_prediction *pred = *preds; pred; pred = pred->ep_next)
997 {
998 edge_prediction *existing = s.find (pred);
999 if (existing)
1000 {
1001 if (pred->ep_edge == existing->ep_edge
1002 && pred->ep_probability == existing->ep_probability)
1003 {
1004 /* Remove a duplicate predictor. */
1005 dump_prediction (dump_file, pred->ep_predictor,
1006 pred->ep_probability, bb,
1007 REASON_SINGLE_EDGE_DUPLICATE, pred->ep_edge);
1008
1009 remove.add (pred);
1010 }
1011 else if (pred->ep_edge != existing->ep_edge
1012 && pred->ep_probability == existing->ep_probability
1013 && pred->ep_probability != REG_BR_PROB_BASE / 2)
1014 {
1015 /* Remove both predictors as they predict the same
1016 for both edges. */
1017 dump_prediction (dump_file, existing->ep_predictor,
1018 pred->ep_probability, bb,
1019 REASON_EDGE_PAIR_DUPLICATE,
1020 existing->ep_edge);
1021 dump_prediction (dump_file, pred->ep_predictor,
1022 pred->ep_probability, bb,
1023 REASON_EDGE_PAIR_DUPLICATE,
1024 pred->ep_edge);
1025
1026 remove.add (existing);
1027 remove.add (pred);
1028 }
1029 }
1030
1031 edge_prediction **slot2 = s.find_slot (pred, INSERT);
1032 *slot2 = pred;
1033 }
1034
1035 /* Step 2: Remove predictors. */
1036 filter_predictions (preds, not_removed_prediction_p, &remove);
1037 }
1038 }
1039
1040 /* Combine predictions into a single probability and store them into the CFG.
1041 Remove now-useless prediction entries.
1042 If DRY_RUN is set, only produce dumps and do not modify the profile. */
1043
1044 static void
1045 combine_predictions_for_bb (basic_block bb, bool dry_run)
1046 {
1047 int best_probability = PROB_EVEN;
1048 enum br_predictor best_predictor = END_PREDICTORS;
1049 int combined_probability = REG_BR_PROB_BASE / 2;
1050 int d;
1051 bool first_match = false;
1052 bool found = false;
1053 struct edge_prediction *pred;
1054 int nedges = 0;
1055 edge e, first = NULL, second = NULL;
1056 edge_iterator ei;
1057
1058 FOR_EACH_EDGE (e, ei, bb->succs)
1059 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
1060 {
1061 nedges ++;
1062 if (first && !second)
1063 second = e;
1064 if (!first)
1065 first = e;
1066 }
1067
1068 /* When there is no successor or only one choice, prediction is easy.
1069
1070 We are lazy for now and predict only basic blocks with two outgoing
1071 edges. It is possible to predict the generic case too, but we have to
1072 ignore first-match heuristics and do more involved combining. Implement
1073 this later. */
1074 if (nedges != 2)
1075 {
1076 if (!bb->count && !dry_run)
1077 set_even_probabilities (bb);
1078 clear_bb_predictions (bb);
1079 if (dump_file)
1080 fprintf (dump_file, "%i edges in bb %i predicted to even probabilities\n",
1081 nedges, bb->index);
1082 return;
1083 }
1084
1085 if (dump_file)
1086 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
1087
1088 prune_predictions_for_bb (bb);
1089
1090 edge_prediction **preds = bb_predictions->get (bb);
1091
1092 if (preds)
1093 {
1094 /* We implement "first match" heuristics and use the probability guessed
1095 by the predictor with the smallest index. */
1096 for (pred = *preds; pred; pred = pred->ep_next)
1097 {
1098 enum br_predictor predictor = pred->ep_predictor;
1099 int probability = pred->ep_probability;
1100
1101 if (pred->ep_edge != first)
1102 probability = REG_BR_PROB_BASE - probability;
1103
1104 found = true;
1105 /* First match heuristics would be wildly confused if we predicted
1106 both directions. */
1107 if (best_predictor > predictor
1108 && predictor_info[predictor].flags & PRED_FLAG_FIRST_MATCH)
1109 {
1110 struct edge_prediction *pred2;
1111 int prob = probability;
1112
1113 for (pred2 = (struct edge_prediction *) *preds;
1114 pred2; pred2 = pred2->ep_next)
1115 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
1116 {
1117 int probability2 = pred2->ep_probability;
1118
1119 if (pred2->ep_edge != first)
1120 probability2 = REG_BR_PROB_BASE - probability2;
1121
1122 if ((probability < REG_BR_PROB_BASE / 2) !=
1123 (probability2 < REG_BR_PROB_BASE / 2))
1124 break;
1125
1126 /* If the same predictor later gave a better result, go for it! */
1127 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
1128 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
1129 prob = probability2;
1130 }
1131 if (!pred2)
1132 best_probability = prob, best_predictor = predictor;
1133 }
1134
1135 d = (combined_probability * probability
1136 + (REG_BR_PROB_BASE - combined_probability)
1137 * (REG_BR_PROB_BASE - probability));
1138
1139 /* Use FP math to avoid overflows of 32-bit integers. */
1140 if (d == 0)
1141 /* If one probability is 0% and one 100%, avoid division by zero. */
1142 combined_probability = REG_BR_PROB_BASE / 2;
1143 else
1144 combined_probability = (((double) combined_probability)
1145 * probability
1146 * REG_BR_PROB_BASE / d + 0.5);
1147 }
1148 }
1149
1150 /* Decide which heuristic to use. In case we didn't match anything,
1151 use the no_prediction heuristic; in case we did match, use either
1152 first match or Dempster-Shafer theory depending on the flags. */
1153
1154 if (best_predictor != END_PREDICTORS)
1155 first_match = true;
1156
1157 if (!found)
1158 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb);
1159 else
1160 {
1161 if (!first_match)
1162 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
1163 !first_match ? REASON_NONE : REASON_IGNORED);
1164 else
1165 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
1166 first_match ? REASON_NONE : REASON_IGNORED);
1167 }
1168
1169 if (first_match)
1170 combined_probability = best_probability;
1171 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb);
1172
1173 if (preds)
1174 {
1175 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
1176 {
1177 enum br_predictor predictor = pred->ep_predictor;
1178 int probability = pred->ep_probability;
1179
1180 dump_prediction (dump_file, predictor, probability, bb,
1181 (!first_match || best_predictor == predictor)
1182 ? REASON_NONE : REASON_IGNORED, pred->ep_edge);
1183 }
1184 }
1185 clear_bb_predictions (bb);
1186
1187 if (!bb->count && !dry_run)
1188 {
1189 first->probability = combined_probability;
1190 second->probability = REG_BR_PROB_BASE - combined_probability;
1191 }
1192 }
1193
1194 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
1195 Return the SSA_NAME if the condition is satisfied, NULL otherwise.
1196
1197 T1 and T2 should be one of the following cases:
1198 1. T1 is SSA_NAME, T2 is NULL
1199 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
1200 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
1201
1202 static tree
1203 strips_small_constant (tree t1, tree t2)
1204 {
1205 tree ret = NULL;
1206 int value = 0;
1207
1208 if (!t1)
1209 return NULL;
1210 else if (TREE_CODE (t1) == SSA_NAME)
1211 ret = t1;
1212 else if (tree_fits_shwi_p (t1))
1213 value = tree_to_shwi (t1);
1214 else
1215 return NULL;
1216
1217 if (!t2)
1218 return ret;
1219 else if (tree_fits_shwi_p (t2))
1220 value = tree_to_shwi (t2);
1221 else if (TREE_CODE (t2) == SSA_NAME)
1222 {
1223 if (ret)
1224 return NULL;
1225 else
1226 ret = t2;
1227 }
1228
1229 if (value <= 4 && value >= -4)
1230 return ret;
1231 else
1232 return NULL;
1233 }
1234
1235 /* Return the SSA_NAME in T or T's operands.
1236 Return NULL if SSA_NAME cannot be found. */
1237
1238 static tree
1239 get_base_value (tree t)
1240 {
1241 if (TREE_CODE (t) == SSA_NAME)
1242 return t;
1243
1244 if (!BINARY_CLASS_P (t))
1245 return NULL;
1246
1247 switch (TREE_OPERAND_LENGTH (t))
1248 {
1249 case 1:
1250 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1251 case 2:
1252 return strips_small_constant (TREE_OPERAND (t, 0),
1253 TREE_OPERAND (t, 1));
1254 default:
1255 return NULL;
1256 }
1257 }
1258
1259 /* Check the compare STMT in LOOP. If it compares an induction
1260 variable to a loop invariant, return true, and save
1261 LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
1262 Otherwise return false and set LOOP_INVARIANT to NULL. */
1263
1264 static bool
1265 is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
1266 tree *loop_invariant,
1267 enum tree_code *compare_code,
1268 tree *loop_step,
1269 tree *loop_iv_base)
1270 {
1271 tree op0, op1, bound, base;
1272 affine_iv iv0, iv1;
1273 enum tree_code code;
1274 tree step;
1275
1276 code = gimple_cond_code (stmt);
1277 *loop_invariant = NULL;
1278
1279 switch (code)
1280 {
1281 case GT_EXPR:
1282 case GE_EXPR:
1283 case NE_EXPR:
1284 case LT_EXPR:
1285 case LE_EXPR:
1286 case EQ_EXPR:
1287 break;
1288
1289 default:
1290 return false;
1291 }
1292
1293 op0 = gimple_cond_lhs (stmt);
1294 op1 = gimple_cond_rhs (stmt);
1295
1296 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1297 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1298 return false;
1299 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1300 return false;
1301 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1302 return false;
1303 if (TREE_CODE (iv0.step) != INTEGER_CST
1304 || TREE_CODE (iv1.step) != INTEGER_CST)
1305 return false;
1306 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1307 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1308 return false;
1309
1310 if (integer_zerop (iv0.step))
1311 {
1312 if (code != NE_EXPR && code != EQ_EXPR)
1313 code = invert_tree_comparison (code, false);
1314 bound = iv0.base;
1315 base = iv1.base;
1316 if (tree_fits_shwi_p (iv1.step))
1317 step = iv1.step;
1318 else
1319 return false;
1320 }
1321 else
1322 {
1323 bound = iv1.base;
1324 base = iv0.base;
1325 if (tree_fits_shwi_p (iv0.step))
1326 step = iv0.step;
1327 else
1328 return false;
1329 }
1330
1331 if (TREE_CODE (bound) != INTEGER_CST)
1332 bound = get_base_value (bound);
1333 if (!bound)
1334 return false;
1335 if (TREE_CODE (base) != INTEGER_CST)
1336 base = get_base_value (base);
1337 if (!base)
1338 return false;
1339
1340 *loop_invariant = bound;
1341 *compare_code = code;
1342 *loop_step = step;
1343 *loop_iv_base = base;
1344 return true;
1345 }
1346
1347 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1348
1349 static bool
1350 expr_coherent_p (tree t1, tree t2)
1351 {
1352 gimple *stmt;
1353 tree ssa_name_1 = NULL;
1354 tree ssa_name_2 = NULL;
1355
1356 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1357 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1358
1359 if (t1 == t2)
1360 return true;
1361
1362 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1363 return true;
1364 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1365 return false;
1366
1367 /* Check to see if t1 is expressed/defined with t2. */
1368 stmt = SSA_NAME_DEF_STMT (t1);
1369 gcc_assert (stmt != NULL);
1370 if (is_gimple_assign (stmt))
1371 {
1372 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1373 if (ssa_name_1 && ssa_name_1 == t2)
1374 return true;
1375 }
1376
1377 /* Check to see if t2 is expressed/defined with t1. */
1378 stmt = SSA_NAME_DEF_STMT (t2);
1379 gcc_assert (stmt != NULL);
1380 if (is_gimple_assign (stmt))
1381 {
1382 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1383 if (ssa_name_2 && ssa_name_2 == t1)
1384 return true;
1385 }
1386
1387 /* Compare if t1 and t2's def_stmts are identical. */
1388 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1389 return true;
1390 else
1391 return false;
1392 }
1393
1394 /* Return true if BB is predicted by one of the loop heuristics. */
1395
1396 static bool
1397 predicted_by_loop_heuristics_p (basic_block bb)
1398 {
1399 struct edge_prediction *i;
1400 edge_prediction **preds = bb_predictions->get (bb);
1401
1402 if (!preds)
1403 return false;
1404
1405 for (i = *preds; i; i = i->ep_next)
1406 if (i->ep_predictor == PRED_LOOP_ITERATIONS_GUESSED
1407 || i->ep_predictor == PRED_LOOP_ITERATIONS_MAX
1408 || i->ep_predictor == PRED_LOOP_ITERATIONS
1409 || i->ep_predictor == PRED_LOOP_EXIT
1410 || i->ep_predictor == PRED_LOOP_EXTRA_EXIT)
1411 return true;
1412 return false;
1413 }
1414
1415 /* Predict the branch probability of BB when BB contains a branch that compares
1416 an induction variable in LOOP (with base LOOP_IV_BASE_VAR) to LOOP_BOUND_VAR. The
1417 loop exit condition uses comparison code LOOP_BOUND_CODE with step LOOP_BOUND_STEP.
1418
1419 E.g.
1420 for (int i = 0; i < bound; i++) {
1421 if (i < bound - 2)
1422 computation_1();
1423 else
1424 computation_2();
1425 }
1426
1427 In this loop, we will predict the branch inside the loop to be taken. */
1428
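/* When the loop bound, IV base and compare bound are all constants, the
   code below derives the probability as compare_count / loop_count; for the
   example above that is roughly (bound - 2) / bound, so the then-branch is
   predicted taken in almost all iterations when BOUND is large.  */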
1429 static void
1430 predict_iv_comparison (struct loop *loop, basic_block bb,
1431 tree loop_bound_var,
1432 tree loop_iv_base_var,
1433 enum tree_code loop_bound_code,
1434 int loop_bound_step)
1435 {
1436 gimple *stmt;
1437 tree compare_var, compare_base;
1438 enum tree_code compare_code;
1439 tree compare_step_var;
1440 edge then_edge;
1441 edge_iterator ei;
1442
1443 if (predicted_by_loop_heuristics_p (bb))
1444 return;
1445
1446 stmt = last_stmt (bb);
1447 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1448 return;
1449 if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
1450 loop, &compare_var,
1451 &compare_code,
1452 &compare_step_var,
1453 &compare_base))
1454 return;
1455
1456 /* Find the taken edge. */
1457 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1458 if (then_edge->flags & EDGE_TRUE_VALUE)
1459 break;
1460
1461 /* When comparing an IV to a loop invariant, NE is more likely to be
1462 taken while EQ is more likely to be not-taken. */
1463 if (compare_code == NE_EXPR)
1464 {
1465 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1466 return;
1467 }
1468 else if (compare_code == EQ_EXPR)
1469 {
1470 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1471 return;
1472 }
1473
1474 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1475 return;
1476
1477 /* If loop bound, base and compare bound are all constants, we can
1478 calculate the probability directly. */
1479 if (tree_fits_shwi_p (loop_bound_var)
1480 && tree_fits_shwi_p (compare_var)
1481 && tree_fits_shwi_p (compare_base))
1482 {
1483 int probability;
1484 bool overflow, overall_overflow = false;
1485 widest_int compare_count, tem;
1486
1487 /* (loop_bound - base) / compare_step */
1488 tem = wi::sub (wi::to_widest (loop_bound_var),
1489 wi::to_widest (compare_base), SIGNED, &overflow);
1490 overall_overflow |= overflow;
1491 widest_int loop_count = wi::div_trunc (tem,
1492 wi::to_widest (compare_step_var),
1493 SIGNED, &overflow);
1494 overall_overflow |= overflow;
1495
1496 if (!wi::neg_p (wi::to_widest (compare_step_var))
1497 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1498 {
1499 /* (loop_bound - compare_bound) / compare_step */
1500 tem = wi::sub (wi::to_widest (loop_bound_var),
1501 wi::to_widest (compare_var), SIGNED, &overflow);
1502 overall_overflow |= overflow;
1503 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1504 SIGNED, &overflow);
1505 overall_overflow |= overflow;
1506 }
1507 else
1508 {
1509 /* (compare_bound - base) / compare_step */
1510 tem = wi::sub (wi::to_widest (compare_var),
1511 wi::to_widest (compare_base), SIGNED, &overflow);
1512 overall_overflow |= overflow;
1513 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1514 SIGNED, &overflow);
1515 overall_overflow |= overflow;
1516 }
1517 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1518 ++compare_count;
1519 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1520 ++loop_count;
1521 if (wi::neg_p (compare_count))
1522 compare_count = 0;
1523 if (wi::neg_p (loop_count))
1524 loop_count = 0;
1525 if (loop_count == 0)
1526 probability = 0;
1527 else if (wi::cmps (compare_count, loop_count) == 1)
1528 probability = REG_BR_PROB_BASE;
1529 else
1530 {
1531 tem = compare_count * REG_BR_PROB_BASE;
1532 tem = wi::udiv_trunc (tem, loop_count);
1533 probability = tem.to_uhwi ();
1534 }
1535
1536 /* FIXME: The branch prediction seems broken. It has only 20% hitrate. */
1537 if (!overall_overflow)
1538 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1539
1540 return;
1541 }
1542
1543 if (expr_coherent_p (loop_bound_var, compare_var))
1544 {
1545 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1546 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1547 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1548 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1549 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1550 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1551 else if (loop_bound_code == NE_EXPR)
1552 {
1553 /* If the loop backedge condition is "(i != bound)", we do
1554 the comparison based on the step of IV:
1555 * step < 0 : backedge condition is like (i > bound)
1556 * step > 0 : backedge condition is like (i < bound) */
1557 gcc_assert (loop_bound_step != 0);
1558 if (loop_bound_step > 0
1559 && (compare_code == LT_EXPR
1560 || compare_code == LE_EXPR))
1561 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1562 else if (loop_bound_step < 0
1563 && (compare_code == GT_EXPR
1564 || compare_code == GE_EXPR))
1565 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1566 else
1567 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1568 }
1569 else
1570 /* The branch is predicted not-taken if loop_bound_code is
1571 the opposite of compare_code. */
1572 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1573 }
1574 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1575 {
1576 /* For cases like:
1577 for (i = s; i < h; i++)
1578 if (i > s + 2) ....
1579 The branch should be predicted taken. */
1580 if (loop_bound_step > 0
1581 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1582 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1583 else if (loop_bound_step < 0
1584 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1585 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1586 else
1587 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1588 }
1589 }
1590
1591 /* Predict for extra loop exits that will lead to EXIT_EDGE. The extra loop
1592 exits result from short-circuit conditions that will generate an
1593 if_tmp. E.g.:
1594
1595 if (foo() || global > 10)
1596 break;
1597
1598 This will be translated into:
1599
1600 BB3:
1601 loop header...
1602 BB4:
1603 if foo() goto BB6 else goto BB5
1604 BB5:
1605 if global > 10 goto BB6 else goto BB7
1606 BB6:
1607 goto BB7
1608 BB7:
1609 iftmp = (PHI 0(BB5), 1(BB6))
1610 if iftmp == 1 goto BB8 else goto BB3
1611 BB8:
1612 outside of the loop...
1613
1614 The edge BB7->BB8 is loop exit because BB8 is outside of the loop.
1615 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
1616 exits. This function takes BB7->BB8 as input, and finds out the extra loop
1617 exits to predict them using PRED_LOOP_EXTRA_EXIT. */
1618
1619 static void
1620 predict_extra_loop_exits (edge exit_edge)
1621 {
1622 unsigned i;
1623 bool check_value_one;
1624 gimple *lhs_def_stmt;
1625 gphi *phi_stmt;
1626 tree cmp_rhs, cmp_lhs;
1627 gimple *last;
1628 gcond *cmp_stmt;
1629
1630 last = last_stmt (exit_edge->src);
1631 if (!last)
1632 return;
1633 cmp_stmt = dyn_cast <gcond *> (last);
1634 if (!cmp_stmt)
1635 return;
1636
1637 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1638 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1639 if (!TREE_CONSTANT (cmp_rhs)
1640 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1641 return;
1642 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1643 return;
1644
1645 /* If check_value_one is true, only the phi_args with value '1' will lead
1646 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1647 loop exit. */
1648 check_value_one = (((integer_onep (cmp_rhs))
1649 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1650 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
1651
1652 lhs_def_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1653 if (!lhs_def_stmt)
1654 return;
1655
1656 phi_stmt = dyn_cast <gphi *> (lhs_def_stmt);
1657 if (!phi_stmt)
1658 return;
1659
1660 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1661 {
1662 edge e1;
1663 edge_iterator ei;
1664 tree val = gimple_phi_arg_def (phi_stmt, i);
1665 edge e = gimple_phi_arg_edge (phi_stmt, i);
1666
1667 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1668 continue;
1669 if ((check_value_one ^ integer_onep (val)) == 1)
1670 continue;
1671 if (EDGE_COUNT (e->src->succs) != 1)
1672 {
1673 predict_paths_leading_to_edge (e, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1674 continue;
1675 }
1676
1677 FOR_EACH_EDGE (e1, ei, e->src->preds)
1678 predict_paths_leading_to_edge (e1, PRED_LOOP_EXTRA_EXIT, NOT_TAKEN);
1679 }
1680 }
1681
1682
1683 /* Predict edge probabilities by exploiting loop structure. */
1684
1685 static void
1686 predict_loops (void)
1687 {
1688 struct loop *loop;
1689
1690 /* Predict exit edges and other branches of each natural loop,
1691 processing inner loops first. */
1692 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1693 {
1694 basic_block bb, *bbs;
1695 unsigned j, n_exits = 0;
1696 vec<edge> exits;
1697 struct tree_niter_desc niter_desc;
1698 edge ex;
1699 struct nb_iter_bound *nb_iter;
1700 enum tree_code loop_bound_code = ERROR_MARK;
1701 tree loop_bound_step = NULL;
1702 tree loop_bound_var = NULL;
1703 tree loop_iv_base = NULL;
1704 gcond *stmt = NULL;
1705
1706 exits = get_loop_exit_edges (loop);
1707 FOR_EACH_VEC_ELT (exits, j, ex)
1708 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL | EDGE_FAKE)))
1709 n_exits ++;
1710 if (!n_exits)
1711 {
1712 exits.release ();
1713 continue;
1714 }
1715
1716 FOR_EACH_VEC_ELT (exits, j, ex)
1717 {
1718 tree niter = NULL;
1719 HOST_WIDE_INT nitercst;
1720 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1721 int probability;
1722 enum br_predictor predictor;
1723 widest_int nit;
1724
1725 if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL | EDGE_FAKE))
1726 continue;
1727 /* Loop heuristics do not expect the exit conditional to be inside
1728 an inner loop. We predict from the innermost to the outermost loop. */
1729 if (predicted_by_loop_heuristics_p (ex->src))
1730 continue;
1731 predict_extra_loop_exits (ex);
1732
1733 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1734 niter = niter_desc.niter;
1735 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1736 niter = loop_niter_by_eval (loop, ex);
1737
1738 if (TREE_CODE (niter) == INTEGER_CST)
1739 {
1740 if (tree_fits_uhwi_p (niter)
1741 && max
1742 && compare_tree_int (niter, max - 1) == -1)
1743 nitercst = tree_to_uhwi (niter) + 1;
1744 else
1745 nitercst = max;
1746 predictor = PRED_LOOP_ITERATIONS;
1747 }
1748 /* If we have just one exit and we can derive some information about
1749 the number of iterations of the loop from the statements inside
1750 the loop, use it to predict this exit. */
1751 else if (n_exits == 1
1752 && estimated_stmt_executions (loop, &nit))
1753 {
1754 if (wi::gtu_p (nit, max))
1755 nitercst = max;
1756 else
1757 nitercst = nit.to_shwi ();
1758 predictor = PRED_LOOP_ITERATIONS_GUESSED;
1759 }
1760 /* If we have a likely upper bound, trust it for very small iteration
1761 counts. Such loops would otherwise get mispredicted by the standard
1762 LOOP_EXIT heuristics. */
1763 else if (n_exits == 1
1764 && likely_max_stmt_executions (loop, &nit)
1765 && wi::ltu_p (nit,
1766 RDIV (REG_BR_PROB_BASE,
1767 REG_BR_PROB_BASE
1768 - predictor_info
1769 [PRED_LOOP_EXIT].hitrate)))
1770 {
1771 nitercst = nit.to_shwi ();
1772 predictor = PRED_LOOP_ITERATIONS_MAX;
1773 }
1774 else
1775 continue;
1776
1777 /* If the prediction for the number of iterations is zero, do not
1778 predict the exit edges. */
1779 if (nitercst == 0)
1780 continue;
1781
1782 probability = RDIV (REG_BR_PROB_BASE, nitercst);
1783 predict_edge (ex, predictor, probability);
1784 }
1785 exits.release ();
1786
1787 /* Find information about loop bound variables. */
1788 for (nb_iter = loop->bounds; nb_iter;
1789 nb_iter = nb_iter->next)
1790 if (nb_iter->stmt
1791 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
1792 {
1793 stmt = as_a <gcond *> (nb_iter->stmt);
1794 break;
1795 }
1796 if (!stmt && last_stmt (loop->header)
1797 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
1798 stmt = as_a <gcond *> (last_stmt (loop->header));
1799 if (stmt)
1800 is_comparison_with_loop_invariant_p (stmt, loop,
1801 &loop_bound_var,
1802 &loop_bound_code,
1803 &loop_bound_step,
1804 &loop_iv_base);
1805
1806 bbs = get_loop_body (loop);
1807
1808 for (j = 0; j < loop->num_nodes; j++)
1809 {
1810 int header_found = 0;
1811 edge e;
1812 edge_iterator ei;
1813
1814 bb = bbs[j];
1815
1816 /* Bypass loop heuristics on continue statements. These
1817 statements construct loops via "non-loop" constructs
1818 in the source language and are better handled
1819 separately. */
1820 if (predicted_by_p (bb, PRED_CONTINUE))
1821 continue;
1822
1823 /* Loop exit heuristics - predict as not taken an edge exiting the loop
1824 if the conditional has no loop-header successor. */
1825 if (!header_found
1826 /* If we already used more reliable loop exit predictors, do not
1827 bother with PRED_LOOP_EXIT. */
1828 && !predicted_by_loop_heuristics_p (bb))
1829 {
1830 /* For loops with many exits we don't want to predict all exits
1831 with a pretty large probability, because if all exits are
1832 considered in a row, the loop would be predicted to iterate
1833 almost never. The code dividing the probability by the number of
1834 exits is very rough. It should compute the number of exits
1835 taken on each path through the function (not the overall number
1836 of exits, which might be a lot higher for loops with wide switch
1837 statements in them) and compute the n-th square root.
1838
1839 We limit the minimal probability to 2% to keep
1840 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
1841 as this was causing a regression in the perl benchmark containing such
1842 a wide loop. */
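/* For illustration, assuming a PRED_LOOP_EXIT hitrate of 90% and a loop
   with four such exits, each exit edge would get probability
   (REG_BR_PROB_BASE - 9000) / 4 == 250, i.e. 2.5%, just above the 2%
   floor enforced below.  */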
1843
1844 int probability = ((REG_BR_PROB_BASE
1845 - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
1846 / n_exits);
1847 if (probability < HITRATE (2))
1848 probability = HITRATE (2);
1849 FOR_EACH_EDGE (e, ei, bb->succs)
1850 if (e->dest->index < NUM_FIXED_BLOCKS
1851 || !flow_bb_inside_loop_p (loop, e->dest))
1852 predict_edge (e, PRED_LOOP_EXIT, probability);
1853 }
1854 if (loop_bound_var)
1855 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
1856 loop_bound_code,
1857 tree_to_shwi (loop_bound_step));
1858 }
1859
1860 /* In the following code
1861 for (loop1)
1862 if (cond)
1863 for (loop2)
1864 body;
1865 guess that cond is unlikely. */
1866 if (loop_outer (loop)->num)
1867 {
1868 basic_block bb = NULL;
1869 edge preheader_edge = loop_preheader_edge (loop);
1870
1871 if (single_pred_p (preheader_edge->src)
1872 && single_succ_p (preheader_edge->src))
1873 preheader_edge = single_pred_edge (preheader_edge->src);
1874
1875 gimple *stmt = last_stmt (preheader_edge->src);
1876 /* Pattern match the Fortran loop preheader:
1877 _16 = BUILTIN_EXPECT (_15, 1, PRED_FORTRAN_LOOP_PREHEADER);
1878 _17 = (logical(kind=4)) _16;
1879 if (_17 != 0)
1880 goto <bb 11>;
1881 else
1882 goto <bb 13>;
1883
1884 Loop guard branch prediction says nothing about duplicated loop
1885 headers produced by the Fortran frontend, and in this case we want
1886 to predict paths leading to this preheader. */
1887
1888 if (stmt
1889 && gimple_code (stmt) == GIMPLE_COND
1890 && gimple_cond_code (stmt) == NE_EXPR
1891 && TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
1892 && integer_zerop (gimple_cond_rhs (stmt)))
1893 {
1894 gimple *call_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
1895 if (gimple_code (call_stmt) == GIMPLE_ASSIGN
1896 && gimple_expr_code (call_stmt) == NOP_EXPR
1897 && TREE_CODE (gimple_assign_rhs1 (call_stmt)) == SSA_NAME)
1898 call_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (call_stmt));
1899 if (gimple_code (call_stmt) == GIMPLE_CALL
1900 && gimple_call_internal_p (call_stmt)
1901 && gimple_call_internal_fn (call_stmt) == IFN_BUILTIN_EXPECT
1902 && TREE_CODE (gimple_call_arg (call_stmt, 2)) == INTEGER_CST
1903 && tree_fits_uhwi_p (gimple_call_arg (call_stmt, 2))
1904 && tree_to_uhwi (gimple_call_arg (call_stmt, 2))
1905 == PRED_FORTRAN_LOOP_PREHEADER)
1906 bb = preheader_edge->src;
1907 }
1908 if (!bb)
1909 {
1910 if (!dominated_by_p (CDI_DOMINATORS,
1911 loop_outer (loop)->latch, loop->header))
1912 predict_paths_leading_to_edge (loop_preheader_edge (loop),
1913 PRED_LOOP_GUARD,
1914 NOT_TAKEN,
1915 loop_outer (loop));
1916 }
1917 else
1918 {
1919 if (!dominated_by_p (CDI_DOMINATORS,
1920 loop_outer (loop)->latch, bb))
1921 predict_paths_leading_to (bb,
1922 PRED_LOOP_GUARD,
1923 NOT_TAKEN,
1924 loop_outer (loop));
1925 }
1926 }
1927
1928 /* Free basic blocks from get_loop_body. */
1929 free (bbs);
1930 }
1931 }
1932
1933 /* Attempt to predict probabilities of BB outgoing edges using local
1934 properties. */
1935 static void
1936 bb_estimate_probability_locally (basic_block bb)
1937 {
1938 rtx_insn *last_insn = BB_END (bb);
1939 rtx cond;
1940
1941 if (! can_predict_insn_p (last_insn))
1942 return;
1943 cond = get_condition (last_insn, NULL, false, false);
1944 if (! cond)
1945 return;
1946
1947 /* Try "pointer heuristic."
1948 A comparison ptr == 0 is predicted as false.
1949 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
1950 if (COMPARISON_P (cond)
1951 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
1952 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
1953 {
1954 if (GET_CODE (cond) == EQ)
1955 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
1956 else if (GET_CODE (cond) == NE)
1957 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
1958 }
1959 else
1960
1961 /* Try "opcode heuristic."
1962 EQ tests are usually false and NE tests are usually true. Also,
1963 most quantities are positive, so we can make the appropriate guesses
1964 about signed comparisons against zero. */
1965 switch (GET_CODE (cond))
1966 {
1967 case CONST_INT:
1968 /* Unconditional branch. */
1969 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
1970 cond == const0_rtx ? NOT_TAKEN : TAKEN);
1971 break;
1972
1973 case EQ:
1974 case UNEQ:
1975 	/* Floating point comparisons appear to behave in a very
1976 	   unpredictable way because of the special role of = tests in
1977 	   FP code.  */
1978 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1979 ;
1980 /* Comparisons with 0 are often used for booleans and there is
1981 nothing useful to predict about them. */
1982 else if (XEXP (cond, 1) == const0_rtx
1983 || XEXP (cond, 0) == const0_rtx)
1984 ;
1985 else
1986 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
1987 break;
1988
1989 case NE:
1990 case LTGT:
1991 	/* Floating point comparisons appear to behave in a very
1992 	   unpredictable way because of the special role of = tests in
1993 	   FP code.  */
1994 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1995 ;
1996 /* Comparisons with 0 are often used for booleans and there is
1997 nothing useful to predict about them. */
1998 else if (XEXP (cond, 1) == const0_rtx
1999 || XEXP (cond, 0) == const0_rtx)
2000 ;
2001 else
2002 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
2003 break;
2004
2005 case ORDERED:
2006 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
2007 break;
2008
2009 case UNORDERED:
2010 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
2011 break;
2012
2013 case LE:
2014 case LT:
2015 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2016 || XEXP (cond, 1) == constm1_rtx)
2017 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
2018 break;
2019
2020 case GE:
2021 case GT:
2022 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
2023 || XEXP (cond, 1) == constm1_rtx)
2024 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
2025 break;
2026
2027 default:
2028 break;
2029 }
2030 }
2031
2032 /* Set edge->probability for each successor edge of BB. */
2033 void
2034 guess_outgoing_edge_probabilities (basic_block bb)
2035 {
2036 bb_estimate_probability_locally (bb);
2037 combine_predictions_for_insn (BB_END (bb), bb);
2038 }
2039 \f
2040 static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor);
2041
2042 /* Helper function for expr_expected_value. */
2043
2044 static tree
2045 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
2046 tree op1, bitmap visited, enum br_predictor *predictor)
2047 {
2048 gimple *def;
2049
2050 if (predictor)
2051 *predictor = PRED_UNCONDITIONAL;
2052
2053 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2054 {
2055 if (TREE_CONSTANT (op0))
2056 return op0;
2057
2058 if (code != SSA_NAME)
2059 return NULL_TREE;
2060
2061 def = SSA_NAME_DEF_STMT (op0);
2062
2063 /* If we were already here, break the infinite cycle. */
2064 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
2065 return NULL;
2066
2067 if (gimple_code (def) == GIMPLE_PHI)
2068 {
2069 	  /* All the arguments of the PHI node must have the same expected
2070 	     constant value.  */
2071 int i, n = gimple_phi_num_args (def);
2072 tree val = NULL, new_val;
2073
2074 for (i = 0; i < n; i++)
2075 {
2076 tree arg = PHI_ARG_DEF (def, i);
2077 enum br_predictor predictor2;
2078
2079 	      /* If this PHI has itself as an argument, we cannot
2080 		 determine the expected value of this argument.  However,
2081 		 if we can find an expected constant value for the other
2082 		 PHI args then we can still be sure that this is
2083 		 likely a constant.  So be optimistic and just
2084 		 continue with the next argument.  */
2085 if (arg == PHI_RESULT (def))
2086 continue;
2087
2088 new_val = expr_expected_value (arg, visited, &predictor2);
2089
2090 	      /* It is difficult to combine value predictors.  Simply assume
2091 		 that the later predictor is weaker and take its prediction.  */
2092 if (predictor && *predictor < predictor2)
2093 *predictor = predictor2;
2094 if (!new_val)
2095 return NULL;
2096 if (!val)
2097 val = new_val;
2098 else if (!operand_equal_p (val, new_val, false))
2099 return NULL;
2100 }
2101 return val;
2102 }
2103 if (is_gimple_assign (def))
2104 {
2105 if (gimple_assign_lhs (def) != op0)
2106 return NULL;
2107
2108 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
2109 gimple_assign_rhs1 (def),
2110 gimple_assign_rhs_code (def),
2111 gimple_assign_rhs2 (def),
2112 visited, predictor);
2113 }
2114
2115 if (is_gimple_call (def))
2116 {
2117 tree decl = gimple_call_fndecl (def);
2118 if (!decl)
2119 {
2120 if (gimple_call_internal_p (def)
2121 && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
2122 {
2123 gcc_assert (gimple_call_num_args (def) == 3);
2124 tree val = gimple_call_arg (def, 0);
2125 if (TREE_CONSTANT (val))
2126 return val;
2127 if (predictor)
2128 {
2129 tree val2 = gimple_call_arg (def, 2);
2130 gcc_assert (TREE_CODE (val2) == INTEGER_CST
2131 && tree_fits_uhwi_p (val2)
2132 && tree_to_uhwi (val2) < END_PREDICTORS);
2133 *predictor = (enum br_predictor) tree_to_uhwi (val2);
2134 }
2135 return gimple_call_arg (def, 1);
2136 }
2137 return NULL;
2138 }
2139 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
2140 switch (DECL_FUNCTION_CODE (decl))
2141 {
2142 case BUILT_IN_EXPECT:
2143 {
2144 tree val;
2145 if (gimple_call_num_args (def) != 2)
2146 return NULL;
2147 val = gimple_call_arg (def, 0);
2148 if (TREE_CONSTANT (val))
2149 return val;
2150 if (predictor)
2151 *predictor = PRED_BUILTIN_EXPECT;
2152 return gimple_call_arg (def, 1);
2153 }
2154
2155 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
2156 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
2157 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
2158 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
2159 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
2160 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
2161 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
2162 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
2163 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
2164 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
2165 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
2166 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
2167 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
2168 /* Assume that any given atomic operation has low contention,
2169 and thus the compare-and-swap operation succeeds. */
2170 if (predictor)
2171 *predictor = PRED_COMPARE_AND_SWAP;
2172 return boolean_true_node;
2173 default:
2174 break;
2175 }
2176 }
2177
2178 return NULL;
2179 }
2180
2181 if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
2182 {
2183 tree res;
2184 enum br_predictor predictor2;
2185 op0 = expr_expected_value (op0, visited, predictor);
2186 if (!op0)
2187 return NULL;
2188 op1 = expr_expected_value (op1, visited, &predictor2);
2189 if (predictor && *predictor < predictor2)
2190 *predictor = predictor2;
2191 if (!op1)
2192 return NULL;
2193 res = fold_build2 (code, type, op0, op1);
2194 if (TREE_CONSTANT (res))
2195 return res;
2196 return NULL;
2197 }
2198 if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
2199 {
2200 tree res;
2201 op0 = expr_expected_value (op0, visited, predictor);
2202 if (!op0)
2203 return NULL;
2204 res = fold_build1 (code, type, op0);
2205 if (TREE_CONSTANT (res))
2206 return res;
2207 return NULL;
2208 }
2209 return NULL;
2210 }
2211
2212 /* Return the constant value EXPR will likely have at execution time, or
2213    NULL if unknown.  The function is used by the builtin_expect branch
2214    predictor, so the evidence must come from this construct and additional
2215    possible constant folding.
2216 
2217    We may want to implement a more involved value guess (such as value range
2218    propagation based prediction), but such tricks shall go to a new implementation.  */
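/* As a concrete illustration (hypothetical GIMPLE names), given

     _1 = __builtin_expect (x_2, 42);
     if (_1 == 42) ...

   chasing the SSA definition of the condition operand reaches the
   BUILT_IN_EXPECT (or IFN_BUILTIN_EXPECT) call, so 42 is returned as the
   expected value of _1 and *PREDICTOR is set to PRED_BUILTIN_EXPECT.  */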
2219
2220 static tree
2221 expr_expected_value (tree expr, bitmap visited,
2222 enum br_predictor *predictor)
2223 {
2224 enum tree_code code;
2225 tree op0, op1;
2226
2227 if (TREE_CONSTANT (expr))
2228 {
2229 if (predictor)
2230 *predictor = PRED_UNCONDITIONAL;
2231 return expr;
2232 }
2233
2234 extract_ops_from_tree (expr, &code, &op0, &op1);
2235 return expr_expected_value_1 (TREE_TYPE (expr),
2236 op0, code, op1, visited, predictor);
2237 }
2238 \f
2239 /* Predict using opcode of the last statement in basic block. */
2240 static void
2241 tree_predict_by_opcode (basic_block bb)
2242 {
2243 gimple *stmt = last_stmt (bb);
2244 edge then_edge;
2245 tree op0, op1;
2246 tree type;
2247 tree val;
2248 enum tree_code cmp;
2249 bitmap visited;
2250 edge_iterator ei;
2251 enum br_predictor predictor;
2252
2253 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
2254 return;
2255 FOR_EACH_EDGE (then_edge, ei, bb->succs)
2256 if (then_edge->flags & EDGE_TRUE_VALUE)
2257 break;
2258 op0 = gimple_cond_lhs (stmt);
2259 op1 = gimple_cond_rhs (stmt);
2260 cmp = gimple_cond_code (stmt);
2261 type = TREE_TYPE (op0);
2262 visited = BITMAP_ALLOC (NULL);
2263 val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, visited,
2264 &predictor);
2265 BITMAP_FREE (visited);
2266 if (val && TREE_CODE (val) == INTEGER_CST)
2267 {
2268 if (predictor == PRED_BUILTIN_EXPECT)
2269 {
2270 int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
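	  /* BUILTIN_EXPECT_PROBABILITY is the assumed hitrate of
	     __builtin_expect, expressed in percent.  When the expected value
	     of the condition is zero, the THEN edge is the unlikely one, so
	     the percentage is inverted before predicting it.  */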
2271
2272 gcc_assert (percent >= 0 && percent <= 100);
2273 if (integer_zerop (val))
2274 percent = 100 - percent;
2275 predict_edge (then_edge, PRED_BUILTIN_EXPECT, HITRATE (percent));
2276 }
2277 else
2278 predict_edge_def (then_edge, predictor,
2279 integer_zerop (val) ? NOT_TAKEN : TAKEN);
2280 }
2281 /* Try "pointer heuristic."
2282 A comparison ptr == 0 is predicted as false.
2283 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
2284 if (POINTER_TYPE_P (type))
2285 {
2286 if (cmp == EQ_EXPR)
2287 predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
2288 else if (cmp == NE_EXPR)
2289 predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
2290 }
2291 else
2292
2293 /* Try "opcode heuristic."
2294 EQ tests are usually false and NE tests are usually true. Also,
2295 most quantities are positive, so we can make the appropriate guesses
2296 about signed comparisons against zero. */
2297 switch (cmp)
2298 {
2299 case EQ_EXPR:
2300 case UNEQ_EXPR:
2301       /* Floating point comparisons appear to behave in a very
2302 	 unpredictable way because of the special role of = tests in
2303 	 FP code.  */
2304 if (FLOAT_TYPE_P (type))
2305 ;
2306 /* Comparisons with 0 are often used for booleans and there is
2307 nothing useful to predict about them. */
2308 else if (integer_zerop (op0) || integer_zerop (op1))
2309 ;
2310 else
2311 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
2312 break;
2313
2314 case NE_EXPR:
2315 case LTGT_EXPR:
2316       /* Floating point comparisons appear to behave in a very
2317 	 unpredictable way because of the special role of = tests in
2318 	 FP code.  */
2319 if (FLOAT_TYPE_P (type))
2320 ;
2321 /* Comparisons with 0 are often used for booleans and there is
2322 nothing useful to predict about them. */
2323 else if (integer_zerop (op0)
2324 || integer_zerop (op1))
2325 ;
2326 else
2327 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
2328 break;
2329
2330 case ORDERED_EXPR:
2331 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
2332 break;
2333
2334 case UNORDERED_EXPR:
2335 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
2336 break;
2337
2338 case LE_EXPR:
2339 case LT_EXPR:
2340 if (integer_zerop (op1)
2341 || integer_onep (op1)
2342 || integer_all_onesp (op1)
2343 || real_zerop (op1)
2344 || real_onep (op1)
2345 || real_minus_onep (op1))
2346 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
2347 break;
2348
2349 case GE_EXPR:
2350 case GT_EXPR:
2351 if (integer_zerop (op1)
2352 || integer_onep (op1)
2353 || integer_all_onesp (op1)
2354 || real_zerop (op1)
2355 || real_onep (op1)
2356 || real_minus_onep (op1))
2357 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
2358 break;
2359
2360 default:
2361 break;
2362 }
2363 }
2364
2365 /* Try to guess whether the return value VAL indicates an error code.  */
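/* For example, "return -1" in an integer-valued function or "return NULL"
   in a pointer-valued function is treated as an error path
   (PRED_NEGATIVE_RETURN or PRED_NULL_RETURN) and predicted not taken.  */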
2366
2367 static enum br_predictor
2368 return_prediction (tree val, enum prediction *prediction)
2369 {
2370 /* VOID. */
2371 if (!val)
2372 return PRED_NO_PREDICTION;
2373 /* Different heuristics for pointers and scalars. */
2374 if (POINTER_TYPE_P (TREE_TYPE (val)))
2375 {
2376 /* NULL is usually not returned. */
2377 if (integer_zerop (val))
2378 {
2379 *prediction = NOT_TAKEN;
2380 return PRED_NULL_RETURN;
2381 }
2382 }
2383 else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
2384 {
2385 /* Negative return values are often used to indicate
2386 errors. */
2387 if (TREE_CODE (val) == INTEGER_CST
2388 && tree_int_cst_sgn (val) < 0)
2389 {
2390 *prediction = NOT_TAKEN;
2391 return PRED_NEGATIVE_RETURN;
2392 }
2393       /* Constant return values seem to be commonly taken.
2394 	 Zero/one often represent booleans, so exclude them from the
2395 	 heuristics.  */
2396 if (TREE_CONSTANT (val)
2397 && (!integer_zerop (val) && !integer_onep (val)))
2398 {
2399 *prediction = NOT_TAKEN;
2400 return PRED_CONST_RETURN;
2401 }
2402 }
2403 return PRED_NO_PREDICTION;
2404 }
2405
2406 /* Find the basic block with the return expression and look for a possible
2407    return value, trying to apply the RETURN_PREDICTION heuristics.  */
2408 static void
2409 apply_return_prediction (void)
2410 {
2411 greturn *return_stmt = NULL;
2412 tree return_val;
2413 edge e;
2414 gphi *phi;
2415 int phi_num_args, i;
2416 enum br_predictor pred;
2417 enum prediction direction;
2418 edge_iterator ei;
2419
2420 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2421 {
2422 gimple *last = last_stmt (e->src);
2423 if (last
2424 && gimple_code (last) == GIMPLE_RETURN)
2425 {
2426 return_stmt = as_a <greturn *> (last);
2427 break;
2428 }
2429 }
2430 if (!e)
2431 return;
2432 return_val = gimple_return_retval (return_stmt);
2433 if (!return_val)
2434 return;
2435 if (TREE_CODE (return_val) != SSA_NAME
2436 || !SSA_NAME_DEF_STMT (return_val)
2437 || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
2438 return;
2439 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (return_val));
2440 phi_num_args = gimple_phi_num_args (phi);
2441 pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
2442
2443   /* Avoid the degenerate case where all return values from the function
2444      belong to the same category (i.e. they are all positive constants),
2445      so we can hardly say anything about them.  */
2446 for (i = 1; i < phi_num_args; i++)
2447 if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
2448 break;
2449 if (i != phi_num_args)
2450 for (i = 0; i < phi_num_args; i++)
2451 {
2452 pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
2453 if (pred != PRED_NO_PREDICTION)
2454 predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
2455 direction);
2456 }
2457 }
2458
2459 /* Look for basic blocks that contain unlikely-to-happen events
2460    (such as noreturn calls) and mark all paths leading to execution
2461    of these basic blocks as unlikely.  */
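/* For example, a call to abort () (an ECF_NORETURN call) or to a function
   declared with __attribute__ ((cold)) causes all paths leading to the
   containing block to be predicted as unlikely.  */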
2462
2463 static void
2464 tree_bb_level_predictions (void)
2465 {
2466 basic_block bb;
2467 bool has_return_edges = false;
2468 edge e;
2469 edge_iterator ei;
2470
2471 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2472 if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
2473 {
2474 has_return_edges = true;
2475 break;
2476 }
2477
2478 apply_return_prediction ();
2479
2480 FOR_EACH_BB_FN (bb, cfun)
2481 {
2482 gimple_stmt_iterator gsi;
2483
2484 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2485 {
2486 gimple *stmt = gsi_stmt (gsi);
2487 tree decl;
2488
2489 if (is_gimple_call (stmt))
2490 {
2491 if ((gimple_call_flags (stmt) & ECF_NORETURN)
2492 && has_return_edges)
2493 predict_paths_leading_to (bb, PRED_NORETURN,
2494 NOT_TAKEN);
2495 decl = gimple_call_fndecl (stmt);
2496 if (decl
2497 && lookup_attribute ("cold",
2498 DECL_ATTRIBUTES (decl)))
2499 predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
2500 NOT_TAKEN);
2501 if (decl && recursive_call_p (current_function_decl, decl))
2502 predict_paths_leading_to (bb, PRED_RECURSIVE_CALL,
2503 NOT_TAKEN);
2504 }
2505 else if (gimple_code (stmt) == GIMPLE_PREDICT)
2506 {
2507 predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
2508 gimple_predict_outcome (stmt));
2509 /* Keep GIMPLE_PREDICT around so early inlining will propagate
2510 hints to callers. */
2511 }
2512 }
2513 }
2514 }
2515
2516 /* Callback for hash_map::traverse, asserts that the pointer map is
2517 empty. */
2518
2519 bool
2520 assert_is_empty (const_basic_block const &, edge_prediction *const &value,
2521 void *)
2522 {
2523 gcc_assert (!value);
2524 return false;
2525 }
2526
2527 /* Predict branch probabilities and estimate profile for basic block BB. */
2528
2529 static void
2530 tree_estimate_probability_bb (basic_block bb)
2531 {
2532 edge e;
2533 edge_iterator ei;
2534 gimple *last;
2535
2536 FOR_EACH_EDGE (e, ei, bb->succs)
2537 {
2538 /* Predict edges to user labels with attributes. */
2539 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2540 {
2541 gimple_stmt_iterator gi;
2542 for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
2543 {
2544 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gi));
2545 tree decl;
2546
2547 if (!label_stmt)
2548 break;
2549 decl = gimple_label_label (label_stmt);
2550 if (DECL_ARTIFICIAL (decl))
2551 continue;
2552
2553 /* Finally, we have a user-defined label. */
2554 if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl)))
2555 predict_edge_def (e, PRED_COLD_LABEL, NOT_TAKEN);
2556 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (decl)))
2557 predict_edge_def (e, PRED_HOT_LABEL, TAKEN);
2558 }
2559 }
2560
2561       /* Predict early returns to be probable, as we've already taken
2562 	 care of error returns, and other cases are often used for
2563 	 fast paths through the function.
2564
2565 Since we've already removed the return statements, we are
2566 looking for CFG like:
2567
2568 if (conditional)
2569 {
2570 ..
2571 goto return_block
2572 }
2573 some other blocks
2574 return_block:
2575 return_stmt. */
2576 if (e->dest != bb->next_bb
2577 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2578 && single_succ_p (e->dest)
2579 && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2580 && (last = last_stmt (e->dest)) != NULL
2581 && gimple_code (last) == GIMPLE_RETURN)
2582 {
2583 edge e1;
2584 edge_iterator ei1;
2585
2586 if (single_succ_p (bb))
2587 {
2588 FOR_EACH_EDGE (e1, ei1, bb->preds)
2589 if (!predicted_by_p (e1->src, PRED_NULL_RETURN)
2590 && !predicted_by_p (e1->src, PRED_CONST_RETURN)
2591 && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN))
2592 predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2593 }
2594 else
2595 if (!predicted_by_p (e->src, PRED_NULL_RETURN)
2596 && !predicted_by_p (e->src, PRED_CONST_RETURN)
2597 && !predicted_by_p (e->src, PRED_NEGATIVE_RETURN))
2598 predict_edge_def (e, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2599 }
2600
2601       /* Look for a block we are guarding (i.e. we dominate it,
2602 	 but it doesn't postdominate us).  */
2603 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
2604 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
2605 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
2606 {
2607 gimple_stmt_iterator bi;
2608
2609 /* The call heuristic claims that a guarded function call
2610 is improbable. This is because such calls are often used
2611 to signal exceptional situations such as printing error
2612 messages. */
2613 for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
2614 gsi_next (&bi))
2615 {
2616 gimple *stmt = gsi_stmt (bi);
2617 if (is_gimple_call (stmt)
2618 && !gimple_inexpensive_call_p (as_a <gcall *> (stmt))
2619 		  /* Const and pure calls are hardly ever used to signal
2620 		     something exceptional.  */
2621 && gimple_has_side_effects (stmt))
2622 {
2623 predict_edge_def (e, PRED_CALL, NOT_TAKEN);
2624 break;
2625 }
2626 }
2627 }
2628 }
2629 tree_predict_by_opcode (bb);
2630 }
2631
2632 /* Predict branch probabilities and estimate profile of the tree CFG.
2633 This function can be called from the loop optimizers to recompute
2634 the profile information.
2635 If DRY_RUN is set, do not modify CFG and only produce dump files. */
2636
2637 void
2638 tree_estimate_probability (bool dry_run)
2639 {
2640 basic_block bb;
2641
2642 add_noreturn_fake_exit_edges ();
2643 connect_infinite_loops_to_exit ();
2644 /* We use loop_niter_by_eval, which requires that the loops have
2645 preheaders. */
2646 create_preheaders (CP_SIMPLE_PREHEADERS);
2647 calculate_dominance_info (CDI_POST_DOMINATORS);
2648
2649 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
2650 tree_bb_level_predictions ();
2651 record_loop_exits ();
2652
2653 if (number_of_loops (cfun) > 1)
2654 predict_loops ();
2655
2656 FOR_EACH_BB_FN (bb, cfun)
2657 tree_estimate_probability_bb (bb);
2658
2659 FOR_EACH_BB_FN (bb, cfun)
2660 combine_predictions_for_bb (bb, dry_run);
2661
2662 if (flag_checking)
2663 bb_predictions->traverse<void *, assert_is_empty> (NULL);
2664
2665 delete bb_predictions;
2666 bb_predictions = NULL;
2667
2668 if (!dry_run)
2669 estimate_bb_frequencies (false);
2670 free_dominance_info (CDI_POST_DOMINATORS);
2671 remove_fake_exit_edges ();
2672 }
2673 \f
2674 /* Using predictor PRED, predict the edges entering CUR whose sources are not
2675    postdominated by BB, and recurse to all postdominators of CUR.  */
2676
2677 static void
2678 predict_paths_for_bb (basic_block cur, basic_block bb,
2679 enum br_predictor pred,
2680 enum prediction taken,
2681 bitmap visited, struct loop *in_loop = NULL)
2682 {
2683 edge e;
2684 edge_iterator ei;
2685 basic_block son;
2686
2687 /* If we exited the loop or CUR is unconditional in the loop, there is
2688 nothing to do. */
2689 if (in_loop
2690 && (!flow_bb_inside_loop_p (in_loop, cur)
2691 || dominated_by_p (CDI_DOMINATORS, in_loop->latch, cur)))
2692 return;
2693
2694   /* We are looking for all edges forming an edge cut induced by the
2695      set of all blocks postdominated by BB.  */
2696 FOR_EACH_EDGE (e, ei, cur->preds)
2697 if (e->src->index >= NUM_FIXED_BLOCKS
2698 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
2699 {
2700 edge e2;
2701 edge_iterator ei2;
2702 bool found = false;
2703
2704 /* Ignore fake edges and eh, we predict them as not taken anyway. */
2705 if (e->flags & (EDGE_EH | EDGE_FAKE))
2706 continue;
2707 gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));
2708
2709 /* See if there is an edge from e->src that is not abnormal
2710 and does not lead to BB and does not exit the loop. */
2711 FOR_EACH_EDGE (e2, ei2, e->src->succs)
2712 if (e2 != e
2713 && !(e2->flags & (EDGE_EH | EDGE_FAKE))
2714 && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb)
2715 && (!in_loop || !loop_exit_edge_p (in_loop, e2)))
2716 {
2717 found = true;
2718 break;
2719 }
2720
2721       /* If there is a non-abnormal path leaving e->src, predict the edge
2722 	 using the predictor.  Otherwise we need to look for paths
2723 	 leading to e->src.
2724 
2725 	 The second case may lead to an infinite loop when we are predicting
2726 	 regions that are only reachable by abnormal edges.  We simply
2727 	 prevent visiting a given BB twice.  */
2728 if (found)
2729 {
2730 if (!edge_predicted_by_p (e, pred, taken))
2731 predict_edge_def (e, pred, taken);
2732 }
2733 else if (bitmap_set_bit (visited, e->src->index))
2734 predict_paths_for_bb (e->src, e->src, pred, taken, visited, in_loop);
2735 }
2736 for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
2737 son;
2738 son = next_dom_son (CDI_POST_DOMINATORS, son))
2739 predict_paths_for_bb (son, bb, pred, taken, visited, in_loop);
2740 }
2741
2742 /* Predict edges on all paths leading to BB according to predictor PRED
2743    and direction TAKEN, restricting the walk to IN_LOOP if non-NULL.  */
2744
2745 static void
2746 predict_paths_leading_to (basic_block bb, enum br_predictor pred,
2747 enum prediction taken, struct loop *in_loop)
2748 {
2749 bitmap visited = BITMAP_ALLOC (NULL);
2750 predict_paths_for_bb (bb, bb, pred, taken, visited, in_loop);
2751 BITMAP_FREE (visited);
2752 }
2753
2754 /* Like predict_paths_leading_to but take edge instead of basic block. */
2755
2756 static void
2757 predict_paths_leading_to_edge (edge e, enum br_predictor pred,
2758 enum prediction taken, struct loop *in_loop)
2759 {
2760 bool has_nonloop_edge = false;
2761 edge_iterator ei;
2762 edge e2;
2763
2764 basic_block bb = e->src;
2765 FOR_EACH_EDGE (e2, ei, bb->succs)
2766 if (e2->dest != e->src && e2->dest != e->dest
2767 && !(e->flags & (EDGE_EH | EDGE_FAKE))
2768 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
2769 {
2770 has_nonloop_edge = true;
2771 break;
2772 }
2773 if (!has_nonloop_edge)
2774 {
2775 bitmap visited = BITMAP_ALLOC (NULL);
2776 predict_paths_for_bb (bb, bb, pred, taken, visited, in_loop);
2777 BITMAP_FREE (visited);
2778 }
2779 else
2780 predict_edge_def (e, pred, taken);
2781 }
2782 \f
2783 /* This is used to carry information about basic blocks. It is
2784 attached to the AUX field of the standard CFG block. */
2785
2786 struct block_info
2787 {
2788 /* Estimated frequency of execution of basic_block. */
2789 sreal frequency;
2790
2791 /* To keep queue of basic blocks to process. */
2792 basic_block next;
2793
2794 /* Number of predecessors we need to visit first. */
2795 int npredecessors;
2796 };
2797
2798 /* Similar information for edges. */
2799 struct edge_prob_info
2800 {
2801   /* In case the edge is a loopback edge, the probability that the edge will
2802      be reached given that the header is.  The estimated number of iterations
2803      of the loop can then be computed as 1 / (1 - back_edge_prob).  */
2804 sreal back_edge_prob;
2805 /* True if the edge is a loopback edge in the natural loop. */
2806 unsigned int back_edge:1;
2807 };
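/* For example, a back edge probability of 0.9 corresponds to an estimate of
   1 / (1 - 0.9) == 10 iterations of the loop.  */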
2808
2809 #define BLOCK_INFO(B) ((block_info *) (B)->aux)
2810 #undef EDGE_INFO
2811 #define EDGE_INFO(E) ((edge_prob_info *) (E)->aux)
2812
2813 /* Helper function for estimate_bb_frequencies.
2814 Propagate the frequencies in blocks marked in
2815 TOVISIT, starting in HEAD. */
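/* The propagation is a topological walk: the frequency of a block is the sum
   of predecessor frequency times edge probability over its non-back-edge
   predecessors, and for a loop header this sum is further divided by
   (1 - cyclic_probability) to account for the expected number of
   iterations.  */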
2816
2817 static void
2818 propagate_freq (basic_block head, bitmap tovisit)
2819 {
2820 basic_block bb;
2821 basic_block last;
2822 unsigned i;
2823 edge e;
2824 basic_block nextbb;
2825 bitmap_iterator bi;
2826
2827   /* For each basic block we need to visit, count the number of its
2828      predecessors that we need to visit first.  */
2829 EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
2830 {
2831 edge_iterator ei;
2832 int count = 0;
2833
2834 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2835
2836 FOR_EACH_EDGE (e, ei, bb->preds)
2837 {
2838 bool visit = bitmap_bit_p (tovisit, e->src->index);
2839
2840 if (visit && !(e->flags & EDGE_DFS_BACK))
2841 count++;
2842 else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
2843 fprintf (dump_file,
2844 "Irreducible region hit, ignoring edge to %i->%i\n",
2845 e->src->index, bb->index);
2846 }
2847 BLOCK_INFO (bb)->npredecessors = count;
2848 /* When function never returns, we will never process exit block. */
2849 if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2850 bb->count = bb->frequency = 0;
2851 }
2852
2853 BLOCK_INFO (head)->frequency = 1;
2854 last = head;
2855 for (bb = head; bb; bb = nextbb)
2856 {
2857 edge_iterator ei;
2858 sreal cyclic_probability = 0;
2859 sreal frequency = 0;
2860
2861 nextbb = BLOCK_INFO (bb)->next;
2862 BLOCK_INFO (bb)->next = NULL;
2863
2864 /* Compute frequency of basic block. */
2865 if (bb != head)
2866 {
2867 if (flag_checking)
2868 FOR_EACH_EDGE (e, ei, bb->preds)
2869 gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
2870 || (e->flags & EDGE_DFS_BACK));
2871
2872 FOR_EACH_EDGE (e, ei, bb->preds)
2873 if (EDGE_INFO (e)->back_edge)
2874 {
2875 cyclic_probability += EDGE_INFO (e)->back_edge_prob;
2876 }
2877 else if (!(e->flags & EDGE_DFS_BACK))
2878 {
2879 /* frequency += (e->probability
2880 * BLOCK_INFO (e->src)->frequency /
2881 REG_BR_PROB_BASE); */
2882
2883 sreal tmp = e->probability;
2884 tmp *= BLOCK_INFO (e->src)->frequency;
2885 tmp *= real_inv_br_prob_base;
2886 frequency += tmp;
2887 }
2888
2889 if (cyclic_probability == 0)
2890 {
2891 BLOCK_INFO (bb)->frequency = frequency;
2892 }
2893 else
2894 {
2895 if (cyclic_probability > real_almost_one)
2896 cyclic_probability = real_almost_one;
2897
2898 /* BLOCK_INFO (bb)->frequency = frequency
2899 / (1 - cyclic_probability) */
2900
2901 cyclic_probability = sreal (1) - cyclic_probability;
2902 BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
2903 }
2904 }
2905
2906 bitmap_clear_bit (tovisit, bb->index);
2907
2908 e = find_edge (bb, head);
2909 if (e)
2910 {
2911 /* EDGE_INFO (e)->back_edge_prob
2912 = ((e->probability * BLOCK_INFO (bb)->frequency)
2913 / REG_BR_PROB_BASE); */
2914
2915 sreal tmp = e->probability;
2916 tmp *= BLOCK_INFO (bb)->frequency;
2917 EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
2918 }
2919
2920 /* Propagate to successor blocks. */
2921 FOR_EACH_EDGE (e, ei, bb->succs)
2922 if (!(e->flags & EDGE_DFS_BACK)
2923 && BLOCK_INFO (e->dest)->npredecessors)
2924 {
2925 BLOCK_INFO (e->dest)->npredecessors--;
2926 if (!BLOCK_INFO (e->dest)->npredecessors)
2927 {
2928 if (!nextbb)
2929 nextbb = e->dest;
2930 else
2931 BLOCK_INFO (last)->next = e->dest;
2932
2933 last = e->dest;
2934 }
2935 }
2936 }
2937 }
2938
2939 /* Estimate frequencies in loops at same nest level. */
2940
2941 static void
2942 estimate_loops_at_level (struct loop *first_loop)
2943 {
2944 struct loop *loop;
2945
2946 for (loop = first_loop; loop; loop = loop->next)
2947 {
2948 edge e;
2949 basic_block *bbs;
2950 unsigned i;
2951 bitmap tovisit = BITMAP_ALLOC (NULL);
2952
2953 estimate_loops_at_level (loop->inner);
2954
2955 /* Find current loop back edge and mark it. */
2956 e = loop_latch_edge (loop);
2957 EDGE_INFO (e)->back_edge = 1;
2958
2959 bbs = get_loop_body (loop);
2960 for (i = 0; i < loop->num_nodes; i++)
2961 bitmap_set_bit (tovisit, bbs[i]->index);
2962 free (bbs);
2963 propagate_freq (loop->header, tovisit);
2964 BITMAP_FREE (tovisit);
2965 }
2966 }
2967
2968 /* Propagates frequencies through structure of loops. */
2969
2970 static void
2971 estimate_loops (void)
2972 {
2973 bitmap tovisit = BITMAP_ALLOC (NULL);
2974 basic_block bb;
2975
2976 /* Start by estimating the frequencies in the loops. */
2977 if (number_of_loops (cfun) > 1)
2978 estimate_loops_at_level (current_loops->tree_root->inner);
2979
2980 /* Now propagate the frequencies through all the blocks. */
2981 FOR_ALL_BB_FN (bb, cfun)
2982 {
2983 bitmap_set_bit (tovisit, bb->index);
2984 }
2985 propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
2986 BITMAP_FREE (tovisit);
2987 }
2988
2989 /* Drop the profile for NODE to guessed, and update its frequency based on
2990 whether it is expected to be hot given the CALL_COUNT. */
2991
2992 static void
2993 drop_profile (struct cgraph_node *node, gcov_type call_count)
2994 {
2995 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
2996 /* In the case where this was called by another function with a
2997 dropped profile, call_count will be 0. Since there are no
2998 non-zero call counts to this function, we don't know for sure
2999 whether it is hot, and therefore it will be marked normal below. */
3000 bool hot = maybe_hot_count_p (NULL, call_count);
3001
3002 if (dump_file)
3003 fprintf (dump_file,
3004 "Dropping 0 profile for %s/%i. %s based on calls.\n",
3005 node->name (), node->order,
3006 hot ? "Function is hot" : "Function is normal");
3007 /* We only expect to miss profiles for functions that are reached
3008 via non-zero call edges in cases where the function may have
3009 been linked from another module or library (COMDATs and extern
3010 templates). See the comments below for handle_missing_profiles.
3011 Also, only warn in cases where the missing counts exceed the
3012 number of training runs. In certain cases with an execv followed
3013 by a no-return call the profile for the no-return call is not
3014 dumped and there can be a mismatch. */
3015 if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
3016 && call_count > profile_info->runs)
3017 {
3018 if (flag_profile_correction)
3019 {
3020 if (dump_file)
3021 fprintf (dump_file,
3022 "Missing counts for called function %s/%i\n",
3023 node->name (), node->order);
3024 }
3025 else
3026 warning (0, "Missing counts for called function %s/%i",
3027 node->name (), node->order);
3028 }
3029
3030 profile_status_for_fn (fn)
3031 = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
3032 node->frequency
3033 = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
3034 }
3035
3036 /* In the case of COMDAT routines, multiple object files will contain the same
3037 function and the linker will select one for the binary. In that case
3038    all the other copies from the profile-instrumented binary will be missing
3039 profile counts. Look for cases where this happened, due to non-zero
3040 call counts going to 0-count functions, and drop the profile to guessed
3041 so that we can use the estimated probabilities and avoid optimizing only
3042 for size.
3043
3044 The other case where the profile may be missing is when the routine
3045 is not going to be emitted to the object file, e.g. for "extern template"
3046 class methods. Those will be marked DECL_EXTERNAL. Emit a warning in
3047 all other cases of non-zero calls to 0-count functions. */
3048
3049 void
3050 handle_missing_profiles (void)
3051 {
3052 struct cgraph_node *node;
3053 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
3054 vec<struct cgraph_node *> worklist;
3055 worklist.create (64);
3056
3057 /* See if 0 count function has non-0 count callers. In this case we
3058 lost some profile. Drop its function profile to PROFILE_GUESSED. */
3059 FOR_EACH_DEFINED_FUNCTION (node)
3060 {
3061 struct cgraph_edge *e;
3062 gcov_type call_count = 0;
3063 gcov_type max_tp_first_run = 0;
3064 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
3065
3066 if (node->count)
3067 continue;
3068 for (e = node->callers; e; e = e->next_caller)
3069 {
3070 call_count += e->count;
3071
3072 if (e->caller->tp_first_run > max_tp_first_run)
3073 max_tp_first_run = e->caller->tp_first_run;
3074 }
3075
3076       /* If the time profile is missing, assign the maximum that comes from
3077 	 the caller functions.  */
3078 if (!node->tp_first_run && max_tp_first_run)
3079 node->tp_first_run = max_tp_first_run + 1;
3080
3081 if (call_count
3082 && fn && fn->cfg
3083 && (call_count * unlikely_count_fraction >= profile_info->runs))
3084 {
3085 drop_profile (node, call_count);
3086 worklist.safe_push (node);
3087 }
3088 }
3089
3090 /* Propagate the profile dropping to other 0-count COMDATs that are
3091 potentially called by COMDATs we already dropped the profile on. */
3092 while (worklist.length () > 0)
3093 {
3094 struct cgraph_edge *e;
3095
3096 node = worklist.pop ();
3097       for (e = node->callees; e; e = e->next_callee)
3098 {
3099 struct cgraph_node *callee = e->callee;
3100 struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);
3101
3102 if (callee->count > 0)
3103 continue;
3104 if (DECL_COMDAT (callee->decl) && fn && fn->cfg
3105 && profile_status_for_fn (fn) == PROFILE_READ)
3106 {
3107 drop_profile (node, 0);
3108 worklist.safe_push (callee);
3109 }
3110 }
3111 }
3112 worklist.release ();
3113 }
3114
3115 /* Convert counts measured by profile driven feedback to frequencies.
3116 Return nonzero iff there was any nonzero execution count. */
3117
3118 int
3119 counts_to_freqs (void)
3120 {
3121 gcov_type count_max, true_count_max = 0;
3122 basic_block bb;
3123
3124   /* Don't overwrite the estimated frequencies when the profile for
3125      the function is missing.  We may drop this function to PROFILE_GUESSED
3126      later in drop_profile ().  */
3127 if (!flag_auto_profile && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
3128 return 0;
3129
3130 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3131 true_count_max = MAX (bb->count, true_count_max);
3132
3133 count_max = MAX (true_count_max, 1);
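  /* Scale the counts so that the most frequent block gets frequency
     BB_FREQ_MAX; adding count_max / 2 before the division rounds to the
     nearest integer.  */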
3134 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3135 bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
3136
3137 return true_count_max;
3138 }
3139
3140 /* Return true if the function is likely to be expensive, so there is no point
3141    in optimizing the performance of the prologue or epilogue, or in inlining at
3142    the expense of code size growth.  THRESHOLD is the limit on the number of
3143    instructions the function may execute on average and still not be expensive.  */
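/* Equivalently, the function is considered expensive once the sum of
   bb->frequency over its active insns exceeds the entry block frequency
   multiplied by THRESHOLD, i.e. once it is estimated to execute more than
   THRESHOLD instructions per invocation on average.  */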
3144
3145 bool
3146 expensive_function_p (int threshold)
3147 {
3148 unsigned int sum = 0;
3149 basic_block bb;
3150 unsigned int limit;
3151
3152   /* We cannot compute this accurately for large thresholds due to the
3153      scaled frequencies.  */
3154 gcc_assert (threshold <= BB_FREQ_MAX);
3155
3156   /* Frequencies are out of range.  This either means that the function
3157      contains an internal loop executing more than BB_FREQ_MAX times, or
3158      profile feedback is available and the function has not been executed at all.  */
3159 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency == 0)
3160 return true;
3161
3162 /* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
3163 limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
3164 FOR_EACH_BB_FN (bb, cfun)
3165 {
3166 rtx_insn *insn;
3167
3168 FOR_BB_INSNS (bb, insn)
3169 if (active_insn_p (insn))
3170 {
3171 sum += bb->frequency;
3172 if (sum > limit)
3173 return true;
3174 }
3175 }
3176
3177 return false;
3178 }
3179
3180 /* Estimate and propagate basic block frequencies using the given branch
3181 probabilities. If FORCE is true, the frequencies are used to estimate
3182 the counts even when there are already non-zero profile counts. */
3183
3184 void
3185 estimate_bb_frequencies (bool force)
3186 {
3187 basic_block bb;
3188 sreal freq_max;
3189
3190 if (force || profile_status_for_fn (cfun) != PROFILE_READ || !counts_to_freqs ())
3191 {
3192 static int real_values_initialized = 0;
3193
3194 if (!real_values_initialized)
3195 {
3196 real_values_initialized = 1;
3197 real_br_prob_base = REG_BR_PROB_BASE;
3198 real_bb_freq_max = BB_FREQ_MAX;
3199 real_one_half = sreal (1, -1);
3200 real_inv_br_prob_base = sreal (1) / real_br_prob_base;
3201 real_almost_one = sreal (1) - real_inv_br_prob_base;
3202 }
3203
3204 mark_dfs_back_edges ();
3205
3206 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
3207 REG_BR_PROB_BASE;
3208
3209 /* Set up block info for each basic block. */
3210 alloc_aux_for_blocks (sizeof (block_info));
3211 alloc_aux_for_edges (sizeof (edge_prob_info));
3212 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3213 {
3214 edge e;
3215 edge_iterator ei;
3216
3217 FOR_EACH_EDGE (e, ei, bb->succs)
3218 {
3219 EDGE_INFO (e)->back_edge_prob = e->probability;
3220 EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
3221 }
3222 }
3223
3224 /* First compute frequencies locally for each loop from innermost
3225 to outermost to examine frequencies for back edges. */
3226 estimate_loops ();
3227
3228 freq_max = 0;
3229 FOR_EACH_BB_FN (bb, cfun)
3230 if (freq_max < BLOCK_INFO (bb)->frequency)
3231 freq_max = BLOCK_INFO (bb)->frequency;
3232
3233 freq_max = real_bb_freq_max / freq_max;
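      /* Scale every block so that the most frequent one gets frequency
	 BB_FREQ_MAX; adding real_one_half rounds to the nearest integer.  */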
3234 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3235 {
3236 sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
3237 bb->frequency = tmp.to_int ();
3238 }
3239
3240 free_aux_for_blocks ();
3241 free_aux_for_edges ();
3242 }
3243 compute_function_frequency ();
3244 }
3245
3246 /* Decide whether function is hot, cold or unlikely executed. */
3247 void
3248 compute_function_frequency (void)
3249 {
3250 basic_block bb;
3251 struct cgraph_node *node = cgraph_node::get (current_function_decl);
3252
3253 if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3254 || MAIN_NAME_P (DECL_NAME (current_function_decl)))
3255 node->only_called_at_startup = true;
3256 if (DECL_STATIC_DESTRUCTOR (current_function_decl))
3257 node->only_called_at_exit = true;
3258
3259 if (profile_status_for_fn (cfun) != PROFILE_READ)
3260 {
3261 int flags = flags_from_decl_or_type (current_function_decl);
3262 if (lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
3263 != NULL)
3264 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3265 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
3266 != NULL)
3267 node->frequency = NODE_FREQUENCY_HOT;
3268 else if (flags & ECF_NORETURN)
3269 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3270 else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
3271 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3272 else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
3273 || DECL_STATIC_DESTRUCTOR (current_function_decl))
3274 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
3275 return;
3276 }
3277
3278   /* Only the first time try to drop the function into unlikely executed;
3279      after inlining, the roundoff errors may confuse us.
3280      The ipa-profile pass will drop functions only called from unlikely
3281      functions to unlikely, and that is most of what we care about.  */
3282 if (!cfun->after_inlining)
3283 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
3284 FOR_EACH_BB_FN (bb, cfun)
3285 {
3286 if (maybe_hot_bb_p (cfun, bb))
3287 {
3288 node->frequency = NODE_FREQUENCY_HOT;
3289 return;
3290 }
3291 if (!probably_never_executed_bb_p (cfun, bb))
3292 node->frequency = NODE_FREQUENCY_NORMAL;
3293 }
3294 }
3295
3296 /* Build PREDICT_EXPR. */
3297 tree
3298 build_predict_expr (enum br_predictor predictor, enum prediction taken)
3299 {
3300 tree t = build1 (PREDICT_EXPR, void_type_node,
3301 build_int_cst (integer_type_node, predictor));
3302 SET_PREDICT_EXPR_OUTCOME (t, taken);
3303 return t;
3304 }
3305
3306 const char *
3307 predictor_name (enum br_predictor predictor)
3308 {
3309 return predictor_info[predictor].name;
3310 }
3311
3312 /* Predict branch probabilities and estimate profile of the tree CFG. */
3313
3314 namespace {
3315
3316 const pass_data pass_data_profile =
3317 {
3318 GIMPLE_PASS, /* type */
3319 "profile_estimate", /* name */
3320 OPTGROUP_NONE, /* optinfo_flags */
3321 TV_BRANCH_PROB, /* tv_id */
3322 PROP_cfg, /* properties_required */
3323 0, /* properties_provided */
3324 0, /* properties_destroyed */
3325 0, /* todo_flags_start */
3326 0, /* todo_flags_finish */
3327 };
3328
3329 class pass_profile : public gimple_opt_pass
3330 {
3331 public:
3332 pass_profile (gcc::context *ctxt)
3333 : gimple_opt_pass (pass_data_profile, ctxt)
3334 {}
3335
3336 /* opt_pass methods: */
3337 virtual bool gate (function *) { return flag_guess_branch_prob; }
3338 virtual unsigned int execute (function *);
3339
3340 }; // class pass_profile
3341
3342 unsigned int
3343 pass_profile::execute (function *fun)
3344 {
3345 unsigned nb_loops;
3346
3347 if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
3348 return 0;
3349
3350 loop_optimizer_init (LOOPS_NORMAL);
3351 if (dump_file && (dump_flags & TDF_DETAILS))
3352 flow_loops_dump (dump_file, NULL, 0);
3353
3354 mark_irreducible_loops ();
3355
3356 nb_loops = number_of_loops (fun);
3357 if (nb_loops > 1)
3358 scev_initialize ();
3359
3360 tree_estimate_probability (false);
3361
3362 if (nb_loops > 1)
3363 scev_finalize ();
3364
3365 loop_optimizer_finalize ();
3366 if (dump_file && (dump_flags & TDF_DETAILS))
3367 gimple_dump_cfg (dump_file, dump_flags);
3368 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
3369 profile_status_for_fn (fun) = PROFILE_GUESSED;
3370 return 0;
3371 }
3372
3373 } // anon namespace
3374
3375 gimple_opt_pass *
3376 make_pass_profile (gcc::context *ctxt)
3377 {
3378 return new pass_profile (ctxt);
3379 }
3380
3381 namespace {
3382
3383 const pass_data pass_data_strip_predict_hints =
3384 {
3385 GIMPLE_PASS, /* type */
3386 "*strip_predict_hints", /* name */
3387 OPTGROUP_NONE, /* optinfo_flags */
3388 TV_BRANCH_PROB, /* tv_id */
3389 PROP_cfg, /* properties_required */
3390 0, /* properties_provided */
3391 0, /* properties_destroyed */
3392 0, /* todo_flags_start */
3393 0, /* todo_flags_finish */
3394 };
3395
3396 class pass_strip_predict_hints : public gimple_opt_pass
3397 {
3398 public:
3399 pass_strip_predict_hints (gcc::context *ctxt)
3400 : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
3401 {}
3402
3403 /* opt_pass methods: */
3404 opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
3405 virtual unsigned int execute (function *);
3406
3407 }; // class pass_strip_predict_hints
3408
3409 /* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
3410 we no longer need. */
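/* For example (hypothetical GIMPLE names), the call
   "tmp_5 = __builtin_expect (cond_4, 1);" is rewritten to the plain copy
   "tmp_5 = cond_4;", and expect calls without an lhs are removed outright.  */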
3411 unsigned int
3412 pass_strip_predict_hints::execute (function *fun)
3413 {
3414 basic_block bb;
3415 gimple *ass_stmt;
3416 tree var;
3417 bool changed = false;
3418
3419 FOR_EACH_BB_FN (bb, fun)
3420 {
3421 gimple_stmt_iterator bi;
3422 for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
3423 {
3424 gimple *stmt = gsi_stmt (bi);
3425
3426 if (gimple_code (stmt) == GIMPLE_PREDICT)
3427 {
3428 gsi_remove (&bi, true);
3429 changed = true;
3430 continue;
3431 }
3432 else if (is_gimple_call (stmt))
3433 {
3434 tree fndecl = gimple_call_fndecl (stmt);
3435
3436 if ((fndecl
3437 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3438 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
3439 && gimple_call_num_args (stmt) == 2)
3440 || (gimple_call_internal_p (stmt)
3441 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
3442 {
3443 var = gimple_call_lhs (stmt);
3444 changed = true;
3445 if (var)
3446 {
3447 ass_stmt
3448 = gimple_build_assign (var, gimple_call_arg (stmt, 0));
3449 gsi_replace (&bi, ass_stmt, true);
3450 }
3451 else
3452 {
3453 gsi_remove (&bi, true);
3454 continue;
3455 }
3456 }
3457 }
3458 gsi_next (&bi);
3459 }
3460 }
3461 return changed ? TODO_cleanup_cfg : 0;
3462 }
3463
3464 } // anon namespace
3465
3466 gimple_opt_pass *
3467 make_pass_strip_predict_hints (gcc::context *ctxt)
3468 {
3469 return new pass_strip_predict_hints (ctxt);
3470 }
3471
3472 /* Rebuild function frequencies.  Passes are in general expected to
3473    maintain the profile by hand; however, in some cases this is not possible:
3474    for example, when inlining several functions with loops, frequencies might
3475    run out of scale and thus need to be recomputed.  */
3476
3477 void
3478 rebuild_frequencies (void)
3479 {
3480 timevar_push (TV_REBUILD_FREQUENCIES);
3481
3482 /* When the max bb count in the function is small, there is a higher
3483 chance that there were truncation errors in the integer scaling
3484 of counts by inlining and other optimizations. This could lead
3485 to incorrect classification of code as being cold when it isn't.
3486 In that case, force the estimation of bb counts/frequencies from the
3487 branch probabilities, rather than computing frequencies from counts,
3488 which may also lead to frequencies incorrectly reduced to 0. There
3489 is less precision in the probabilities, so we only do this for small
3490 max counts. */
3491 gcov_type count_max = 0;
3492 basic_block bb;
3493 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3494 count_max = MAX (bb->count, count_max);
3495
3496 if (profile_status_for_fn (cfun) == PROFILE_GUESSED
3497 || (!flag_auto_profile && profile_status_for_fn (cfun) == PROFILE_READ
3498 && count_max < REG_BR_PROB_BASE/10))
3499 {
3500 loop_optimizer_init (0);
3501 add_noreturn_fake_exit_edges ();
3502 mark_irreducible_loops ();
3503 connect_infinite_loops_to_exit ();
3504 estimate_bb_frequencies (true);
3505 remove_fake_exit_edges ();
3506 loop_optimizer_finalize ();
3507 }
3508 else if (profile_status_for_fn (cfun) == PROFILE_READ)
3509 counts_to_freqs ();
3510 else
3511 gcc_unreachable ();
3512 timevar_pop (TV_REBUILD_FREQUENCIES);
3513 }
3514
3515 /* Perform a dry run of the branch prediction pass and report a comparison of
3516    the predicted and real profiles into the dump file.  */
3517
3518 void
3519 report_predictor_hitrates (void)
3520 {
3521 unsigned nb_loops;
3522
3523 loop_optimizer_init (LOOPS_NORMAL);
3524 if (dump_file && (dump_flags & TDF_DETAILS))
3525 flow_loops_dump (dump_file, NULL, 0);
3526
3527 mark_irreducible_loops ();
3528
3529 nb_loops = number_of_loops (cfun);
3530 if (nb_loops > 1)
3531 scev_initialize ();
3532
3533 tree_estimate_probability (true);
3534
3535 if (nb_loops > 1)
3536 scev_finalize ();
3537
3538 loop_optimizer_finalize ();
3539 }
3540
3541 /* Force edge E to be cold.
3542    If IMPOSSIBLE is true, force the edge to have count and probability 0;
3543    otherwise keep a low probability to represent a possible error in a guess.
3544    This is used e.g. in case we predict a loop to likely iterate a given
3545    number of times but we are not 100% sure.
3546 
3547    This function locally updates the profile without attempting to keep global
3548    consistency, which cannot be reached in full generality without a full
3549    profile rebuild from probabilities alone.  Doing so is not necessarily a
3550    good idea because frequencies and counts may be more realistic than probabilities.
3551 
3552    In some cases (such as the elimination of early exits during full loop
3553    unrolling) the caller can ensure that the profile will be made consistent
3554    afterwards.  */
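/* A sketch of the redistribution performed below: the probability removed
   from E is spread over the remaining successor edges of E->src by scaling
   each of them by (REG_BR_PROB_BASE - e->probability) / prob_sum, so the
   outgoing probabilities still sum to REG_BR_PROB_BASE; counts are rescaled
   analogously via gcov_scale.  */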
3555
3556 void
3557 force_edge_cold (edge e, bool impossible)
3558 {
3559 gcov_type count_sum = 0;
3560 int prob_sum = 0;
3561 edge_iterator ei;
3562 edge e2;
3563 gcov_type old_count = e->count;
3564 int old_probability = e->probability;
3565 gcov_type gcov_scale = REG_BR_PROB_BASE;
3566 int prob_scale = REG_BR_PROB_BASE;
3567
3568   /* If the edge is already improbable or cold, just return.  */
3569   if (e->probability <= (impossible ? 0 : PROB_VERY_UNLIKELY)
3570       && (!impossible || !e->count))
3571 return;
3572 FOR_EACH_EDGE (e2, ei, e->src->succs)
3573 if (e2 != e)
3574 {
3575 count_sum += e2->count;
3576 prob_sum += e2->probability;
3577 }
3578
3579   /* If there are other edges out of e->src, redistribute the probability
3580      there.  */
3581 if (prob_sum)
3582 {
3583 e->probability
3584 = MIN (e->probability, impossible ? 0 : PROB_VERY_UNLIKELY);
3585 if (old_probability)
3586 e->count = RDIV (e->count * e->probability, old_probability);
3587 else
3588 e->count = MIN (e->count, impossible ? 0 : 1);
3589
3590 if (count_sum)
3591 gcov_scale = RDIV ((count_sum + old_count - e->count) * REG_BR_PROB_BASE,
3592 count_sum);
3593 prob_scale = RDIV ((REG_BR_PROB_BASE - e->probability) * REG_BR_PROB_BASE,
3594 prob_sum);
3595 if (dump_file && (dump_flags & TDF_DETAILS))
3596 fprintf (dump_file, "Making edge %i->%i %s by redistributing "
3597 "probability to other edges.\n",
3598 e->src->index, e->dest->index,
3599 impossible ? "impossible" : "cold");
3600 FOR_EACH_EDGE (e2, ei, e->src->succs)
3601 if (e2 != e)
3602 {
3603 e2->count = RDIV (e2->count * gcov_scale, REG_BR_PROB_BASE);
3604 e2->probability = RDIV (e2->probability * prob_scale,
3605 REG_BR_PROB_BASE);
3606 }
3607 }
3608 /* If all edges out of e->src are unlikely, the basic block itself
3609 is unlikely. */
3610 else
3611 {
3612 e->probability = REG_BR_PROB_BASE;
3613
3614       /* If we made no adjustment above, the source basic block has no likely
3615 	 edges leaving in the other direction.  In that case force that bb
3616 	 cold, too.  This in general is a difficult task to do, but handle the
3617 	 special case when BB has only one predecessor.  This is the common
3618 	 case when we are updating after loop transforms.  */
3619 if (!prob_sum && !count_sum && single_pred_p (e->src)
3620 && e->src->frequency > (impossible ? 0 : 1))
3621 {
3622 int old_frequency = e->src->frequency;
3623 if (dump_file && (dump_flags & TDF_DETAILS))
3624 fprintf (dump_file, "Making bb %i %s.\n", e->src->index,
3625 impossible ? "impossible" : "cold");
3626 e->src->frequency = MIN (e->src->frequency, impossible ? 0 : 1);
3627 e->src->count = e->count = RDIV (e->src->count * e->src->frequency,
3628 old_frequency);
3629 force_edge_cold (single_pred_edge (e->src), impossible);
3630 }
3631 else if (dump_file && (dump_flags & TDF_DETAILS)
3632 && maybe_hot_bb_p (cfun, e->src))
3633 fprintf (dump_file, "Giving up on making bb %i %s.\n", e->src->index,
3634 impossible ? "impossible" : "cold");
3635 }
3636 }