1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* References:
21
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
28
29
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "tm.h"
34 #include "tree.h"
35 #include "calls.h"
36 #include "rtl.h"
37 #include "tm_p.h"
38 #include "hard-reg-set.h"
39 #include "basic-block.h"
40 #include "insn-config.h"
41 #include "regs.h"
42 #include "flags.h"
43 #include "function.h"
44 #include "except.h"
45 #include "diagnostic-core.h"
46 #include "recog.h"
47 #include "expr.h"
48 #include "predict.h"
49 #include "coverage.h"
50 #include "sreal.h"
51 #include "params.h"
52 #include "target.h"
53 #include "cfgloop.h"
54 #include "gimple.h"
55 #include "gimple-iterator.h"
56 #include "gimple-ssa.h"
57 #include "cgraph.h"
58 #include "tree-cfg.h"
59 #include "tree-phinodes.h"
60 #include "ssa-iterators.h"
61 #include "tree-ssa-loop-niter.h"
62 #include "tree-ssa-loop.h"
63 #include "ggc.h"
64 #include "tree-pass.h"
65 #include "tree-scalar-evolution.h"
66 #include "cfgloop.h"
67 #include "pointer-set.h"
68
69 /* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
70 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */
71 static sreal real_zero, real_one, real_almost_one, real_br_prob_base,
72 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
73
74 /* Random guesstimation given names.
75 PROB_VERY_UNLIKELY should be small enough so that a basic block
76 predicted by it gets below HOT_BB_FREQUENCY_FRACTION. */
77 #define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
78 #define PROB_EVEN (REG_BR_PROB_BASE / 2)
79 #define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY)
80 #define PROB_ALWAYS (REG_BR_PROB_BASE)
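/* For illustration: with REG_BR_PROB_BASE at its usual value of 10000,
   these work out to PROB_VERY_UNLIKELY == 4, PROB_EVEN == 5000,
   PROB_VERY_LIKELY == 9996 and PROB_ALWAYS == 10000.  */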
81
82 static void combine_predictions_for_insn (rtx, basic_block);
83 static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int);
84 static void predict_paths_leading_to (basic_block, enum br_predictor, enum prediction);
85 static void predict_paths_leading_to_edge (edge, enum br_predictor, enum prediction);
86 static bool can_predict_insn_p (const_rtx);
87
88 /* Information we hold about each branch predictor.
89 Filled using information from predict.def. */
90
91 struct predictor_info
92 {
93 const char *const name; /* Name used in the debugging dumps. */
94 const int hitrate; /* Expected hitrate used by
95 predict_insn_def call. */
96 const int flags;
97 };
98
99 /* Use given predictor without Dempster-Shafer theory if it matches
100 using first_match heuristics. */
101 #define PRED_FLAG_FIRST_MATCH 1
102
103 /* Convert a hitrate given in percent to our representation. */
104
105 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
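/* E.g. with REG_BR_PROB_BASE == 10000, HITRATE (50) == 5000 and
   HITRATE (99) == 9900; the +50 term rounds the division to the
   nearest integer.  */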
106
107 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
108 static const struct predictor_info predictor_info[]= {
109 #include "predict.def"
110
111 /* Upper bound on predictors. */
112 {NULL, 0, 0}
113 };
114 #undef DEF_PREDICTOR
115
116 /* Return TRUE if frequency FREQ is considered to be hot. */
117
118 static inline bool
119 maybe_hot_frequency_p (struct function *fun, int freq)
120 {
121 struct cgraph_node *node = cgraph_get_node (fun->decl);
122 if (!profile_info || !flag_branch_probabilities)
123 {
124 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
125 return false;
126 if (node->frequency == NODE_FREQUENCY_HOT)
127 return true;
128 }
129 if (profile_status_for_function (fun) == PROFILE_ABSENT)
130 return true;
131 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
132 && freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency * 2 / 3))
133 return false;
134 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
135 return false;
136 if (freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency
137 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
138 return false;
139 return true;
140 }
141
142 static gcov_type min_count = -1;
143
144 /* Determine the threshold for hot BB counts. */
145
146 gcov_type
147 get_hot_bb_threshold ()
148 {
149 gcov_working_set_t *ws;
150 if (min_count == -1)
151 {
152 ws = find_working_set (PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE));
153 gcc_assert (ws);
154 min_count = ws->min_counter;
155 }
156 return min_count;
157 }
158
159 /* Set the threshold for hot BB counts. */
160
161 void
162 set_hot_bb_threshold (gcov_type min)
163 {
164 min_count = min;
165 }
166
167 /* Return TRUE if COUNT is considered to be hot in function FUN. */
168
169 static inline bool
170 maybe_hot_count_p (struct function *fun, gcov_type count)
171 {
172 if (fun && profile_status_for_function (fun) != PROFILE_READ)
173 return true;
174 /* Code executed at most once is not hot. */
175 if (profile_info->runs >= count)
176 return false;
177 return (count >= get_hot_bb_threshold ());
178 }
179
180 /* Return true in case BB can be CPU intensive and should be optimized
181 for maximal performance. */
182
183 bool
184 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
185 {
186 gcc_checking_assert (fun);
187 if (profile_status_for_function (fun) == PROFILE_READ)
188 return maybe_hot_count_p (fun, bb->count);
189 return maybe_hot_frequency_p (fun, bb->frequency);
190 }
191
192 /* Return true if the call can be hot. */
193
194 bool
195 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
196 {
197 if (profile_info && flag_branch_probabilities
198 && !maybe_hot_count_p (NULL,
199 edge->count))
200 return false;
201 if (edge->caller->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
202 || (edge->callee
203 && edge->callee->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED))
204 return false;
205 if (edge->caller->frequency > NODE_FREQUENCY_UNLIKELY_EXECUTED
206 && (edge->callee
207 && edge->callee->frequency <= NODE_FREQUENCY_EXECUTED_ONCE))
208 return false;
209 if (optimize_size)
210 return false;
211 if (edge->caller->frequency == NODE_FREQUENCY_HOT)
212 return true;
213 if (edge->caller->frequency == NODE_FREQUENCY_EXECUTED_ONCE
214 && edge->frequency < CGRAPH_FREQ_BASE * 3 / 2)
215 return false;
216 if (flag_guess_branch_prob)
217 {
218 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0
219 || edge->frequency <= (CGRAPH_FREQ_BASE
220 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
221 return false;
222 }
223 return true;
224 }
225
226 /* Return true in case edge E can be CPU intensive and should be
227 optimized for maximal performance. */
228
229 bool
230 maybe_hot_edge_p (edge e)
231 {
232 if (profile_status == PROFILE_READ)
233 return maybe_hot_count_p (cfun, e->count);
234 return maybe_hot_frequency_p (cfun, EDGE_FREQUENCY (e));
235 }
236
237
238
239 /* Return true if profile COUNT and FREQUENCY, or the static node
240 frequency of function FUN, reflect that the code is never executed. */
241
242 static bool
243 probably_never_executed (struct function *fun,
244 gcov_type count, int frequency)
245 {
246 gcc_checking_assert (fun);
247 if (profile_status_for_function (fun) == PROFILE_READ)
248 {
249 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
250 if (count * unlikely_count_fraction >= profile_info->runs)
251 return false;
252 if (!frequency)
253 return true;
254 if (!ENTRY_BLOCK_PTR->frequency)
255 return false;
256 if (ENTRY_BLOCK_PTR->count)
257 {
258 gcov_type computed_count;
259 /* Check for possibility of overflow, in which case entry bb count
260 is large enough to do the division first without losing much
261 precision. */
262 if (ENTRY_BLOCK_PTR->count < REG_BR_PROB_BASE * REG_BR_PROB_BASE)
263 {
264 gcov_type scaled_count
265 = frequency * ENTRY_BLOCK_PTR->count * unlikely_count_fraction;
266 computed_count = RDIV (scaled_count, ENTRY_BLOCK_PTR->frequency);
267 }
268 else
269 {
270 computed_count = RDIV (ENTRY_BLOCK_PTR->count,
271 ENTRY_BLOCK_PTR->frequency);
272 computed_count *= frequency * unlikely_count_fraction;
273 }
274 if (computed_count >= profile_info->runs)
275 return false;
276 }
277 return true;
278 }
279 if ((!profile_info || !flag_branch_probabilities)
280 && (cgraph_get_node (fun->decl)->frequency
281 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
282 return true;
283 return false;
284 }
285
286
287 /* Return true in case BB is probably never executed. */
288
289 bool
290 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
291 {
292 return probably_never_executed (fun, bb->count, bb->frequency);
293 }
294
295
296 /* Return true in case edge E is probably never executed. */
297
298 bool
299 probably_never_executed_edge_p (struct function *fun, edge e)
300 {
301 return probably_never_executed (fun, e->count, EDGE_FREQUENCY (e));
302 }
303
304 /* Return true if NODE should be optimized for size. */
305
306 bool
307 cgraph_optimize_for_size_p (struct cgraph_node *node)
308 {
309 if (optimize_size)
310 return true;
311 if (node && (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED))
312 return true;
313 else
314 return false;
315 }
316
317 /* Return true when function FUN should always be optimized for size. */
318
319 bool
320 optimize_function_for_size_p (struct function *fun)
321 {
322 if (optimize_size)
323 return true;
324 if (!fun || !fun->decl)
325 return false;
326 return cgraph_optimize_for_size_p (cgraph_get_node (fun->decl));
327 }
328
329 /* Return true when function FUN should always be optimized for speed. */
330
331 bool
332 optimize_function_for_speed_p (struct function *fun)
333 {
334 return !optimize_function_for_size_p (fun);
335 }
336
337 /* Return TRUE when BB should be optimized for size. */
338
339 bool
340 optimize_bb_for_size_p (const_basic_block bb)
341 {
342 return optimize_function_for_size_p (cfun) || !maybe_hot_bb_p (cfun, bb);
343 }
344
345 /* Return TRUE when BB should be optimized for speed. */
346
347 bool
348 optimize_bb_for_speed_p (const_basic_block bb)
349 {
350 return !optimize_bb_for_size_p (bb);
351 }
352
353 /* Return TRUE when edge E should be optimized for size. */
354
355 bool
356 optimize_edge_for_size_p (edge e)
357 {
358 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
359 }
360
361 /* Return TRUE when edge E should be optimized for speed. */
362
363 bool
364 optimize_edge_for_speed_p (edge e)
365 {
366 return !optimize_edge_for_size_p (e);
367 }
368
369 /* Return TRUE when the current instruction should be optimized for size. */
370
371 bool
372 optimize_insn_for_size_p (void)
373 {
374 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
375 }
376
377 /* Return TRUE when the current instruction should be optimized for speed. */
378
379 bool
380 optimize_insn_for_speed_p (void)
381 {
382 return !optimize_insn_for_size_p ();
383 }
384
385 /* Return TRUE when LOOP should be optimized for size. */
386
387 bool
388 optimize_loop_for_size_p (struct loop *loop)
389 {
390 return optimize_bb_for_size_p (loop->header);
391 }
392
393 /* Return TRUE when LOOP should be optimized for speed. */
394
395 bool
396 optimize_loop_for_speed_p (struct loop *loop)
397 {
398 return optimize_bb_for_speed_p (loop->header);
399 }
400
401 /* Return TRUE when LOOP nest should be optimized for speed. */
402
403 bool
404 optimize_loop_nest_for_speed_p (struct loop *loop)
405 {
406 struct loop *l = loop;
407 if (optimize_loop_for_speed_p (loop))
408 return true;
409 l = loop->inner;
410 while (l && l != loop)
411 {
412 if (optimize_loop_for_speed_p (l))
413 return true;
414 if (l->inner)
415 l = l->inner;
416 else if (l->next)
417 l = l->next;
418 else
419 {
420 while (l != loop && !l->next)
421 l = loop_outer (l);
422 if (l != loop)
423 l = l->next;
424 }
425 }
426 return false;
427 }
428
429 /* Return TRUE when LOOP nest should be optimized for size. */
430
431 bool
432 optimize_loop_nest_for_size_p (struct loop *loop)
433 {
434 return !optimize_loop_nest_for_speed_p (loop);
435 }
436
437 /* Return true when edge E is likely to be well predictable by branch
438 predictor. */
439
440 bool
441 predictable_edge_p (edge e)
442 {
443 if (profile_status == PROFILE_ABSENT)
444 return false;
445 if ((e->probability
446 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
447 || (REG_BR_PROB_BASE - e->probability
448 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
449 return true;
450 return false;
451 }
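/* For example, assuming the default value 2 of
   PARAM_PREDICTABLE_BRANCH_OUTCOME has not been tuned, an edge counts
   as predictable when its probability is at most 2% or at least 98%.  */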
452
453
454 /* Set RTL expansion for BB profile. */
455
456 void
457 rtl_profile_for_bb (basic_block bb)
458 {
459 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
460 }
461
462 /* Set RTL expansion for edge profile. */
463
464 void
465 rtl_profile_for_edge (edge e)
466 {
467 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
468 }
469
470 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
471 void
472 default_rtl_profile (void)
473 {
474 crtl->maybe_hot_insn_p = true;
475 }
476
477 /* Return true if one of the outgoing edges is already predicted by
478 PREDICTOR. */
479
480 bool
481 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
482 {
483 rtx note;
484 if (!INSN_P (BB_END (bb)))
485 return false;
486 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
487 if (REG_NOTE_KIND (note) == REG_BR_PRED
488 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
489 return true;
490 return false;
491 }
492
493 /* This map contains for a basic block the list of predictions for the
494 outgoing edges. */
495
496 static struct pointer_map_t *bb_predictions;
497
498 /* Structure representing predictions at the tree level. */
499
500 struct edge_prediction {
501 struct edge_prediction *ep_next;
502 edge ep_edge;
503 enum br_predictor ep_predictor;
504 int ep_probability;
505 };
506
507 /* Return true if one of the outgoing edges is already predicted by
508 PREDICTOR. */
509
510 bool
511 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
512 {
513 struct edge_prediction *i;
514 void **preds = pointer_map_contains (bb_predictions, bb);
515
516 if (!preds)
517 return false;
518
519 for (i = (struct edge_prediction *) *preds; i; i = i->ep_next)
520 if (i->ep_predictor == predictor)
521 return true;
522 return false;
523 }
524
525 /* Return true when the probability of edge is reliable.
526 
527 The profile guessing code is good at predicting branch outcome (i.e.
528 taken/not taken), which it gets right slightly over 75% of the time.
529 It is however notoriously poor at predicting the probability itself.
530 In general the guessed profile appears a lot flatter (with probabilities
531 closer to 50%) than reality, so it is a bad idea to use it to drive
532 optimizations such as those disabling dynamic branch prediction for
533 well predictable branches.
534 
535 There are two exceptions - edges leading to noreturn calls and edges
536 predicted by the number of iterations heuristics are predicted well.
537 This predicate should be able to distinguish those, but at the moment it
538 simply checks for the noreturn heuristic, the only one giving a
539 probability over 99% or below 1%. In the future we might want to
540 propagate reliability information across the CFG if we find it useful in multiple places. */
541 static bool
542 probability_reliable_p (int prob)
543 {
544 return (profile_status == PROFILE_READ
545 || (profile_status == PROFILE_GUESSED
546 && (prob <= HITRATE (1) || prob >= HITRATE (99))));
547 }
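/* Concretely, with REG_BR_PROB_BASE == 10000 a guessed probability is
   treated as reliable only when it is at most HITRATE (1) == 100 or at
   least HITRATE (99) == 9900.  */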
548
549 /* Same predicate as above, working on edges. */
550 bool
551 edge_probability_reliable_p (const_edge e)
552 {
553 return probability_reliable_p (e->probability);
554 }
555
556 /* Same predicate as edge_probability_reliable_p, working on notes. */
557 bool
558 br_prob_note_reliable_p (const_rtx note)
559 {
560 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
561 return probability_reliable_p (XINT (note, 0));
562 }
563
564 static void
565 predict_insn (rtx insn, enum br_predictor predictor, int probability)
566 {
567 gcc_assert (any_condjump_p (insn));
568 if (!flag_guess_branch_prob)
569 return;
570
571 add_reg_note (insn, REG_BR_PRED,
572 gen_rtx_CONCAT (VOIDmode,
573 GEN_INT ((int) predictor),
574 GEN_INT ((int) probability)));
575 }
576
577 /* Predict insn by given predictor. */
578
579 void
580 predict_insn_def (rtx insn, enum br_predictor predictor,
581 enum prediction taken)
582 {
583 int probability = predictor_info[(int) predictor].hitrate;
584
585 if (taken != TAKEN)
586 probability = REG_BR_PROB_BASE - probability;
587
588 predict_insn (insn, predictor, probability);
589 }
590
591 /* Predict edge E with given probability if possible. */
592
593 void
594 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
595 {
596 rtx last_insn;
597 last_insn = BB_END (e->src);
598
599 /* We can store the branch prediction information only about
600 conditional jumps. */
601 if (!any_condjump_p (last_insn))
602 return;
603
604 /* We always store probability of branching. */
605 if (e->flags & EDGE_FALLTHRU)
606 probability = REG_BR_PROB_BASE - probability;
607
608 predict_insn (last_insn, predictor, probability);
609 }
610
611 /* Predict edge E with the given PROBABILITY. */
612 void
613 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
614 {
615 gcc_assert (profile_status != PROFILE_GUESSED);
616 if ((e->src != ENTRY_BLOCK_PTR && EDGE_COUNT (e->src->succs) > 1)
617 && flag_guess_branch_prob && optimize)
618 {
619 struct edge_prediction *i = XNEW (struct edge_prediction);
620 void **preds = pointer_map_insert (bb_predictions, e->src);
621
622 i->ep_next = (struct edge_prediction *) *preds;
623 *preds = i;
624 i->ep_probability = probability;
625 i->ep_predictor = predictor;
626 i->ep_edge = e;
627 }
628 }
629
630 /* Remove all predictions on given basic block that are attached
631 to edge E. */
632 void
633 remove_predictions_associated_with_edge (edge e)
634 {
635 void **preds;
636
637 if (!bb_predictions)
638 return;
639
640 preds = pointer_map_contains (bb_predictions, e->src);
641
642 if (preds)
643 {
644 struct edge_prediction **prediction = (struct edge_prediction **) preds;
645 struct edge_prediction *next;
646
647 while (*prediction)
648 {
649 if ((*prediction)->ep_edge == e)
650 {
651 next = (*prediction)->ep_next;
652 free (*prediction);
653 *prediction = next;
654 }
655 else
656 prediction = &((*prediction)->ep_next);
657 }
658 }
659 }
660
661 /* Clears the list of predictions stored for BB. */
662
663 static void
664 clear_bb_predictions (basic_block bb)
665 {
666 void **preds = pointer_map_contains (bb_predictions, bb);
667 struct edge_prediction *pred, *next;
668
669 if (!preds)
670 return;
671
672 for (pred = (struct edge_prediction *) *preds; pred; pred = next)
673 {
674 next = pred->ep_next;
675 free (pred);
676 }
677 *preds = NULL;
678 }
679
680 /* Return true when we can store a prediction on insn INSN.
681 At the moment we represent predictions only on conditional
682 jumps, not on computed jumps or other complicated cases. */
683 static bool
684 can_predict_insn_p (const_rtx insn)
685 {
686 return (JUMP_P (insn)
687 && any_condjump_p (insn)
688 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
689 }
690
691 /* Predict edge E by given predictor if possible. */
692
693 void
694 predict_edge_def (edge e, enum br_predictor predictor,
695 enum prediction taken)
696 {
697 int probability = predictor_info[(int) predictor].hitrate;
698
699 if (taken != TAKEN)
700 probability = REG_BR_PROB_BASE - probability;
701
702 predict_edge (e, predictor, probability);
703 }
704
705 /* Invert all branch predictions or probability notes in the INSN. This needs
706 to be done each time we invert the condition used by the jump. */
707
708 void
709 invert_br_probabilities (rtx insn)
710 {
711 rtx note;
712
713 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
714 if (REG_NOTE_KIND (note) == REG_BR_PROB)
715 XINT (note, 0) = REG_BR_PROB_BASE - XINT (note, 0);
716 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
717 XEXP (XEXP (note, 0), 1)
718 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
719 }
720
721 /* Dump information about the branch prediction to the output file. */
722
723 static void
724 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
725 basic_block bb, int used)
726 {
727 edge e;
728 edge_iterator ei;
729
730 if (!file)
731 return;
732
733 FOR_EACH_EDGE (e, ei, bb->succs)
734 if (! (e->flags & EDGE_FALLTHRU))
735 break;
736
737 fprintf (file, " %s heuristics%s: %.1f%%",
738 predictor_info[predictor].name,
739 used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE);
740
741 if (bb->count)
742 {
743 fprintf (file, " exec ");
744 fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
745 if (e)
746 {
747 fprintf (file, " hit ");
748 fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count);
749 fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count);
750 }
751 }
752
753 fprintf (file, "\n");
754 }
755
756 /* We cannot predict the probabilities of the outgoing edges of BB.
757 Set them evenly and hope for the best. */
758 static void
759 set_even_probabilities (basic_block bb)
760 {
761 int nedges = 0;
762 edge e;
763 edge_iterator ei;
764
765 FOR_EACH_EDGE (e, ei, bb->succs)
766 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
767 nedges ++;
768 FOR_EACH_EDGE (e, ei, bb->succs)
769 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
770 e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
771 else
772 e->probability = 0;
773 }
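/* E.g. a basic block with three non-EH, non-fake successor edges ends up
   with (10000 + 1) / 3 == 3333 on each of them, again assuming
   REG_BR_PROB_BASE == 10000.  */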
774
775 /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB
776 note if not already present. Remove now useless REG_BR_PRED notes. */
777
778 static void
779 combine_predictions_for_insn (rtx insn, basic_block bb)
780 {
781 rtx prob_note;
782 rtx *pnote;
783 rtx note;
784 int best_probability = PROB_EVEN;
785 enum br_predictor best_predictor = END_PREDICTORS;
786 int combined_probability = REG_BR_PROB_BASE / 2;
787 int d;
788 bool first_match = false;
789 bool found = false;
790
791 if (!can_predict_insn_p (insn))
792 {
793 set_even_probabilities (bb);
794 return;
795 }
796
797 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
798 pnote = &REG_NOTES (insn);
799 if (dump_file)
800 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
801 bb->index);
802
803 /* We implement "first match" heuristics and use the probability guessed
804 by the predictor with the smallest index. */
805 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
806 if (REG_NOTE_KIND (note) == REG_BR_PRED)
807 {
808 enum br_predictor predictor = ((enum br_predictor)
809 INTVAL (XEXP (XEXP (note, 0), 0)));
810 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
811
812 found = true;
813 if (best_predictor > predictor)
814 best_probability = probability, best_predictor = predictor;
815
816 d = (combined_probability * probability
817 + (REG_BR_PROB_BASE - combined_probability)
818 * (REG_BR_PROB_BASE - probability));
819
820 /* Use FP math to avoid overflows of 32-bit integers. */
821 if (d == 0)
822 /* If one probability is 0% and one 100%, avoid division by zero. */
823 combined_probability = REG_BR_PROB_BASE / 2;
824 else
825 combined_probability = (((double) combined_probability) * probability
826 * REG_BR_PROB_BASE / d + 0.5);
827 }
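/* Expressed as fractions of REG_BR_PROB_BASE, the update above is the
   standard Dempster-Shafer combination p' = p*q / (p*q + (1-p)*(1-q)),
   where p is the running combined probability and q the new evidence.  */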
828
829 /* Decide which heuristic to use. If we didn't match anything,
830 use the no_prediction heuristic; if we did match, use either
831 first match or Dempster-Shafer theory depending on the flags. */
832
833 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
834 first_match = true;
835
836 if (!found)
837 dump_prediction (dump_file, PRED_NO_PREDICTION,
838 combined_probability, bb, true);
839 else
840 {
841 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
842 bb, !first_match);
843 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
844 bb, first_match);
845 }
846
847 if (first_match)
848 combined_probability = best_probability;
849 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
850
851 while (*pnote)
852 {
853 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
854 {
855 enum br_predictor predictor = ((enum br_predictor)
856 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
857 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
858
859 dump_prediction (dump_file, predictor, probability, bb,
860 !first_match || best_predictor == predictor);
861 *pnote = XEXP (*pnote, 1);
862 }
863 else
864 pnote = &XEXP (*pnote, 1);
865 }
866
867 if (!prob_note)
868 {
869 add_int_reg_note (insn, REG_BR_PROB, combined_probability);
870
871 /* Save the prediction into the CFG in case we are looking at a
872 non-degenerate conditional jump. */
873 if (!single_succ_p (bb))
874 {
875 BRANCH_EDGE (bb)->probability = combined_probability;
876 FALLTHRU_EDGE (bb)->probability
877 = REG_BR_PROB_BASE - combined_probability;
878 }
879 }
880 else if (!single_succ_p (bb))
881 {
882 int prob = XINT (prob_note, 0);
883
884 BRANCH_EDGE (bb)->probability = prob;
885 FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
886 }
887 else
888 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
889 }
890
891 /* Combine predictions into a single probability, store it into the CFG
892 and remove the now useless prediction entries. */
893
894 static void
895 combine_predictions_for_bb (basic_block bb)
896 {
897 int best_probability = PROB_EVEN;
898 enum br_predictor best_predictor = END_PREDICTORS;
899 int combined_probability = REG_BR_PROB_BASE / 2;
900 int d;
901 bool first_match = false;
902 bool found = false;
903 struct edge_prediction *pred;
904 int nedges = 0;
905 edge e, first = NULL, second = NULL;
906 edge_iterator ei;
907 void **preds;
908
909 FOR_EACH_EDGE (e, ei, bb->succs)
910 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
911 {
912 nedges ++;
913 if (first && !second)
914 second = e;
915 if (!first)
916 first = e;
917 }
918
919 /* When there is no successor or only one choice, prediction is easy.
920 
921 We are lazy for now and predict only basic blocks with two outgoing
922 edges. It is possible to predict the generic case too, but we have to
923 ignore first match heuristics and do more involved combining. Implement
924 this later. */
925 if (nedges != 2)
926 {
927 if (!bb->count)
928 set_even_probabilities (bb);
929 clear_bb_predictions (bb);
930 if (dump_file)
931 fprintf (dump_file, "%i edges in bb %i predicted to even probabilities\n",
932 nedges, bb->index);
933 return;
934 }
935
936 if (dump_file)
937 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
938
939 preds = pointer_map_contains (bb_predictions, bb);
940 if (preds)
941 {
942 /* We implement "first match" heuristics and use the probability guessed
943 by the predictor with the smallest index. */
944 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
945 {
946 enum br_predictor predictor = pred->ep_predictor;
947 int probability = pred->ep_probability;
948
949 if (pred->ep_edge != first)
950 probability = REG_BR_PROB_BASE - probability;
951
952 found = true;
953 /* First match heuristics would be wildly confused if we predicted
954 both directions. */
955 if (best_predictor > predictor)
956 {
957 struct edge_prediction *pred2;
958 int prob = probability;
959
960 for (pred2 = (struct edge_prediction *) *preds; pred2; pred2 = pred2->ep_next)
961 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
962 {
963 int probability2 = pred2->ep_probability;
964
965 if (pred2->ep_edge != first)
966 probability2 = REG_BR_PROB_BASE - probability2;
967
968 if ((probability < REG_BR_PROB_BASE / 2) !=
969 (probability2 < REG_BR_PROB_BASE / 2))
970 break;
971
972 /* If the same predictor later gave better result, go for it! */
973 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
974 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
975 prob = probability2;
976 }
977 if (!pred2)
978 best_probability = prob, best_predictor = predictor;
979 }
980
981 d = (combined_probability * probability
982 + (REG_BR_PROB_BASE - combined_probability)
983 * (REG_BR_PROB_BASE - probability));
984
985 /* Use FP math to avoid overflows of 32-bit integers. */
986 if (d == 0)
987 /* If one probability is 0% and one 100%, avoid division by zero. */
988 combined_probability = REG_BR_PROB_BASE / 2;
989 else
990 combined_probability = (((double) combined_probability)
991 * probability
992 * REG_BR_PROB_BASE / d + 0.5);
993 }
994 }
995
996 /* Decide which heuristic to use. If we didn't match anything,
997 use the no_prediction heuristic; if we did match, use either
998 first match or Dempster-Shafer theory depending on the flags. */
999
1000 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
1001 first_match = true;
1002
1003 if (!found)
1004 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb, true);
1005 else
1006 {
1007 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
1008 !first_match);
1009 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
1010 first_match);
1011 }
1012
1013 if (first_match)
1014 combined_probability = best_probability;
1015 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
1016
1017 if (preds)
1018 {
1019 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
1020 {
1021 enum br_predictor predictor = pred->ep_predictor;
1022 int probability = pred->ep_probability;
1023
1024 if (pred->ep_edge != EDGE_SUCC (bb, 0))
1025 probability = REG_BR_PROB_BASE - probability;
1026 dump_prediction (dump_file, predictor, probability, bb,
1027 !first_match || best_predictor == predictor);
1028 }
1029 }
1030 clear_bb_predictions (bb);
1031
1032 if (!bb->count)
1033 {
1034 first->probability = combined_probability;
1035 second->probability = REG_BR_PROB_BASE - combined_probability;
1036 }
1037 }
1038
1039 /* Check if T1 and T2 satisfy the IV_COMPARE condition.
1040 Return the SSA_NAME if the condition is satisfied, NULL otherwise.
1041
1042 T1 and T2 should be one of the following cases:
1043 1. T1 is SSA_NAME, T2 is NULL
1044 2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
1045 3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4] */
1046
1047 static tree
1048 strips_small_constant (tree t1, tree t2)
1049 {
1050 tree ret = NULL;
1051 int value = 0;
1052
1053 if (!t1)
1054 return NULL;
1055 else if (TREE_CODE (t1) == SSA_NAME)
1056 ret = t1;
1057 else if (tree_fits_shwi_p (t1))
1058 value = tree_to_shwi (t1);
1059 else
1060 return NULL;
1061
1062 if (!t2)
1063 return ret;
1064 else if (tree_fits_shwi_p (t2))
1065 value = tree_to_shwi (t2);
1066 else if (TREE_CODE (t2) == SSA_NAME)
1067 {
1068 if (ret)
1069 return NULL;
1070 else
1071 ret = t2;
1072 }
1073
1074 if (value <= 4 && value >= -4)
1075 return ret;
1076 else
1077 return NULL;
1078 }
1079
1080 /* Return the SSA_NAME in T or in T's operands.
1081 Return NULL if no SSA_NAME can be found. */
1082
1083 static tree
1084 get_base_value (tree t)
1085 {
1086 if (TREE_CODE (t) == SSA_NAME)
1087 return t;
1088
1089 if (!BINARY_CLASS_P (t))
1090 return NULL;
1091
1092 switch (TREE_OPERAND_LENGTH (t))
1093 {
1094 case 1:
1095 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1096 case 2:
1097 return strips_small_constant (TREE_OPERAND (t, 0),
1098 TREE_OPERAND (t, 1));
1099 default:
1100 return NULL;
1101 }
1102 }
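/* For instance, for t == i_5 + 2 this returns i_5, since the addend is a
   small constant in [-4, 4]; for t == i_5 + 8 it returns NULL (the names
   here are made up for illustration).  */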
1103
1104 /* Check the compare STMT in LOOP. If it compares an induction
1105 variable to a loop invariant, return true, and save
1106 LOOP_INVARIANT, COMPARE_CODE, LOOP_STEP and LOOP_IV_BASE.
1107 Otherwise return false and set LOOP_INVARIANT to NULL. */
1108
1109 static bool
1110 is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop,
1111 tree *loop_invariant,
1112 enum tree_code *compare_code,
1113 tree *loop_step,
1114 tree *loop_iv_base)
1115 {
1116 tree op0, op1, bound, base;
1117 affine_iv iv0, iv1;
1118 enum tree_code code;
1119 tree step;
1120
1121 code = gimple_cond_code (stmt);
1122 *loop_invariant = NULL;
1123
1124 switch (code)
1125 {
1126 case GT_EXPR:
1127 case GE_EXPR:
1128 case NE_EXPR:
1129 case LT_EXPR:
1130 case LE_EXPR:
1131 case EQ_EXPR:
1132 break;
1133
1134 default:
1135 return false;
1136 }
1137
1138 op0 = gimple_cond_lhs (stmt);
1139 op1 = gimple_cond_rhs (stmt);
1140
1141 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1142 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1143 return false;
1144 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1145 return false;
1146 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1147 return false;
1148 if (TREE_CODE (iv0.step) != INTEGER_CST
1149 || TREE_CODE (iv1.step) != INTEGER_CST)
1150 return false;
1151 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1152 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1153 return false;
1154
1155 if (integer_zerop (iv0.step))
1156 {
1157 if (code != NE_EXPR && code != EQ_EXPR)
1158 code = invert_tree_comparison (code, false);
1159 bound = iv0.base;
1160 base = iv1.base;
1161 if (tree_fits_shwi_p (iv1.step))
1162 step = iv1.step;
1163 else
1164 return false;
1165 }
1166 else
1167 {
1168 bound = iv1.base;
1169 base = iv0.base;
1170 if (tree_fits_shwi_p (iv0.step))
1171 step = iv0.step;
1172 else
1173 return false;
1174 }
1175
1176 if (TREE_CODE (bound) != INTEGER_CST)
1177 bound = get_base_value (bound);
1178 if (!bound)
1179 return false;
1180 if (TREE_CODE (base) != INTEGER_CST)
1181 base = get_base_value (base);
1182 if (!base)
1183 return false;
1184
1185 *loop_invariant = bound;
1186 *compare_code = code;
1187 *loop_step = step;
1188 *loop_iv_base = base;
1189 return true;
1190 }
1191
1192 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1193
1194 static bool
1195 expr_coherent_p (tree t1, tree t2)
1196 {
1197 gimple stmt;
1198 tree ssa_name_1 = NULL;
1199 tree ssa_name_2 = NULL;
1200
1201 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1202 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1203
1204 if (t1 == t2)
1205 return true;
1206
1207 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1208 return true;
1209 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1210 return false;
1211
1212 /* Check to see if t1 is expressed/defined with t2. */
1213 stmt = SSA_NAME_DEF_STMT (t1);
1214 gcc_assert (stmt != NULL);
1215 if (is_gimple_assign (stmt))
1216 {
1217 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1218 if (ssa_name_1 && ssa_name_1 == t2)
1219 return true;
1220 }
1221
1222 /* Check to see if t2 is expressed/defined with t1. */
1223 stmt = SSA_NAME_DEF_STMT (t2);
1224 gcc_assert (stmt != NULL);
1225 if (is_gimple_assign (stmt))
1226 {
1227 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1228 if (ssa_name_2 && ssa_name_2 == t1)
1229 return true;
1230 }
1231
1232 /* Compare if t1 and t2's def_stmts are identical. */
1233 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1234 return true;
1235 else
1236 return false;
1237 }
1238
1239 /* Predict branch probability of BB when BB contains a branch that compares
1240 an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
1241 loop exit is compared using LOOP_BOUND_CODE, with step LOOP_BOUND_STEP.
1242
1243 E.g.
1244 for (int i = 0; i < bound; i++) {
1245 if (i < bound - 2)
1246 computation_1();
1247 else
1248 computation_2();
1249 }
1250
1251 In this loop, we will predict the branch inside the loop to be taken. */
1252
1253 static void
1254 predict_iv_comparison (struct loop *loop, basic_block bb,
1255 tree loop_bound_var,
1256 tree loop_iv_base_var,
1257 enum tree_code loop_bound_code,
1258 int loop_bound_step)
1259 {
1260 gimple stmt;
1261 tree compare_var, compare_base;
1262 enum tree_code compare_code;
1263 tree compare_step_var;
1264 edge then_edge;
1265 edge_iterator ei;
1266
1267 if (predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1268 || predicted_by_p (bb, PRED_LOOP_ITERATIONS)
1269 || predicted_by_p (bb, PRED_LOOP_EXIT))
1270 return;
1271
1272 stmt = last_stmt (bb);
1273 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1274 return;
1275 if (!is_comparison_with_loop_invariant_p (stmt, loop, &compare_var,
1276 &compare_code,
1277 &compare_step_var,
1278 &compare_base))
1279 return;
1280
1281 /* Find the taken edge. */
1282 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1283 if (then_edge->flags & EDGE_TRUE_VALUE)
1284 break;
1285
1286 /* When comparing an IV to a loop invariant, NE is more likely to be
1287 taken while EQ is more likely to be not-taken. */
1288 if (compare_code == NE_EXPR)
1289 {
1290 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1291 return;
1292 }
1293 else if (compare_code == EQ_EXPR)
1294 {
1295 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1296 return;
1297 }
1298
1299 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1300 return;
1301
1302 /* If loop bound, base and compare bound are all constants, we can
1303 calculate the probability directly. */
1304 if (tree_fits_shwi_p (loop_bound_var)
1305 && tree_fits_shwi_p (compare_var)
1306 && tree_fits_shwi_p (compare_base))
1307 {
1308 int probability;
1309 bool of, overflow = false;
1310 double_int mod, compare_count, tem, loop_count;
1311
1312 double_int loop_bound = tree_to_double_int (loop_bound_var);
1313 double_int compare_bound = tree_to_double_int (compare_var);
1314 double_int base = tree_to_double_int (compare_base);
1315 double_int compare_step = tree_to_double_int (compare_step_var);
1316
1317 /* (loop_bound - base) / compare_step */
1318 tem = loop_bound.sub_with_overflow (base, &of);
1319 overflow |= of;
1320 loop_count = tem.divmod_with_overflow (compare_step,
1321 0, TRUNC_DIV_EXPR,
1322 &mod, &of);
1323 overflow |= of;
1324
1325 if ((!compare_step.is_negative ())
1326 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1327 {
1328 /* (loop_bound - compare_bound) / compare_step */
1329 tem = loop_bound.sub_with_overflow (compare_bound, &of);
1330 overflow |= of;
1331 compare_count = tem.divmod_with_overflow (compare_step,
1332 0, TRUNC_DIV_EXPR,
1333 &mod, &of);
1334 overflow |= of;
1335 }
1336 else
1337 {
1338 /* (compare_bound - base) / compare_step */
1339 tem = compare_bound.sub_with_overflow (base, &of);
1340 overflow |= of;
1341 compare_count = tem.divmod_with_overflow (compare_step,
1342 0, TRUNC_DIV_EXPR,
1343 &mod, &of);
1344 overflow |= of;
1345 }
1346 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1347 ++compare_count;
1348 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1349 ++loop_count;
1350 if (compare_count.is_negative ())
1351 compare_count = double_int_zero;
1352 if (loop_count.is_negative ())
1353 loop_count = double_int_zero;
1354 if (loop_count.is_zero ())
1355 probability = 0;
1356 else if (compare_count.scmp (loop_count) == 1)
1357 probability = REG_BR_PROB_BASE;
1358 else
1359 {
1360 /* If loop_count is too big, such that REG_BR_PROB_BASE * loop_count
1361 could overflow, shift both loop_count and compare_count right
1362 a bit so that it doesn't overflow. Note both counts are known not
1363 to be negative at this point. */
1364 int clz_bits = clz_hwi (loop_count.high);
1365 gcc_assert (REG_BR_PROB_BASE < 32768);
1366 if (clz_bits < 16)
1367 {
1368 loop_count = loop_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT);
1369 compare_count = compare_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT);
1370 }
1371 tem = compare_count.mul_with_sign (double_int::from_shwi
1372 (REG_BR_PROB_BASE), true, &of);
1373 gcc_assert (!of);
1374 tem = tem.divmod (loop_count, true, TRUNC_DIV_EXPR, &mod);
1375 probability = tem.to_uhwi ();
1376 }
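/* Worked example: for (i = 0; i < 100; i++) if (i < 50) ... gives
   loop_count == 100 and compare_count == 50, so the THEN edge is
   predicted with 50 * REG_BR_PROB_BASE / 100, i.e. 50%.  */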
1377
1378 if (!overflow)
1379 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1380
1381 return;
1382 }
1383
1384 if (expr_coherent_p (loop_bound_var, compare_var))
1385 {
1386 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1387 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1388 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1389 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1390 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1391 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1392 else if (loop_bound_code == NE_EXPR)
1393 {
1394 /* If the loop backedge condition is "(i != bound)", we do
1395 the comparison based on the step of IV:
1396 * step < 0 : backedge condition is like (i > bound)
1397 * step > 0 : backedge condition is like (i < bound) */
1398 gcc_assert (loop_bound_step != 0);
1399 if (loop_bound_step > 0
1400 && (compare_code == LT_EXPR
1401 || compare_code == LE_EXPR))
1402 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1403 else if (loop_bound_step < 0
1404 && (compare_code == GT_EXPR
1405 || compare_code == GE_EXPR))
1406 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1407 else
1408 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1409 }
1410 else
1411 /* The branch is predicted not-taken if loop_bound_code is
1412 opposite with compare_code. */
1413 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1414 }
1415 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1416 {
1417 /* For cases like:
1418 for (i = s; i < h; i++)
1419 if (i > s + 2) ....
1420 The branch should be predicted taken. */
1421 if (loop_bound_step > 0
1422 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1423 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1424 else if (loop_bound_step < 0
1425 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1426 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1427 else
1428 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1429 }
1430 }
1431
1432 /* Predict extra loop exits that lead to EXIT_EDGE. Such extra loop
1433 exits result from short-circuit conditions that generate an
1434 iftmp. E.g.:
1435
1436 if (foo() || global > 10)
1437 break;
1438
1439 This will be translated into:
1440
1441 BB3:
1442 loop header...
1443 BB4:
1444 if foo() goto BB6 else goto BB5
1445 BB5:
1446 if global > 10 goto BB6 else goto BB7
1447 BB6:
1448 goto BB7
1449 BB7:
1450 iftmp = (PHI 0(BB5), 1(BB6))
1451 if iftmp == 1 goto BB8 else goto BB3
1452 BB8:
1453 outside of the loop...
1454
1455 The edge BB7->BB8 is a loop exit because BB8 is outside of the loop.
1456 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also
1457 loop exits. This function takes BB7->BB8 as input and finds the extra
1458 loop exits so as to predict them using PRED_LOOP_EXIT. */
1459
1460 static void
1461 predict_extra_loop_exits (edge exit_edge)
1462 {
1463 unsigned i;
1464 bool check_value_one;
1465 gimple phi_stmt;
1466 tree cmp_rhs, cmp_lhs;
1467 gimple cmp_stmt = last_stmt (exit_edge->src);
1468
1469 if (!cmp_stmt || gimple_code (cmp_stmt) != GIMPLE_COND)
1470 return;
1471 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1472 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1473 if (!TREE_CONSTANT (cmp_rhs)
1474 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1475 return;
1476 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1477 return;
1478
1479 /* If check_value_one is true, only the phi_args with value '1' will lead
1480 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1481 loop exit. */
1482 check_value_one = (((integer_onep (cmp_rhs))
1483 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1484 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
1485
1486 phi_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1487 if (!phi_stmt || gimple_code (phi_stmt) != GIMPLE_PHI)
1488 return;
1489
1490 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1491 {
1492 edge e1;
1493 edge_iterator ei;
1494 tree val = gimple_phi_arg_def (phi_stmt, i);
1495 edge e = gimple_phi_arg_edge (phi_stmt, i);
1496
1497 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1498 continue;
1499 if ((check_value_one ^ integer_onep (val)) == 1)
1500 continue;
1501 if (EDGE_COUNT (e->src->succs) != 1)
1502 {
1503 predict_paths_leading_to_edge (e, PRED_LOOP_EXIT, NOT_TAKEN);
1504 continue;
1505 }
1506
1507 FOR_EACH_EDGE (e1, ei, e->src->preds)
1508 predict_paths_leading_to_edge (e1, PRED_LOOP_EXIT, NOT_TAKEN);
1509 }
1510 }
1511
1512 /* Predict edge probabilities by exploiting loop structure. */
1513
1514 static void
1515 predict_loops (void)
1516 {
1517 loop_iterator li;
1518 struct loop *loop;
1519
1520 /* Try to predict out blocks in a loop that are not part of a
1521 natural loop. */
1522 FOR_EACH_LOOP (li, loop, 0)
1523 {
1524 basic_block bb, *bbs;
1525 unsigned j, n_exits;
1526 vec<edge> exits;
1527 struct tree_niter_desc niter_desc;
1528 edge ex;
1529 struct nb_iter_bound *nb_iter;
1530 enum tree_code loop_bound_code = ERROR_MARK;
1531 tree loop_bound_step = NULL;
1532 tree loop_bound_var = NULL;
1533 tree loop_iv_base = NULL;
1534 gimple stmt = NULL;
1535
1536 exits = get_loop_exit_edges (loop);
1537 n_exits = exits.length ();
1538 if (!n_exits)
1539 {
1540 exits.release ();
1541 continue;
1542 }
1543
1544 FOR_EACH_VEC_ELT (exits, j, ex)
1545 {
1546 tree niter = NULL;
1547 HOST_WIDE_INT nitercst;
1548 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1549 int probability;
1550 enum br_predictor predictor;
1551
1552 predict_extra_loop_exits (ex);
1553
1554 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1555 niter = niter_desc.niter;
1556 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1557 niter = loop_niter_by_eval (loop, ex);
1558
1559 if (TREE_CODE (niter) == INTEGER_CST)
1560 {
1561 if (tree_fits_uhwi_p (niter)
1562 && max
1563 && compare_tree_int (niter, max - 1) == -1)
1564 nitercst = tree_to_uhwi (niter) + 1;
1565 else
1566 nitercst = max;
1567 predictor = PRED_LOOP_ITERATIONS;
1568 }
1569 /* If we have just one exit and we can derive some information about
1570 the number of iterations of the loop from the statements inside
1571 the loop, use it to predict this exit. */
1572 else if (n_exits == 1)
1573 {
1574 nitercst = estimated_stmt_executions_int (loop);
1575 if (nitercst < 0)
1576 continue;
1577 if (nitercst > max)
1578 nitercst = max;
1579
1580 predictor = PRED_LOOP_ITERATIONS_GUESSED;
1581 }
1582 else
1583 continue;
1584
1585 /* If the prediction for number of iterations is zero, do not
1586 predict the exit edges. */
1587 if (nitercst == 0)
1588 continue;
1589
1590 probability = ((REG_BR_PROB_BASE + nitercst / 2) / nitercst);
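/* E.g. a loop predicted to iterate 10 times gets an exit probability
   of (10000 + 5) / 10 == 1000 per iteration, i.e. roughly 10%, assuming
   REG_BR_PROB_BASE == 10000.  */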
1591 predict_edge (ex, predictor, probability);
1592 }
1593 exits.release ();
1594
1595 /* Find information about loop bound variables. */
1596 for (nb_iter = loop->bounds; nb_iter;
1597 nb_iter = nb_iter->next)
1598 if (nb_iter->stmt
1599 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
1600 {
1601 stmt = nb_iter->stmt;
1602 break;
1603 }
1604 if (!stmt && last_stmt (loop->header)
1605 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
1606 stmt = last_stmt (loop->header);
1607 if (stmt)
1608 is_comparison_with_loop_invariant_p (stmt, loop,
1609 &loop_bound_var,
1610 &loop_bound_code,
1611 &loop_bound_step,
1612 &loop_iv_base);
1613
1614 bbs = get_loop_body (loop);
1615
1616 for (j = 0; j < loop->num_nodes; j++)
1617 {
1618 int header_found = 0;
1619 edge e;
1620 edge_iterator ei;
1621
1622 bb = bbs[j];
1623
1624 /* Bypass loop heuristics on continue statements. These
1625 statements construct loops via "non-loop" constructs
1626 in the source language and are better handled
1627 separately. */
1628 if (predicted_by_p (bb, PRED_CONTINUE))
1629 continue;
1630
1631 /* Loop branch heuristics - predict an edge back to a
1632 loop's head as taken. */
1633 if (bb == loop->latch)
1634 {
1635 e = find_edge (loop->latch, loop->header);
1636 if (e)
1637 {
1638 header_found = 1;
1639 predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN);
1640 }
1641 }
1642
1643 /* Loop exit heuristics - predict as not taken an edge exiting the
1644 loop if the conditional has no loop header successor. */
1645 if (!header_found
1646 /* If we already used more reliable loop exit predictors, do not
1647 bother with PRED_LOOP_EXIT. */
1648 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1649 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS))
1650 {
1651 /* For a loop with many exits we don't want to predict all exits
1652 with a pretty large probability, because if all exits are
1653 considered in a row, the loop would be predicted to iterate
1654 almost never. The code dividing the probability by the number
1655 of exits is very rough. It should compute the number of exits
1656 taken in each path through the function (not the overall number
1657 of exits, which might be a lot higher for loops with wide switch
1658 statements in them) and compute the n-th square root.
1659 
1660 We limit the minimal probability to 2% to keep
1661 EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
1662 as this was causing a regression in the perl benchmark, which
1663 contains such a wide loop. */
1664
1665 int probability = ((REG_BR_PROB_BASE
1666 - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
1667 / n_exits);
1668 if (probability < HITRATE (2))
1669 probability = HITRATE (2);
1670 FOR_EACH_EDGE (e, ei, bb->succs)
1671 if (e->dest->index < NUM_FIXED_BLOCKS
1672 || !flow_bb_inside_loop_p (loop, e->dest))
1673 predict_edge (e, PRED_LOOP_EXIT, probability);
1674 }
1675 if (loop_bound_var)
1676 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
1677 loop_bound_code,
1678 tree_to_shwi (loop_bound_step));
1679 }
1680
1681 /* Free basic blocks from get_loop_body. */
1682 free (bbs);
1683 }
1684 }
1685
1686 /* Attempt to predict probabilities of BB outgoing edges using local
1687 properties. */
1688 static void
1689 bb_estimate_probability_locally (basic_block bb)
1690 {
1691 rtx last_insn = BB_END (bb);
1692 rtx cond;
1693
1694 if (! can_predict_insn_p (last_insn))
1695 return;
1696 cond = get_condition (last_insn, NULL, false, false);
1697 if (! cond)
1698 return;
1699
1700 /* Try "pointer heuristic."
1701 A comparison ptr == 0 is predicted as false.
1702 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
1703 if (COMPARISON_P (cond)
1704 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
1705 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
1706 {
1707 if (GET_CODE (cond) == EQ)
1708 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
1709 else if (GET_CODE (cond) == NE)
1710 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
1711 }
1712 else
1713
1714 /* Try "opcode heuristic."
1715 EQ tests are usually false and NE tests are usually true. Also,
1716 most quantities are positive, so we can make the appropriate guesses
1717 about signed comparisons against zero. */
1718 switch (GET_CODE (cond))
1719 {
1720 case CONST_INT:
1721 /* Unconditional branch. */
1722 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
1723 cond == const0_rtx ? NOT_TAKEN : TAKEN);
1724 break;
1725
1726 case EQ:
1727 case UNEQ:
1728 /* Floating point comparisons appear to behave in a very
1729 unpredictable way because of the special role of equality
1730 tests in FP code. */
1731 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1732 ;
1733 /* Comparisons with 0 are often used for booleans and there is
1734 nothing useful to predict about them. */
1735 else if (XEXP (cond, 1) == const0_rtx
1736 || XEXP (cond, 0) == const0_rtx)
1737 ;
1738 else
1739 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
1740 break;
1741
1742 case NE:
1743 case LTGT:
1744 /* Floating point comparisons appear to behave in a very
1745 unpredictable way because of the special role of equality
1746 tests in FP code. */
1747 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1748 ;
1749 /* Comparisons with 0 are often used for booleans and there is
1750 nothing useful to predict about them. */
1751 else if (XEXP (cond, 1) == const0_rtx
1752 || XEXP (cond, 0) == const0_rtx)
1753 ;
1754 else
1755 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
1756 break;
1757
1758 case ORDERED:
1759 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
1760 break;
1761
1762 case UNORDERED:
1763 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
1764 break;
1765
1766 case LE:
1767 case LT:
1768 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1769 || XEXP (cond, 1) == constm1_rtx)
1770 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
1771 break;
1772
1773 case GE:
1774 case GT:
1775 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1776 || XEXP (cond, 1) == constm1_rtx)
1777 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
1778 break;
1779
1780 default:
1781 break;
1782 }
1783 }
1784
1785 /* Set edge->probability for each successor edge of BB. */
1786 void
1787 guess_outgoing_edge_probabilities (basic_block bb)
1788 {
1789 bb_estimate_probability_locally (bb);
1790 combine_predictions_for_insn (BB_END (bb), bb);
1791 }
1792 \f
1793 static tree expr_expected_value (tree, bitmap);
1794
1795 /* Helper function for expr_expected_value. */
1796
1797 static tree
1798 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
1799 tree op1, bitmap visited)
1800 {
1801 gimple def;
1802
1803 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1804 {
1805 if (TREE_CONSTANT (op0))
1806 return op0;
1807
1808 if (code != SSA_NAME)
1809 return NULL_TREE;
1810
1811 def = SSA_NAME_DEF_STMT (op0);
1812
1813 /* If we were already here, break the infinite cycle. */
1814 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
1815 return NULL;
1816
1817 if (gimple_code (def) == GIMPLE_PHI)
1818 {
1819 /* All the arguments of the PHI node must have the same expected
1820 constant value. */
1821 int i, n = gimple_phi_num_args (def);
1822 tree val = NULL, new_val;
1823
1824 for (i = 0; i < n; i++)
1825 {
1826 tree arg = PHI_ARG_DEF (def, i);
1827
1828 /* If this PHI has itself as an argument, we cannot
1829 determine the expected value of this argument. However,
1830 if we can find an expected constant value for the other
1831 PHI args then we can still be sure that this is
1832 likely a constant. So be optimistic and just
1833 continue with the next argument. */
1834 if (arg == PHI_RESULT (def))
1835 continue;
1836
1837 new_val = expr_expected_value (arg, visited);
1838 if (!new_val)
1839 return NULL;
1840 if (!val)
1841 val = new_val;
1842 else if (!operand_equal_p (val, new_val, false))
1843 return NULL;
1844 }
1845 return val;
1846 }
1847 if (is_gimple_assign (def))
1848 {
1849 if (gimple_assign_lhs (def) != op0)
1850 return NULL;
1851
1852 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
1853 gimple_assign_rhs1 (def),
1854 gimple_assign_rhs_code (def),
1855 gimple_assign_rhs2 (def),
1856 visited);
1857 }
1858
1859 if (is_gimple_call (def))
1860 {
1861 tree decl = gimple_call_fndecl (def);
1862 if (!decl)
1863 return NULL;
1864 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
1865 switch (DECL_FUNCTION_CODE (decl))
1866 {
1867 case BUILT_IN_EXPECT:
1868 {
1869 tree val;
1870 if (gimple_call_num_args (def) != 2)
1871 return NULL;
1872 val = gimple_call_arg (def, 0);
1873 if (TREE_CONSTANT (val))
1874 return val;
1875 return gimple_call_arg (def, 1);
1876 }
1877
1878 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
1879 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
1880 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
1881 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
1882 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
1883 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
1884 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
1885 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
1886 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
1887 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
1888 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
1889 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
1890 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
1891 /* Assume that any given atomic operation has low contention,
1892 and thus the compare-and-swap operation succeeds. */
1893 return boolean_true_node;
1894 }
	}

      return NULL;
    }

  if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      tree res;
      op0 = expr_expected_value (op0, visited);
      if (!op0)
	return NULL;
      op1 = expr_expected_value (op1, visited);
      if (!op1)
	return NULL;
      res = fold_build2 (code, type, op0, op1);
      if (TREE_CONSTANT (res))
	return res;
      return NULL;
    }
  if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
    {
      tree res;
      op0 = expr_expected_value (op0, visited);
      if (!op0)
	return NULL;
      res = fold_build1 (code, type, op0);
      if (TREE_CONSTANT (res))
	return res;
      return NULL;
    }
  return NULL;
}
/* Return the constant value EXPR is likely to have at execution time,
   or NULL if it is unknown.  The function is used by the builtin_expect
   branch predictor, so the evidence must come from that construct plus
   any additional possible constant folding.

   We may want to implement a more involved value guess (such as
   prediction based on value range propagation), but such tricks should
   go into a new implementation.  */

static tree
expr_expected_value (tree expr, bitmap visited)
{
  enum tree_code code;
  tree op0, op1;

  if (TREE_CONSTANT (expr))
    return expr;

  extract_ops_from_tree (expr, &code, &op0, &op1);
  return expr_expected_value_1 (TREE_TYPE (expr),
				op0, code, op1, visited);
}
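
/* Illustrative sketch (an example of the input handled, not compiled
   logic): given user code such as

     int t = __builtin_expect (x, 1);
     if (t)
       ...

   expr_expected_value applied to the condition follows the SSA
   definition of T back to the __builtin_expect call, picks up the
   constant 1 from the second argument, and folds the comparison, which
   in turn lets tree_predict_by_opcode predict the branch.  */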

\f
/* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
   we no longer need.  */
static unsigned int
strip_predict_hints (void)
{
  basic_block bb;
  gimple ass_stmt;
  tree var;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator bi;
      for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
	{
	  gimple stmt = gsi_stmt (bi);

	  if (gimple_code (stmt) == GIMPLE_PREDICT)
	    {
	      gsi_remove (&bi, true);
	      continue;
	    }
	  else if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      tree fndecl = gimple_call_fndecl (stmt);

	      if (fndecl
		  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
		  && gimple_call_num_args (stmt) == 2)
		{
		  var = gimple_call_lhs (stmt);
		  if (var)
		    {
		      ass_stmt
			= gimple_build_assign (var, gimple_call_arg (stmt, 0));
		      gsi_replace (&bi, ass_stmt, true);
		    }
		  else
		    {
		      gsi_remove (&bi, true);
		      continue;
		    }
		}
	    }
	  gsi_next (&bi);
	}
    }
  return 0;
}
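
/* For illustration only: after this pass, a statement such as

     t_1 = __builtin_expect (x_2, 1);

   is replaced by the plain copy

     t_1 = x_2;

   since the hint has already been consumed by branch prediction.  */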
\f
/* Predict using the opcode of the last statement in the basic block.  */
static void
tree_predict_by_opcode (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  edge then_edge;
  tree op0, op1;
  tree type;
  tree val;
  enum tree_code cmp;
  bitmap visited;
  edge_iterator ei;

  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
    return;
  FOR_EACH_EDGE (then_edge, ei, bb->succs)
    if (then_edge->flags & EDGE_TRUE_VALUE)
      break;
  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  cmp = gimple_cond_code (stmt);
  type = TREE_TYPE (op0);
  visited = BITMAP_ALLOC (NULL);
  val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, visited);
  BITMAP_FREE (visited);
  if (val)
    {
      int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);

      gcc_assert (percent >= 0 && percent <= 100);
      if (integer_zerop (val))
	percent = 100 - percent;
      predict_edge (then_edge, PRED_BUILTIN_EXPECT, HITRATE (percent));
    }
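
  /* Worked example (illustrative): if the expected value of the
     condition is 0 and the --param builtin-expect-probability value is,
     say, 90, the then edge is predicted with hitrate 100 - 90 = 10%,
     i.e. the branch is considered almost never taken.  */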
  /* Try "pointer heuristic."
     A comparison ptr == 0 is predicted as false.
     Similarly, a comparison ptr1 == ptr2 is predicted as false.  */
  if (POINTER_TYPE_P (type))
    {
      if (cmp == EQ_EXPR)
	predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
      else if (cmp == NE_EXPR)
	predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
    }
  else

  /* Try "opcode heuristic."
     EQ tests are usually false and NE tests are usually true.  Also,
     most quantities are positive, so we can make the appropriate guesses
     about signed comparisons against zero.  */
    switch (cmp)
      {
      case EQ_EXPR:
      case UNEQ_EXPR:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of equality
	   tests in FP code.  */
	if (FLOAT_TYPE_P (type))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (integer_zerop (op0) || integer_zerop (op1))
	  ;
	else
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
	break;

      case NE_EXPR:
      case LTGT_EXPR:
	/* Floating point comparisons appear to behave in a very
	   unpredictable way because of the special role of equality
	   tests in FP code.  */
	if (FLOAT_TYPE_P (type))
	  ;
	/* Comparisons with 0 are often used for booleans and there is
	   nothing useful to predict about them.  */
	else if (integer_zerop (op0)
		 || integer_zerop (op1))
	  ;
	else
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
	break;

      case ORDERED_EXPR:
	predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
	break;

      case UNORDERED_EXPR:
	predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
	break;

      case LE_EXPR:
      case LT_EXPR:
	if (integer_zerop (op1)
	    || integer_onep (op1)
	    || integer_all_onesp (op1)
	    || real_zerop (op1)
	    || real_onep (op1)
	    || real_minus_onep (op1))
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
	break;

      case GE_EXPR:
      case GT_EXPR:
	if (integer_zerop (op1)
	    || integer_onep (op1)
	    || integer_all_onesp (op1)
	    || real_zerop (op1)
	    || real_onep (op1)
	    || real_minus_onep (op1))
	  predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
	break;

      default:
	break;
      }
}
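
/* For example (illustration only): for source like

     if (n > 0)
       process (n);

   the GT_EXPR case above fires, and the then edge is predicted TAKEN by
   PRED_TREE_OPCODE_POSITIVE, reflecting the observation that most
   quantities compared against zero are positive.  */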

/* Try to guess whether the value being returned represents an error code.  */

static enum br_predictor
return_prediction (tree val, enum prediction *prediction)
{
  /* VOID.  */
  if (!val)
    return PRED_NO_PREDICTION;
  /* Different heuristics for pointers and scalars.  */
  if (POINTER_TYPE_P (TREE_TYPE (val)))
    {
      /* NULL is usually not returned.  */
      if (integer_zerop (val))
	{
	  *prediction = NOT_TAKEN;
	  return PRED_NULL_RETURN;
	}
    }
  else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      /* Negative return values are often used to indicate
	 errors.  */
      if (TREE_CODE (val) == INTEGER_CST
	  && tree_int_cst_sgn (val) < 0)
	{
	  *prediction = NOT_TAKEN;
	  return PRED_NEGATIVE_RETURN;
	}
      /* Constant return values seem to be commonly taken.
	 Zero/one often represent booleans, so exclude them from the
	 heuristics.  */
      if (TREE_CONSTANT (val)
	  && (!integer_zerop (val) && !integer_onep (val)))
	{
	  *prediction = TAKEN;
	  return PRED_CONST_RETURN;
	}
    }
  return PRED_NO_PREDICTION;
}
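
/* Illustrative examples of the heuristics above (not compiled):

     return NULL;  -> PRED_NULL_RETURN, NOT_TAKEN
     return -1;    -> PRED_NEGATIVE_RETURN, NOT_TAKEN
     return 42;    -> PRED_CONST_RETURN, TAKEN
     return 0;     -> no prediction (zero/one often mean booleans)  */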

/* Find the basic block with the return expression and look for a possible
   return value, trying to apply the RETURN_PREDICTION heuristics.  */
static void
apply_return_prediction (void)
{
  gimple return_stmt = NULL;
  tree return_val;
  edge e;
  gimple phi;
  int phi_num_args, i;
  enum br_predictor pred;
  enum prediction direction;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    {
      return_stmt = last_stmt (e->src);
      if (return_stmt
	  && gimple_code (return_stmt) == GIMPLE_RETURN)
	break;
    }
  if (!e)
    return;
  return_val = gimple_return_retval (return_stmt);
  if (!return_val)
    return;
  if (TREE_CODE (return_val) != SSA_NAME
      || !SSA_NAME_DEF_STMT (return_val)
      || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
    return;
  phi = SSA_NAME_DEF_STMT (return_val);
  phi_num_args = gimple_phi_num_args (phi);
  pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);

  /* Avoid the degenerate case where all return values from the function
     belong to the same category (i.e. they are all positive constants)
     so we can hardly say anything about them.  */
  for (i = 1; i < phi_num_args; i++)
    if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
      break;
  if (i != phi_num_args)
    for (i = 0; i < phi_num_args; i++)
      {
	pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
	if (pred != PRED_NO_PREDICTION)
	  predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
					 direction);
      }
}

/* Look for basic blocks that contain unlikely-to-happen events
   (such as noreturn calls) and mark all paths leading to execution
   of these basic blocks as unlikely.  */

static void
tree_bb_level_predictions (void)
{
  basic_block bb;
  bool has_return_edges = false;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
    if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
      {
	has_return_edges = true;
	break;
      }

  apply_return_prediction ();

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl;

	  if (is_gimple_call (stmt))
	    {
	      if ((gimple_call_flags (stmt) & ECF_NORETURN)
		  && has_return_edges)
		predict_paths_leading_to (bb, PRED_NORETURN,
					  NOT_TAKEN);
	      decl = gimple_call_fndecl (stmt);
	      if (decl
		  && lookup_attribute ("cold",
				       DECL_ATTRIBUTES (decl)))
		predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
					  NOT_TAKEN);
	    }
	  else if (gimple_code (stmt) == GIMPLE_PREDICT)
	    {
	      predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
					gimple_predict_outcome (stmt));
	      /* Keep GIMPLE_PREDICT around so early inlining will propagate
		 hints to callers.  */
	    }
	}
    }
}
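
/* For instance (illustration only), in

     if (err)
       abort ();

   the call to abort is ECF_NORETURN, so every path leading to its block
   is predicted NOT_TAKEN by PRED_NORETURN, i.e. the error branch is
   assumed to be cold.  */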

#ifdef ENABLE_CHECKING

/* Callback for pointer_map_traverse, asserts that the pointer map is
   empty.  */

static bool
assert_is_empty (const void *key ATTRIBUTE_UNUSED, void **value,
		 void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (!*value);
  return false;
}
#endif

/* Predict branch probabilities and estimate profile for basic block BB.  */

static void
tree_estimate_probability_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple last;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* Predict edges to user labels with attributes.  */
      if (e->dest != EXIT_BLOCK_PTR)
	{
	  gimple_stmt_iterator gi;
	  for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
	    {
	      gimple stmt = gsi_stmt (gi);
	      tree decl;

	      if (gimple_code (stmt) != GIMPLE_LABEL)
		break;
	      decl = gimple_label_label (stmt);
	      if (DECL_ARTIFICIAL (decl))
		continue;

	      /* Finally, we have a user-defined label.  */
	      if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl)))
		predict_edge_def (e, PRED_COLD_LABEL, NOT_TAKEN);
	      else if (lookup_attribute ("hot", DECL_ATTRIBUTES (decl)))
		predict_edge_def (e, PRED_HOT_LABEL, TAKEN);
	    }
	}

      /* Predict early returns to be probable, as we've already taken
	 care of error returns, and the other cases are often used for
	 fast paths through the function.

	 Since we've already removed the return statements, we are
	 looking for a CFG like:

	   if (conditional)
	     {
	       ..
	       goto return_block
	     }
	   some other blocks
	 return_block:
	   return_stmt.  */
      if (e->dest != bb->next_bb
	  && e->dest != EXIT_BLOCK_PTR
	  && single_succ_p (e->dest)
	  && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR
	  && (last = last_stmt (e->dest)) != NULL
	  && gimple_code (last) == GIMPLE_RETURN)
	{
	  edge e1;
	  edge_iterator ei1;

	  if (single_succ_p (bb))
	    {
	      FOR_EACH_EDGE (e1, ei1, bb->preds)
		if (!predicted_by_p (e1->src, PRED_NULL_RETURN)
		    && !predicted_by_p (e1->src, PRED_CONST_RETURN)
		    && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN))
		  predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
	    }
	  else
	    if (!predicted_by_p (e->src, PRED_NULL_RETURN)
		&& !predicted_by_p (e->src, PRED_CONST_RETURN)
		&& !predicted_by_p (e->src, PRED_NEGATIVE_RETURN))
	      predict_edge_def (e, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
	}

      /* Look for the block we are guarding (i.e. we dominate it,
	 but it doesn't postdominate us).  */
      if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
	  && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
	  && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
	{
	  gimple_stmt_iterator bi;

	  /* The call heuristic claims that a guarded function call
	     is improbable.  This is because such calls are often used
	     to signal exceptional situations such as printing error
	     messages.  */
	  for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
	       gsi_next (&bi))
	    {
	      gimple stmt = gsi_stmt (bi);
	      if (is_gimple_call (stmt)
		  /* Constant and pure calls are rarely used to signal
		     something exceptional.  */
		  && gimple_has_side_effects (stmt))
		{
		  predict_edge_def (e, PRED_CALL, NOT_TAKEN);
		  break;
		}
	    }
	}
    }
  tree_predict_by_opcode (bb);
}

/* Predict branch probabilities and estimate profile of the tree CFG.
   This function can be called from the loop optimizers to recompute
   the profile information.  */

void
tree_estimate_probability (void)
{
  basic_block bb;

  add_noreturn_fake_exit_edges ();
  connect_infinite_loops_to_exit ();
  /* We use loop_niter_by_eval, which requires that the loops have
     preheaders.  */
  create_preheaders (CP_SIMPLE_PREHEADERS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  bb_predictions = pointer_map_create ();
  tree_bb_level_predictions ();
  record_loop_exits ();

  if (number_of_loops (cfun) > 1)
    predict_loops ();

  FOR_EACH_BB (bb)
    tree_estimate_probability_bb (bb);

  FOR_EACH_BB (bb)
    combine_predictions_for_bb (bb);

#ifdef ENABLE_CHECKING
  pointer_map_traverse (bb_predictions, assert_is_empty, NULL);
#endif
  pointer_map_destroy (bb_predictions);
  bb_predictions = NULL;

  estimate_bb_frequencies (false);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
}

/* Predict branch probabilities and estimate profile of the tree CFG.
   This is the driver function for PASS_PROFILE.  */

static unsigned int
tree_estimate_probability_driver (void)
{
  unsigned nb_loops;

  loop_optimizer_init (LOOPS_NORMAL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    flow_loops_dump (dump_file, NULL, 0);

  mark_irreducible_loops ();

  nb_loops = number_of_loops (cfun);
  if (nb_loops > 1)
    scev_initialize ();

  tree_estimate_probability ();

  if (nb_loops > 1)
    scev_finalize ();

  loop_optimizer_finalize ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    gimple_dump_cfg (dump_file, dump_flags);
  if (profile_status == PROFILE_ABSENT)
    profile_status = PROFILE_GUESSED;
  return 0;
}
\f
/* Predict, using PRED, the edges to successors of CUR whose sources are
   not postdominated by BB, and recurse to all postdominators.  */

static void
predict_paths_for_bb (basic_block cur, basic_block bb,
		      enum br_predictor pred,
		      enum prediction taken,
		      bitmap visited)
{
  edge e;
  edge_iterator ei;
  basic_block son;

  /* We are looking for all edges forming the edge cut induced by the
     set of all blocks postdominated by BB.  */
  FOR_EACH_EDGE (e, ei, cur->preds)
    if (e->src->index >= NUM_FIXED_BLOCKS
	&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
      {
	edge e2;
	edge_iterator ei2;
	bool found = false;

	/* Ignore fake and EH edges; we predict them as not taken anyway.  */
	if (e->flags & (EDGE_EH | EDGE_FAKE))
	  continue;
	gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));

	/* See if there is an edge from e->src that is not abnormal
	   and does not lead to BB.  */
	FOR_EACH_EDGE (e2, ei2, e->src->succs)
	  if (e2 != e
	      && !(e2->flags & (EDGE_EH | EDGE_FAKE))
	      && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb))
	    {
	      found = true;
	      break;
	    }

	/* If there is a non-abnormal path leaving e->src, predict the edge
	   using the predictor.  Otherwise we need to look for paths
	   leading to e->src.

	   The second case may lead to an infinite loop when we are
	   predicting regions that are only reachable by abnormal edges.
	   We simply prevent visiting a given BB twice.  */
	if (found)
	  predict_edge_def (e, pred, taken);
	else if (bitmap_set_bit (visited, e->src->index))
	  predict_paths_for_bb (e->src, e->src, pred, taken, visited);
      }
  for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    predict_paths_for_bb (son, bb, pred, taken, visited);
}

/* Set branch probabilities on all paths leading to BB according to
   PREDictor and whether the outcome is TAKEN.  */

static void
predict_paths_leading_to (basic_block bb, enum br_predictor pred,
			  enum prediction taken)
{
  bitmap visited = BITMAP_ALLOC (NULL);
  predict_paths_for_bb (bb, bb, pred, taken, visited);
  BITMAP_FREE (visited);
}

/* Like predict_paths_leading_to but take an edge instead of a basic block.  */

static void
predict_paths_leading_to_edge (edge e, enum br_predictor pred,
			       enum prediction taken)
{
  bool has_nonloop_edge = false;
  edge_iterator ei;
  edge e2;

  basic_block bb = e->src;
  FOR_EACH_EDGE (e2, ei, bb->succs)
    if (e2->dest != e->src && e2->dest != e->dest
	&& !(e2->flags & (EDGE_EH | EDGE_FAKE))
	&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
      {
	has_nonloop_edge = true;
	break;
      }
  if (!has_nonloop_edge)
    {
      bitmap visited = BITMAP_ALLOC (NULL);
      predict_paths_for_bb (bb, bb, pred, taken, visited);
      BITMAP_FREE (visited);
    }
  else
    predict_edge_def (e, pred, taken);
}
\f
/* This is used to carry information about basic blocks.  It is
   attached to the AUX field of the standard CFG block.  */

typedef struct block_info_def
{
  /* Estimated frequency of execution of basic_block.  */
  sreal frequency;

  /* To keep queue of basic blocks to process.  */
  basic_block next;

  /* Number of predecessors we need to visit first.  */
  int npredecessors;
} *block_info;

/* Similar information for edges.  */
typedef struct edge_info_def
{
  /* In case the edge is a loopback edge, the probability that the edge
     will be reached given that the header is.  The estimated number of
     iterations of the loop can then be computed as
     1 / (1 - back_edge_prob).  */
  sreal back_edge_prob;
  /* True if the edge is a loopback edge in the natural loop.  */
  unsigned int back_edge:1;
} *edge_info;
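
/* Worked example (illustrative): if the latch edge of a loop is reached
   with probability 0.9 whenever the header is, the expected number of
   iterations is 1 / (1 - 0.9) = 10.  */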

#define BLOCK_INFO(B)	((block_info) (B)->aux)
#define EDGE_INFO(E)	((edge_info) (E)->aux)

/* Helper function for estimate_bb_frequencies.
   Propagate the frequencies in blocks marked in
   TOVISIT, starting in HEAD.  */

static void
propagate_freq (basic_block head, bitmap tovisit)
{
  basic_block bb;
  basic_block last;
  unsigned i;
  edge e;
  basic_block nextbb;
  bitmap_iterator bi;

  /* For each basic block we need to visit, count the number of its
     predecessors that we need to visit first.  */
  EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
    {
      edge_iterator ei;
      int count = 0;

      bb = BASIC_BLOCK (i);

      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  bool visit = bitmap_bit_p (tovisit, e->src->index);

	  if (visit && !(e->flags & EDGE_DFS_BACK))
	    count++;
	  else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
	    fprintf (dump_file,
		     "Irreducible region hit, ignoring edge %i->%i\n",
		     e->src->index, bb->index);
	}
      BLOCK_INFO (bb)->npredecessors = count;
      /* When a function never returns, we will never process the exit
	 block.  */
      if (!count && bb == EXIT_BLOCK_PTR)
	bb->count = bb->frequency = 0;
    }

  memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one));
  last = head;
  for (bb = head; bb; bb = nextbb)
    {
      edge_iterator ei;
      sreal cyclic_probability, frequency;

      memcpy (&cyclic_probability, &real_zero, sizeof (real_zero));
      memcpy (&frequency, &real_zero, sizeof (real_zero));

      nextbb = BLOCK_INFO (bb)->next;
      BLOCK_INFO (bb)->next = NULL;

      /* Compute frequency of basic block.  */
      if (bb != head)
	{
#ifdef ENABLE_CHECKING
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
			|| (e->flags & EDGE_DFS_BACK));
#endif

	  FOR_EACH_EDGE (e, ei, bb->preds)
	    if (EDGE_INFO (e)->back_edge)
	      {
		sreal_add (&cyclic_probability, &cyclic_probability,
			   &EDGE_INFO (e)->back_edge_prob);
	      }
	    else if (!(e->flags & EDGE_DFS_BACK))
	      {
		sreal tmp;

		/* frequency += (e->probability
				 * BLOCK_INFO (e->src)->frequency /
				 REG_BR_PROB_BASE);  */

		sreal_init (&tmp, e->probability, 0);
		sreal_mul (&tmp, &tmp, &BLOCK_INFO (e->src)->frequency);
		sreal_mul (&tmp, &tmp, &real_inv_br_prob_base);
		sreal_add (&frequency, &frequency, &tmp);
	      }

	  if (sreal_compare (&cyclic_probability, &real_zero) == 0)
	    {
	      memcpy (&BLOCK_INFO (bb)->frequency, &frequency,
		      sizeof (frequency));
	    }
	  else
	    {
	      if (sreal_compare (&cyclic_probability, &real_almost_one) > 0)
		{
		  memcpy (&cyclic_probability, &real_almost_one,
			  sizeof (real_almost_one));
		}

	      /* BLOCK_INFO (bb)->frequency = frequency
					      / (1 - cyclic_probability) */

	      sreal_sub (&cyclic_probability, &real_one, &cyclic_probability);
	      sreal_div (&BLOCK_INFO (bb)->frequency,
			 &frequency, &cyclic_probability);
	    }
	}

      bitmap_clear_bit (tovisit, bb->index);

      e = find_edge (bb, head);
      if (e)
	{
	  sreal tmp;

	  /* EDGE_INFO (e)->back_edge_prob
	     = ((e->probability * BLOCK_INFO (bb)->frequency)
		/ REG_BR_PROB_BASE); */

	  sreal_init (&tmp, e->probability, 0);
	  sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency);
	  sreal_mul (&EDGE_INFO (e)->back_edge_prob,
		     &tmp, &real_inv_br_prob_base);
	}

      /* Propagate to successor blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!(e->flags & EDGE_DFS_BACK)
	    && BLOCK_INFO (e->dest)->npredecessors)
	  {
	    BLOCK_INFO (e->dest)->npredecessors--;
	    if (!BLOCK_INFO (e->dest)->npredecessors)
	      {
		if (!nextbb)
		  nextbb = e->dest;
		else
		  BLOCK_INFO (last)->next = e->dest;

		last = e->dest;
	      }
	  }
    }
}
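
/* Worked example (illustrative): for a single loop whose back edge
   contributes cyclic_probability = 0.75 at the header, the header's
   relative frequency becomes 1 / (1 - 0.75) = 4, matching an expected
   four iterations per entry into the loop.  */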

/* Estimate frequencies in loops at the same nest level.  */

static void
estimate_loops_at_level (struct loop *first_loop)
{
  struct loop *loop;

  for (loop = first_loop; loop; loop = loop->next)
    {
      edge e;
      basic_block *bbs;
      unsigned i;
      bitmap tovisit = BITMAP_ALLOC (NULL);

      estimate_loops_at_level (loop->inner);

      /* Find the current loop's back edge and mark it.  */
      e = loop_latch_edge (loop);
      EDGE_INFO (e)->back_edge = 1;

      bbs = get_loop_body (loop);
      for (i = 0; i < loop->num_nodes; i++)
	bitmap_set_bit (tovisit, bbs[i]->index);
      free (bbs);
      propagate_freq (loop->header, tovisit);
      BITMAP_FREE (tovisit);
    }
}

/* Propagate frequencies through the structure of loops.  */

static void
estimate_loops (void)
{
  bitmap tovisit = BITMAP_ALLOC (NULL);
  basic_block bb;

  /* Start by estimating the frequencies in the loops.  */
  if (number_of_loops (cfun) > 1)
    estimate_loops_at_level (current_loops->tree_root->inner);

  /* Now propagate the frequencies through all the blocks.  */
  FOR_ALL_BB (bb)
    {
      bitmap_set_bit (tovisit, bb->index);
    }
  propagate_freq (ENTRY_BLOCK_PTR, tovisit);
  BITMAP_FREE (tovisit);
}

/* Drop the profile for NODE to guessed, and update its frequency based on
   whether it is expected to be hot given the CALL_COUNT.  */

static void
drop_profile (struct cgraph_node *node, gcov_type call_count)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  /* In the case where this was called by another function with a
     dropped profile, call_count will be 0.  Since there are no
     non-zero call counts to this function, we don't know for sure
     whether it is hot, and therefore it will be marked normal below.  */
  bool hot = maybe_hot_count_p (NULL, call_count);

  if (dump_file)
    fprintf (dump_file,
	     "Dropping 0 profile for %s/%i. %s based on calls.\n",
	     node->name (), node->order,
	     hot ? "Function is hot" : "Function is normal");
  /* We only expect to miss profiles for functions that are reached
     via non-zero call edges in cases where the function may have
     been linked from another module or library (COMDATs and extern
     templates).  See the comments below for handle_missing_profiles.
     Also, only warn in cases where the missing counts exceed the
     number of training runs.  In certain cases with an execv followed
     by a no-return call the profile for the no-return call is not
     dumped and there can be a mismatch.  */
  if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
      && call_count > profile_info->runs)
    {
      if (flag_profile_correction)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Missing counts for called function %s/%i\n",
		     node->name (), node->order);
	}
      else
	warning (0, "Missing counts for called function %s/%i",
		 node->name (), node->order);
    }

  profile_status_for_function (fn)
    = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
  node->frequency
    = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
}

/* In the case of COMDAT routines, multiple object files will contain the same
   function and the linker will select one for the binary.  In that case
   all the other copies compiled into the profile-instrumented binary will be
   missing profile counts.  Look for cases where this happened, due to
   non-zero call counts going to 0-count functions, and drop the profile to
   guessed so that we can use the estimated probabilities and avoid
   optimizing only for size.

   The other case where the profile may be missing is when the routine
   is not going to be emitted to the object file, e.g. for "extern template"
   class methods.  Those will be marked DECL_EXTERNAL.  Emit a warning in
   all other cases of non-zero calls to 0-count functions.  */

void
handle_missing_profiles (void)
{
  struct cgraph_node *node;
  int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
  vec<struct cgraph_node *> worklist;
  worklist.create (64);

  /* See if a 0-count function has non-0-count callers.  In this case we
     lost some profile.  Drop its function profile to PROFILE_GUESSED.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      gcov_type call_count = 0;
      struct function *fn = DECL_STRUCT_FUNCTION (node->decl);

      if (node->count)
	continue;
      for (e = node->callers; e; e = e->next_caller)
	call_count += e->count;
      if (call_count
	  && fn && fn->cfg
	  && (call_count * unlikely_count_fraction >= profile_info->runs))
	{
	  drop_profile (node, call_count);
	  worklist.safe_push (node);
	}
    }

  /* Propagate the profile dropping to other 0-count COMDATs that are
     potentially called by COMDATs we already dropped the profile on.  */
  while (worklist.length () > 0)
    {
      struct cgraph_edge *e;

      node = worklist.pop ();
      for (e = node->callees; e; e = e->next_callee)
	{
	  struct cgraph_node *callee = e->callee;
	  struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);

	  if (callee->count > 0)
	    continue;
	  if (DECL_COMDAT (callee->decl) && fn && fn->cfg
	      && profile_status_for_function (fn) == PROFILE_READ)
	    {
	      drop_profile (callee, 0);
	      worklist.safe_push (callee);
	    }
	}
    }
  worklist.release ();
}

/* Convert counts measured by profile driven feedback to frequencies.
   Return nonzero iff there was any nonzero execution count.  */

int
counts_to_freqs (void)
{
  gcov_type count_max, true_count_max = 0;
  basic_block bb;

  /* Don't overwrite the estimated frequencies when the profile for
     the function is missing.  We may drop this function to
     PROFILE_GUESSED later in drop_profile ().  */
  if (!ENTRY_BLOCK_PTR->count)
    return 0;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    true_count_max = MAX (bb->count, true_count_max);

  count_max = MAX (true_count_max, 1);
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;

  return true_count_max;
}
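
/* Worked example (illustrative, assuming BB_FREQ_MAX is 10000): with a
   maximum block count of 4000, a block with count 1000 gets frequency
   (1000 * 10000 + 2000) / 4000 = 2500, i.e. a quarter of the hottest
   block, rounded to nearest.  */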

/* Return true if the function is likely to be expensive, so there is
   no point in optimizing performance of the prologue and epilogue or
   doing inlining at the expense of code size growth.  THRESHOLD is the
   limit on the number of instructions the function can execute on
   average and still be considered inexpensive.  */

bool
expensive_function_p (int threshold)
{
  unsigned int sum = 0;
  basic_block bb;
  unsigned int limit;

  /* We cannot compute this accurately for large thresholds due to the
     scaled frequencies.  */
  gcc_assert (threshold <= BB_FREQ_MAX);

  /* If the entry frequency is 0, the frequencies are out of range.
     This either means that the function contains an internal loop
     executing more than BB_FREQ_MAX times, or that profile feedback is
     available and the function has not been executed at all.  */
  if (ENTRY_BLOCK_PTR->frequency == 0)
    return true;

  /* Maximally BB_FREQ_MAX^2 so overflow won't happen.  */
  limit = ENTRY_BLOCK_PTR->frequency * threshold;
  FOR_EACH_BB (bb)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
	if (active_insn_p (insn))
	  {
	    sum += bb->frequency;
	    if (sum > limit)
	      return true;
	  }
    }

  return false;
}
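
/* Illustration of the arithmetic above (not compiled): the function is
   deemed expensive once

     sum over active insns of frequency (insn's block)
       > entry_frequency * THRESHOLD,

   i.e. once it is estimated to execute more than THRESHOLD instructions
   per invocation on average.  */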

/* Estimate and propagate basic block frequencies using the given branch
   probabilities.  If FORCE is true, the frequencies are used to estimate
   the counts even when there are already non-zero profile counts.  */

void
estimate_bb_frequencies (bool force)
{
  basic_block bb;
  sreal freq_max;

  if (force || profile_status != PROFILE_READ || !counts_to_freqs ())
    {
      static int real_values_initialized = 0;

      if (!real_values_initialized)
	{
	  real_values_initialized = 1;
	  sreal_init (&real_zero, 0, 0);
	  sreal_init (&real_one, 1, 0);
	  sreal_init (&real_br_prob_base, REG_BR_PROB_BASE, 0);
	  sreal_init (&real_bb_freq_max, BB_FREQ_MAX, 0);
	  sreal_init (&real_one_half, 1, -1);
	  sreal_div (&real_inv_br_prob_base, &real_one, &real_br_prob_base);
	  sreal_sub (&real_almost_one, &real_one, &real_inv_br_prob_base);
	}

      mark_dfs_back_edges ();

      single_succ_edge (ENTRY_BLOCK_PTR)->probability = REG_BR_PROB_BASE;

      /* Set up block info for each basic block.  */
      alloc_aux_for_blocks (sizeof (struct block_info_def));
      alloc_aux_for_edges (sizeof (struct edge_info_def));
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0);
	      sreal_mul (&EDGE_INFO (e)->back_edge_prob,
			 &EDGE_INFO (e)->back_edge_prob,
			 &real_inv_br_prob_base);
	    }
	}

      /* First compute frequencies locally for each loop from innermost
	 to outermost to examine frequencies for back edges.  */
      estimate_loops ();

      memcpy (&freq_max, &real_zero, sizeof (real_zero));
      FOR_EACH_BB (bb)
	if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0)
	  memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));

      sreal_div (&freq_max, &real_bb_freq_max, &freq_max);
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
	{
	  sreal tmp;

	  sreal_mul (&tmp, &BLOCK_INFO (bb)->frequency, &freq_max);
	  sreal_add (&tmp, &tmp, &real_one_half);
	  bb->frequency = sreal_to_int (&tmp);
	}

      free_aux_for_blocks ();
      free_aux_for_edges ();
    }
  compute_function_frequency ();
}

/* Decide whether the function is hot, cold or unlikely executed.  */
void
compute_function_frequency (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);

  if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
      || MAIN_NAME_P (DECL_NAME (current_function_decl)))
    node->only_called_at_startup = true;
  if (DECL_STATIC_DESTRUCTOR (current_function_decl))
    node->only_called_at_exit = true;

  if (profile_status != PROFILE_READ)
    {
      int flags = flags_from_decl_or_type (current_function_decl);
      if (lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
	  != NULL)
	node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
      else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
	       != NULL)
	node->frequency = NODE_FREQUENCY_HOT;
      else if (flags & ECF_NORETURN)
	node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
	node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
	       || DECL_STATIC_DESTRUCTOR (current_function_decl))
	node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      return;
    }

  /* Only the first time through do we try to drop the function into the
     unlikely-executed category; after inlining, round-off errors may
     confuse us.  The ipa-profile pass will drop functions called only
     from unlikely functions to unlikely, and that is most of what we
     care about.  */
  if (!cfun->after_inlining)
    node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
  FOR_EACH_BB (bb)
    {
      if (maybe_hot_bb_p (cfun, bb))
	{
	  node->frequency = NODE_FREQUENCY_HOT;
	  return;
	}
      if (!probably_never_executed_bb_p (cfun, bb))
	node->frequency = NODE_FREQUENCY_NORMAL;
    }
}

static bool
gate_estimate_probability (void)
{
  return flag_guess_branch_prob;
}

/* Build PREDICT_EXPR.  */
tree
build_predict_expr (enum br_predictor predictor, enum prediction taken)
{
  tree t = build1 (PREDICT_EXPR, void_type_node,
		   build_int_cst (integer_type_node, predictor));
  SET_PREDICT_EXPR_OUTCOME (t, taken);
  return t;
}

const char *
predictor_name (enum br_predictor predictor)
{
  return predictor_info[predictor].name;
}

namespace {

const pass_data pass_data_profile =
{
  GIMPLE_PASS, /* type */
  "profile_estimate", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_BRANCH_PROB, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_ssa, /* todo_flags_finish */
};

class pass_profile : public gimple_opt_pass
{
public:
  pass_profile (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_profile, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_estimate_probability (); }
  unsigned int execute () { return tree_estimate_probability_driver (); }

}; // class pass_profile

} // anon namespace

gimple_opt_pass *
make_pass_profile (gcc::context *ctxt)
{
  return new pass_profile (ctxt);
}

namespace {

const pass_data pass_data_strip_predict_hints =
{
  GIMPLE_PASS, /* type */
  "*strip_predict_hints", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_BRANCH_PROB, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_ssa, /* todo_flags_finish */
};

class pass_strip_predict_hints : public gimple_opt_pass
{
public:
  pass_strip_predict_hints (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
  unsigned int execute () { return strip_predict_hints (); }

}; // class pass_strip_predict_hints

} // anon namespace

gimple_opt_pass *
make_pass_strip_predict_hints (gcc::context *ctxt)
{
  return new pass_strip_predict_hints (ctxt);
}

/* Rebuild function frequencies.  Passes are in general expected to
   maintain the profile by hand, but in some cases this is not possible:
   for example, when inlining several functions with loops, frequencies
   might run out of scale and thus need to be recomputed.  */

void
rebuild_frequencies (void)
{
  timevar_push (TV_REBUILD_FREQUENCIES);

  /* When the max bb count in the function is small, there is a higher
     chance that there were truncation errors in the integer scaling
     of counts by inlining and other optimizations.  This could lead
     to incorrect classification of code as being cold when it isn't.
     In that case, force the estimation of bb counts/frequencies from the
     branch probabilities, rather than computing frequencies from counts,
     which may also lead to frequencies incorrectly reduced to 0.  There
     is less precision in the probabilities, so we only do this for small
     max counts.  */
  gcov_type count_max = 0;
  basic_block bb;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    count_max = MAX (bb->count, count_max);

  if (profile_status == PROFILE_GUESSED
      || (profile_status == PROFILE_READ && count_max < REG_BR_PROB_BASE/10))
    {
      loop_optimizer_init (0);
      add_noreturn_fake_exit_edges ();
      mark_irreducible_loops ();
      connect_infinite_loops_to_exit ();
      estimate_bb_frequencies (true);
      remove_fake_exit_edges ();
      loop_optimizer_finalize ();
    }
  else if (profile_status == PROFILE_READ)
    counts_to_freqs ();
  else
    gcc_unreachable ();
  timevar_pop (TV_REBUILD_FREQUENCIES);
}