re PR tree-optimization/91351 (-fstrict-enums generates incorrect code)
[gcc.git] / gcc / profile-count.h
1 /* Profile counter container type.
2 Copyright (C) 2017-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #ifndef GCC_PROFILE_COUNT_H
22 #define GCC_PROFILE_COUNT_H
23
24 struct function;
25 struct profile_count;
26
27 /* Quality of the profile count. Because gengtype does not support enums
28 inside of classes, this is in global namespace. */
29 enum profile_quality {
30 /* Uninitialized value. */
31 UNINITIALIZED_PROFILE,
32
33 /* Profile is based on static branch prediction heuristics and may
34 or may not match reality. It is local to function and cannot be compared
35 inter-procedurally. Never used by probabilities (they are always local).
36 */
37 GUESSED_LOCAL,
38
39 /* Profile was read by feedback and was 0, we used local heuristics to guess
40 better. This is the case of functions not run in profile fedback.
41 Never used by probabilities. */
42 GUESSED_GLOBAL0,
43
44 /* Same as GUESSED_GLOBAL0 but global count is adjusted 0. */
45 GUESSED_GLOBAL0_ADJUSTED,
46
47 /* Profile is based on static branch prediction heuristics. It may or may
48 not reflect the reality but it can be compared interprocedurally
49 (for example, we inlined function w/o profile feedback into function
50 with feedback and propagated from that).
51 Never used by probablities. */
52 GUESSED,
53
54 /* Profile was determined by autofdo. */
55 AFDO,
56
57 /* Profile was originally based on feedback but it was adjusted
58 by code duplicating optimization. It may not precisely reflect the
59 particular code path. */
60 ADJUSTED,
61
62 /* Profile was read from profile feedback or determined by accurate static
63 method. */
64 PRECISE
65 };
66
67 extern const char *profile_quality_as_string (enum profile_quality);
68 extern bool parse_profile_quality (const char *value,
69 profile_quality *quality);
70
71 /* The base value for branch probability notes and edge probabilities. */
72 #define REG_BR_PROB_BASE 10000
73
74 #define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
75
76 bool slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res);
77
78 /* Compute RES=(a*b + c/2)/c capping and return false if overflow happened. */
79
80 inline bool
81 safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
82 {
83 #if (GCC_VERSION >= 5000)
84 uint64_t tmp;
85 if (!__builtin_mul_overflow (a, b, &tmp)
86 && !__builtin_add_overflow (tmp, c/2, &tmp))
87 {
88 *res = tmp / c;
89 return true;
90 }
91 if (c == 1)
92 {
93 *res = (uint64_t) -1;
94 return false;
95 }
96 #else
97 if (a < ((uint64_t)1 << 31)
98 && b < ((uint64_t)1 << 31)
99 && c < ((uint64_t)1 << 31))
100 {
101 *res = (a * b + (c / 2)) / c;
102 return true;
103 }
104 #endif
105 return slow_safe_scale_64bit (a, b, c, res);
106 }
107
108 /* Data type to hold probabilities. It implements fixed point arithmetics
109 with capping so probability is always in range [0,1] and scaling requiring
110 values greater than 1 needs to be represented otherwise.
111
112 In addition to actual value the quality of profile is tracked and propagated
113 through all operations. Special value UNINITIALIZED_PROFILE is used for probabilities
114 that have not been determined yet (for example because of
115 -fno-guess-branch-probability)
116
117 Typically probabilities are derived from profile feedback (via
118 probability_in_gcov_type), autoFDO or guessed statically and then propagated
119 throughout the compilation.
120
121 Named probabilities are available:
122 - never (0 probability)
123 - guessed_never
124 - very_unlikely (1/2000 probability)
125 - unlikely (1/5 probability)
126 - even (1/2 probability)
127 - likely (4/5 probability)
128 - very_likely (1999/2000 probability)
129 - guessed_always
130 - always
131
132 Named probabilities except for never/always are assumed to be statically
133 guessed and thus not necessarily accurate. The difference between never
134 and guessed_never is that the first one should be used only in case that
135 well behaving program will very likely not execute the "never" path.
136 For example if the path leads to an abort () call or to exception handling.
137
138 Always and guessed_always probabilities are symmetric.
139
140 For legacy code we support conversion to/from REG_BR_PROB_BASE based fixpoint
141 integer arithmetics. Once the code is converted to branch probabilities,
142 these conversions will probably go away because they are lossy.
143 */
144
145 class GTY((user)) profile_probability
146 {
147 static const int n_bits = 29;
148 /* We can technically use ((uint32_t) 1 << (n_bits - 1)) - 2 but that
149 will lead to harder multiplication sequences. */
150 static const uint32_t max_probability = (uint32_t) 1 << (n_bits - 2);
151 static const uint32_t uninitialized_probability
152 = ((uint32_t) 1 << (n_bits - 1)) - 1;
153
154 uint32_t m_val : 29;
155 enum profile_quality m_quality : 3;
156
157 friend struct profile_count;
158 public:
159 profile_probability (): m_val (uninitialized_probability),
160 m_quality (GUESSED)
161 {}
162
163 profile_probability (uint32_t val, profile_quality quality):
164 m_val (val), m_quality (quality)
165 {}
166
167 /* Named probabilities. */
168 static profile_probability never ()
169 {
170 profile_probability ret;
171 ret.m_val = 0;
172 ret.m_quality = PRECISE;
173 return ret;
174 }
175
176 static profile_probability guessed_never ()
177 {
178 profile_probability ret;
179 ret.m_val = 0;
180 ret.m_quality = GUESSED;
181 return ret;
182 }
183
184 static profile_probability very_unlikely ()
185 {
186 /* Be consistent with PROB_VERY_UNLIKELY in predict.h. */
187 profile_probability r = guessed_always ().apply_scale (1, 2000);
188 r.m_val--;
189 return r;
190 }
191
192 static profile_probability unlikely ()
193 {
194 /* Be consistent with PROB_VERY_LIKELY in predict.h. */
195 profile_probability r = guessed_always ().apply_scale (1, 5);
196 r.m_val--;
197 return r;
198 }
199
200 static profile_probability even ()
201 {
202 return guessed_always ().apply_scale (1, 2);
203 }
204
205 static profile_probability very_likely ()
206 {
207 return always () - very_unlikely ();
208 }
209
210 static profile_probability likely ()
211 {
212 return always () - unlikely ();
213 }
214
215 static profile_probability guessed_always ()
216 {
217 profile_probability ret;
218 ret.m_val = max_probability;
219 ret.m_quality = GUESSED;
220 return ret;
221 }
222
223 static profile_probability always ()
224 {
225 profile_probability ret;
226 ret.m_val = max_probability;
227 ret.m_quality = PRECISE;
228 return ret;
229 }
230
231 /* Probabilities which has not been initialized. Either because
232 initialization did not happen yet or because profile is unknown. */
233 static profile_probability uninitialized ()
234 {
235 profile_probability c;
236 c.m_val = uninitialized_probability;
237 c.m_quality = GUESSED;
238 return c;
239 }
240
241 /* Return true if value has been initialized. */
242 bool initialized_p () const
243 {
244 return m_val != uninitialized_probability;
245 }
246
247 /* Return true if value can be trusted. */
248 bool reliable_p () const
249 {
250 return m_quality >= ADJUSTED;
251 }
252
253 /* Conversion from and to REG_BR_PROB_BASE integer fixpoint arithmetics.
254 this is mostly to support legacy code and should go away. */
255 static profile_probability from_reg_br_prob_base (int v)
256 {
257 profile_probability ret;
258 gcc_checking_assert (v >= 0 && v <= REG_BR_PROB_BASE);
259 ret.m_val = RDIV (v * (uint64_t) max_probability, REG_BR_PROB_BASE);
260 ret.m_quality = GUESSED;
261 return ret;
262 }
263
264 int to_reg_br_prob_base () const
265 {
266 gcc_checking_assert (initialized_p ());
267 return RDIV (m_val * (uint64_t) REG_BR_PROB_BASE, max_probability);
268 }
269
270 /* Conversion to and from RTL representation of profile probabilities. */
271 static profile_probability from_reg_br_prob_note (int v)
272 {
273 profile_probability ret;
274 ret.m_val = ((unsigned int)v) / 8;
275 ret.m_quality = (enum profile_quality)(v & 7);
276 return ret;
277 }
278
279 int to_reg_br_prob_note () const
280 {
281 gcc_checking_assert (initialized_p ());
282 int ret = m_val * 8 + m_quality;
283 gcc_checking_assert (from_reg_br_prob_note (ret) == *this);
284 return ret;
285 }
286
287 /* Return VAL1/VAL2. */
288 static profile_probability probability_in_gcov_type
289 (gcov_type val1, gcov_type val2)
290 {
291 profile_probability ret;
292 gcc_checking_assert (val1 >= 0 && val2 > 0);
293 if (val1 > val2)
294 ret.m_val = max_probability;
295 else
296 {
297 uint64_t tmp;
298 safe_scale_64bit (val1, max_probability, val2, &tmp);
299 gcc_checking_assert (tmp <= max_probability);
300 ret.m_val = tmp;
301 }
302 ret.m_quality = PRECISE;
303 return ret;
304 }
305
306 /* Basic operations. */
307 bool operator== (const profile_probability &other) const
308 {
309 return m_val == other.m_val && m_quality == other.m_quality;
310 }
311
312 profile_probability operator+ (const profile_probability &other) const
313 {
314 if (other == never ())
315 return *this;
316 if (*this == never ())
317 return other;
318 if (!initialized_p () || !other.initialized_p ())
319 return uninitialized ();
320
321 profile_probability ret;
322 ret.m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
323 ret.m_quality = MIN (m_quality, other.m_quality);
324 return ret;
325 }
326
327 profile_probability &operator+= (const profile_probability &other)
328 {
329 if (other == never ())
330 return *this;
331 if (*this == never ())
332 {
333 *this = other;
334 return *this;
335 }
336 if (!initialized_p () || !other.initialized_p ())
337 return *this = uninitialized ();
338 else
339 {
340 m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
341 m_quality = MIN (m_quality, other.m_quality);
342 }
343 return *this;
344 }
345
346 profile_probability operator- (const profile_probability &other) const
347 {
348 if (*this == never ()
349 || other == never ())
350 return *this;
351 if (!initialized_p () || !other.initialized_p ())
352 return uninitialized ();
353 profile_probability ret;
354 ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
355 ret.m_quality = MIN (m_quality, other.m_quality);
356 return ret;
357 }
358
359 profile_probability &operator-= (const profile_probability &other)
360 {
361 if (*this == never ()
362 || other == never ())
363 return *this;
364 if (!initialized_p () || !other.initialized_p ())
365 return *this = uninitialized ();
366 else
367 {
368 m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
369 m_quality = MIN (m_quality, other.m_quality);
370 }
371 return *this;
372 }
373
374 profile_probability operator* (const profile_probability &other) const
375 {
376 if (*this == never ()
377 || other == never ())
378 return never ();
379 if (!initialized_p () || !other.initialized_p ())
380 return uninitialized ();
381 profile_probability ret;
382 ret.m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
383 ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
384 return ret;
385 }
386
387 profile_probability &operator*= (const profile_probability &other)
388 {
389 if (*this == never ()
390 || other == never ())
391 return *this = never ();
392 if (!initialized_p () || !other.initialized_p ())
393 return *this = uninitialized ();
394 else
395 {
396 m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
397 m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
398 }
399 return *this;
400 }
401
402 profile_probability operator/ (const profile_probability &other) const
403 {
404 if (*this == never ())
405 return never ();
406 if (!initialized_p () || !other.initialized_p ())
407 return uninitialized ();
408 profile_probability ret;
409 /* If we get probability above 1, mark it as unreliable and return 1. */
410 if (m_val >= other.m_val)
411 {
412 ret.m_val = max_probability;
413 ret.m_quality = MIN (MIN (m_quality, other.m_quality),
414 GUESSED);
415 return ret;
416 }
417 else if (!m_val)
418 ret.m_val = 0;
419 else
420 {
421 gcc_checking_assert (other.m_val);
422 ret.m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
423 other.m_val),
424 max_probability);
425 }
426 ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
427 return ret;
428 }
429
430 profile_probability &operator/= (const profile_probability &other)
431 {
432 if (*this == never ())
433 return *this = never ();
434 if (!initialized_p () || !other.initialized_p ())
435 return *this = uninitialized ();
436 else
437 {
438 /* If we get probability above 1, mark it as unreliable
439 and return 1. */
440 if (m_val > other.m_val)
441 {
442 m_val = max_probability;
443 m_quality = MIN (MIN (m_quality, other.m_quality),
444 GUESSED);
445 return *this;
446 }
447 else if (!m_val)
448 ;
449 else
450 {
451 gcc_checking_assert (other.m_val);
452 m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
453 other.m_val),
454 max_probability);
455 }
456 m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
457 }
458 return *this;
459 }
460
461 /* Split *THIS (ORIG) probability into 2 probabilities, such that
462 the returned one (FIRST) is *THIS * CPROB and *THIS is
463 adjusted (SECOND) so that FIRST + FIRST.invert () * SECOND
464 == ORIG. This is useful e.g. when splitting a conditional
465 branch like:
466 if (cond)
467 goto lab; // ORIG probability
468 into
469 if (cond1)
470 goto lab; // FIRST = ORIG * CPROB probability
471 if (cond2)
472 goto lab; // SECOND probability
473 such that the overall probability of jumping to lab remains
474 the same. CPROB gives the relative probability between the
475 branches. */
476 profile_probability split (const profile_probability &cprob)
477 {
478 profile_probability ret = *this * cprob;
479 /* The following is equivalent to:
480 *this = cprob.invert () * *this / ret.invert ();
481 Avoid scaling when overall outcome is supposed to be always.
482 Without knowing that one is inverse of toher, the result would be
483 conservative. */
484 if (!(*this == always ()))
485 *this = (*this - ret) / ret.invert ();
486 return ret;
487 }
488
489 gcov_type apply (gcov_type val) const
490 {
491 if (*this == uninitialized ())
492 return val / 2;
493 return RDIV (val * m_val, max_probability);
494 }
495
496 /* Return 1-*THIS. */
497 profile_probability invert () const
498 {
499 return always() - *this;
500 }
501
502 /* Return THIS with quality dropped to GUESSED. */
503 profile_probability guessed () const
504 {
505 profile_probability ret = *this;
506 ret.m_quality = GUESSED;
507 return ret;
508 }
509
510 /* Return THIS with quality dropped to AFDO. */
511 profile_probability afdo () const
512 {
513 profile_probability ret = *this;
514 ret.m_quality = AFDO;
515 return ret;
516 }
517
518 /* Return *THIS * NUM / DEN. */
519 profile_probability apply_scale (int64_t num, int64_t den) const
520 {
521 if (*this == never ())
522 return *this;
523 if (!initialized_p ())
524 return uninitialized ();
525 profile_probability ret;
526 uint64_t tmp;
527 safe_scale_64bit (m_val, num, den, &tmp);
528 ret.m_val = MIN (tmp, max_probability);
529 ret.m_quality = MIN (m_quality, ADJUSTED);
530 return ret;
531 }
532
533 /* Return true when the probability of edge is reliable.
534
535 The profile guessing code is good at predicting branch outcome (ie.
536 taken/not taken), that is predicted right slightly over 75% of time.
537 It is however notoriously poor on predicting the probability itself.
538 In general the profile appear a lot flatter (with probabilities closer
539 to 50%) than the reality so it is bad idea to use it to drive optimization
540 such as those disabling dynamic branch prediction for well predictable
541 branches.
542
543 There are two exceptions - edges leading to noreturn edges and edges
544 predicted by number of iterations heuristics are predicted well. This macro
545 should be able to distinguish those, but at the moment it simply check for
546 noreturn heuristic that is only one giving probability over 99% or bellow
547 1%. In future we might want to propagate reliability information across the
548 CFG if we find this information useful on multiple places. */
549 bool probably_reliable_p () const
550 {
551 if (m_quality >= ADJUSTED)
552 return true;
553 if (!initialized_p ())
554 return false;
555 return m_val < max_probability / 100
556 || m_val > max_probability - max_probability / 100;
557 }
558
559 /* Return false if profile_probability is bogus. */
560 bool verify () const
561 {
562 gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
563 if (m_val == uninitialized_probability)
564 return m_quality == GUESSED;
565 else if (m_quality < GUESSED)
566 return false;
567 return m_val <= max_probability;
568 }
569
570 /* Comparsions are three-state and conservative. False is returned if
571 the inequality cannot be decided. */
572 bool operator< (const profile_probability &other) const
573 {
574 return initialized_p () && other.initialized_p () && m_val < other.m_val;
575 }
576
577 bool operator> (const profile_probability &other) const
578 {
579 return initialized_p () && other.initialized_p () && m_val > other.m_val;
580 }
581
582 bool operator<= (const profile_probability &other) const
583 {
584 return initialized_p () && other.initialized_p () && m_val <= other.m_val;
585 }
586
587 bool operator>= (const profile_probability &other) const
588 {
589 return initialized_p () && other.initialized_p () && m_val >= other.m_val;
590 }
591
592 /* Get the value of the count. */
593 uint32_t value () const { return m_val; }
594
595 /* Get the quality of the count. */
596 enum profile_quality quality () const { return m_quality; }
597
598 /* Output THIS to F. */
599 void dump (FILE *f) const;
600
601 /* Print THIS to stderr. */
602 void debug () const;
603
604 /* Return true if THIS is known to differ significantly from OTHER. */
605 bool differs_from_p (profile_probability other) const;
606
607 /* Return if difference is greater than 50%. */
608 bool differs_lot_from_p (profile_probability other) const;
609
610 /* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
611 happens with COUNT2 probablity. Return probablity that either *THIS or
612 OTHER happens. */
613 profile_probability combine_with_count (profile_count count1,
614 profile_probability other,
615 profile_count count2) const;
616
617 /* LTO streaming support. */
618 static profile_probability stream_in (class lto_input_block *);
619 void stream_out (struct output_block *);
620 void stream_out (struct lto_output_stream *);
621 };
622
623 /* Main data type to hold profile counters in GCC. Profile counts originate
624 either from profile feedback, static profile estimation or both. We do not
625 perform whole program profile propagation and thus profile estimation
626 counters are often local to function, while counters from profile feedback
627 (or special cases of profile estimation) can be used inter-procedurally.
628
629 There are 3 basic types
630 1) local counters which are result of intra-procedural static profile
631 estimation.
632 2) ipa counters which are result of profile feedback or special case
633 of static profile estimation (such as in function main).
634 3) counters which count as 0 inter-procedurally (because the given function
635 was never run in train feedback) but they hold local static profile
636 estimate.
637
638 Counters of type 1 and 3 cannot be mixed with counters of different type
639 within operation (because whole function should use one type of counter)
640 with exception that global zero mix in most operations where outcome is
641 well defined.
642
643 To take local counter and use it inter-procedurally use ipa member function
644 which strips information irrelevant at the inter-procedural level.
645
646 Counters are 61bit integers representing number of executions during the
647 train run or normalized frequency within the function.
648
649 As the profile is maintained during the compilation, many adjustments are
650 made. Not all transformations can be made precisely, most importantly
651 when code is being duplicated. It also may happen that part of CFG has
652 profile counts known while other do not - for example when LTO optimizing
653 partly profiled program or when profile was lost due to COMDAT merging.
654
655 For this reason profile_count tracks more information than
656 just unsigned integer and it is also ready for profile mismatches.
657 The API of this data type represent operations that are natural
658 on profile counts - sum, difference and operation with scales and
659 probabilities. All operations are safe by never getting negative counts
660 and they do end up in uninitialized scale if any of the parameters is
661 uninitialized.
662
663 All comparisons are three-state and conservative when handling probabilities. Thus
664 a < b is not equal to !(a >= b).
665
666 The following pre-defined counts are available:
667
668 profile_count::zero () for code that is known to execute zero times at
669 runtime (this can be detected statically i.e. for paths leading to
670 abort ();
671 profile_count::one () for code that is known to execute once (such as
672 main () function
673 profile_count::uninitialized () for unknown execution count.
674
675 */
676
677 class sreal;
678
679 struct GTY(()) profile_count
680 {
681 public:
682 /* Use 62bit to hold basic block counters. Should be at least
683 64bit. Although a counter cannot be negative, we use a signed
684 type to hold various extra stages. */
685
686 static const int n_bits = 61;
687 static const uint64_t max_count = ((uint64_t) 1 << n_bits) - 2;
688 private:
689 static const uint64_t uninitialized_count = ((uint64_t) 1 << n_bits) - 1;
690
691 #if defined (__arm__) && (__GNUC__ >= 6 && __GNUC__ <= 8)
692 /* Work-around for PR88469. A bug in the gcc-6/7/8 PCS layout code
693 incorrectly detects the alignment of a structure where the only
694 64-bit aligned object is a bit-field. We force the alignment of
695 the entire field to mitigate this. */
696 #define UINT64_BIT_FIELD_ALIGN __attribute__ ((aligned(8)))
697 #else
698 #define UINT64_BIT_FIELD_ALIGN
699 #endif
700 uint64_t UINT64_BIT_FIELD_ALIGN m_val : n_bits;
701 #undef UINT64_BIT_FIELD_ALIGN
702 enum profile_quality m_quality : 3;
703
704 /* Return true if both values can meaningfully appear in single function
705 body. We have either all counters in function local or global, otherwise
706 operations between them are not really defined well. */
707 bool compatible_p (const profile_count other) const
708 {
709 if (!initialized_p () || !other.initialized_p ())
710 return true;
711 if (*this == zero ()
712 || other == zero ())
713 return true;
714 return ipa_p () == other.ipa_p ();
715 }
716 public:
717 /* Used for counters which are expected to be never executed. */
718 static profile_count zero ()
719 {
720 return from_gcov_type (0);
721 }
722
723 static profile_count adjusted_zero ()
724 {
725 profile_count c;
726 c.m_val = 0;
727 c.m_quality = ADJUSTED;
728 return c;
729 }
730
731 static profile_count guessed_zero ()
732 {
733 profile_count c;
734 c.m_val = 0;
735 c.m_quality = GUESSED;
736 return c;
737 }
738
739 static profile_count one ()
740 {
741 return from_gcov_type (1);
742 }
743
744 /* Value of counters which has not been initialized. Either because
745 initialization did not happen yet or because profile is unknown. */
746 static profile_count uninitialized ()
747 {
748 profile_count c;
749 c.m_val = uninitialized_count;
750 c.m_quality = GUESSED_LOCAL;
751 return c;
752 }
753
754 /* Conversion to gcov_type is lossy. */
755 gcov_type to_gcov_type () const
756 {
757 gcc_checking_assert (initialized_p ());
758 return m_val;
759 }
760
761 /* Return true if value has been initialized. */
762 bool initialized_p () const
763 {
764 return m_val != uninitialized_count;
765 }
766
767 /* Return true if value can be trusted. */
768 bool reliable_p () const
769 {
770 return m_quality >= ADJUSTED;
771 }
772
773 /* Return true if vlaue can be operated inter-procedurally. */
774 bool ipa_p () const
775 {
776 return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
777 }
778
779 /* Return true if quality of profile is precise. */
780 bool precise_p () const
781 {
782 return m_quality == PRECISE;
783 }
784
785 /* Get the value of the count. */
786 uint32_t value () const { return m_val; }
787
788 /* Get the quality of the count. */
789 enum profile_quality quality () const { return m_quality; }
790
791 /* When merging basic blocks, the two different profile counts are unified.
792 Return true if this can be done without losing info about profile.
793 The only case we care about here is when first BB contains something
794 that makes it terminate in a way not visible in CFG. */
795 bool ok_for_merging (profile_count other) const
796 {
797 if (m_quality < ADJUSTED
798 || other.m_quality < ADJUSTED)
799 return true;
800 return !(other < *this);
801 }
802
803 /* When merging two BBs with different counts, pick common count that looks
804 most representative. */
805 profile_count merge (profile_count other) const
806 {
807 if (*this == other || !other.initialized_p ()
808 || m_quality > other.m_quality)
809 return *this;
810 if (other.m_quality > m_quality
811 || other > *this)
812 return other;
813 return *this;
814 }
815
816 /* Basic operations. */
817 bool operator== (const profile_count &other) const
818 {
819 return m_val == other.m_val && m_quality == other.m_quality;
820 }
821
822 profile_count operator+ (const profile_count &other) const
823 {
824 if (other == zero ())
825 return *this;
826 if (*this == zero ())
827 return other;
828 if (!initialized_p () || !other.initialized_p ())
829 return uninitialized ();
830
831 profile_count ret;
832 gcc_checking_assert (compatible_p (other));
833 ret.m_val = m_val + other.m_val;
834 ret.m_quality = MIN (m_quality, other.m_quality);
835 return ret;
836 }
837
838 profile_count &operator+= (const profile_count &other)
839 {
840 if (other == zero ())
841 return *this;
842 if (*this == zero ())
843 {
844 *this = other;
845 return *this;
846 }
847 if (!initialized_p () || !other.initialized_p ())
848 return *this = uninitialized ();
849 else
850 {
851 gcc_checking_assert (compatible_p (other));
852 m_val += other.m_val;
853 m_quality = MIN (m_quality, other.m_quality);
854 }
855 return *this;
856 }
857
858 profile_count operator- (const profile_count &other) const
859 {
860 if (*this == zero () || other == zero ())
861 return *this;
862 if (!initialized_p () || !other.initialized_p ())
863 return uninitialized ();
864 gcc_checking_assert (compatible_p (other));
865 profile_count ret;
866 ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
867 ret.m_quality = MIN (m_quality, other.m_quality);
868 return ret;
869 }
870
871 profile_count &operator-= (const profile_count &other)
872 {
873 if (*this == zero () || other == zero ())
874 return *this;
875 if (!initialized_p () || !other.initialized_p ())
876 return *this = uninitialized ();
877 else
878 {
879 gcc_checking_assert (compatible_p (other));
880 m_val = m_val >= other.m_val ? m_val - other.m_val: 0;
881 m_quality = MIN (m_quality, other.m_quality);
882 }
883 return *this;
884 }
885
886 /* Return false if profile_count is bogus. */
887 bool verify () const
888 {
889 gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
890 return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
891 }
892
893 /* Comparsions are three-state and conservative. False is returned if
894 the inequality cannot be decided. */
895 bool operator< (const profile_count &other) const
896 {
897 if (!initialized_p () || !other.initialized_p ())
898 return false;
899 if (*this == zero ())
900 return !(other == zero ());
901 if (other == zero ())
902 return false;
903 gcc_checking_assert (compatible_p (other));
904 return m_val < other.m_val;
905 }
906
907 bool operator> (const profile_count &other) const
908 {
909 if (!initialized_p () || !other.initialized_p ())
910 return false;
911 if (*this == zero ())
912 return false;
913 if (other == zero ())
914 return !(*this == zero ());
915 gcc_checking_assert (compatible_p (other));
916 return initialized_p () && other.initialized_p () && m_val > other.m_val;
917 }
918
919 bool operator< (const gcov_type other) const
920 {
921 gcc_checking_assert (ipa_p ());
922 gcc_checking_assert (other >= 0);
923 return initialized_p () && m_val < (uint64_t) other;
924 }
925
926 bool operator> (const gcov_type other) const
927 {
928 gcc_checking_assert (ipa_p ());
929 gcc_checking_assert (other >= 0);
930 return initialized_p () && m_val > (uint64_t) other;
931 }
932
933 bool operator<= (const profile_count &other) const
934 {
935 if (!initialized_p () || !other.initialized_p ())
936 return false;
937 if (*this == zero ())
938 return true;
939 if (other == zero ())
940 return (*this == zero ());
941 gcc_checking_assert (compatible_p (other));
942 return m_val <= other.m_val;
943 }
944
945 bool operator>= (const profile_count &other) const
946 {
947 if (!initialized_p () || !other.initialized_p ())
948 return false;
949 if (other == zero ())
950 return true;
951 if (*this == zero ())
952 return (other == zero ());
953 gcc_checking_assert (compatible_p (other));
954 return m_val >= other.m_val;
955 }
956
957 bool operator<= (const gcov_type other) const
958 {
959 gcc_checking_assert (ipa_p ());
960 gcc_checking_assert (other >= 0);
961 return initialized_p () && m_val <= (uint64_t) other;
962 }
963
964 bool operator>= (const gcov_type other) const
965 {
966 gcc_checking_assert (ipa_p ());
967 gcc_checking_assert (other >= 0);
968 return initialized_p () && m_val >= (uint64_t) other;
969 }
970
971 /* Return true when value is not zero and can be used for scaling.
972 This is different from *this > 0 because that requires counter to
973 be IPA. */
974 bool nonzero_p () const
975 {
976 return initialized_p () && m_val != 0;
977 }
978
979 /* Make counter forcingly nonzero. */
980 profile_count force_nonzero () const
981 {
982 if (!initialized_p ())
983 return *this;
984 profile_count ret = *this;
985 if (ret.m_val == 0)
986 {
987 ret.m_val = 1;
988 ret.m_quality = MIN (m_quality, ADJUSTED);
989 }
990 return ret;
991 }
992
993 profile_count max (profile_count other) const
994 {
995 if (!initialized_p ())
996 return other;
997 if (!other.initialized_p ())
998 return *this;
999 if (*this == zero ())
1000 return other;
1001 if (other == zero ())
1002 return *this;
1003 gcc_checking_assert (compatible_p (other));
1004 if (m_val < other.m_val || (m_val == other.m_val
1005 && m_quality < other.m_quality))
1006 return other;
1007 return *this;
1008 }
1009
1010 /* PROB is a probability in scale 0...REG_BR_PROB_BASE. Scale counter
1011 accordingly. */
1012 profile_count apply_probability (int prob) const
1013 {
1014 gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
1015 if (m_val == 0)
1016 return *this;
1017 if (!initialized_p ())
1018 return uninitialized ();
1019 profile_count ret;
1020 ret.m_val = RDIV (m_val * prob, REG_BR_PROB_BASE);
1021 ret.m_quality = MIN (m_quality, ADJUSTED);
1022 return ret;
1023 }
1024
1025 /* Scale counter according to PROB. */
1026 profile_count apply_probability (profile_probability prob) const
1027 {
1028 if (*this == zero ())
1029 return *this;
1030 if (prob == profile_probability::never ())
1031 return zero ();
1032 if (!initialized_p ())
1033 return uninitialized ();
1034 profile_count ret;
1035 uint64_t tmp;
1036 safe_scale_64bit (m_val, prob.m_val, profile_probability::max_probability,
1037 &tmp);
1038 ret.m_val = tmp;
1039 ret.m_quality = MIN (m_quality, prob.m_quality);
1040 return ret;
1041 }
1042
1043 /* Return *THIS * NUM / DEN. */
1044 profile_count apply_scale (int64_t num, int64_t den) const
1045 {
1046 if (m_val == 0)
1047 return *this;
1048 if (!initialized_p ())
1049 return uninitialized ();
1050 profile_count ret;
1051 uint64_t tmp;
1052
1053 gcc_checking_assert (num >= 0 && den > 0);
1054 safe_scale_64bit (m_val, num, den, &tmp);
1055 ret.m_val = MIN (tmp, max_count);
1056 ret.m_quality = MIN (m_quality, ADJUSTED);
1057 return ret;
1058 }
1059
  /* Return *THIS scaled by the ratio NUM / DEN of two profile counts.
     Zero and uninitialized inputs are resolved before any arithmetic.  */
  profile_count apply_scale (profile_count num, profile_count den) const
    {
      if (*this == zero ())
	return *this;
      if (num == zero ())
	return num;
      if (!initialized_p () || !num.initialized_p () || !den.initialized_p ())
	return uninitialized ();
      /* Scaling by 1 would only lose quality; short-circuit it.  */
      if (num == den)
	return *this;
      /* Dividing by a zero count is meaningless.  */
      gcc_checking_assert (den.m_val);

      profile_count ret;
      uint64_t val;
      /* Saturating 64-bit multiply-divide.  */
      safe_scale_64bit (m_val, num.m_val, den.m_val, &val);
      ret.m_val = MIN (val, max_count);
      /* Quality is the worst of the three inputs, and never better than
	 ADJUSTED since the value was recomputed.  */
      ret.m_quality = MIN (MIN (MIN (m_quality, ADJUSTED),
			        num.m_quality), den.m_quality);
      /* If the scale is IPA-comparable but the intermediate result is not,
	 drop to GUESSED so the count is not mistaken for an IPA one.  */
      if (num.ipa_p () && !ret.ipa_p ())
	ret.m_quality = MIN (num.m_quality, GUESSED);
      return ret;
    }
1082
1083 /* Return THIS with quality dropped to GUESSED_LOCAL. */
1084 profile_count guessed_local () const
1085 {
1086 profile_count ret = *this;
1087 if (!initialized_p ())
1088 return *this;
1089 ret.m_quality = GUESSED_LOCAL;
1090 return ret;
1091 }
1092
1093 /* We know that profile is globally 0 but keep local profile if present. */
1094 profile_count global0 () const
1095 {
1096 profile_count ret = *this;
1097 if (!initialized_p ())
1098 return *this;
1099 ret.m_quality = GUESSED_GLOBAL0;
1100 return ret;
1101 }
1102
1103 /* We know that profile is globally adjusted 0 but keep local profile
1104 if present. */
1105 profile_count global0adjusted () const
1106 {
1107 profile_count ret = *this;
1108 if (!initialized_p ())
1109 return *this;
1110 ret.m_quality = GUESSED_GLOBAL0_ADJUSTED;
1111 return ret;
1112 }
1113
1114 /* Return THIS with quality dropped to GUESSED. */
1115 profile_count guessed () const
1116 {
1117 profile_count ret = *this;
1118 ret.m_quality = MIN (ret.m_quality, GUESSED);
1119 return ret;
1120 }
1121
1122 /* Return variant of profile counte which is always safe to compare
1123 acorss functions. */
1124 profile_count ipa () const
1125 {
1126 if (m_quality > GUESSED_GLOBAL0_ADJUSTED)
1127 return *this;
1128 if (m_quality == GUESSED_GLOBAL0)
1129 return zero ();
1130 if (m_quality == GUESSED_GLOBAL0_ADJUSTED)
1131 return adjusted_zero ();
1132 return uninitialized ();
1133 }
1134
1135 /* Return THIS with quality dropped to AFDO. */
1136 profile_count afdo () const
1137 {
1138 profile_count ret = *this;
1139 ret.m_quality = AFDO;
1140 return ret;
1141 }
1142
  /* Return probability of event with counter THIS within event with counter
     OVERALL.  */
  profile_probability probability_in (const profile_count overall) const
    {
      /* A known-zero count within a nonzero whole is never taken.  */
      if (*this == zero ()
	  && !(overall == zero ()))
	return profile_probability::never ();
      /* Without both counters, and a nonzero whole, no ratio can be
	 formed.  */
      if (!initialized_p () || !overall.initialized_p ()
	  || !overall.m_val)
	return profile_probability::uninitialized ();
      /* A precise counter equal to the whole means the event always
	 happens.  */
      if (*this == overall && m_quality == PRECISE)
	return profile_probability::always ();
      profile_probability ret;
      gcc_checking_assert (compatible_p (overall));

      if (overall.m_val < m_val)
	{
	  /* Inconsistent profile: the part exceeds the whole.  Saturate at
	     the maximum probability and mark the result as guessed.  */
	  ret.m_val = profile_probability::max_probability;
	  ret.m_quality = GUESSED;
	  return ret;
	}
      else
	ret.m_val = RDIV (m_val * profile_probability::max_probability,
			  overall.m_val);
      /* Clamp the quality into the GUESSED..ADJUSTED range: a derived
	 ratio is never better than ADJUSTED nor worse than GUESSED.  */
      ret.m_quality = MIN (MAX (MIN (m_quality, overall.m_quality),
				GUESSED), ADJUSTED);
      return ret;
    }
1171
1172 int to_frequency (struct function *fun) const;
1173 int to_cgraph_frequency (profile_count entry_bb_count) const;
1174 sreal to_sreal_scale (profile_count in, bool *known = NULL) const;
1175
1176 /* Output THIS to F. */
1177 void dump (FILE *f) const;
1178
1179 /* Print THIS to stderr. */
1180 void debug () const;
1181
1182 /* Return true if THIS is known to differ significantly from OTHER. */
1183 bool differs_from_p (profile_count other) const;
1184
1185 /* We want to scale profile across function boundary from NUM to DEN.
1186 Take care of the side case when NUM and DEN are zeros of incompatible
1187 kinds. */
1188 static void adjust_for_ipa_scaling (profile_count *num, profile_count *den);
1189
1190 /* THIS is a count of bb which is known to be executed IPA times.
1191 Combine this information into bb counter. This means returning IPA
1192 if it is nonzero, not changing anything if IPA is uninitialized
1193 and if IPA is zero, turning THIS into corresponding local profile with
1194 global0. */
1195 profile_count combine_with_ipa_count (profile_count ipa);
1196
1197 /* The profiling runtime uses gcov_type, which is usually 64bit integer.
1198 Conversions back and forth are used to read the coverage and get it
1199 into internal representation. */
1200 static profile_count from_gcov_type (gcov_type v,
1201 profile_quality quality = PRECISE);
1202
1203 /* LTO streaming support. */
1204 static profile_count stream_in (class lto_input_block *);
1205 void stream_out (struct output_block *);
1206 void stream_out (struct lto_output_stream *);
1207 };
1208 #endif