1 /* File format for coverage information
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 Contributed by Bob Manson <manson@cygnus.com>.
4 Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
27 /* Routines declared in gcov-io.h. This file should be #included by
28 another source file, after having #included gcov-io.h. */
31 static void gcov_write_block (unsigned);
32 static gcov_unsigned_t
*gcov_write_words (unsigned);
34 static const gcov_unsigned_t
*gcov_read_words (unsigned);
36 static void gcov_allocate (unsigned);
39 /* Optimum number of gcov_unsigned_t's read from or written to disk. */
40 #define GCOV_BLOCK_SIZE (1 << 10)
42 GCOV_LINKAGE
struct gcov_var
45 gcov_position_t start
; /* Position of first byte of block */
46 unsigned offset
; /* Read/write position within the block. */
47 unsigned length
; /* Read limit in the block. */
48 unsigned overread
; /* Number of words overread. */
49 int error
; /* < 0 overflow, > 0 disk error. */
50 int mode
; /* < 0 writing, > 0 reading */
52 /* Holds one block plus 4 bytes, thus all coverage reads & writes
53 fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
54 to and from the disk. libgcov never backtracks and only writes 4
56 gcov_unsigned_t buffer
[GCOV_BLOCK_SIZE
+ 1];
58 int endian
; /* Swap endianness. */
59 /* Holds a variable length block, as the compiler can write
60 strings and needs to backtrack. */
62 gcov_unsigned_t
*buffer
;
66 /* Save the current position in the gcov file. */
67 static inline gcov_position_t
70 gcov_nonruntime_assert (gcov_var
.mode
> 0);
71 return gcov_var
.start
+ gcov_var
.offset
;
74 /* Return nonzero if the error flag is set. */
78 return gcov_var
.file
? gcov_var
.error
: 1;
82 /* Move to beginning of file and initialize for writing. */
83 GCOV_LINKAGE
inline void
89 fseek (gcov_var
.file
, 0L, SEEK_SET
);
93 static inline gcov_unsigned_t
from_file (gcov_unsigned_t value
)
98 value
= (value
>> 16) | (value
<< 16);
99 value
= ((value
& 0xff00ff) << 8) | ((value
>> 8) & 0xff00ff);
105 /* Open a gcov file. NAME is the name of the file to open and MODE
106 indicates whether a new file should be created, or an existing file
107 opened. If MODE is >= 0 an existing file will be opened, if
108 possible, and if MODE is <= 0, a new file will be created. Use
109 MODE=0 to attempt to reopen an existing file and then fall back on
110 creating a new one. If MODE < 0, the file will be opened in
111 read-only mode. Otherwise it will be opened for modification.
112 Return zero on failure, >0 on opening an existing file and <0 on
113 creating a new one. */
117 gcov_open (const char *name
)
119 gcov_open (const char *name
, int mode
)
126 struct flock s_flock
;
129 s_flock
.l_whence
= SEEK_SET
;
131 s_flock
.l_len
= 0; /* Until EOF. */
132 s_flock
.l_pid
= getpid ();
135 gcov_nonruntime_assert (!gcov_var
.file
);
137 gcov_var
.offset
= gcov_var
.length
= 0;
138 gcov_var
.overread
= -1u;
146 /* Read-only mode - acquire a read-lock. */
147 s_flock
.l_type
= F_RDLCK
;
148 /* pass mode (ignored) for compatibility */
149 fd
= open (name
, O_RDONLY
, S_IRUSR
| S_IWUSR
);
153 /* Write mode - acquire a write-lock. */
154 s_flock
.l_type
= F_WRLCK
;
155 fd
= open (name
, O_RDWR
| O_CREAT
| O_TRUNC
, 0666);
159 /* Read-Write mode - acquire a write-lock. */
160 s_flock
.l_type
= F_WRLCK
;
161 fd
= open (name
, O_RDWR
| O_CREAT
, 0666);
166 while (fcntl (fd
, F_SETLKW
, &s_flock
) && errno
== EINTR
)
169 gcov_var
.file
= fdopen (fd
, (mode
> 0) ? "rb" : "r+b");
183 if (fstat (fd
, &st
) < 0)
185 fclose (gcov_var
.file
);
192 gcov_var
.mode
= mode
* 2 + 1;
195 gcov_var
.mode
= mode
* 2 + 1;
198 gcov_var
.file
= fopen (name
, (mode
> 0) ? "rb" : "r+b");
204 gcov_var
.file
= fopen (name
, "w+b");
206 gcov_var
.mode
= mode
* 2 + 1;
212 setbuf (gcov_var
.file
, (char *)0);
217 /* Close the current gcov file. Flushes data to disk. Returns nonzero
218 on failure or error flag set. */
226 if (gcov_var
.offset
&& gcov_var
.mode
< 0)
227 gcov_write_block (gcov_var
.offset
);
229 fclose (gcov_var
.file
);
234 free (gcov_var
.buffer
);
239 return gcov_var
.error
;
243 /* Check if MAGIC is EXPECTED. Use it to determine endianness of the
244 file. Returns +1 for same endian, -1 for other endian and zero for
248 gcov_magic (gcov_unsigned_t magic
, gcov_unsigned_t expected
)
250 if (magic
== expected
)
252 magic
= (magic
>> 16) | (magic
<< 16);
253 magic
= ((magic
& 0xff00ff) << 8) | ((magic
>> 8) & 0xff00ff);
254 if (magic
== expected
)
265 gcov_allocate (unsigned length
)
267 size_t new_size
= gcov_var
.alloc
;
270 new_size
= GCOV_BLOCK_SIZE
;
274 gcov_var
.alloc
= new_size
;
275 gcov_var
.buffer
= XRESIZEVAR (gcov_unsigned_t
, gcov_var
.buffer
, new_size
<< 2);
280 /* Write out the current block, if needs be. */
283 gcov_write_block (unsigned size
)
285 if (fwrite (gcov_var
.buffer
, size
<< 2, 1, gcov_var
.file
) != 1)
287 gcov_var
.start
+= size
;
288 gcov_var
.offset
-= size
;
291 /* Allocate space to write BYTES bytes to the gcov file. Return a
292 pointer to those bytes, or NULL on failure. */
294 static gcov_unsigned_t
*
295 gcov_write_words (unsigned words
)
297 gcov_unsigned_t
*result
;
299 gcov_nonruntime_assert (gcov_var
.mode
< 0);
301 if (gcov_var
.offset
>= GCOV_BLOCK_SIZE
)
303 gcov_write_block (GCOV_BLOCK_SIZE
);
306 memcpy (gcov_var
.buffer
, gcov_var
.buffer
+ GCOV_BLOCK_SIZE
, 4);
310 if (gcov_var
.offset
+ words
> gcov_var
.alloc
)
311 gcov_allocate (gcov_var
.offset
+ words
);
313 result
= &gcov_var
.buffer
[gcov_var
.offset
];
314 gcov_var
.offset
+= words
;
319 /* Write unsigned VALUE to coverage file. Sets error flag
323 gcov_write_unsigned (gcov_unsigned_t value
)
325 gcov_unsigned_t
*buffer
= gcov_write_words (1);
330 /* Write counter VALUE to coverage file. Sets error flag
335 gcov_write_counter (gcov_type value
)
337 gcov_unsigned_t
*buffer
= gcov_write_words (2);
339 buffer
[0] = (gcov_unsigned_t
) value
;
340 if (sizeof (value
) > sizeof (gcov_unsigned_t
))
341 buffer
[1] = (gcov_unsigned_t
) (value
>> 32);
345 #endif /* IN_LIBGCOV */
348 /* Write STRING to coverage file. Sets error flag on file
349 error, overflow flag on overflow */
352 gcov_write_string (const char *string
)
356 gcov_unsigned_t
*buffer
;
360 length
= strlen (string
);
361 alloc
= (length
+ 4) >> 2;
364 buffer
= gcov_write_words (1 + alloc
);
368 memcpy (&buffer
[1], string
, length
);
373 /* Write a tag TAG and reserve space for the record length. Return a
374 value to be used for gcov_write_length. */
376 GCOV_LINKAGE gcov_position_t
377 gcov_write_tag (gcov_unsigned_t tag
)
379 gcov_position_t result
= gcov_var
.start
+ gcov_var
.offset
;
380 gcov_unsigned_t
*buffer
= gcov_write_words (2);
388 /* Write a record length using POSITION, which was returned by
389 gcov_write_tag. The current file position is the end of the
390 record, and is restored before returning. Returns nonzero on
394 gcov_write_length (gcov_position_t position
)
397 gcov_unsigned_t length
;
398 gcov_unsigned_t
*buffer
;
400 gcov_nonruntime_assert (gcov_var
.mode
< 0);
401 gcov_nonruntime_assert (position
+ 2 <= gcov_var
.start
+ gcov_var
.offset
);
402 gcov_nonruntime_assert (position
>= gcov_var
.start
);
403 offset
= position
- gcov_var
.start
;
404 length
= gcov_var
.offset
- offset
- 2;
405 buffer
= (gcov_unsigned_t
*) &gcov_var
.buffer
[offset
];
407 if (gcov_var
.offset
>= GCOV_BLOCK_SIZE
)
408 gcov_write_block (gcov_var
.offset
);
411 #else /* IN_LIBGCOV */
413 /* Write a tag TAG and length LENGTH. */
416 gcov_write_tag_length (gcov_unsigned_t tag
, gcov_unsigned_t length
)
418 gcov_unsigned_t
*buffer
= gcov_write_words (2);
424 /* Write a summary structure to the gcov file. Return nonzero on
428 gcov_write_summary (gcov_unsigned_t tag
, const struct gcov_summary
*summary
)
430 unsigned ix
, h_ix
, bv_ix
, h_cnt
= 0;
431 const struct gcov_ctr_summary
*csum
;
432 unsigned histo_bitvector
[GCOV_HISTOGRAM_BITVECTOR_SIZE
];
434 /* Count number of non-zero histogram entries, and fill in a bit vector
435 of non-zero indices. The histogram is only currently computed for arc
437 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
438 histo_bitvector
[bv_ix
] = 0;
439 csum
= &summary
->ctrs
[GCOV_COUNTER_ARCS
];
440 for (h_ix
= 0; h_ix
< GCOV_HISTOGRAM_SIZE
; h_ix
++)
442 if (csum
->histogram
[h_ix
].num_counters
> 0)
444 histo_bitvector
[h_ix
/ 32] |= 1 << (h_ix
% 32);
448 gcov_write_tag_length (tag
, GCOV_TAG_SUMMARY_LENGTH (h_cnt
));
449 gcov_write_unsigned (summary
->checksum
);
450 for (csum
= summary
->ctrs
, ix
= GCOV_COUNTERS_SUMMABLE
; ix
--; csum
++)
452 gcov_write_unsigned (csum
->num
);
453 gcov_write_unsigned (csum
->runs
);
454 gcov_write_counter (csum
->sum_all
);
455 gcov_write_counter (csum
->run_max
);
456 gcov_write_counter (csum
->sum_max
);
457 if (ix
!= GCOV_COUNTER_ARCS
)
459 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
460 gcov_write_unsigned (0);
463 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
464 gcov_write_unsigned (histo_bitvector
[bv_ix
]);
465 for (h_ix
= 0; h_ix
< GCOV_HISTOGRAM_SIZE
; h_ix
++)
467 if (!csum
->histogram
[h_ix
].num_counters
)
469 gcov_write_unsigned (csum
->histogram
[h_ix
].num_counters
);
470 gcov_write_counter (csum
->histogram
[h_ix
].min_value
);
471 gcov_write_counter (csum
->histogram
[h_ix
].cum_value
);
475 #endif /* IN_LIBGCOV */
479 /* Return a pointer to read BYTES bytes from the gcov file. Returns
480 NULL on failure (read past EOF). */
482 static const gcov_unsigned_t
*
483 gcov_read_words (unsigned words
)
485 const gcov_unsigned_t
*result
;
486 unsigned excess
= gcov_var
.length
- gcov_var
.offset
;
488 gcov_nonruntime_assert (gcov_var
.mode
> 0);
491 gcov_var
.start
+= gcov_var
.offset
;
495 memcpy (gcov_var
.buffer
, gcov_var
.buffer
+ gcov_var
.offset
, 4);
497 memmove (gcov_var
.buffer
, gcov_var
.buffer
+ gcov_var
.offset
,
502 gcov_var
.length
= excess
;
504 excess
= GCOV_BLOCK_SIZE
;
506 if (gcov_var
.length
+ words
> gcov_var
.alloc
)
507 gcov_allocate (gcov_var
.length
+ words
);
508 excess
= gcov_var
.alloc
- gcov_var
.length
;
510 excess
= fread (gcov_var
.buffer
+ gcov_var
.length
,
511 1, excess
<< 2, gcov_var
.file
) >> 2;
512 gcov_var
.length
+= excess
;
513 if (gcov_var
.length
< words
)
515 gcov_var
.overread
+= words
- gcov_var
.length
;
520 result
= &gcov_var
.buffer
[gcov_var
.offset
];
521 gcov_var
.offset
+= words
;
525 /* Read unsigned value from a coverage file. Sets error flag on file
526 error, overflow flag on overflow */
528 GCOV_LINKAGE gcov_unsigned_t
529 gcov_read_unsigned (void)
531 gcov_unsigned_t value
;
532 const gcov_unsigned_t
*buffer
= gcov_read_words (1);
536 value
= from_file (buffer
[0]);
540 /* Read counter value from a coverage file. Sets error flag on file
541 error, overflow flag on overflow */
543 GCOV_LINKAGE gcov_type
544 gcov_read_counter (void)
547 const gcov_unsigned_t
*buffer
= gcov_read_words (2);
551 value
= from_file (buffer
[0]);
552 if (sizeof (value
) > sizeof (gcov_unsigned_t
))
553 value
|= ((gcov_type
) from_file (buffer
[1])) << 32;
560 /* Read string from coverage file. Returns a pointer to a static
561 buffer, or NULL on empty string. You must copy the string before
562 calling another gcov function. */
565 GCOV_LINKAGE
const char *
566 gcov_read_string (void)
568 unsigned length
= gcov_read_unsigned ();
573 return (const char *) gcov_read_words (length
);
578 gcov_read_summary (struct gcov_summary
*summary
)
580 unsigned ix
, h_ix
, bv_ix
, h_cnt
= 0;
581 struct gcov_ctr_summary
*csum
;
582 unsigned histo_bitvector
[GCOV_HISTOGRAM_BITVECTOR_SIZE
];
583 unsigned cur_bitvector
;
585 summary
->checksum
= gcov_read_unsigned ();
586 for (csum
= summary
->ctrs
, ix
= GCOV_COUNTERS_SUMMABLE
; ix
--; csum
++)
588 csum
->num
= gcov_read_unsigned ();
589 csum
->runs
= gcov_read_unsigned ();
590 csum
->sum_all
= gcov_read_counter ();
591 csum
->run_max
= gcov_read_counter ();
592 csum
->sum_max
= gcov_read_counter ();
593 memset (csum
->histogram
, 0,
594 sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
595 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
597 histo_bitvector
[bv_ix
] = gcov_read_unsigned ();
599 /* When building libgcov we don't include system.h, which includes
600 hwint.h (where popcount_hwi is declared). However, libgcov.a
601 is built by the bootstrapped compiler and therefore the builtins
602 are always available. */
603 h_cnt
+= __builtin_popcount (histo_bitvector
[bv_ix
]);
605 h_cnt
+= popcount_hwi (histo_bitvector
[bv_ix
]);
613 /* Find the index corresponding to the next entry we will read in.
614 First find the next non-zero bitvector and re-initialize
615 the histogram index accordingly, then right shift and increment
616 the index until we find a set bit. */
617 while (!cur_bitvector
)
620 if (bv_ix
>= GCOV_HISTOGRAM_BITVECTOR_SIZE
)
621 gcov_error ("corrupted profile info: summary histogram "
622 "bitvector is corrupt");
623 cur_bitvector
= histo_bitvector
[bv_ix
++];
625 while (!(cur_bitvector
& 0x1))
630 if (h_ix
>= GCOV_HISTOGRAM_SIZE
)
631 gcov_error ("corrupted profile info: summary histogram "
634 csum
->histogram
[h_ix
].num_counters
= gcov_read_unsigned ();
635 csum
->histogram
[h_ix
].min_value
= gcov_read_counter ();
636 csum
->histogram
[h_ix
].cum_value
= gcov_read_counter ();
637 /* Shift off the index we are done with and increment to the
638 corresponding next histogram entry. */
646 /* Reset to a known position. BASE should have been obtained from
647 gcov_position, LENGTH should be a record length. */
650 gcov_sync (gcov_position_t base
, gcov_unsigned_t length
)
652 gcov_nonruntime_assert (gcov_var
.mode
> 0);
654 if (base
- gcov_var
.start
<= gcov_var
.length
)
655 gcov_var
.offset
= base
- gcov_var
.start
;
658 gcov_var
.offset
= gcov_var
.length
= 0;
659 fseek (gcov_var
.file
, base
<< 2, SEEK_SET
);
660 gcov_var
.start
= ftell (gcov_var
.file
) >> 2;
666 /* Move to a given position in a gcov file. */
669 gcov_seek (gcov_position_t base
)
672 gcov_write_block (gcov_var
.offset
);
673 fseek (gcov_var
.file
, base
<< 2, SEEK_SET
);
674 gcov_var
.start
= ftell (gcov_var
.file
) >> 2;
679 /* Return the modification time of the current gcov file. */
686 if (fstat (fileno (gcov_var
.file
), &status
))
689 return status
.st_mtime
;
694 /* Determine the index into histogram for VALUE. */
699 GCOV_LINKAGE
unsigned
701 gcov_histo_index (gcov_type value
)
703 gcov_type_unsigned v
= (gcov_type_unsigned
)value
;
705 unsigned prev2bits
= 0;
707 /* Find index into log2 scale histogram, where each of the log2
708 sized buckets is divided into 4 linear sub-buckets for better
709 focus in the higher buckets. */
711 /* Find the place of the most-significant bit set. */
715 /* When building libgcov we don't include system.h, which includes
716 hwint.h (where floor_log2 is declared). However, libgcov.a
717 is built by the bootstrapped compiler and therefore the builtins
718 are always available. */
719 r
= sizeof (long long) * __CHAR_BIT__
- 1 - __builtin_clzll (v
);
721 /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
722 that is 64 bits and gcov_type_unsigned is 64 bits. */
727 /* If at most the 2 least significant bits are set (value is
728 0 - 3) then that value is our index into the lowest set of
731 return (unsigned)value
;
733 gcov_nonruntime_assert (r
< 64);
735 /* Find the two next most significant bits to determine which
736 of the four linear sub-buckets to select. */
737 prev2bits
= (v
>> (r
- 2)) & 0x3;
738 /* Finally, compose the final bucket index from the log2 index and
739 the next 2 bits. The minimum r value at this point is 2 since we
740 returned above if r was 2 or more, so the minimum bucket at this
742 return (r
- 1) * 4 + prev2bits
;
745 /* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
746 the same relative order in both histograms, and are matched up
747 and merged in reverse order. Each counter is assigned an equal portion of
748 its entry's original cumulative counter value when computing the
749 new merged cum_value. */
751 static void gcov_histogram_merge (gcov_bucket_type
*tgt_histo
,
752 gcov_bucket_type
*src_histo
)
754 int src_i
, tgt_i
, tmp_i
= 0;
755 unsigned src_num
, tgt_num
, merge_num
;
756 gcov_type src_cum
, tgt_cum
, merge_src_cum
, merge_tgt_cum
, merge_cum
;
758 gcov_bucket_type tmp_histo
[GCOV_HISTOGRAM_SIZE
];
761 memset (tmp_histo
, 0, sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
763 /* Assume that the counters are in the same relative order in both
764 histograms. Walk the histograms from largest to smallest entry,
765 matching up and combining counters in order. */
768 src_i
= GCOV_HISTOGRAM_SIZE
- 1;
769 for (tgt_i
= GCOV_HISTOGRAM_SIZE
- 1; tgt_i
>= 0 && !src_done
; tgt_i
--)
771 tgt_num
= tgt_histo
[tgt_i
].num_counters
;
772 tgt_cum
= tgt_histo
[tgt_i
].cum_value
;
773 /* Keep going until all of the target histogram's counters at this
774 position have been matched and merged with counters from the
776 while (tgt_num
> 0 && !src_done
)
778 /* If this is either the first time through this loop or we just
779 exhausted the previous non-zero source histogram entry, look
780 for the next non-zero source histogram entry. */
783 /* Locate the next non-zero entry. */
784 while (src_i
>= 0 && !src_histo
[src_i
].num_counters
)
786 /* If source histogram has fewer counters, then just copy over the
787 remaining target counters and quit. */
790 tmp_histo
[tgt_i
].num_counters
+= tgt_num
;
791 tmp_histo
[tgt_i
].cum_value
+= tgt_cum
;
792 if (!tmp_histo
[tgt_i
].min_value
||
793 tgt_histo
[tgt_i
].min_value
< tmp_histo
[tgt_i
].min_value
)
794 tmp_histo
[tgt_i
].min_value
= tgt_histo
[tgt_i
].min_value
;
797 tmp_histo
[tgt_i
].num_counters
798 += tgt_histo
[tgt_i
].num_counters
;
799 tmp_histo
[tgt_i
].cum_value
+= tgt_histo
[tgt_i
].cum_value
;
800 if (!tmp_histo
[tgt_i
].min_value
||
801 tgt_histo
[tgt_i
].min_value
802 < tmp_histo
[tgt_i
].min_value
)
803 tmp_histo
[tgt_i
].min_value
= tgt_histo
[tgt_i
].min_value
;
810 src_num
= src_histo
[src_i
].num_counters
;
811 src_cum
= src_histo
[src_i
].cum_value
;
814 /* The number of counters to merge on this pass is the minimum
815 of the remaining counters from the current target and source
816 histogram entries. */
818 if (src_num
< merge_num
)
821 /* The merged min_value is the sum of the min_values from target
823 merge_min
= tgt_histo
[tgt_i
].min_value
+ src_histo
[src_i
].min_value
;
825 /* Compute the portion of source and target entries' cum_value
826 that will be apportioned to the counters being merged.
827 The total remaining cum_value from each entry is divided
828 equally among the counters from that histogram entry if we
829 are not merging all of them. */
830 merge_src_cum
= src_cum
;
831 if (merge_num
< src_num
)
832 merge_src_cum
= merge_num
* src_cum
/ src_num
;
833 merge_tgt_cum
= tgt_cum
;
834 if (merge_num
< tgt_num
)
835 merge_tgt_cum
= merge_num
* tgt_cum
/ tgt_num
;
836 /* The merged cum_value is the sum of the source and target
838 merge_cum
= merge_src_cum
+ merge_tgt_cum
;
840 /* Update the remaining number of counters and cum_value left
841 to be merged from this source and target entry. */
842 src_cum
-= merge_src_cum
;
843 tgt_cum
-= merge_tgt_cum
;
844 src_num
-= merge_num
;
845 tgt_num
-= merge_num
;
847 /* The merged counters get placed in the new merged histogram
848 at the entry for the merged min_value. */
849 tmp_i
= gcov_histo_index (merge_min
);
850 gcov_nonruntime_assert (tmp_i
< GCOV_HISTOGRAM_SIZE
);
851 tmp_histo
[tmp_i
].num_counters
+= merge_num
;
852 tmp_histo
[tmp_i
].cum_value
+= merge_cum
;
853 if (!tmp_histo
[tmp_i
].min_value
||
854 merge_min
< tmp_histo
[tmp_i
].min_value
)
855 tmp_histo
[tmp_i
].min_value
= merge_min
;
857 /* Ensure the search for the next non-zero src_histo entry starts
858 at the next smallest histogram bucket. */
864 gcov_nonruntime_assert (tgt_i
< 0);
866 /* In the case where there were more counters in the source histogram,
867 accumulate the remaining unmerged cumulative counter values. Add
868 those to the smallest non-zero target histogram entry. Otherwise,
869 the total cumulative counter values in the histogram will be smaller
870 than the sum_all stored in the summary, which will complicate
871 computing the working set information from the histogram later on. */
876 src_cum
+= src_histo
[src_i
].cum_value
;
879 /* At this point, tmp_i should be the smallest non-zero entry in the
881 gcov_nonruntime_assert (tmp_i
>= 0 && tmp_i
< GCOV_HISTOGRAM_SIZE
882 && tmp_histo
[tmp_i
].num_counters
> 0);
883 tmp_histo
[tmp_i
].cum_value
+= src_cum
;
885 /* Finally, copy the merged histogram into tgt_histo. */
886 memcpy (tgt_histo
, tmp_histo
,
887 sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
889 #endif /* !IN_GCOV */
891 /* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
892 (!IN_GCOV && !IN_LIBGCOV). */
893 #if IN_GCOV <= 0 && !IN_LIBGCOV
894 /* Compute the working set information from the counter histogram in
895 the profile summary. This is an array of information corresponding to a
896 range of percentages of the total execution count (sum_all), and includes
897 the number of counters required to cover that working set percentage and
898 the minimum counter value in that working set. */
901 compute_working_sets (const struct gcov_ctr_summary
*summary
,
902 gcov_working_set_t
*gcov_working_sets
)
904 gcov_type working_set_cum_values
[NUM_GCOV_WORKING_SETS
];
905 gcov_type ws_cum_hotness_incr
;
906 gcov_type cum
, tmp_cum
;
907 const gcov_bucket_type
*histo_bucket
;
908 unsigned ws_ix
, c_num
, count
;
911 /* Compute the amount of sum_all that the cumulative hotness grows
912 by in each successive working set entry, which depends on the
913 number of working set entries. */
914 ws_cum_hotness_incr
= summary
->sum_all
/ NUM_GCOV_WORKING_SETS
;
916 /* Next fill in an array of the cumulative hotness values corresponding
917 to each working set summary entry we are going to compute below.
918 Skip 0% statistics, which can be extrapolated from the
919 rest of the summary data. */
920 cum
= ws_cum_hotness_incr
;
921 for (ws_ix
= 0; ws_ix
< NUM_GCOV_WORKING_SETS
;
922 ws_ix
++, cum
+= ws_cum_hotness_incr
)
923 working_set_cum_values
[ws_ix
] = cum
;
924 /* The last summary entry is reserved for (roughly) 99.9% of the
925 working set. Divide by 1024 so it becomes a shift, which gives
926 almost exactly 99.9%. */
927 working_set_cum_values
[NUM_GCOV_WORKING_SETS
-1]
928 = summary
->sum_all
- summary
->sum_all
/1024;
930 /* Next, walk through the histogram in decending order of hotness
931 and compute the statistics for the working set summary array.
932 As histogram entries are accumulated, we check to see which
933 working set entries have had their expected cum_value reached
934 and fill them in, walking the working set entries in increasing
935 size of cum_value. */
936 ws_ix
= 0; /* The current entry into the working set array. */
937 cum
= 0; /* The current accumulated counter sum. */
938 count
= 0; /* The current accumulated count of block counters. */
939 for (h_ix
= GCOV_HISTOGRAM_SIZE
- 1;
940 h_ix
>= 0 && ws_ix
< NUM_GCOV_WORKING_SETS
; h_ix
--)
942 histo_bucket
= &summary
->histogram
[h_ix
];
944 /* If we haven't reached the required cumulative counter value for
945 the current working set percentage, simply accumulate this histogram
946 entry into the running sums and continue to the next histogram
948 if (cum
+ histo_bucket
->cum_value
< working_set_cum_values
[ws_ix
])
950 cum
+= histo_bucket
->cum_value
;
951 count
+= histo_bucket
->num_counters
;
955 /* If adding the current histogram entry's cumulative counter value
956 causes us to exceed the current working set size, then estimate
957 how many of this histogram entry's counter values are required to
958 reach the working set size, and fill in working set entries
959 as we reach their expected cumulative value. */
960 for (c_num
= 0, tmp_cum
= cum
;
961 c_num
< histo_bucket
->num_counters
&& ws_ix
< NUM_GCOV_WORKING_SETS
;
965 /* If we haven't reached the last histogram entry counter, add
966 in the minimum value again. This will underestimate the
967 cumulative sum so far, because many of the counter values in this
968 entry may have been larger than the minimum. We could add in the
969 average value every time, but that would require an expensive
971 if (c_num
+ 1 < histo_bucket
->num_counters
)
972 tmp_cum
+= histo_bucket
->min_value
;
973 /* If we have reached the last histogram entry counter, then add
974 in the entire cumulative value. */
976 tmp_cum
= cum
+ histo_bucket
->cum_value
;
978 /* Next walk through successive working set entries and fill in
979 the statistics for any whose size we have reached by accumulating
980 this histogram counter. */
981 while (ws_ix
< NUM_GCOV_WORKING_SETS
982 && tmp_cum
>= working_set_cum_values
[ws_ix
])
984 gcov_working_sets
[ws_ix
].num_counters
= count
;
985 gcov_working_sets
[ws_ix
].min_counter
986 = histo_bucket
->min_value
;
990 /* Finally, update the running cumulative value since we were
991 using a temporary above. */
992 cum
+= histo_bucket
->cum_value
;
994 gcov_nonruntime_assert (ws_ix
== NUM_GCOV_WORKING_SETS
);
996 #endif /* IN_GCOV <= 0 && !IN_LIBGCOV */