1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org)
6
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39
40 #ifdef ENABLE_VALGRIND_CHECKING
41 # ifdef HAVE_VALGRIND_MEMCHECK_H
42 # include <valgrind/memcheck.h>
43 # elif defined HAVE_MEMCHECK_H
44 # include <memcheck.h>
45 # else
46 # include <valgrind.h>
47 # endif
48 #else
49 /* Avoid #ifdef:s when we can help it. */
50 #define VALGRIND_DISCARD(x)
51 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
52 #define VALGRIND_FREELIKE_BLOCK(x,y)
53 #endif

54	/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
55 file open. Prefer either to valloc. */
56 #ifdef HAVE_MMAP_ANON
57 # undef HAVE_MMAP_DEV_ZERO
58
59 # include <sys/mman.h>
60 # ifndef MAP_FAILED
61 # define MAP_FAILED -1
62 # endif
63 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
64 # define MAP_ANONYMOUS MAP_ANON
65 # endif
66 # define USING_MMAP
67
68 #endif
69
70 #ifdef HAVE_MMAP_DEV_ZERO
71
72 # include <sys/mman.h>
73 # ifndef MAP_FAILED
74 # define MAP_FAILED -1
75 # endif
76 # define USING_MMAP
77
78 #endif
79
80 #ifndef USING_MMAP
81 #error "Zone collector requires mmap"
82 #endif
83
84 #if (GCC_VERSION < 3001)
85 #define prefetch(X) ((void) X)
86 #else
87 #define prefetch(X) __builtin_prefetch (X)
88 #endif
89
90 /* NOTES:
91 If we track inter-zone pointers, we can mark single zones at a
92 time.
93 If we have a zone where we guarantee no inter-zone pointers, we
94 could mark that zone separately.
95 The garbage zone should not be marked, and we should return 1 in
96 ggc_set_mark for any object in the garbage zone, which cuts off
97 marking quickly. */
98	/* Strategy:
99
100 This garbage-collecting allocator segregates objects into zones.
101	It also segregates objects into "large" and "small" bins. Large
102	objects are greater than or equal to the page size.
103
104	Pages for small objects are broken up into chunks, each of which
105	is described by a struct alloc_chunk. One can walk over all
106 chunks on the page by adding the chunk size to the chunk's data
107 address. The free space for a page exists in the free chunk bins.
108
109 Each page-entry also has a context depth, which is used to track
110 pushing and popping of allocation contexts. Only objects allocated
111 in the current (highest-numbered) context may be collected.
112
113 Empty pages (of all sizes) are kept on a single page cache list,
114 and are considered first when new pages are required; they are
115 deallocated at the start of the next collection if they haven't
116 been recycled by then. */
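
/* The chunk walk described above, as an illustrative sketch only (kept
under #if 0, so it is not compiled); it mirrors the loops that
sweep_pages and check_cookies use later in this file. */
#if 0
static void
walk_page_chunks (page_entry *p)
{
struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
do
{
/* Visit CHUNK here; its data occupies chunk->size bytes following
CHUNK_OVERHEAD bytes of header. */
chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
}
while (chunk < end);
}
#endif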
117
118 /* Define GGC_DEBUG_LEVEL to print debugging information.
119 0: No debugging output.
120 1: GC statistics only.
121 2: Page-entry allocations/deallocations as well.
122 3: Object allocations as well.
123 4: Object marks as well. */
124 #define GGC_DEBUG_LEVEL (0)
125
126 #ifndef HOST_BITS_PER_PTR
127 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
128 #endif
129
130 #ifdef COOKIE_CHECKING
131 #define CHUNK_MAGIC 0x95321123
132 #define DEADCHUNK_MAGIC 0x12817317
133 #endif
134
135	/* This structure manages small chunks. When the chunk is free, it's
136	linked with other chunks via u.next_free. When the chunk is allocated,
137 the data starts at u. Large chunks are allocated one at a time to
138 their own page, and so don't come in here.
139
140 The "type" field is a placeholder for a future change to do
141	generational collection. At present it is 0 when free
142	and 1 when allocated. */
143
144 struct alloc_chunk {
145 #ifdef COOKIE_CHECKING
146 unsigned int magic;
147 #endif
148 unsigned int type:1;
149 unsigned int mark:1;
150 unsigned char large;
151 unsigned short size;
152 /* Right now, on 32-bit hosts we don't have enough room to save the
153 typecode unless we make the one remaining flag into a bitfield.
154 There's a performance cost to that, so we don't do it until we're
155 ready to use the type information for something. */
156 union {
157 struct alloc_chunk *next_free;
158 char data[1];
159
160 /* Make sure the data is sufficiently aligned. */
161 HOST_WIDEST_INT align_i;
162 #ifdef HAVE_LONG_DOUBLE
163 long double align_d;
164 #else
165 double align_d;
166 #endif
167 } u;
168 };
169
170 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
171
172 /* We maintain several bins of free lists for chunks for very small
173 objects. We never exhaustively search other bins -- if we don't
174 find one of the proper size, we allocate from the "larger" bin. */
175
176	/* Decreasing the number of free bins increases the time it takes to allocate.
177	The same holds for increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS.
178	
179	After much histogramming of allocation sizes and time spent on GC,
180	on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
181	these were determined to be the optimal values. */
182 #define NUM_FREE_BINS 64
183 #define MAX_FREE_BIN_SIZE (64 * sizeof (void *))
184 #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
185 #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
186 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
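
/* For example, on a host where sizeof (void *) == 8, FREE_BIN_DELTA is 8
and MAX_FREE_BIN_SIZE is 512. A 20-byte request maps to bin
SIZE_BIN_UP (20) == (20 + 8 - 1) / 8 == 3, while a freed 20-byte chunk
goes onto bin SIZE_BIN_DOWN (20) == 20 / 8 == 2, so a recycled chunk is
never smaller than the requests later served from its bin. */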
187
188	/* Marker used as chunk->size for a large object. Must fit in the
189	size field of struct alloc_chunk above. */
190 #define LARGE_OBJECT_SIZE 0x7fff
191
192 /* We use this structure to determine the alignment required for
193 allocations. For power-of-two sized allocations, that's not a
194 problem, but it does matter for odd-sized allocations. */
195
196 struct max_alignment {
197 char c;
198 union {
199 HOST_WIDEST_INT i;
200 #ifdef HAVE_LONG_DOUBLE
201 long double d;
202 #else
203 double d;
204 #endif
205 } u;
206 };
207
208 /* The biggest alignment required. */
209
210 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
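
/* For example, on a host whose most strictly aligned union member is an
8-byte double or HOST_WIDEST_INT, the char forces 7 bytes of padding
before the union, and offsetof (struct max_alignment, u) == 8. */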
211
212 /* Compute the smallest nonnegative number which when added to X gives
213 a multiple of F. */
214
215 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
216
217 /* Compute the smallest multiple of F that is >= X. */
218
219 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
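
/* For example, with CEIL (x, f) == ((x) + (f) - 1) / (f), its usual GCC
definition: ROUND_UP_VALUE (10, 8) == 8 - 1 - ((8 - 1 + 10) % 8) == 6
and ROUND_UP (10, 8) == 2 * 8 == 16, so
10 + ROUND_UP_VALUE (10, 8) == ROUND_UP (10, 8). */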
220
221
222 /* A page_entry records the status of an allocation page. */
223 typedef struct page_entry
224 {
225 /* The next page-entry with objects of the same size, or NULL if
226 this is the last page-entry. */
227 struct page_entry *next;
228
229 /* The number of bytes allocated. (This will always be a multiple
230 of the host system page size.) */
231 size_t bytes;
232
233 /* How many collections we've survived. */
234 size_t survived;
235
236 /* The address at which the memory is allocated. */
237 char *page;
238
239 /* Context depth of this page. */
240 unsigned short context_depth;
241
242 /* Does this page contain small objects, or one large object? */
243 bool large_p;
244
245 /* The zone that this page entry belongs to. */
246 struct alloc_zone *zone;
247 } page_entry;
248
249
250 /* The global variables. */
251 static struct globals
252 {
253 /* The linked list of zones. */
254 struct alloc_zone *zones;
255
256 /* The system's page size. */
257 size_t pagesize;
258 size_t lg_pagesize;
259
260 /* A file descriptor open to /dev/zero for reading. */
261 #if defined (HAVE_MMAP_DEV_ZERO)
262 int dev_zero_fd;
263 #endif
264
265 /* The file descriptor for debugging output. */
266 FILE *debug_file;
267 } G;
268
269 /* The zone allocation structure. */
270 struct alloc_zone
271 {
272 /* Name of the zone. */
273 const char *name;
274
275 /* Linked list of pages in a zone. */
276 page_entry *pages;
277
278	/* Linked lists of free storage. Slot I (1 <= I <= NUM_FREE_BINS) holds chunks
279	of size I * FREE_BIN_DELTA through (I + 1) * FREE_BIN_DELTA - 1; slot 0 holds all larger chunks. */
280 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
281
282 /* Bytes currently allocated. */
283 size_t allocated;
284
285	/* Bytes allocated as of the end of the last collection. */
286 size_t allocated_last_gc;
287
288 /* Total amount of memory mapped. */
289 size_t bytes_mapped;
290
291 /* Bit N set if any allocations have been done at context depth N. */
292 unsigned long context_depth_allocations;
293
294 /* Bit N set if any collections have been done at context depth N. */
295 unsigned long context_depth_collections;
296
297 /* The current depth in the context stack. */
298 unsigned short context_depth;
299
300 /* A cache of free system pages. */
301 page_entry *free_pages;
302
303 /* Next zone in the linked list of zones. */
304 struct alloc_zone *next_zone;
305
306 /* True if this zone was collected during this collection. */
307 bool was_collected;
308
309 /* True if this zone should be destroyed after the next collection. */
310 bool dead;
311
312 #ifdef GATHER_STATISTICS
313 struct
314 {
315 /* Total memory allocated with ggc_alloc. */
316 unsigned long long total_allocated;
317 /* Total overhead for memory to be allocated with ggc_alloc. */
318 unsigned long long total_overhead;
319
320 /* Total allocations and overhead for sizes less than 32, 64 and 128.
321 These sizes are interesting because they are typical cache line
322 sizes. */
323
324 unsigned long long total_allocated_under32;
325 unsigned long long total_overhead_under32;
326
327 unsigned long long total_allocated_under64;
328 unsigned long long total_overhead_under64;
329
330 unsigned long long total_allocated_under128;
331 unsigned long long total_overhead_under128;
332 } stats;
333 #endif
334 } main_zone;
335
336 struct alloc_zone *rtl_zone;
337 struct alloc_zone *garbage_zone;
338 struct alloc_zone *tree_zone;
339
/* Nonzero if collection should proceed even when the heuristics in
ggc_collect would otherwise skip it. */
340	static int always_collect;
341
342 /* Allocate pages in chunks of this size, to throttle calls to memory
343 allocation routines. The first page is used, the rest go onto the
344	free list. */
346 #define GGC_QUIRE_SIZE 16
347
348 static int ggc_allocated_p (const void *);
349 #ifdef USING_MMAP
350 static char *alloc_anon (char *, size_t, struct alloc_zone *);
351 #endif
352	static struct page_entry * alloc_small_page (struct alloc_zone *);
353 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
354 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
355 static void free_page (struct page_entry *);
356 static void release_pages (struct alloc_zone *);
357 static void sweep_pages (struct alloc_zone *);
358 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
359 static bool ggc_collect_1 (struct alloc_zone *, bool);
360 static void check_cookies (void);
361
362
363 /* Returns nonzero if P was allocated in GC'able memory. */
364
365 static inline int
366 ggc_allocated_p (const void *p)
367 {
368 struct alloc_chunk *chunk;
369 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
370 #ifdef COOKIE_CHECKING
371 gcc_assert (chunk->magic == CHUNK_MAGIC);
372 #endif
373 if (chunk->type == 1)
374 return true;
375 return false;
376 }
377
378
379 #ifdef USING_MMAP
380	/* Allocate SIZE bytes of anonymous memory, preferably near PREF
381 (if non-null). The ifdef structure here is intended to cause a
382 compile error unless exactly one of the HAVE_* is defined. */
383
384 static inline char *
385 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
386 {
387 #ifdef HAVE_MMAP_ANON
388 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
389 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
390 #endif
391 #ifdef HAVE_MMAP_DEV_ZERO
392 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
393 MAP_PRIVATE, G.dev_zero_fd, 0);
394 #endif
395 VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
396
397 if (page == (char *) MAP_FAILED)
398 {
399 perror ("virtual memory exhausted");
400 exit (FATAL_EXIT_CODE);
401 }
402
403 /* Remember that we allocated this memory. */
404 zone->bytes_mapped += size;
405 /* Pretend we don't have access to the allocated pages. We'll enable
406 access to smaller pieces of the area in ggc_alloc. Discard the
407 handle to avoid handle leak. */
408 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
409 return page;
410 }
411 #endif
412
413	/* Allocate a new page for small-object allocation in ZONE, and
414	return an entry for it. */
415
416 static inline struct page_entry *
417 alloc_small_page (struct alloc_zone *zone)
418 {
419 struct page_entry *entry;
420 char *page;
421
422 page = NULL;
423
424 /* Check the list of free pages for one we can use. */
425 entry = zone->free_pages;
426 if (entry != NULL)
427 {
428 /* Recycle the allocated memory from this page ... */
429 zone->free_pages = entry->next;
430 page = entry->page;
433	}
434 #ifdef USING_MMAP
435 else
436 {
437 /* We want just one page. Allocate a bunch of them and put the
438 extras on the freelist. (Can only do this optimization with
439 mmap for backing store.) */
440 struct page_entry *e, *f = zone->free_pages;
441 int i;
442
443 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
444
445 /* This loop counts down so that the chain will be in ascending
446 memory order. */
447 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
448 {
449 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
450 e->bytes = G.pagesize;
451 e->page = page + (i << G.lg_pagesize);
452 e->next = f;
453 f = e;
454 }
455
456 zone->free_pages = f;
457 }
458 #endif
459 if (entry == NULL)
460 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
461
462 entry->next = 0;
463 entry->bytes = G.pagesize;
464 entry->page = page;
465 entry->context_depth = zone->context_depth;
466 entry->large_p = false;
467 entry->zone = zone;
468 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
469
470 if (GGC_DEBUG_LEVEL >= 2)
471 fprintf (G.debug_file,
472 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
473 (PTR) entry, page, page + G.pagesize - 1);
474
475 return entry;
476 }
481 /* Allocate a large page of size SIZE in ZONE. */
482
483 static inline struct page_entry *
484 alloc_large_page (size_t size, struct alloc_zone *zone)
485 {
486 struct page_entry *entry;
487 char *page;
488 size = ROUND_UP (size, 1024);
489 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
490 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
491
492 entry->next = 0;
493 entry->bytes = size;
494 entry->page = page;
495 entry->context_depth = zone->context_depth;
496 entry->large_p = true;
497 entry->zone = zone;
498 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
499
500 if (GGC_DEBUG_LEVEL >= 2)
501 fprintf (G.debug_file,
502 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
503 (PTR) entry, page, page + size - 1);
504
505 return entry;
506 }
507
508
509 /* For a page that is no longer needed, put it on the free page list. */
510
511 static inline void
512 free_page (page_entry *entry)
513 {
514 if (GGC_DEBUG_LEVEL >= 2)
515 fprintf (G.debug_file,
516 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
517 entry->page, entry->page + entry->bytes - 1);
518
519 if (entry->large_p)
520 {
521 free (entry->page);
522 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
523 }
524 else
525 {
526 /* Mark the page as inaccessible. Discard the handle to
527 avoid handle leak. */
528 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
529
530 entry->next = entry->zone->free_pages;
531 entry->zone->free_pages = entry;
532 }
533 }
534
535 /* Release the free page cache to the system. */
536
537 static void
538 release_pages (struct alloc_zone *zone)
539 {
540 #ifdef USING_MMAP
541 page_entry *p, *next;
542 char *start;
543 size_t len;
544
545 /* Gather up adjacent pages so they are unmapped together. */
546 p = zone->free_pages;
547
548 while (p)
549 {
550 start = p->page;
551 next = p->next;
552 len = p->bytes;
553 free (p);
554 p = next;
555
556 while (p && p->page == start + len)
557 {
558 next = p->next;
559 len += p->bytes;
560 free (p);
561 p = next;
562 }
563
564 munmap (start, len);
565 zone->bytes_mapped -= len;
566 }
567
568 zone->free_pages = NULL;
569 #endif
570 }
571
572 /* Place CHUNK of size SIZE on the free list for ZONE. */
573
574 static inline void
575 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
576 {
577 size_t bin = 0;
578
579 bin = SIZE_BIN_DOWN (size);
580 gcc_assert (bin);
581 if (bin > NUM_FREE_BINS)
582 bin = 0;
583 #ifdef COOKIE_CHECKING
584 gcc_assert (chunk->magic == CHUNK_MAGIC || chunk->magic == DEADCHUNK_MAGIC);
585 chunk->magic = DEADCHUNK_MAGIC;
586 #endif
587 chunk->u.next_free = zone->free_chunks[bin];
588 zone->free_chunks[bin] = chunk;
589 if (GGC_DEBUG_LEVEL >= 3)
590 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
591 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
592 }
593
594 /* Allocate a chunk of memory of SIZE bytes. */
595
596 static void *
597 ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone,
598 short type ATTRIBUTE_UNUSED
599 MEM_STAT_DECL)
600 {
601 size_t bin = 0;
602 size_t lsize = 0;
603 struct page_entry *entry;
604 struct alloc_chunk *chunk, *lchunk, **pp;
605 void *result;
606 size_t size = orig_size;
607
608 /* Align size, so that we're assured of aligned allocations. */
609 if (size < FREE_BIN_DELTA)
610 size = FREE_BIN_DELTA;
611 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
612
613 /* Large objects are handled specially. */
614 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
615 {
616 size = ROUND_UP (size, 1024);
617 entry = alloc_large_page (size, zone);
618 entry->survived = 0;
619 entry->next = entry->zone->pages;
620 entry->zone->pages = entry;
621
622 chunk = (struct alloc_chunk *) entry->page;
623 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
624 chunk->large = 1;
625 chunk->size = CEIL (size, 1024);
626
627 goto found;
628 }
629
630 /* First look for a tiny object already segregated into its own
631 size bucket. */
632 bin = SIZE_BIN_UP (size);
633 if (bin <= NUM_FREE_BINS)
634 {
635 chunk = zone->free_chunks[bin];
636 if (chunk)
637 {
638 zone->free_chunks[bin] = chunk->u.next_free;
639 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
640 goto found;
641 }
642 }
643
644 /* Failing that, look through the "other" bucket for a chunk
645 that is large enough. */
646 pp = &(zone->free_chunks[0]);
647 chunk = *pp;
648 while (chunk && chunk->size < size)
649 {
650 pp = &chunk->u.next_free;
651 chunk = *pp;
652 }
653
654 /* Failing that, allocate new storage. */
655 if (!chunk)
656 {
657 entry = alloc_small_page (zone);
658 entry->next = entry->zone->pages;
659 entry->zone->pages = entry;
660
661 chunk = (struct alloc_chunk *) entry->page;
662 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
663 chunk->size = G.pagesize - CHUNK_OVERHEAD;
664 chunk->large = 0;
665 }
666 else
667 {
668 *pp = chunk->u.next_free;
669 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
670 chunk->large = 0;
671 }
672 /* Release extra memory from a chunk that's too big. */
673 lsize = chunk->size - size;
674 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
675 {
676 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
677 chunk->size = size;
678
679 lsize -= CHUNK_OVERHEAD;
680 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
681 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
682 #ifdef COOKIE_CHECKING
683 lchunk->magic = CHUNK_MAGIC;
684 #endif
685 lchunk->type = 0;
686 lchunk->mark = 0;
687 lchunk->size = lsize;
688 lchunk->large = 0;
689 free_chunk (lchunk, lsize, zone);
690 lsize = 0;
691 }
692
693 /* Calculate the object's address. */
694 found:
695 #ifdef COOKIE_CHECKING
696 chunk->magic = CHUNK_MAGIC;
697 #endif
698 chunk->type = 1;
699 chunk->mark = 0;
700 /* We could save TYPE in the chunk, but we don't use that for
701 anything yet. */
702 result = chunk->u.data;
703
704 #ifdef ENABLE_GC_CHECKING
705 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
706 exact same semantics in presence of memory bugs, regardless of
707 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
708 handle to avoid handle leak. */
709 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
710
711 /* `Poison' the entire allocated object. */
712 memset (result, 0xaf, size);
713 #endif
714
715 /* Tell Valgrind that the memory is there, but its content isn't
716 defined. The bytes at the end of the object are still marked
717	inaccessible. */
718 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
719
720 /* Keep track of how many bytes are being allocated. This
721 information is used in deciding when to collect. */
722 zone->allocated += size;
723
724 #ifdef GATHER_STATISTICS
725 ggc_record_overhead (orig_size, size + CHUNK_OVERHEAD - orig_size PASS_MEM_STAT);
726
727 {
728 size_t object_size = size + CHUNK_OVERHEAD;
729 size_t overhead = object_size - orig_size;
730
731 zone->stats.total_overhead += overhead;
732 zone->stats.total_allocated += object_size;
733
734 if (orig_size <= 32)
735 {
736 zone->stats.total_overhead_under32 += overhead;
737 zone->stats.total_allocated_under32 += object_size;
738 }
739 if (orig_size <= 64)
740 {
741 zone->stats.total_overhead_under64 += overhead;
742 zone->stats.total_allocated_under64 += object_size;
743 }
744 if (orig_size <= 128)
745 {
746 zone->stats.total_overhead_under128 += overhead;
747 zone->stats.total_allocated_under128 += object_size;
748 }
749 }
750 #endif
751
752 if (GGC_DEBUG_LEVEL >= 3)
753 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
754 (void *)chunk, (unsigned long) size, result);
755
756 return result;
757 }
758
759	/* Allocate a chunk of SIZE bytes of memory of type GTE, in the zone
760	appropriate for that type. */
761
762 void *
763 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
764 MEM_STAT_DECL)
765 {
766 switch (gte)
767 {
768 case gt_ggc_e_14lang_tree_node:
769 return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);
770
771 case gt_ggc_e_7rtx_def:
772 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
773
774 case gt_ggc_e_9rtvec_def:
775 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
776
777 default:
778 return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
779 }
780 }
781
782 /* Normal ggc_alloc simply allocates into the main zone. */
783
784 void *
785 ggc_alloc_stat (size_t size MEM_STAT_DECL)
786 {
787 return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
788 }
789
790 /* Zone allocation allocates into the specified zone. */
791
792 void *
793 ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
794 {
795 return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
796 }
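
/* Illustrative sketch only (kept under #if 0, so it is not compiled):
how a caller might pick zones explicitly. The function name is a
made-up placeholder, and the bare calls assume a build where
GATHER_STATISTICS is off, so MEM_STAT_DECL adds no extra parameters. */
#if 0
static void *
example_zone_allocations (size_t nbytes)
{
void *in_main = ggc_alloc_stat (nbytes); /* Main zone. */
void *in_rtl = ggc_alloc_zone_stat (nbytes, rtl_zone); /* RTL zone. */
return in_rtl ? in_rtl : in_main;
}
#endif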
797
798 /* Poison the chunk. */
799 #ifdef ENABLE_GC_CHECKING
800 #define poison_chunk(CHUNK, SIZE) \
801 memset ((CHUNK)->u.data, 0xa5, (SIZE))
802 #else
803 #define poison_chunk(CHUNK, SIZE)
804 #endif
805
806 /* Free the object at P. */
807
808 void
809 ggc_free (void *p)
810 {
811 struct alloc_chunk *chunk;
812
813 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
814
815 /* Poison the chunk. */
816 poison_chunk (chunk, ggc_get_size (p));
817 }
818
819 /* If P is not marked, mark it and return false. Otherwise return true.
820 P must have been allocated by the GC allocator; it mustn't point to
821 static objects, stack variables, or memory allocated with malloc. */
822
823 int
824 ggc_set_mark (const void *p)
825 {
826 struct alloc_chunk *chunk;
827
828 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
829 #ifdef COOKIE_CHECKING
830 gcc_assert (chunk->magic == CHUNK_MAGIC);
831 #endif
832 if (chunk->mark)
833 return 1;
834 chunk->mark = 1;
835
836 if (GGC_DEBUG_LEVEL >= 4)
837 fprintf (G.debug_file, "Marking %p\n", p);
838
839 return 0;
840 }
841
842 /* Return 1 if P has been marked, zero otherwise.
843 P must have been allocated by the GC allocator; it mustn't point to
844 static objects, stack variables, or memory allocated with malloc. */
845
846 int
847 ggc_marked_p (const void *p)
848 {
849 struct alloc_chunk *chunk;
850
851 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
852 #ifdef COOKIE_CHECKING
853 gcc_assert (chunk->magic == CHUNK_MAGIC);
854 #endif
855 return chunk->mark;
856 }
857
858 /* Return the size of the gc-able object P. */
859
860 size_t
861 ggc_get_size (const void *p)
862 {
863 struct alloc_chunk *chunk;
864
865 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
866 #ifdef COOKIE_CHECKING
867 gcc_assert (chunk->magic == CHUNK_MAGIC);
868 #endif
869 if (chunk->large)
870 return chunk->size * 1024;
871
872 return chunk->size;
873 }
874
875 /* Initialize the ggc-zone-mmap allocator. */
876 void
877 init_ggc (void)
878 {
879 /* Set up the main zone by hand. */
880 main_zone.name = "Main zone";
881 G.zones = &main_zone;
882
883 /* Allocate the default zones. */
884 rtl_zone = new_ggc_zone ("RTL zone");
885 tree_zone = new_ggc_zone ("Tree zone");
886 garbage_zone = new_ggc_zone ("Garbage zone");
887
888 G.pagesize = getpagesize();
889 G.lg_pagesize = exact_log2 (G.pagesize);
890 #ifdef HAVE_MMAP_DEV_ZERO
891 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
892 gcc_assert (G.dev_zero_fd != -1);
893 #endif
894
895 #if 0
896 G.debug_file = fopen ("ggc-mmap.debug", "w");
897 setlinebuf (G.debug_file);
898 #else
899 G.debug_file = stdout;
900 #endif
901
902 #ifdef USING_MMAP
903 /* StunOS has an amazing off-by-one error for the first mmap allocation
904 after fiddling with RLIMIT_STACK. The result, as hard as it is to
905 believe, is an unaligned page allocation, which would cause us to
906 hork badly if we tried to use it. */
907 {
908 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
909 struct page_entry *e;
910 if ((size_t)p & (G.pagesize - 1))
911 {
912 /* How losing. Discard this one and try another. If we still
913 can't get something useful, give up. */
914
915 p = alloc_anon (NULL, G.pagesize, &main_zone);
916 gcc_assert (!((size_t)p & (G.pagesize - 1)));
917 }
918
919 /* We have a good page, might as well hold onto it... */
920 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
921 e->bytes = G.pagesize;
922 e->page = p;
923 e->next = main_zone.free_pages;
924 main_zone.free_pages = e;
925 }
926 #endif
927 }
928
929 /* Start a new GGC zone. */
930
931 struct alloc_zone *
932 new_ggc_zone (const char * name)
933 {
934 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
935 new_zone->name = name;
936 new_zone->next_zone = G.zones->next_zone;
937 G.zones->next_zone = new_zone;
938 return new_zone;
939 }
940
941 /* Destroy a GGC zone. */
942 void
943 destroy_ggc_zone (struct alloc_zone * dead_zone)
944 {
945 struct alloc_zone *z;
946
947 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
948 /* Just find that zone. */
949 continue;
950
951 /* We should have found the zone in the list. Anything else is fatal. */
952 gcc_assert (z);
953
954 /* z is dead, baby. z is dead. */
955	z->dead = true;
956 }
957
958 /* Increment the `GC context'. Objects allocated in an outer context
959 are never freed, eliminating the need to register their roots. */
960
961 void
962 ggc_push_context (void)
963 {
964 struct alloc_zone *zone;
965 for (zone = G.zones; zone; zone = zone->next_zone)
966 ++(zone->context_depth);
967 /* Die on wrap. */
968 gcc_assert (main_zone.context_depth < HOST_BITS_PER_LONG);
969 }
970
971 /* Decrement the `GC context'. All objects allocated since the
972 previous ggc_push_context are migrated to the outer context. */
973
974 static void
975 ggc_pop_context_1 (struct alloc_zone *zone)
976 {
977 unsigned long omask;
978 unsigned depth;
979 page_entry *p;
980
981 depth = --(zone->context_depth);
982 omask = (unsigned long)1 << (depth + 1);
983
984 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
985 return;
986
987 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
988 zone->context_depth_allocations &= omask - 1;
989 zone->context_depth_collections &= omask - 1;
990
991 /* Any remaining pages in the popped context are lowered to the new
992 current context; i.e. objects allocated in the popped context and
993 left over are imported into the previous context. */
994 for (p = zone->pages; p != NULL; p = p->next)
995 if (p->context_depth > depth)
996 p->context_depth = depth;
997 }
998
999 /* Pop all the zone contexts. */
1000
1001 void
1002 ggc_pop_context (void)
1003 {
1004 struct alloc_zone *zone;
1005 for (zone = G.zones; zone; zone = zone->next_zone)
1006 ggc_pop_context_1 (zone);
1007 }
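
/* Illustrative sketch only (kept under #if 0, so it is not compiled):
bracketing a batch of temporary allocations with a context, per the
comments above. The function name is a made-up placeholder. */
#if 0
static void
example_context_usage (void)
{
ggc_push_context (); /* Allocations below land in the deeper context. */
/* ... allocate GC'd objects; a collection here may only free objects
from this, the current (highest-numbered) context ... */
ggc_pop_context (); /* Survivors migrate to the outer context. */
}
#endif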
1008
1009	/* Free all empty pages and unmarked objects within pages for a given zone. */
1010
1011 static void
1012 sweep_pages (struct alloc_zone *zone)
1013 {
1014 page_entry **pp, *p, *next;
1015 struct alloc_chunk *chunk, *last_free, *end;
1016 size_t last_free_size, allocated = 0;
1017 bool nomarksinpage;
1018 /* First, reset the free_chunks lists, since we are going to
1019 re-free free chunks in hopes of coalescing them into large chunks. */
1020 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1021 pp = &zone->pages;
1022 for (p = zone->pages; p ; p = next)
1023 {
1024 next = p->next;
1025	/* Large pages are all-or-nothing affairs. Either they are
1026	completely empty, or they are completely full.
1027	
1028	XXX: Should we bother to increment allocated? */
1029 if (p->large_p)
1030 {
1031 if (((struct alloc_chunk *)p->page)->mark == 1)
1032 {
1033 ((struct alloc_chunk *)p->page)->mark = 0;
1034 allocated += p->bytes - CHUNK_OVERHEAD;
1035 pp = &p->next;
1036 }
1037 else
1038 {
1039 *pp = next;
1040 #ifdef ENABLE_GC_CHECKING
1041 /* Poison the page. */
1042 memset (p->page, 0xb5, p->bytes);
1043 #endif
1044 free_page (p);
1045 }
1046 continue;
1047 }
1048
1049 /* This page has now survived another collection. */
1050 p->survived++;
1051
1052 /* Which leaves full and partial pages. Step through all chunks,
1053 consolidate those that are free and insert them into the free
1054 lists. Note that consolidation slows down collection
1055 slightly. */
1056
1057 chunk = (struct alloc_chunk *)p->page;
1058 end = (struct alloc_chunk *)(p->page + G.pagesize);
1059 last_free = NULL;
1060 last_free_size = 0;
1061 nomarksinpage = true;
1062 do
1063 {
1064 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1065 if (chunk->mark || p->context_depth < zone->context_depth)
1066 {
1067 nomarksinpage = false;
1068 if (last_free)
1069 {
1070 last_free->type = 0;
1071 last_free->size = last_free_size;
1072 last_free->mark = 0;
1073 poison_chunk (last_free, last_free_size);
1074 free_chunk (last_free, last_free_size, zone);
1075 last_free = NULL;
1076 }
1077 if (chunk->mark)
1078 {
1079 allocated += chunk->size;
1080 }
1081 chunk->mark = 0;
1082 }
1083 else
1084 {
1085 if (last_free)
1086 {
1087 last_free_size += CHUNK_OVERHEAD + chunk->size;
1088 }
1089 else
1090 {
1091 last_free = chunk;
1092 last_free_size = chunk->size;
1093 }
1094 }
1095
1096 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1097 }
1098 while (chunk < end);
1099
1100 if (nomarksinpage)
1101 {
1102 *pp = next;
1103 #ifdef ENABLE_GC_CHECKING
1104 /* Poison the page. */
1105 memset (p->page, 0xb5, p->bytes);
1106 #endif
1107 free_page (p);
1108 continue;
1109 }
1110 else if (last_free)
1111 {
1112 last_free->type = 0;
1113 last_free->size = last_free_size;
1114 last_free->mark = 0;
1115 poison_chunk (last_free, last_free_size);
1116 free_chunk (last_free, last_free_size, zone);
1117 }
1118 pp = &p->next;
1119 }
1120
1121 zone->allocated = allocated;
1122 }
1123
1124	/* Mark-and-sweep routine for collecting a single zone. NEED_MARKING
1125 is true if we need to mark before sweeping, false if some other
1126 zone collection has already performed marking for us. Returns true
1127 if we collected, false otherwise. */
1128
1129 static bool
1130 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1131 {
1132 if (!quiet_flag)
1133 fprintf (stderr, " {%s GC %luk -> ",
1134 zone->name, (unsigned long) zone->allocated / 1024);
1135
1136 /* Zero the total allocated bytes. This will be recalculated in the
1137 sweep phase. */
1138 zone->allocated = 0;
1139
1140 /* Release the pages we freed the last time we collected, but didn't
1141 reuse in the interim. */
1142 release_pages (zone);
1143
1144 /* Indicate that we've seen collections at this context depth. */
1145 zone->context_depth_collections
1146 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1147 if (need_marking)
1148 ggc_mark_roots ();
1149 sweep_pages (zone);
1150 zone->was_collected = true;
1151 zone->allocated_last_gc = zone->allocated;
1152
1153 if (!quiet_flag)
1154 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1155 return true;
1156 }
1157
1158 /* Calculate the average page survival rate in terms of number of
1159 collections. */
1160
1161 static float
1162 calculate_average_page_survival (struct alloc_zone *zone)
1163 {
1164 float count = 0.0;
1165 float survival = 0.0;
1166 page_entry *p;
1167 for (p = zone->pages; p; p = p->next)
1168 {
1169 count += 1.0;
1170 survival += p->survived;
1171 }
1172 return survival/count;
1173 }
1174
1175 /* Check the magic cookies all of the chunks contain, to make sure we
1176 aren't doing anything stupid, like stomping on alloc_chunk
1177 structures. */
1178
1179 static inline void
1180 check_cookies (void)
1181 {
1182 #ifdef COOKIE_CHECKING
1183 page_entry *p;
1184 struct alloc_zone *zone;
1185
1186 for (zone = G.zones; zone; zone = zone->next_zone)
1187 {
1188 for (p = zone->pages; p; p = p->next)
1189 {
1190 if (!p->large_p)
1191 {
1192 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1193 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1194 do
1195 {
1196 gcc_assert (chunk->magic == CHUNK_MAGIC
1197 || chunk->magic == DEADCHUNK_MAGIC);
1198 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1199 }
1200 while (chunk < end);
1201 }
1202 }
1203 }
1204 #endif
1205 }

1206	/* Top level collection routine. */
1207
1208 void
1209 ggc_collect (void)
1210 {
1211 struct alloc_zone *zone;
1212 bool marked = false;
1213 float f;
1214
1215 timevar_push (TV_GC);
1216 check_cookies ();
1217
1218 if (!always_collect)
1219 {
1220 float allocated_last_gc = 0, allocated = 0, min_expand;
1221
1222 for (zone = G.zones; zone; zone = zone->next_zone)
1223 {
1224 allocated_last_gc += zone->allocated_last_gc;
1225 allocated += zone->allocated;
1226 }
1227
1228 allocated_last_gc =
1229 MAX (allocated_last_gc,
1230 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1231 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1232
1233 if (allocated < allocated_last_gc + min_expand)
1234 {
1235 timevar_pop (TV_GC);
1236 return;
1237 }
1238 }
1239
1240 /* Start by possibly collecting the main zone. */
1241 main_zone.was_collected = false;
1242 marked |= ggc_collect_1 (&main_zone, true);
1243
1244 /* In order to keep the number of collections down, we don't
1245 collect other zones unless we are collecting the main zone. This
1246 gives us roughly the same number of collections as we used to
1247	have with the old GC. The number of collections is important
1248 because our main slowdown (according to profiling) is now in
1249 marking. So if we mark twice as often as we used to, we'll be
1250 twice as slow. Hopefully we'll avoid this cost when we mark
1251 zone-at-a-time. */
1252 /* NOTE drow/2004-07-28: We now always collect the main zone, but
1253 keep this code in case the heuristics are further refined. */
1254
1255 if (main_zone.was_collected)
1256 {
1257 struct alloc_zone *zone;
1258
1259 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1260 {
1261 check_cookies ();
1262 zone->was_collected = false;
1263 marked |= ggc_collect_1 (zone, !marked);
1264 }
1265 }
1266
1267 /* Print page survival stats, if someone wants them. */
1268 if (GGC_DEBUG_LEVEL >= 2)
1269 {
1270 for (zone = G.zones; zone; zone = zone->next_zone)
1271 {
1272 if (zone->was_collected)
1273 {
1274 f = calculate_average_page_survival (zone);
1275 printf ("Average page survival in zone `%s' is %f\n",
1276 zone->name, f);
1277 }
1278 }
1279 }
1280
1281	/* Since we don't mark one zone at a time right now, marking in any
1282 zone means marking in every zone. So we have to clear all the
1283 marks in all the zones that weren't collected already. */
1284 if (marked)
1285 {
1286 page_entry *p;
1287 for (zone = G.zones; zone; zone = zone->next_zone)
1288 {
1289 if (zone->was_collected)
1290 continue;
1291 for (p = zone->pages; p; p = p->next)
1292 {
1293 if (!p->large_p)
1294 {
1295 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1296 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1297 do
1298 {
1299 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1300 if (chunk->mark || p->context_depth < zone->context_depth)
1301 {
1302 chunk->mark = 0;
1303 }
1304 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1305 }
1306 while (chunk < end);
1307 }
1308 else
1309 {
1310 ((struct alloc_chunk *)p->page)->mark = 0;
1311 }
1312 }
1313 }
1314 }
1315
1316 /* Free dead zones. */
1317 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1318 {
1319 if (zone->next_zone->dead)
1320 {
1321 struct alloc_zone *dead_zone = zone->next_zone;
1322
1323 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1324
1325 /* The zone must be empty. */
1326 gcc_assert (!dead_zone->allocated);
1327
1328 /* Unchain the dead zone, release all its pages and free it. */
1329 zone->next_zone = zone->next_zone->next_zone;
1330 release_pages (dead_zone);
1331 free (dead_zone);
1332 }
1333 }
1334
1335 timevar_pop (TV_GC);
1336 }
1337
1338 /* Print allocation statistics. */
1339 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1340 ? (x) \
1341 : ((x) < 1024*1024*10 \
1342 ? (x) / 1024 \
1343 : (x) / (1024*1024))))
1344 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
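
/* For example, SCALE (2000000) == 2000000 / 1024 == 1953 and
LABEL (2000000) == 'k', so two million bytes print as "1953k"; values
below 10240 print unscaled with a blank label. */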
1345
1346 void
1347 ggc_print_statistics (void)
1348 {
1349 struct alloc_zone *zone;
1350 struct ggc_statistics stats;
1351 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1352
1353 /* Clear the statistics. */
1354 memset (&stats, 0, sizeof (stats));
1355
1356 /* Make sure collection will really occur, in all zones. */
1357 always_collect = 1;
1358
1359 /* Collect and print the statistics common across collectors. */
1360 ggc_print_common_statistics (stderr, &stats);
1361
1362 always_collect = 0;
1363
1364 /* Release free pages so that we will not count the bytes allocated
1365 there as part of the total allocated memory. */
1366 for (zone = G.zones; zone; zone = zone->next_zone)
1367 release_pages (zone);
1368
1369 /* Collect some information about the various sizes of
1370 allocation. */
1371 fprintf (stderr,
1372 "Memory still allocated at the end of the compilation process\n");
1373
1374 fprintf (stderr, "%20s %10s %10s %10s\n",
1375 "Zone", "Allocated", "Used", "Overhead");
1376 for (zone = G.zones; zone; zone = zone->next_zone)
1377 {
1378 page_entry *p;
1379 size_t allocated;
1380 size_t in_use;
1381 size_t overhead;
1382
1383 /* Skip empty entries. */
1384 if (!zone->pages)
1385 continue;
1386
1387 overhead = allocated = in_use = 0;
1388
1389 /* Figure out the total number of bytes allocated for objects of
1390 this size, and how many of them are actually in use. Also figure
1391 out how much memory the page table is using. */
1392 for (p = zone->pages; p; p = p->next)
1393 {
1394 struct alloc_chunk *chunk;
1395
1396 /* We've also allocated sizeof (page_entry), but it's not in the
1397 "managed" area... */
1398 allocated += p->bytes;
1399 overhead += sizeof (page_entry);
1400
1401 if (p->large_p)
1402 {
1403 in_use += p->bytes - CHUNK_OVERHEAD;
1404 chunk = (struct alloc_chunk *) p->page;
1405 overhead += CHUNK_OVERHEAD;
1406 gcc_assert (chunk->type && !chunk->mark);
1407 continue;
1408 }
1409
1410 for (chunk = (struct alloc_chunk *) p->page;
1411 (char *) chunk < (char *) p->page + p->bytes;
1412 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size))
1413 {
1414 overhead += CHUNK_OVERHEAD;
1415 if (chunk->type)
1416 in_use += chunk->size;
1417 gcc_assert (!chunk->mark);
1418 }
1419 }
1420 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
1421 zone->name,
1422 SCALE (allocated), LABEL (allocated),
1423 SCALE (in_use), LABEL (in_use),
1424 SCALE (overhead), LABEL (overhead));
1425
1426 gcc_assert (in_use == zone->allocated);
1427
1428 total_overhead += overhead;
1429 total_allocated += zone->allocated;
1430 total_bytes_mapped += zone->bytes_mapped;
1431 }
1432
1433 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
1434 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
1435	SCALE (total_allocated), LABEL (total_allocated),
1436 SCALE (total_overhead), LABEL (total_overhead));
1437
1438 #ifdef GATHER_STATISTICS
1439 {
1440 unsigned long long all_overhead = 0, all_allocated = 0;
1441 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
1442 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
1443 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
1444
1445 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
1446
1447 for (zone = G.zones; zone; zone = zone->next_zone)
1448 {
1449 all_overhead += zone->stats.total_overhead;
1450 all_allocated += zone->stats.total_allocated;
1451
1452 all_allocated_under32 += zone->stats.total_allocated_under32;
1453 all_overhead_under32 += zone->stats.total_overhead_under32;
1454
1455 all_allocated_under64 += zone->stats.total_allocated_under64;
1456 all_overhead_under64 += zone->stats.total_overhead_under64;
1457
1458 all_allocated_under128 += zone->stats.total_allocated_under128;
1459 all_overhead_under128 += zone->stats.total_overhead_under128;
1460
1461	fprintf (stderr, "%20s: %10llu\n",
1462	zone->name, zone->stats.total_allocated);
1463	}
1464	
1465	fprintf (stderr, "\n");
1466	
1467	fprintf (stderr, "Total Overhead: %10llu\n",
1468	all_overhead);
1469	fprintf (stderr, "Total Allocated: %10llu\n",
1470	all_allocated);
1471	
1472	fprintf (stderr, "Total Overhead under 32B: %10llu\n",
1473	all_overhead_under32);
1474	fprintf (stderr, "Total Allocated under 32B: %10llu\n",
1475	all_allocated_under32);
1476	fprintf (stderr, "Total Overhead under 64B: %10llu\n",
1477	all_overhead_under64);
1478	fprintf (stderr, "Total Allocated under 64B: %10llu\n",
1479	all_allocated_under64);
1480	fprintf (stderr, "Total Overhead under 128B: %10llu\n",
1481	all_overhead_under128);
1482	fprintf (stderr, "Total Allocated under 128B: %10llu\n",
1483	all_allocated_under128);
1484 }
1485 #endif
1486 }
1487
1488 struct ggc_pch_data
1489 {
1490 struct ggc_pch_ondisk
1491 {
1492 unsigned total;
1493 } d;
1494 size_t base;
1495 size_t written;
1496 };
1497
1498 /* Initialize the PCH data structure. */
1499
1500 struct ggc_pch_data *
1501 init_ggc_pch (void)
1502 {
1503 return xcalloc (sizeof (struct ggc_pch_data), 1);
1504 }
1505
1506 /* Add the size of object X to the size of the PCH data. */
1507
1508 void
1509 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1510 size_t size, bool is_string)
1511 {
1512 if (!is_string)
1513 {
1514 d->d.total += size + CHUNK_OVERHEAD;
1515 }
1516 else
1517 d->d.total += size;
1518 }
1519
1520 /* Return the total size of the PCH data. */
1521
1522 size_t
1523 ggc_pch_total_size (struct ggc_pch_data *d)
1524 {
1525 return d->d.total;
1526 }
1527
1528 /* Set the base address for the objects in the PCH file. */
1529
1530 void
1531 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1532 {
1533 d->base = (size_t) base;
1534 }
1535
1536 /* Allocate a place for object X of size SIZE in the PCH file. */
1537
1538 char *
1539 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1540 size_t size, bool is_string)
1541 {
1542 char *result;
1543 result = (char *)d->base;
1544 if (!is_string)
1545 {
1546 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1547 if (chunk->large)
1548 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1549 else
1550 d->base += chunk->size + CHUNK_OVERHEAD;
1551 return result + CHUNK_OVERHEAD;
1552 }
1553 else
1554 {
1555 d->base += size;
1556 return result;
1557 }
1559	}
1560
1561 /* Prepare to write out the PCH data to file F. */
1562
1563 void
1564 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1565 FILE *f ATTRIBUTE_UNUSED)
1566 {
1567 /* Nothing to do. */
1568 }
1569
1570 /* Write out object X of SIZE to file F. */
1571
1572 void
1573 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1574 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1575 size_t size, bool is_string)
1576 {
1577 if (!is_string)
1578 {
1579 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1580 size = ggc_get_size (x);
1581 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1582 fatal_error ("can't write PCH file: %m");
1583 d->written += size + CHUNK_OVERHEAD;
1584 }
1585 else
1586 {
1587 if (fwrite (x, size, 1, f) != 1)
1588 fatal_error ("can't write PCH file: %m");
1589 d->written += size;
1590 }
1591 }
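
/* Illustrative sketch only (kept under #if 0, so it is not compiled):
the order in which GCC's PCH writer drives the hooks in this file.
The function name and parameters are made-up placeholders for state
that lives in the PCH driver, and the single OBJ stands for a loop
over every live object. */
#if 0
static void
example_pch_write_flow (FILE *pch_file, void *obj, size_t size)
{
struct ggc_pch_data *d = init_ggc_pch ();
size_t total;
char *new_addr;

/* Pass 1: measure every live object. */
ggc_pch_count_object (d, obj, size, false);
total = ggc_pch_total_size (d); /* Bytes the reader must later map. */

/* Pass 2: assign each object its address in the mapped region, then
write the raw bytes and finally the summary header. */
ggc_pch_this_base (d, (void *) 0 /* chosen base address */);
new_addr = ggc_pch_alloc_object (d, obj, size, false);
ggc_pch_prepare_write (d, pch_file);
ggc_pch_write_object (d, pch_file, obj, new_addr, size, false);
ggc_pch_finish (d, pch_file); /* Writes the header and frees D. */
}
#endif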
1592
/* Finish writing PCH data: write the summary structure itself to F,
and free D. */
1593	void
1594 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1595 {
1596 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1597 fatal_error ("can't write PCH file: %m");
1598 free (d);
1599 }

/* Read the PCH summary data from F; the preloaded objects themselves are
assumed to have been mapped in at ADDR, and are placed in a new PCH zone. */
1600	void
1601 ggc_pch_read (FILE *f, void *addr)
1602 {
1603 struct ggc_pch_ondisk d;
1604 struct page_entry *entry;
1605 struct alloc_zone *pch_zone;
1606 if (fread (&d, sizeof (d), 1, f) != 1)
1607 fatal_error ("can't read PCH file: %m");
1608 entry = xcalloc (1, sizeof (struct page_entry));
1609 entry->bytes = d.total;
1610 entry->page = addr;
1611 entry->context_depth = 0;
1612 pch_zone = new_ggc_zone ("PCH zone");
1613 entry->zone = pch_zone;
1614 entry->next = entry->zone->pages;
1615 entry->zone->pages = entry;
1616 }