1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
/* This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
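
/* A compiled-out sketch of the chunk bitmap described above; it is
   illustrative only and assumes BYTES_PER_ALLOC_BIT == 8.  Chunk
   starts are recorded as set bits, and a chunk's extent runs to the
   next set bit.  */
#if 0
static void
example_chunk_bitmap (void)
{
  unsigned int bits = 0;
  bits |= 1u << (24 / 8);   /* chunk starting at page offset 24 -> bit 3 */
  bits |= 1u << (40 / 8);   /* next chunk at offset 40 -> bit 5 */
  /* The first chunk spans offsets 24..39, i.e. (5 - 3) * 8 == 16 bytes.  */
}
#endif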
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};
/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))
/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12
#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif
/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE
/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
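
/* A compiled-out worked example of the binning above, assuming
   MAX_ALIGNMENT == 8 (so FREE_BIN_DELTA == 8): a 24-byte chunk maps
   to bin 3, a 512-byte chunk to bin 64 (the last fixed-size bin), and
   anything larger falls past NUM_FREE_BINS and is kept on the
   "other" list in bin 0 (see free_chunk below).  */
#if 0
static void
example_size_bins (void)
{
  size_t b24 = SIZE_BIN_DOWN (24);    /* 3 */
  size_t b512 = SIZE_BIN_DOWN (512);  /* 64 == NUM_FREE_BINS */
  size_t b600 = SIZE_BIN_DOWN (600);  /* 75 > NUM_FREE_BINS -> bin 0 */
  (void) b24; (void) b512; (void) b600;
}
#endif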
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */
struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
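
/* A compiled-out example of the rounding helper, assuming
   MAX_ALIGNMENT == 8: CEIL divides rounding up, so requests between
   multiples are bumped to the next multiple.  */
#if 0
static void
example_round_up (void)
{
  size_t a = ROUND_UP (13, MAX_ALIGNMENT);  /* 16 */
  size_t b = ROUND_UP (16, MAX_ALIGNMENT);  /* 16 */
  (void) a; (void) b;
}
#endif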
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
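
/* The bitmap code below leans on the 1-based semantics of ffs, a
   compiled-out reminder: ffs returns 0 when no bit is set, and
   otherwise the 1-based index of the least significant set bit.  */
#if 0
static void
example_ffs (void)
{
  int none = alloc_ffs (0);    /* 0: no bit set */
  int low  = alloc_ffs (0x1);  /* 1: bit 0 is "bit number 1" */
  int bit3 = alloc_ffs (0x8);  /* 4 */
  (void) none; (void) low; (void) bit3;
}
#endif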
/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

  /* How many collections we've survived.  */
  size_t survived;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;
/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};
/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
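
/* A compiled-out worked example of the lookup macros, assuming
   GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8 (so PAGE_L2_BITS == 12):
   for the address 0x12345678, LOOKUP_L1 extracts 0x12 (bits 24-31)
   and LOOKUP_L2 extracts 0x345 (bits 12-23); the low 12 bits are the
   offset within the page.  */
#if 0
static void
example_lookup (void)
{
  size_t l1 = LOOKUP_L1 (0x12345678);  /* 0x12  */
  size_t l2 = LOOKUP_L2 (0x12345678);  /* 0x345 */
  (void) l1; (void) l2;
}
#endif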
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
} main_zone;
/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;
/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}
/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}
/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}
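
/* A compiled-out worked example of the accessors above, assuming
   GGC_PAGE_SIZE == 4096, BYTES_PER_ALLOC_BIT == 8 and 32-bit
   alloc_type/mark_type: an object at page offset 2072 is allocation
   unit 259 (2072 / 8), which lands in word 8 (259 / 32) at bit 3
   (259 % 32) of both bitmaps, since BYTES_PER_MARK_BIT ==
   BYTES_PER_ALLOC_BIT here.  */
#if 0
static void
example_bitmap_indices (void)
{
  size_t unit = 2072 / 8;   /* 259 */
  size_t word = unit / 32;  /* 8   */
  size_t bit = unit % 32;   /* 3   */
  (void) word; (void) bit;
}
#endif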
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}
/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}
/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   It cancels out.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
		       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
		     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
			     max_size);
}
/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
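
/* A compiled-out example of highest_bit, which smears the top set bit
   downwards and then isolates it: for 0x14 (bits 2 and 4 set) the
   smear yields 0x1f, the xor leaves 0x10, and alloc_ffs returns 5,
   i.e. the same 1-based numbering ffs uses.  */
#if 0
static void
example_highest_bit (void)
{
  alloc_type n = highest_bit (0x14);  /* 5 */
  (void) n;
}
#endif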
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
			 size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}
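
/* A compiled-out example of the offset search above, assuming
   BYTES_PER_ALLOC_BIT == 8: if an object's start bit is bit 3 of
   word 0 and P falls in the unit covered by bit 6, the mask keeps
   bits 0-6, highest_bit finds bit 3 (returning 4), and the offset is
   (6 - 3) * 8 == 24 bytes back to the object's start.  */
#if 0
static void
example_object_offset (void)
{
  alloc_type bits[1] = { 1u << 3 };  /* object starts at unit 3 */
  unsigned long off = zone_find_object_offset (bits, 0, 6);  /* 24 */
  (void) off;
}
#endif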
/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
      gcc_checking_assert (n == zone->n_small_pages);
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}
/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
  entry->common.survived = 0;
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
						SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}
/* Release a large page that is no longer needed.  */

static void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      free (p);
      p = next;

      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk),
						    size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk *),
						    size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (requested_size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (requested_size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  return size;
}
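
/* A compiled-out example of the rounding rule, assuming
   MAX_ALIGNMENT == 8: requests of 0, 1, 8 and 13 bytes become 8, 8, 8
   and 16 bytes respectively, so every object size is a multiple of
   the bin delta.  */
#if 0
static void
example_round_alloc (void)
{
  size_t s0 = ggc_round_alloc_size (0);    /* 8  */
  size_t s13 = ggc_round_alloc_size (13);  /* 16 */
  (void) s0; (void) s13;
}
#endif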
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
			      MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = ggc_round_alloc_size (orig_size);

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

      entry->common.survived = 0;

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
						size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

  if (GATHER_STATISTICS)
    ggc_record_overhead (orig_size, size - orig_size, result FINAL_PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}
#define ggc_internal_alloc_zone_pass_stat(s,z)          \
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
				      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}
/* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}
/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)				      \
  do {								      \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));				      \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
	 since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is alloced.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
					alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
	 allocation boundary, so we have to update the offset to
	 account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
		   & (1 << zone_get_object_mark_bit (p)));
    }
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
				 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
	/* We have a good page, might as well hold onto it...  */
	e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	e->common.page = p;
	e->common.zone = &main_zone;
	e->next = main_zone.free_pages;
	set_page_table_entry (e->common.page, &e->common);
	main_zone.free_pages = e;
      }
    else
      munmap (p, G.pagesize);
  }
#endif
}
/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}
/* Free all empty pages and objects within a page for a given zone  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

      /* This page has now survived another collection.  */
      lp->common.survived++;

      if (lp->mark_p)
	{
	  lp->mark_p = false;
	  allocated += lp->bytes;
	  lpp = &lp->next;
	}
      else
	{
	  *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  if (lp->prev)
	    lp->prev->next = lp->next;
	  if (lp->next)
	    lp->next->prev = lp->prev;
	  free_large_page (lp);
	}
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

      /* This page has now survived another collection.  */
      sp->common.survived++;

      /* Step through all chunks, consolidate those that are free and
	 insert them into the free lists.  Note that consolidation
	 slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
	{
	  unsigned int i, n;
	  alloc_type alloc_word;
	  mark_type mark_word;

	  alloc_word = *alloc_word_p++;
	  mark_word = *mark_word_p++;

	  if (mark_word)
	    nomarksinpage = false;

	  /* There ought to be some way to do this without looping...  */
	  i = 0;
	  while ((n = alloc_ffs (alloc_word)) != 0)
	    {
	      /* Extend the current state for n - 1 bits.  We can't
		 shift alloc_word by n, even though it isn't used in the
		 loop, in case only the highest bit was set.  */
	      alloc_word >>= n - 1;
	      mark_word >>= n - 1;
	      object += BYTES_PER_MARK_BIT * (n - 1);

	      if (mark_word & 1)
		{
		  if (last_free)
		    {
		      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
								     object
								     - last_free));
		      poison_region (last_free, object - last_free);
		      free_chunk (last_free, object - last_free, zone);
		      last_free = NULL;
		    }
		  else
		    allocated += object - last_object;
		  last_object = object;
		}
	      else
		{
		  if (last_free == NULL)
		    {
		      last_free = object;
		      allocated += object - last_object;
		    }
		  else
		    zone_clear_object_alloc_bit (sp, object);
		}

	      /* Shift to just after the alloc bit we handled.  */
	      alloc_word >>= 1;
	      mark_word >>= 1;
	      object += BYTES_PER_MARK_BIT;

	      i += n;
	    }

	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
	}
      while (object < end);

      if (nomarksinpage)
	{
	  *spp = snext;
#ifdef ENABLE_GC_CHECKING
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
							 SMALL_PAGE_SIZE));
	  /* Poison the page.  */
	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  free_small_page (sp);
	  continue;
	}
      else if (last_free)
	{
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
							 object - last_free));
	  poison_region (last_free, object - last_free);
	  free_chunk (last_free, object - last_free, zone);
	}
      else
	allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* Debug dump of the per-bin free lists.  */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
	struct alloc_chunk *chunk;
	int n, tot;

	n = 0;
	tot = 0;
	chunk = zone->free_chunks[i];
	while (chunk)
	  {
	    n++;
	    tot += i * FREE_BIN_DELTA;
	    chunk = chunk->next_free;
	  }
	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
		 i, n, tot);
      }
  }
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
      if (GATHER_STATISTICS)
	ggc_prune_overhead_list ();
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival/count;
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  allocated_last_gc += zone->allocated_last_gc;
	  allocated += zone->allocated;
	}

      allocated_last_gc =
	MAX (allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
	{
	  timevar_pop (TV_GC);
	  return;
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collection is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GATHER_STATISTICS && GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      float f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  gcc_assert (!dead_zone->allocated);

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
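
/* A compiled-out example of the scaling pair, which picks a unit and
   its suffix together: 4096 prints as "4096" with a blank suffix
   (under 10k, raw bytes), 65536 as "64k", and 33554432 as "32M".  */
#if 0
static void
example_scale (void)
{
  unsigned long v = SCALE (65536);  /* 64  */
  char c = LABEL (65536);           /* 'k' */
  (void) v; (void) c;
}
#endif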
2084 ggc_print_statistics (void)
2086 struct alloc_zone
*zone
;
2087 struct ggc_statistics stats
;
2088 size_t total_overhead
= 0, total_allocated
= 0, total_bytes_mapped
= 0;
2089 size_t pte_overhead
, i
;
2091 /* Clear the statistics. */
2092 memset (&stats
, 0, sizeof (stats
));
2094 /* Make sure collection will really occur. */
2095 ggc_force_collect
= true;
2097 /* Collect and print the statistics common across collectors. */
2098 ggc_print_common_statistics (stderr
, &stats
);
2100 ggc_force_collect
= false;
2102 /* Release free pages so that we will not count the bytes allocated
2103 there as part of the total allocated memory. */
2104 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2105 release_pages (zone
);
2107 /* Collect some information about the various sizes of
2110 "Memory still allocated at the end of the compilation process\n");
2112 fprintf (stderr
, "%20s %10s %10s %10s\n",
2113 "Zone", "Allocated", "Used", "Overhead");
2114 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2116 struct large_page_entry
*large_page
;
2117 size_t overhead
, allocated
, in_use
;
2119 /* Skip empty zones. */
2120 if (!zone
->pages
&& !zone
->large_pages
)
2123 allocated
= in_use
= 0;
2125 overhead
= sizeof (struct alloc_zone
);
2127 for (large_page
= zone
->large_pages
; large_page
!= NULL
;
2128 large_page
= large_page
->next
)
2130 allocated
+= large_page
->bytes
;
2131 in_use
+= large_page
->bytes
;
2132 overhead
+= sizeof (struct large_page_entry
);
2135 /* There's no easy way to walk through the small pages finding
2136 used and unused objects. Instead, add all the pages, and
2137 subtract out the free list. */
2139 allocated
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2140 in_use
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2141 overhead
+= G
.small_page_overhead
* zone
->n_small_pages
;
2143 for (i
= 0; i
<= NUM_FREE_BINS
; i
++)
2145 struct alloc_chunk
*chunk
= zone
->free_chunks
[i
];
2148 in_use
-= ggc_get_size (chunk
);
2149 chunk
= chunk
->next_free
;
2153 fprintf (stderr
, "%20s %10lu%c %10lu%c %10lu%c\n",
2155 SCALE (allocated
), LABEL (allocated
),
2156 SCALE (in_use
), LABEL (in_use
),
2157 SCALE (overhead
), LABEL (overhead
));
2159 gcc_assert (in_use
== zone
->allocated
);
2161 total_overhead
+= overhead
;
2162 total_allocated
+= zone
->allocated
;
2163 total_bytes_mapped
+= zone
->bytes_mapped
;
2166 /* Count the size of the page table as best we can. */
2167 #if HOST_BITS_PER_PTR <= 32
2168 pte_overhead
= sizeof (G
.lookup
);
2169 for (i
= 0; i
< PAGE_L1_SIZE
; i
++)
2171 pte_overhead
+= PAGE_L2_SIZE
* sizeof (struct page_entry
*);
2174 page_table table
= G
.lookup
;
2178 pte_overhead
+= sizeof (*table
);
2179 for (i
= 0; i
< PAGE_L1_SIZE
; i
++)
2180 if (table
->table
[i
])
2181 pte_overhead
+= PAGE_L2_SIZE
* sizeof (struct page_entry
*);
2182 table
= table
->next
;
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL (total_allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
  if (GATHER_STATISTICS)
    {
      unsigned long long all_overhead = 0, all_allocated = 0;
      unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
      unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
      unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
      fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  all_overhead += zone->stats.total_overhead;
	  all_allocated += zone->stats.total_allocated;

	  all_allocated_under32 += zone->stats.total_allocated_under32;
	  all_overhead_under32 += zone->stats.total_overhead_under32;

	  all_allocated_under64 += zone->stats.total_allocated_under64;
	  all_overhead_under64 += zone->stats.total_overhead_under64;

	  all_allocated_under128 += zone->stats.total_allocated_under128;
	  all_overhead_under128 += zone->stats.total_overhead_under128;

	  fprintf (stderr, "%20s: %10lld\n",
		   zone->name, zone->stats.total_allocated);
	}
      fprintf (stderr, "\n");

      fprintf (stderr, "Total Overhead: %10lld\n",
	       all_overhead);
      fprintf (stderr, "Total Allocated: %10lld\n",
	       all_allocated);

      fprintf (stderr, "Total Overhead under 32B: %10lld\n",
	       all_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B: %10lld\n",
	       all_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B: %10lld\n",
	       all_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B: %10lld\n",
	       all_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B: %10lld\n",
	       all_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	       all_allocated_under128);
    }
}
/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)

#define OTHER_BUCKET		(gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
#define STRING_BUCKET		(gt_types_enum_last + 2)
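/* The bucket space is therefore laid out as follows: indices
   0 .. gt_types_enum_last - 1 hold objects of each known GTY type
   (pch_bucket below returns the type enumerator itself for those),
   followed by the three special buckets defined above, for
   NUM_PCH_BUCKETS buckets in total.  */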
struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
	    bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
	return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
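/* Worked example, assuming a GGC_PAGE_SIZE of 4096, a
   BYTES_PER_ALLOC_BIT of 8 and a MAX_ALIGNMENT of 8 for
   concreteness: a bucket that counted 5000 bytes is rounded up to
   8192 so that it occupies whole pages.  If that is the only
   nonzero bucket, d.total is 8192; the bitmap then needs
   CEIL (8192, 64) == 128 bytes, already a multiple of
   MAX_ALIGNMENT, and the PCH data occupies 8192 + 128 bytes.  */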
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
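/* After this runs, the buckets occupy consecutive page-aligned
   regions starting at BASE_: type_bases[i] is the running offset at
   which bucket I's next object will be placed, and
   ggc_pch_alloc_object below bumps it as each object is assigned a
   slot.  */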
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string,
		      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}
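/* Worked example of the bitmap indexing above, assuming a
   BYTES_PER_ALLOC_BIT of 8 and a one-byte alloc_type for
   concreteness: an object placed 200 bytes past ORIG_BASE
   corresponds to allocation bit 200 / 8 == 25, which lives in
   word 25 / 8 == 3 at bit position 25 % 8 == 1, matching the two
   divisions above.  */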
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
		       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}
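/* To summarize the resulting file layout, as implied by
   ggc_pch_prepare_write, ggc_pch_write_object and the two writes
   above: starting at start_offset come d.total bytes of object
   data, then alloc_size bytes of allocation bitmap, then the
   struct ggc_pch_ondisk footer that ggc_pch_read later
   retrieves.  */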
/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  if (GATHER_STATISTICS)
    {
      /* We've just read in a PCH file.  So, every object that used to be
	 allocated is now free.  */
      zone_allocate_marks ();
      ggc_prune_overhead_list ();
      zone_free_marks ();
    }

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
	{
	  next_page = page->next;
	  memset (page->alloc_bits, 0,
		  G.small_page_overhead - PAGE_OVERHEAD);
	  free_small_page (page);
	}

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
	   large_page = next_large_page)
	{
	  next_large_page = large_page->next;
	  free_large_page (large_page);
	}

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}
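/* From this point on, any address inside the mapped PCH range
   resolves through the page table to the single pch_page entry, so
   the collector can recognize PCH-resident objects by its pch_p
   flag instead of tracking them page by page.  */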