1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
3 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39
40 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
41 file open. Prefer either to valloc. */
42 #ifdef HAVE_MMAP_ANON
43 # undef HAVE_MMAP_DEV_ZERO
44
45 # include <sys/mman.h>
46 # ifndef MAP_FAILED
47 # define MAP_FAILED -1
48 # endif
49 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
50 # define MAP_ANONYMOUS MAP_ANON
51 # endif
52 # define USING_MMAP
53 #endif
54
55 #ifdef HAVE_MMAP_DEV_ZERO
56 # include <sys/mman.h>
57 # ifndef MAP_FAILED
58 # define MAP_FAILED -1
59 # endif
60 # define USING_MMAP
61 #endif
62
63 #ifndef USING_MMAP
64 #error Zone collector requires mmap
65 #endif
66
67 #if (GCC_VERSION < 3001)
68 #define prefetch(X) ((void) X)
69 #define prefetchw(X) ((void) X)
70 #else
71 #define prefetch(X) __builtin_prefetch (X)
72 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
73 #endif
74
75 /* FUTURE NOTES:
76
77 If we track inter-zone pointers, we can mark single zones at a
78 time.
79
80 If we have a zone where we guarantee no inter-zone pointers, we
81 could mark that zone separately.
82
83 The garbage zone should not be marked, and we should return 1 in
84 ggc_set_mark for any object in the garbage zone, which cuts off
85 marking quickly. */
86
87 /* Strategy:
88
89 This garbage-collecting allocator segregates objects into zones.
90 It also segregates objects into "large" and "small" bins. Large
91 objects are greater than page size.
92
93 Pages for small objects are broken up into chunks. The page has
94 a bitmap which marks the start position of each chunk (whether
95 allocated or free). Free chunks are on one of the zone's free
96 lists and contain a pointer to the next free chunk. Chunks in
97 most of the free lists have a fixed size determined by the
98 free list. Chunks in the "other" sized free list have their size
99 stored right after their chain pointer.
100
101 Empty pages (of all sizes) are kept on a single page cache list,
102 and are considered first when new pages are required; they are
103 deallocated at the start of the next collection if they haven't
104 been recycled by then. The free page list is currently per-zone. */
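
/* As a concrete illustration of the strategy above, a client might
   obtain storage from a specific zone and release it again as in
   the sketch below.  This is illustrative only (hence the #if 0);
   the real entry points, ggc_alloc_zone_pass_stat and ggc_free, are
   defined later in this file, as are the zones themselves.  */
#if 0
static void
example_zone_usage (void)
{
  /* A 24-byte request is rounded up to a multiple of MAX_ALIGNMENT
     and carved out of a small page belonging to the RTL zone.  */
  void *obj = ggc_alloc_zone_pass_stat (24, &rtl_zone);

  /* Explicit freeing threads the chunk back onto one of the zone's
     free bins; no page is unmapped at this point.  */
  ggc_free (obj);
}
#endif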
105
106 /* Define GGC_DEBUG_LEVEL to print debugging information.
107 0: No debugging output.
108 1: GC statistics only.
109 2: Page-entry allocations/deallocations as well.
110 3: Object allocations as well.
111 4: Object marks as well. */
112 #define GGC_DEBUG_LEVEL (0)
113
114 #ifndef HOST_BITS_PER_PTR
115 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
116 #endif
117
118 /* This structure manages small free chunks. The SIZE field is only
119 initialized if the chunk is in the "other" sized free list. Large
120 chunks are allocated one at a time to their own page, and so don't
121 come in here. */
122
123 struct alloc_chunk {
124 struct alloc_chunk *next_free;
125 unsigned int size;
126 };
127
128 /* The size of the fixed-size portion of a small page descriptor. */
129 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
130
131 /* The collector's idea of the page size. This must be a power of two
132 no larger than the system page size, because pages must be aligned
133 to this amount and are tracked at this granularity in the page
134 table. We choose a size at compile time for efficiency.
135
136 We could make a better guess at compile time if PAGE_SIZE is a
137 constant in system headers, and PAGE_SHIFT is defined... */
138 #define GGC_PAGE_SIZE 4096
139 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
140 #define GGC_PAGE_SHIFT 12
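
/* GGC_PAGE_SIZE and GGC_PAGE_SHIFT must agree.  A sketch of a
   compile-time check, left disabled in the style of the alternative
   definitions below: the array gets a negative size, and therefore
   fails to compile, if the two constants fall out of sync.  */
#if 0
extern char check_ggc_page_shift[GGC_PAGE_SIZE == (1 << GGC_PAGE_SHIFT)
				 ? 1 : -1];
#endif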
141
142 #if 0
143 /* Alternative definitions which use the runtime page size. */
144 #define GGC_PAGE_SIZE G.pagesize
145 #define GGC_PAGE_MASK G.page_mask
146 #define GGC_PAGE_SHIFT G.lg_pagesize
147 #endif
148
149 /* The size of a small page managed by the garbage collector. This
150 must currently be GGC_PAGE_SIZE, but with a few changes could
151 be any multiple of it to reduce certain kinds of overhead. */
152 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
153
154 /* Free bin information. These numbers may be in need of re-tuning.
155 In general, decreasing the number of free bins would seem to
156 increase the time it takes to allocate... */
157
158 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
159 today. */
160
161 #define NUM_FREE_BINS 64
162 #define FREE_BIN_DELTA MAX_ALIGNMENT
163 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
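
/* For example, assuming MAX_ALIGNMENT (and hence FREE_BIN_DELTA) is
   8, a 40-byte chunk maps to bin 5, and anything larger than
   NUM_FREE_BINS * 8 == 512 bytes lands in the "other" sized bin,
   slot 0 (see free_chunk below).  The value 8 is only an assumption;
   MAX_ALIGNMENT is computed from struct max_alignment further
   down.  */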
164
165 /* Allocation and marking parameters. */
166
167 /* The smallest allocatable unit to keep track of. */
168 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
169
170 /* The smallest markable unit. If we require each allocated object
171 to contain at least two allocatable units, we can use half as many
172 bits for the mark bitmap. But this adds considerable complexity
173 to sweeping. */
174 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
175
176 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
177
178 /* We use this structure to determine the alignment required for
179 allocations.
180
181 There are several things wrong with this estimation of alignment.
182
183 The maximum alignment for a structure is often less than the
184 maximum alignment for a basic data type; for instance, on some
185 targets long long must be aligned to sizeof (int) in a structure
186 and sizeof (long long) in a variable. i386-linux is one example;
187 Darwin is another (sometimes, depending on the compiler in use).
188
189 Also, long double is not included. Nothing in GCC uses long
190 double, so we assume that this is OK. On powerpc-darwin, adding
191 long double would bring the maximum alignment up to 16 bytes,
192 and until we need long double (or to vectorize compiler operations)
193 that's painfully wasteful. This will need to change, some day. */
194
195 struct max_alignment {
196 char c;
197 union {
198 HOST_WIDEST_INT i;
199 double d;
200 } u;
201 };
202
203 /* The biggest alignment required. */
204
205 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
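
/* The offsetof trick works because the compiler must pad the char
   member out to the alignment of the union that follows it.  On a
   typical 64-bit host, where both HOST_WIDEST_INT and double are
   8-byte aligned, MAX_ALIGNMENT evaluates to 8; on i386-linux it is
   4, per the struct-alignment caveat above.  These values are
   illustrative assumptions, not requirements.  */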
206
207 /* Compute the smallest multiple of F that is >= X. */
208
209 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
210
211 /* Types to use for the allocation and mark bitmaps. It might be
212 a good idea to add ffsl to libiberty and use unsigned long
213 instead; that could speed us up where long is wider than int. */
214
215 typedef unsigned int alloc_type;
216 typedef unsigned int mark_type;
217 #define alloc_ffs(x) ffs(x)
218
219 /* A page_entry records the status of an allocation page. This is the
220 common data between all three kinds of pages - small, large, and
221 PCH. */
222 typedef struct page_entry
223 {
224 /* The address at which the memory is allocated. */
225 char *page;
226
227 /* The zone that this page entry belongs to. */
228 struct alloc_zone *zone;
229
230 #ifdef GATHER_STATISTICS
231 /* How many collections we've survived. */
232 size_t survived;
233 #endif
234
235 /* Does this page contain small objects, or one large object? */
236 bool large_p;
237
238 /* Is this page part of the loaded PCH? */
239 bool pch_p;
240 } page_entry;
241
242 /* Additional data needed for small pages. */
243 struct small_page_entry
244 {
245 struct page_entry common;
246
247 /* The next small page entry, or NULL if this is the last. */
248 struct small_page_entry *next;
249
250 /* If currently marking this zone, a pointer to the mark bits
251 for this page. If we aren't currently marking this zone,
252 this pointer may be stale (pointing to freed memory). */
253 mark_type *mark_bits;
254
255 /* The allocation bitmap. This array extends far enough to have
256 one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
257 alloc_type alloc_bits[1];
258 };
259
260 /* Additional data needed for large pages. */
261 struct large_page_entry
262 {
263 struct page_entry common;
264
265 /* The next large page entry, or NULL if this is the last. */
266 struct large_page_entry *next;
267
268 /* The number of bytes allocated, not including the page entry. */
269 size_t bytes;
270
271 /* The previous page in the list, so that we can unlink this one. */
272 struct large_page_entry *prev;
273
274 /* During marking, is this object marked? */
275 bool mark_p;
276 };
277
278 /* A two-level tree is used to look up the page-entry for a given
279 pointer. Two chunks of the pointer's bits are extracted to index
280 the first and second levels of the tree, as follows:
281
282                               HOST_PAGE_SIZE_BITS
283                       32            |      |
284    msb +----------------+----+------+------+ lsb
285                           |     |
286                PAGE_L1_BITS     |
287                                 |
288                           PAGE_L2_BITS
289
290 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
291 pages are aligned on system page boundaries. The next most
292 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
293 index values in the lookup table, respectively.
294
295 For 32-bit architectures and the settings below, there are no
296 leftover bits. For architectures with wider pointers, the lookup
297 tree points to a list of pages, which must be scanned to find the
298 correct one. */
299
300 #define PAGE_L1_BITS (8)
301 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
302 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
303 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
304
305 #define LOOKUP_L1(p) \
306 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
307
308 #define LOOKUP_L2(p) \
309 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
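
/* A worked example, assuming a 32-bit host and the settings above
   (PAGE_L1_BITS == 8, GGC_PAGE_SHIFT == 12, so PAGE_L2_BITS == 12):
   for p == 0x12345678, LOOKUP_L1 (p) extracts the top eight bits,
   0x12, and LOOKUP_L2 (p) the next twelve, 0x345.  The low twelve
   bits, 0x678, are the offset within the GGC page and are not used
   by the lookup.  */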
310
311 #if HOST_BITS_PER_PTR <= 32
312
313 /* On 32-bit hosts, we use a two level page table, as pictured above. */
314 typedef page_entry **page_table[PAGE_L1_SIZE];
315
316 #else
317
318 /* On 64-bit hosts, we use the same two level page tables plus a linked
319 list that disambiguates the top 32-bits. There will almost always be
320 exactly one entry in the list. */
321 typedef struct page_table_chain
322 {
323 struct page_table_chain *next;
324 size_t high_bits;
325 page_entry **table[PAGE_L1_SIZE];
326 } *page_table;
327
328 #endif
329
330 /* The global variables. */
331 static struct globals
332 {
333 /* The linked list of zones. */
334 struct alloc_zone *zones;
335
336 /* Lookup table for associating allocation pages with object addresses. */
337 page_table lookup;
338
339 /* The system's page size, and related constants. */
340 size_t pagesize;
341 size_t lg_pagesize;
342 size_t page_mask;
343
344 /* The size to allocate for a small page entry. This includes
345 the size of the structure and the size of the allocation
346 bitmap. */
347 size_t small_page_overhead;
348
349 #if defined (HAVE_MMAP_DEV_ZERO)
350 /* A file descriptor open to /dev/zero for reading. */
351 int dev_zero_fd;
352 #endif
353
354 /* Allocate pages in chunks of this size, to throttle calls to memory
355 allocation routines. The first page is used, the rest go onto the
356 free list. */
357 size_t quire_size;
358
359 /* The file descriptor for debugging output. */
360 FILE *debug_file;
361 } G;
362
363 /* A zone allocation structure. There is one of these for every
364 distinct allocation zone. */
365 struct alloc_zone
366 {
367 /* The most recent free chunk is saved here, instead of in the linked
368 free list, to decrease list manipulation. It is most likely that we
369 will want this one. */
370 char *cached_free;
371 size_t cached_free_size;
372
373 /* Linked lists of free storage. Slot I (1 <= I <= NUM_FREE_BINS) holds
374 chunks of size I * FREE_BIN_DELTA. All other chunks are in slot 0. */
375 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
376
377 /* The highest bin index which might be non-empty. It may turn out
378 to be empty, in which case we have to search downwards. */
379 size_t high_free_bin;
380
381 /* Bytes currently allocated in this zone. */
382 size_t allocated;
383
384 /* Linked list of the small pages in this zone. */
385 struct small_page_entry *pages;
386
387 /* Doubly linked list of large pages in this zone. */
388 struct large_page_entry *large_pages;
389
390 /* If we are currently marking this zone, a pointer to the mark bits. */
391 mark_type *mark_bits;
392
393 /* Name of the zone. */
394 const char *name;
395
396 /* The number of small pages currently allocated in this zone. */
397 size_t n_small_pages;
398
399 /* Bytes allocated at the end of the last collection. */
400 size_t allocated_last_gc;
401
402 /* Total amount of memory mapped. */
403 size_t bytes_mapped;
404
405 /* A cache of free system pages. */
406 struct small_page_entry *free_pages;
407
408 /* Next zone in the linked list of zones. */
409 struct alloc_zone *next_zone;
410
411 /* True if this zone was collected during this collection. */
412 bool was_collected;
413
414 /* True if this zone should be destroyed after the next collection. */
415 bool dead;
416
417 #ifdef GATHER_STATISTICS
418 struct
419 {
420 /* Total memory allocated with ggc_alloc. */
421 unsigned long long total_allocated;
422 /* Total overhead for memory to be allocated with ggc_alloc. */
423 unsigned long long total_overhead;
424
425 /* Total allocations and overhead for sizes less than 32, 64 and 128.
426 These sizes are interesting because they are typical cache line
427 sizes. */
428
429 unsigned long long total_allocated_under32;
430 unsigned long long total_overhead_under32;
431
432 unsigned long long total_allocated_under64;
433 unsigned long long total_overhead_under64;
434
435 unsigned long long total_allocated_under128;
436 unsigned long long total_overhead_under128;
437 } stats;
438 #endif
439 } main_zone;
440
441 /* Some default zones. */
442 struct alloc_zone rtl_zone;
443 struct alloc_zone tree_zone;
444 struct alloc_zone tree_id_zone;
445
446 /* The PCH zone does not need a normal zone structure, and it does
447 not live on the linked list of zones. */
448 struct pch_zone
449 {
450 /* The start of the PCH zone. NULL if there is none. */
451 char *page;
452
453 /* The end of the PCH zone. NULL if there is none. */
454 char *end;
455
456 /* The size of the PCH zone. 0 if there is none. */
457 size_t bytes;
458
459 /* The allocation bitmap for the PCH zone. */
460 alloc_type *alloc_bits;
461
462 /* If we are currently marking, the mark bitmap for the PCH zone.
463 When it is first read in, we could avoid marking the PCH,
464 because it will not contain any pointers to GC memory outside
465 of the PCH; however, the PCH is currently mapped as writable,
466 so we must mark it in case new pointers are added. */
467 mark_type *mark_bits;
468 } pch_zone;
469
470 #ifdef USING_MMAP
471 static char *alloc_anon (char *, size_t, struct alloc_zone *);
472 #endif
473 static struct small_page_entry * alloc_small_page (struct alloc_zone *);
474 static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
475 static void free_chunk (char *, size_t, struct alloc_zone *);
476 static void free_small_page (struct small_page_entry *);
477 static void free_large_page (struct large_page_entry *);
478 static void release_pages (struct alloc_zone *);
479 static void sweep_pages (struct alloc_zone *);
480 static bool ggc_collect_1 (struct alloc_zone *, bool);
481 static void new_ggc_zone_1 (struct alloc_zone *, const char *);
482
483 /* Traverse the page table and find the entry for a page.
484 Die (probably) if the object wasn't allocated via GC. */
485
486 static inline page_entry *
487 lookup_page_table_entry (const void *p)
488 {
489 page_entry ***base;
490 size_t L1, L2;
491
492 #if HOST_BITS_PER_PTR <= 32
493 base = &G.lookup[0];
494 #else
495 page_table table = G.lookup;
496 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
497 while (table->high_bits != high_bits)
498 table = table->next;
499 base = &table->table[0];
500 #endif
501
502 /* Extract the level 1 and 2 indices. */
503 L1 = LOOKUP_L1 (p);
504 L2 = LOOKUP_L2 (p);
505
506 return base[L1][L2];
507 }
508
509 /* Set the page table entry for the page that starts at P. If ENTRY
510 is NULL, clear the entry. */
511
512 static void
513 set_page_table_entry (void *p, page_entry *entry)
514 {
515 page_entry ***base;
516 size_t L1, L2;
517
518 #if HOST_BITS_PER_PTR <= 32
519 base = &G.lookup[0];
520 #else
521 page_table table;
522 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
523 for (table = G.lookup; table; table = table->next)
524 if (table->high_bits == high_bits)
525 goto found;
526
527 /* Not found -- allocate a new table. */
528 table = xcalloc (1, sizeof(*table));
529 table->next = G.lookup;
530 table->high_bits = high_bits;
531 G.lookup = table;
532 found:
533 base = &table->table[0];
534 #endif
535
536 /* Extract the level 1 and 2 indices. */
537 L1 = LOOKUP_L1 (p);
538 L2 = LOOKUP_L2 (p);
539
540 if (base[L1] == NULL)
541 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
542
543 base[L1][L2] = entry;
544 }
545
546 /* Find the page table entry associated with OBJECT. */
547
548 static inline struct page_entry *
549 zone_get_object_page (const void *object)
550 {
551 return lookup_page_table_entry (object);
552 }
553
554 /* Find which element of the alloc_bits array OBJECT should be
555 recorded in. */
556 static inline unsigned int
557 zone_get_object_alloc_word (const void *object)
558 {
559 return (((size_t) object & (GGC_PAGE_SIZE - 1))
560 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
561 }
562
563 /* Find which bit of the appropriate word in the alloc_bits array
564 OBJECT should be recorded in. */
565 static inline unsigned int
566 zone_get_object_alloc_bit (const void *object)
567 {
568 return (((size_t) object / BYTES_PER_ALLOC_BIT)
569 % (8 * sizeof (alloc_type)));
570 }
571
572 /* Find which element of the mark_bits array OBJECT should be recorded
573 in. */
574 static inline unsigned int
575 zone_get_object_mark_word (const void *object)
576 {
577 return (((size_t) object & (GGC_PAGE_SIZE - 1))
578 / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
579 }
580
581 /* Find which bit of the appropriate word in the mark_bits array
582 OBJECT should be recorded in. */
583 static inline unsigned int
584 zone_get_object_mark_bit (const void *object)
585 {
586 return (((size_t) object / BYTES_PER_MARK_BIT)
587 % (8 * sizeof (mark_type)));
588 }
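
/* A worked example of the bitmap arithmetic above, assuming 8-byte
   MAX_ALIGNMENT and 32-bit alloc_type/mark_type: each bit covers 8
   bytes, so one bitmap word covers 8 * 32 == 256 bytes and a
   4096-byte page needs 16 words.  An object at offset 0x123 within
   its (page-aligned) page has bit index 0x123 / 8 == 36, i.e. word
   1, bit 4.  */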
589
590 /* Set the allocation bit corresponding to OBJECT in its page's
591 bitmap. Used to split this object from the preceding one. */
592 static inline void
593 zone_set_object_alloc_bit (const void *object)
594 {
595 struct small_page_entry *page
596 = (struct small_page_entry *) zone_get_object_page (object);
597 unsigned int start_word = zone_get_object_alloc_word (object);
598 unsigned int start_bit = zone_get_object_alloc_bit (object);
599
600 page->alloc_bits[start_word] |= 1L << start_bit;
601 }
602
603 /* Clear the allocation bit corresponding to OBJECT in PAGE's
604 bitmap. Used to coalesce this object with the preceding
605 one. */
606 static inline void
607 zone_clear_object_alloc_bit (struct small_page_entry *page,
608 const void *object)
609 {
610 unsigned int start_word = zone_get_object_alloc_word (object);
611 unsigned int start_bit = zone_get_object_alloc_bit (object);
612
613 /* Would xor be quicker? */
614 page->alloc_bits[start_word] &= ~(1L << start_bit);
615 }
616
617 /* Find the size of the object which starts at START_WORD and
618 START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
619 Helper function for ggc_get_size and zone_find_object_size. */
620
621 static inline size_t
622 zone_object_size_1 (alloc_type *alloc_bits,
623 size_t start_word, size_t start_bit,
624 size_t max_size)
625 {
626 size_t size;
627 alloc_type alloc_word;
628 int indx;
629
630 /* Load the first word. */
631 alloc_word = alloc_bits[start_word++];
632
633 /* If that was the last bit in this word, we'll want to continue
634 with the next word. Otherwise, handle the rest of this word. */
635 if (start_bit)
636 {
637 indx = alloc_ffs (alloc_word >> start_bit);
638 if (indx)
639 /* indx is 1-based. We started at the bit after the object's
640 start, but we also ended at the bit after the object's end.
641 It cancels out. */
642 return indx * BYTES_PER_ALLOC_BIT;
643
644 /* The extra 1 accounts for the starting unit, before start_bit. */
645 size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
646
647 if (size >= max_size)
648 return max_size;
649
650 alloc_word = alloc_bits[start_word++];
651 }
652 else
653 size = BYTES_PER_ALLOC_BIT;
654
655 while (alloc_word == 0)
656 {
657 size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
658 if (size >= max_size)
659 return max_size;
660 alloc_word = alloc_bits[start_word++];
661 }
662
663 indx = alloc_ffs (alloc_word);
664 return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
665 }
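
/* A worked example, under the same 8-byte/32-bit assumptions as
   above: take an object starting at bit 3 (page offset 24) whose
   successor starts at bit 9 (offset 72).  zone_find_object_size
   queries the bit after the object's first unit, so START_BIT == 4;
   ALLOC_WORD >> 4 then has bit 5 set, alloc_ffs returns 6, and
   6 * BYTES_PER_ALLOC_BIT recovers the 48-byte size.  */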
666
667 /* Find the size of OBJECT on small page PAGE. */
668
669 static inline size_t
670 zone_find_object_size (struct small_page_entry *page,
671 const void *object)
672 {
673 const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
674 unsigned int start_word = zone_get_object_alloc_word (object_midptr);
675 unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
676 size_t max_size = (page->common.page + SMALL_PAGE_SIZE
677 - (char *) object);
678
679 return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
680 max_size);
681 }
682
683 /* Allocate the mark bits for every zone, and set the pointers on each
684 page. */
685 static void
686 zone_allocate_marks (void)
687 {
688 struct alloc_zone *zone;
689
690 for (zone = G.zones; zone; zone = zone->next_zone)
691 {
692 struct small_page_entry *page;
693 mark_type *cur_marks;
694 size_t mark_words, mark_words_per_page;
695 #ifdef ENABLE_CHECKING
696 size_t n = 0;
697 #endif
698
699 mark_words_per_page
700 = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
701 mark_words = zone->n_small_pages * mark_words_per_page;
702 zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
703 mark_words);
704 cur_marks = zone->mark_bits;
705 for (page = zone->pages; page; page = page->next)
706 {
707 page->mark_bits = cur_marks;
708 cur_marks += mark_words_per_page;
709 #ifdef ENABLE_CHECKING
710 n++;
711 #endif
712 }
713 #ifdef ENABLE_CHECKING
714 gcc_assert (n == zone->n_small_pages);
715 #endif
716 }
717
718 /* We don't collect the PCH zone, but we do have to mark it
719 (for now). */
720 if (pch_zone.bytes)
721 pch_zone.mark_bits
722 = (mark_type *) xcalloc (sizeof (mark_type),
723 CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
724 }
725
726 /* After marking and sweeping, release the memory used for mark bits. */
727 static void
728 zone_free_marks (void)
729 {
730 struct alloc_zone *zone;
731
732 for (zone = G.zones; zone; zone = zone->next_zone)
733 if (zone->mark_bits)
734 {
735 free (zone->mark_bits);
736 zone->mark_bits = NULL;
737 }
738
739 if (pch_zone.bytes)
740 {
741 free (pch_zone.mark_bits);
742 pch_zone.mark_bits = NULL;
743 }
744 }
745
746 #ifdef USING_MMAP
747 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
748 (if non-null). The ifdef structure here is intended to cause a
749 compile error unless exactly one of the HAVE_* is defined. */
750
751 static inline char *
752 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
753 {
754 #ifdef HAVE_MMAP_ANON
755 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
756 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
757 #endif
758 #ifdef HAVE_MMAP_DEV_ZERO
759 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
760 MAP_PRIVATE, G.dev_zero_fd, 0);
761 #endif
762
763 if (page == (char *) MAP_FAILED)
764 {
765 perror ("virtual memory exhausted");
766 exit (FATAL_EXIT_CODE);
767 }
768
769 /* Remember that we allocated this memory. */
770 zone->bytes_mapped += size;
771
772 /* Pretend we don't have access to the allocated pages. We'll enable
773 access to smaller pieces of the area in ggc_alloc. Discard the
774 handle to avoid handle leak. */
775 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
776
777 return page;
778 }
779 #endif
780
781 /* Allocate a new page for allocating small objects in ZONE, and
782 return an entry for it. */
783
784 static struct small_page_entry *
785 alloc_small_page (struct alloc_zone *zone)
786 {
787 struct small_page_entry *entry;
788
789 /* Check the list of free pages for one we can use. */
790 entry = zone->free_pages;
791 if (entry != NULL)
792 {
793 /* Recycle the allocated memory from this page ... */
794 zone->free_pages = entry->next;
795 }
796 else
797 {
798 /* We want just one page. Allocate a bunch of them and put the
799 extras on the freelist. (Can only do this optimization with
800 mmap for backing store.) */
801 struct small_page_entry *e, *f = zone->free_pages;
802 int i;
803 char *page;
804
805 page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
806
807 /* This loop counts down so that the chain will be in ascending
808 memory order. */
809 for (i = G.quire_size - 1; i >= 1; i--)
810 {
811 e = xcalloc (1, G.small_page_overhead);
812 e->common.page = page + (i << GGC_PAGE_SHIFT);
813 e->common.zone = zone;
814 e->next = f;
815 f = e;
816 set_page_table_entry (e->common.page, &e->common);
817 }
818
819 zone->free_pages = f;
820
821 entry = xcalloc (1, G.small_page_overhead);
822 entry->common.page = page;
823 entry->common.zone = zone;
824 set_page_table_entry (page, &entry->common);
825 }
826
827 zone->n_small_pages++;
828
829 if (GGC_DEBUG_LEVEL >= 2)
830 fprintf (G.debug_file,
831 "Allocating %s page at %p, data %p-%p\n",
832 entry->common.zone->name, (PTR) entry, entry->common.page,
833 entry->common.page + SMALL_PAGE_SIZE - 1);
834
835 return entry;
836 }
837
838 /* Allocate a large page of size SIZE in ZONE. */
839
840 static struct large_page_entry *
841 alloc_large_page (size_t size, struct alloc_zone *zone)
842 {
843 struct large_page_entry *entry;
844 char *page;
845 size_t needed_size;
846
847 needed_size = size + sizeof (struct large_page_entry);
848 page = xmalloc (needed_size);
849
850 entry = (struct large_page_entry *) page;
851
852 entry->next = NULL;
853 entry->common.page = page + sizeof (struct large_page_entry);
854 entry->common.large_p = true;
855 entry->common.pch_p = false;
856 entry->common.zone = zone;
857 #ifdef GATHER_STATISTICS
858 entry->common.survived = 0;
859 #endif
860 entry->mark_p = false;
861 entry->bytes = size;
862 entry->prev = NULL;
863
864 set_page_table_entry (entry->common.page, &entry->common);
865
866 if (GGC_DEBUG_LEVEL >= 2)
867 fprintf (G.debug_file,
868 "Allocating %s large page at %p, data %p-%p\n",
869 entry->common.zone->name, (PTR) entry, entry->common.page,
870 entry->common.page + size - 1);
871
872 return entry;
873 }
874
875
876 /* For a page that is no longer needed, put it on the free page list. */
877
878 static inline void
879 free_small_page (struct small_page_entry *entry)
880 {
881 if (GGC_DEBUG_LEVEL >= 2)
882 fprintf (G.debug_file,
883 "Deallocating %s page at %p, data %p-%p\n",
884 entry->common.zone->name, (PTR) entry,
885 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
886
887 gcc_assert (!entry->common.large_p);
888
889 /* Mark the page as inaccessible. Discard the handle to
890 avoid handle leak. */
891 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
892 SMALL_PAGE_SIZE));
893
894 entry->next = entry->common.zone->free_pages;
895 entry->common.zone->free_pages = entry;
896 entry->common.zone->n_small_pages--;
897 }
898
899 /* Release a large page that is no longer needed. */
900
901 static inline void
902 free_large_page (struct large_page_entry *entry)
903 {
904 if (GGC_DEBUG_LEVEL >= 2)
905 fprintf (G.debug_file,
906 "Deallocating %s page at %p, data %p-%p\n",
907 entry->common.zone->name, (PTR) entry,
908 entry->common.page, entry->common.page + entry->bytes - 1);
909
910 gcc_assert (entry->common.large_p);
911
912 set_page_table_entry (entry->common.page, NULL);
913 free (entry);
914 }
915
916 /* Release the free page cache to the system. */
917
918 static void
919 release_pages (struct alloc_zone *zone)
920 {
921 #ifdef USING_MMAP
922 struct small_page_entry *p, *next;
923 char *start;
924 size_t len;
925
926 /* Gather up adjacent pages so they are unmapped together. */
927 p = zone->free_pages;
928
929 while (p)
930 {
931 start = p->common.page;
932 next = p->next;
933 len = SMALL_PAGE_SIZE;
934 set_page_table_entry (p->common.page, NULL);
935 p = next;
936
937 while (p && p->common.page == start + len)
938 {
939 next = p->next;
940 len += SMALL_PAGE_SIZE;
941 set_page_table_entry (p->common.page, NULL);
942 p = next;
943 }
944
945 munmap (start, len);
946 zone->bytes_mapped -= len;
947 }
948
949 zone->free_pages = NULL;
950 #endif
951 }
952
953 /* Place the block at PTR of size SIZE on the free list for ZONE. */
954
955 static inline void
956 free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
957 {
958 struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
959 size_t bin = 0;
960
961 bin = SIZE_BIN_DOWN (size);
962 gcc_assert (bin != 0);
963 if (bin > NUM_FREE_BINS)
964 {
965 bin = 0;
966 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
967 sizeof (struct
968 alloc_chunk)));
969 chunk->size = size;
970 chunk->next_free = zone->free_chunks[bin];
971 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
972 + sizeof (struct
973 alloc_chunk),
974 size
975 - sizeof (struct
976 alloc_chunk)));
977 }
978 else
979 {
980 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
981 sizeof (struct
982 alloc_chunk *)));
983 chunk->next_free = zone->free_chunks[bin];
984 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
985 + sizeof (struct
986 alloc_chunk *),
987 size
988 - sizeof (struct
989 alloc_chunk *)));
990 }
991
992 zone->free_chunks[bin] = chunk;
993 if (bin > zone->high_free_bin)
994 zone->high_free_bin = bin;
995 if (GGC_DEBUG_LEVEL >= 3)
996 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
997 }
998
999 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1000
1001 void *
1002 ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1003 MEM_STAT_DECL)
1004 {
1005 size_t bin;
1006 size_t csize;
1007 struct small_page_entry *entry;
1008 struct alloc_chunk *chunk, **pp;
1009 void *result;
1010 size_t size = orig_size;
1011
1012 /* Make sure that zero-sized allocations get a unique and freeable
1013 pointer. */
1014 if (size == 0)
1015 size = MAX_ALIGNMENT;
1016 else
1017 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
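
  /* For instance, with 8-byte MAX_ALIGNMENT (an assumption; see
     struct max_alignment above), a 13-byte request rounds to
     (13 + 7) & -8 == 16, and a zero-byte request becomes 8.  */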
1018
1019 /* Try to allocate the object from several different sources. Each
1020 of these cases is responsible for setting RESULT and SIZE to
1021 describe the allocated block, before jumping to FOUND. If a
1022 chunk is split, the allocate bit for the new chunk should also be
1023 set.
1024
1025 Large objects are handled specially. However, they'll just fail
1026 the next couple of conditions, so we can wait to check for them
1027 below. The large object case is relatively rare (< 1%), so this
1028 is a win. */
1029
1030 /* First try to split the last chunk we allocated. For best
1031 fragmentation behavior it would be better to look for a
1032 free bin of the appropriate size for a small object. However,
1033 we're unlikely (1% - 7%) to find one, and this gives better
1034 locality behavior anyway. This case handles the lion's share
1035 of all calls to this function. */
1036 if (size <= zone->cached_free_size)
1037 {
1038 result = zone->cached_free;
1039
1040 zone->cached_free_size -= size;
1041 if (zone->cached_free_size)
1042 {
1043 zone->cached_free += size;
1044 zone_set_object_alloc_bit (zone->cached_free);
1045 }
1046
1047 goto found;
1048 }
1049
1050 /* Next, try to find a free bin of the exactly correct size. */
1051
1052 /* We want to round SIZE up, rather than down, but we know it's
1053 already aligned to at least FREE_BIN_DELTA, so we can just
1054 shift. */
1055 bin = SIZE_BIN_DOWN (size);
1056
1057 if (bin <= NUM_FREE_BINS
1058 && (chunk = zone->free_chunks[bin]) != NULL)
1059 {
1060 /* We have a chunk of the right size. Pull it off the free list
1061 and use it. */
1062
1063 zone->free_chunks[bin] = chunk->next_free;
1064
1065 /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1066 == FREE_BIN_DELTA. */
1067 result = chunk;
1068
1069 /* The allocation bits are already set correctly. HIGH_FREE_BIN
1070 may now be wrong, if this was the last chunk in the high bin.
1071 Rather than fixing it up now, wait until we need to search
1072 the free bins. */
1073
1074 goto found;
1075 }
1076
1077 /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1078 to split. We can find one in the too-big bin, or in the largest
1079 sized bin with a chunk in it. Try the largest normal-sized bin
1080 first. */
1081
1082 if (zone->high_free_bin > bin)
1083 {
1084 /* Find the highest numbered free bin. It will be at or below
1085 the watermark. */
1086 while (zone->high_free_bin > bin
1087 && zone->free_chunks[zone->high_free_bin] == NULL)
1088 zone->high_free_bin--;
1089
1090 if (zone->high_free_bin > bin)
1091 {
1092 size_t tbin = zone->high_free_bin;
1093 chunk = zone->free_chunks[tbin];
1094
1095 /* Remove the chunk from its previous bin. */
1096 zone->free_chunks[tbin] = chunk->next_free;
1097
1098 result = (char *) chunk;
1099
1100 /* Save the rest of the chunk for future allocation. */
1101 if (zone->cached_free_size)
1102 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1103
1104 chunk = (struct alloc_chunk *) ((char *) result + size);
1105 zone->cached_free = (char *) chunk;
1106 zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1107
1108 /* Mark the new free chunk as an object, so that we can
1109 find the size of the newly allocated object. */
1110 zone_set_object_alloc_bit (chunk);
1111
1112 /* HIGH_FREE_BIN may now be wrong, if this was the last
1113 chunk in the high bin. Rather than fixing it up now,
1114 wait until we need to search the free bins. */
1115
1116 goto found;
1117 }
1118 }
1119
1120 /* Failing that, look through the "other" bucket for a chunk
1121 that is large enough. */
1122 pp = &(zone->free_chunks[0]);
1123 chunk = *pp;
1124 while (chunk && chunk->size < size)
1125 {
1126 pp = &chunk->next_free;
1127 chunk = *pp;
1128 }
1129
1130 if (chunk)
1131 {
1132 /* Remove the chunk from its previous bin. */
1133 *pp = chunk->next_free;
1134
1135 result = (char *) chunk;
1136
1137 /* Save the rest of the chunk for future allocation, if there's any
1138 left over. */
1139 csize = chunk->size;
1140 if (csize > size)
1141 {
1142 if (zone->cached_free_size)
1143 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1144
1145 chunk = (struct alloc_chunk *) ((char *) result + size);
1146 zone->cached_free = (char *) chunk;
1147 zone->cached_free_size = csize - size;
1148
1149 /* Mark the new free chunk as an object. */
1150 zone_set_object_alloc_bit (chunk);
1151 }
1152
1153 goto found;
1154 }
1155
1156 /* Handle large allocations. We could choose any threshold between
1157 GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1158 GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
1159 be guaranteed to have a unique entry in the lookup table. Large
1160 allocations will always fall through to here. */
1161 if (size > GGC_PAGE_SIZE)
1162 {
1163 struct large_page_entry *entry = alloc_large_page (size, zone);
1164
1165 #ifdef GATHER_STATISTICS
1166 entry->common.survived = 0;
1167 #endif
1168
1169 entry->next = zone->large_pages;
1170 if (zone->large_pages)
1171 zone->large_pages->prev = entry;
1172 zone->large_pages = entry;
1173
1174 result = entry->common.page;
1175
1176 goto found;
1177 }
1178
1179 /* Failing everything above, allocate a new small page. */
1180
1181 entry = alloc_small_page (zone);
1182 entry->next = zone->pages;
1183 zone->pages = entry;
1184
1185 /* Mark the first chunk in the new page. */
1186 entry->alloc_bits[0] = 1;
1187
1188 result = entry->common.page;
1189 if (size < SMALL_PAGE_SIZE)
1190 {
1191 if (zone->cached_free_size)
1192 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1193
1194 zone->cached_free = (char *) result + size;
1195 zone->cached_free_size = SMALL_PAGE_SIZE - size;
1196
1197 /* Mark the new free chunk as an object. */
1198 zone_set_object_alloc_bit (zone->cached_free);
1199 }
1200
1201 found:
1202
1203 /* We could save TYPE in the chunk, but we don't use that for
1204 anything yet. If we wanted to, we could do it by adding it
1205 either before the beginning of the chunk or after its end,
1206 and adjusting the size and pointer appropriately. */
1207
1208 /* We'll probably write to this after we return. */
1209 prefetchw (result);
1210
1211 #ifdef ENABLE_GC_CHECKING
1212 /* `Poison' the entire allocated object. */
1213 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1214 memset (result, 0xaf, size);
1215 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
1216 size - orig_size));
1217 #endif
1218
1219 /* Tell Valgrind that the memory is there, but its content isn't
1220 defined. The bytes at the end of the object are still marked
1221 inaccessible. */
1222 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));
1223
1224 /* Keep track of how many bytes are being allocated. This
1225 information is used in deciding when to collect. */
1226 zone->allocated += size;
1227
1228 timevar_ggc_mem_total += size;
1229
1230 #ifdef GATHER_STATISTICS
1231 ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
1232
1233 {
1234 size_t object_size = size;
1235 size_t overhead = object_size - orig_size;
1236
1237 zone->stats.total_overhead += overhead;
1238 zone->stats.total_allocated += object_size;
1239
1240 if (orig_size <= 32)
1241 {
1242 zone->stats.total_overhead_under32 += overhead;
1243 zone->stats.total_allocated_under32 += object_size;
1244 }
1245 if (orig_size <= 64)
1246 {
1247 zone->stats.total_overhead_under64 += overhead;
1248 zone->stats.total_allocated_under64 += object_size;
1249 }
1250 if (orig_size <= 128)
1251 {
1252 zone->stats.total_overhead_under128 += overhead;
1253 zone->stats.total_allocated_under128 += object_size;
1254 }
1255 }
1256 #endif
1257
1258 if (GGC_DEBUG_LEVEL >= 3)
1259 fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
1260 (unsigned long) size, result);
1261
1262 return result;
1263 }
1264
1265 /* Allocate a chunk of memory of SIZE bytes whose type is GTE, in an
1266 appropriate zone for that type. */
1267
1268 void *
1269 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1270 MEM_STAT_DECL)
1271 {
1272 switch (gte)
1273 {
1274 case gt_ggc_e_14lang_tree_node:
1275 return ggc_alloc_zone_pass_stat (size, &tree_zone);
1276
1277 case gt_ggc_e_7rtx_def:
1278 return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1279
1280 case gt_ggc_e_9rtvec_def:
1281 return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1282
1283 default:
1284 return ggc_alloc_zone_pass_stat (size, &main_zone);
1285 }
1286 }
1287
1288 /* Normal ggc_alloc simply allocates into the main zone. */
1289
1290 void *
1291 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1292 {
1293 return ggc_alloc_zone_pass_stat (size, &main_zone);
1294 }
1295
1296 /* Poison the chunk. */
1297 #ifdef ENABLE_GC_CHECKING
1298 #define poison_region(PTR, SIZE) \
1299 memset ((PTR), 0xa5, (SIZE))
1300 #else
1301 #define poison_region(PTR, SIZE)
1302 #endif
1303
1304 /* Free the object at P. */
1305
1306 void
1307 ggc_free (void *p)
1308 {
1309 struct page_entry *page;
1310
1311 #ifdef GATHER_STATISTICS
1312 ggc_free_overhead (p);
1313 #endif
1314
1315 poison_region (p, ggc_get_size (p));
1316
1317 page = zone_get_object_page (p);
1318
1319 if (page->large_p)
1320 {
1321 struct large_page_entry *large_page
1322 = (struct large_page_entry *) page;
1323
1324 /* Remove the page from the linked list. */
1325 if (large_page->prev)
1326 large_page->prev->next = large_page->next;
1327 else
1328 {
1329 gcc_assert (large_page->common.zone->large_pages == large_page);
1330 large_page->common.zone->large_pages = large_page->next;
1331 }
1332 if (large_page->next)
1333 large_page->next->prev = large_page->prev;
1334
1335 large_page->common.zone->allocated -= large_page->bytes;
1336
1337 /* Release the memory associated with this object. */
1338 free_large_page (large_page);
1339 }
1340 else if (page->pch_p)
1341 /* Don't do anything. We won't allocate a new object from the
1342 PCH zone so there's no point in releasing anything. */
1343 ;
1344 else
1345 {
1346 size_t size = ggc_get_size (p);
1347
1348 page->zone->allocated -= size;
1349
1350 /* Add the chunk to the free list. We don't bother with coalescing,
1351 since we are likely to want a chunk of this size again. */
1352 free_chunk (p, size, page->zone);
1353 }
1354 }
1355
1356 /* If P is not marked, mark it and return false. Otherwise return true.
1357 P must have been allocated by the GC allocator; it mustn't point to
1358 static objects, stack variables, or memory allocated with malloc. */
1359
1360 int
1361 ggc_set_mark (const void *p)
1362 {
1363 struct page_entry *page;
1364 const char *ptr = (const char *) p;
1365
1366 page = zone_get_object_page (p);
1367
1368 if (page->pch_p)
1369 {
1370 size_t mark_word, mark_bit, offset;
1371 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1372 mark_word = offset / (8 * sizeof (mark_type));
1373 mark_bit = offset % (8 * sizeof (mark_type));
1374
1375 if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1376 return 1;
1377 pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1378 }
1379 else if (page->large_p)
1380 {
1381 struct large_page_entry *large_page
1382 = (struct large_page_entry *) page;
1383
1384 if (large_page->mark_p)
1385 return 1;
1386 large_page->mark_p = true;
1387 }
1388 else
1389 {
1390 struct small_page_entry *small_page
1391 = (struct small_page_entry *) page;
1392
1393 if (small_page->mark_bits[zone_get_object_mark_word (p)]
1394 & (1 << zone_get_object_mark_bit (p)))
1395 return 1;
1396 small_page->mark_bits[zone_get_object_mark_word (p)]
1397 |= (1 << zone_get_object_mark_bit (p));
1398 }
1399
1400 if (GGC_DEBUG_LEVEL >= 4)
1401 fprintf (G.debug_file, "Marking %p\n", p);
1402
1403 return 0;
1404 }
1405
1406 /* Return 1 if P has been marked, zero otherwise.
1407 P must have been allocated by the GC allocator; it mustn't point to
1408 static objects, stack variables, or memory allocated with malloc. */
1409
1410 int
1411 ggc_marked_p (const void *p)
1412 {
1413 struct page_entry *page;
1414 const char *ptr = (const char *) p;
1415
1416 page = zone_get_object_page (p);
1417
1418 if (page->pch_p)
1419 {
1420 size_t mark_word, mark_bit, offset;
1421 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1422 mark_word = offset / (8 * sizeof (mark_type));
1423 mark_bit = offset % (8 * sizeof (mark_type));
1424
1425 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1426 }
1427
1428 if (page->large_p)
1429 {
1430 struct large_page_entry *large_page
1431 = (struct large_page_entry *) page;
1432
1433 return large_page->mark_p;
1434 }
1435 else
1436 {
1437 struct small_page_entry *small_page
1438 = (struct small_page_entry *) page;
1439
1440 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1441 & (1 << zone_get_object_mark_bit (p)));
1442 }
1443 }
1444
1445 /* Return the size of the gc-able object P. */
1446
1447 size_t
1448 ggc_get_size (const void *p)
1449 {
1450 struct page_entry *page;
1451 const char *ptr = (const char *) p;
1452
1453 page = zone_get_object_page (p);
1454
1455 if (page->pch_p)
1456 {
1457 size_t alloc_word, alloc_bit, offset, max_size;
1458 offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1459 alloc_word = offset / (8 * sizeof (alloc_type));
1460 alloc_bit = offset % (8 * sizeof (alloc_type));
1461 max_size = pch_zone.bytes - (ptr - pch_zone.page);
1462 return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1463 max_size);
1464 }
1465
1466 if (page->large_p)
1467 return ((struct large_page_entry *)page)->bytes;
1468 else
1469 return zone_find_object_size ((struct small_page_entry *) page, p);
1470 }
1471
1472 /* Initialize the ggc-zone-mmap allocator. */
1473 void
1474 init_ggc (void)
1475 {
1476 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1477 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1478 the current assumptions to hold. */
1479
1480 gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1481
1482 /* Set up the main zone by hand. */
1483 main_zone.name = "Main zone";
1484 G.zones = &main_zone;
1485
1486 /* Allocate the default zones. */
1487 new_ggc_zone_1 (&rtl_zone, "RTL zone");
1488 new_ggc_zone_1 (&tree_zone, "Tree zone");
1489 new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1490
1491 G.pagesize = getpagesize();
1492 G.lg_pagesize = exact_log2 (G.pagesize);
1493 G.page_mask = ~(G.pagesize - 1);
1494
1495 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
1496 gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1497
1498 /* Allocate 16 system pages at a time. */
1499 G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
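
  /* With a 4096-byte system page (an assumption; G.pagesize is
     whatever getpagesize returned above), this works out to sixteen
     GGC pages obtained per call to alloc_anon.  */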
1500
1501 /* Calculate the size of the allocation bitmap and other overhead. */
1502 /* Right now we allocate bits for the page header and bitmap. These
1503 are wasted, but a little tricky to eliminate. */
1504 G.small_page_overhead
1505 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1506 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1507
1508 #ifdef HAVE_MMAP_DEV_ZERO
1509 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1510 gcc_assert (G.dev_zero_fd != -1);
1511 #endif
1512
1513 #if 0
1514 G.debug_file = fopen ("ggc-mmap.debug", "w");
1515 setlinebuf (G.debug_file);
1516 #else
1517 G.debug_file = stdout;
1518 #endif
1519
1520 #ifdef USING_MMAP
1521 /* StunOS has an amazing off-by-one error for the first mmap allocation
1522 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1523 believe, is an unaligned page allocation, which would cause us to
1524 hork badly if we tried to use it. */
1525 {
1526 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1527 struct small_page_entry *e;
1528 if ((size_t)p & (G.pagesize - 1))
1529 {
1530 /* How losing. Discard this one and try another. If we still
1531 can't get something useful, give up. */
1532
1533 p = alloc_anon (NULL, G.pagesize, &main_zone);
1534 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1535 }
1536
1537 if (GGC_PAGE_SIZE == G.pagesize)
1538 {
1539 /* We have a good page, might as well hold onto it... */
1540 e = xcalloc (1, G.small_page_overhead);
1541 e->common.page = p;
1542 e->common.zone = &main_zone;
1543 e->next = main_zone.free_pages;
1544 set_page_table_entry (e->common.page, &e->common);
1545 main_zone.free_pages = e;
1546 }
1547 else
1548 {
1549 munmap (p, G.pagesize);
1550 }
1551 }
1552 #endif
1553 }
1554
1555 /* Start a new GGC zone. */
1556
1557 static void
1558 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1559 {
1560 new_zone->name = name;
1561 new_zone->next_zone = G.zones->next_zone;
1562 G.zones->next_zone = new_zone;
1563 }
1564
1565 struct alloc_zone *
1566 new_ggc_zone (const char * name)
1567 {
1568 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1569 new_ggc_zone_1 (new_zone, name);
1570 return new_zone;
1571 }
1572
1573 /* Destroy a GGC zone. */
1574 void
1575 destroy_ggc_zone (struct alloc_zone * dead_zone)
1576 {
1577 struct alloc_zone *z;
1578
1579 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1580 /* Just find that zone. */
1581 continue;
1582
1583 /* We should have found the zone in the list. Anything else is fatal. */
1584 gcc_assert (z);
1585
1586 /* z is dead, baby. z is dead. */
1587 z->dead = true;
1588 }
1589
1590 /* Free all empty pages, and the dead objects within live pages, for a given zone. */
1591
1592 static void
1593 sweep_pages (struct alloc_zone *zone)
1594 {
1595 struct large_page_entry **lpp, *lp, *lnext;
1596 struct small_page_entry **spp, *sp, *snext;
1597 char *last_free;
1598 size_t allocated = 0;
1599 bool nomarksinpage;
1600
1601 /* First, reset the free_chunks lists, since we are going to
1602 re-free free chunks in hopes of coalescing them into large chunks. */
1603 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1604 zone->high_free_bin = 0;
1605 zone->cached_free = NULL;
1606 zone->cached_free_size = 0;
1607
1608 /* Large pages are all or none affairs. Either they are completely
1609 empty, or they are completely full. */
1610 lpp = &zone->large_pages;
1611 for (lp = zone->large_pages; lp != NULL; lp = lnext)
1612 {
1613 gcc_assert (lp->common.large_p);
1614
1615 lnext = lp->next;
1616
1617 #ifdef GATHER_STATISTICS
1618 /* This page has now survived another collection. */
1619 lp->common.survived++;
1620 #endif
1621
1622 if (lp->mark_p)
1623 {
1624 lp->mark_p = false;
1625 allocated += lp->bytes;
1626 lpp = &lp->next;
1627 }
1628 else
1629 {
1630 *lpp = lnext;
1631 #ifdef ENABLE_GC_CHECKING
1632 /* Poison the page. */
1633 memset (lp->common.page, 0xb5, lp->bytes);
1634 #endif
1635 if (lp->prev)
1636 lp->prev->next = lp->next;
1637 if (lp->next)
1638 lp->next->prev = lp->prev;
1639 free_large_page (lp);
1640 }
1641 }
1642
1643 spp = &zone->pages;
1644 for (sp = zone->pages; sp != NULL; sp = snext)
1645 {
1646 char *object, *last_object;
1647 char *end;
1648 alloc_type *alloc_word_p;
1649 mark_type *mark_word_p;
1650
1651 gcc_assert (!sp->common.large_p);
1652
1653 snext = sp->next;
1654
1655 #ifdef GATHER_STATISTICS
1656 /* This page has now survived another collection. */
1657 sp->common.survived++;
1658 #endif
1659
1660 /* Step through all chunks, consolidate those that are free and
1661 insert them into the free lists. Note that consolidation
1662 slows down collection slightly. */
1663
1664 last_object = object = sp->common.page;
1665 end = sp->common.page + SMALL_PAGE_SIZE;
1666 last_free = NULL;
1667 nomarksinpage = true;
1668 mark_word_p = sp->mark_bits;
1669 alloc_word_p = sp->alloc_bits;
1670
1671 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1672
1673 object = sp->common.page;
1674 do
1675 {
1676 unsigned int i, n;
1677 alloc_type alloc_word;
1678 mark_type mark_word;
1679
1680 alloc_word = *alloc_word_p++;
1681 mark_word = *mark_word_p++;
1682
1683 if (mark_word)
1684 nomarksinpage = false;
1685
1686 /* There ought to be some way to do this without looping... */
1687 i = 0;
1688 while ((n = alloc_ffs (alloc_word)) != 0)
1689 {
1690 /* Extend the current state for n - 1 bits. We can't
1691 shift alloc_word by n, even though it isn't used in the
1692 loop, in case only the highest bit was set. */
1693 alloc_word >>= n - 1;
1694 mark_word >>= n - 1;
1695 object += BYTES_PER_MARK_BIT * (n - 1);
1696
1697 if (mark_word & 1)
1698 {
1699 if (last_free)
1700 {
1701 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1702 object
1703 - last_free));
1704 poison_region (last_free, object - last_free);
1705 free_chunk (last_free, object - last_free, zone);
1706 last_free = NULL;
1707 }
1708 else
1709 allocated += object - last_object;
1710 last_object = object;
1711 }
1712 else
1713 {
1714 if (last_free == NULL)
1715 {
1716 last_free = object;
1717 allocated += object - last_object;
1718 }
1719 else
1720 zone_clear_object_alloc_bit (sp, object);
1721 }
1722
1723 /* Shift to just after the alloc bit we handled. */
1724 alloc_word >>= 1;
1725 mark_word >>= 1;
1726 object += BYTES_PER_MARK_BIT;
1727
1728 i += n;
1729 }
1730
1731 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1732 }
1733 while (object < end);
1734
1735 if (nomarksinpage)
1736 {
1737 *spp = snext;
1738 #ifdef ENABLE_GC_CHECKING
1739 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
1740 SMALL_PAGE_SIZE));
1741 /* Poison the page. */
1742 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1743 #endif
1744 free_small_page (sp);
1745 continue;
1746 }
1747 else if (last_free)
1748 {
1749 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1750 object - last_free));
1751 poison_region (last_free, object - last_free);
1752 free_chunk (last_free, object - last_free, zone);
1753 }
1754 else
1755 allocated += object - last_object;
1756
1757 spp = &sp->next;
1758 }
1759
1760 zone->allocated = allocated;
1761 }
1762
1763 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1764 is true if we need to mark before sweeping, false if some other
1765 zone collection has already performed marking for us. Returns true
1766 if we collected, false otherwise. */
1767
1768 static bool
1769 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1770 {
1771 #if 0
1772 /* */
1773 {
1774 int i;
1775 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1776 {
1777 struct alloc_chunk *chunk;
1778 int n, tot;
1779
1780 n = 0;
1781 tot = 0;
1782 chunk = zone->free_chunks[i];
1783 while (chunk)
1784 {
1785 n++;
1786 tot += chunk->size;
1787 chunk = chunk->next_free;
1788 }
1789 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1790 i, n, tot);
1791 }
1792 }
1793 /* */
1794 #endif
1795
1796 if (!quiet_flag)
1797 fprintf (stderr, " {%s GC %luk -> ",
1798 zone->name, (unsigned long) zone->allocated / 1024);
1799
1800 /* Zero the total allocated bytes. This will be recalculated in the
1801 sweep phase. */
1802 zone->allocated = 0;
1803
1804 /* Release the pages we freed the last time we collected, but didn't
1805 reuse in the interim. */
1806 release_pages (zone);
1807
1808 if (need_marking)
1809 {
1810 zone_allocate_marks ();
1811 ggc_mark_roots ();
1812 #ifdef GATHER_STATISTICS
1813 ggc_prune_overhead_list ();
1814 #endif
1815 }
1816
1817 sweep_pages (zone);
1818 zone->was_collected = true;
1819 zone->allocated_last_gc = zone->allocated;
1820
1821 if (!quiet_flag)
1822 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1823 return true;
1824 }
1825
1826 #ifdef GATHER_STATISTICS
1827 /* Calculate the average page survival rate in terms of number of
1828 collections. */
1829
1830 static float
1831 calculate_average_page_survival (struct alloc_zone *zone)
1832 {
1833 float count = 0.0;
1834 float survival = 0.0;
1835 struct small_page_entry *p;
1836 struct large_page_entry *lp;
1837 for (p = zone->pages; p; p = p->next)
1838 {
1839 count += 1.0;
1840 survival += p->common.survived;
1841 }
1842 for (lp = zone->large_pages; lp; lp = lp->next)
1843 {
1844 count += 1.0;
1845 survival += lp->common.survived;
1846 }
1847 return survival/count;
1848 }
1849 #endif
1850
1851 /* Top level collection routine. */
1852
1853 void
1854 ggc_collect (void)
1855 {
1856 struct alloc_zone *zone;
1857 bool marked = false;
1858
1859 timevar_push (TV_GC);
1860
1861 if (!ggc_force_collect)
1862 {
1863 float allocated_last_gc = 0, allocated = 0, min_expand;
1864
1865 for (zone = G.zones; zone; zone = zone->next_zone)
1866 {
1867 allocated_last_gc += zone->allocated_last_gc;
1868 allocated += zone->allocated;
1869 }
1870
1871 allocated_last_gc =
1872 MAX (allocated_last_gc,
1873 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1874 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1875
1876 if (allocated < allocated_last_gc + min_expand)
1877 {
1878 timevar_pop (TV_GC);
1879 return;
1880 }
1881 }
1882
1883 /* Start by possibly collecting the main zone. */
1884 main_zone.was_collected = false;
1885 marked |= ggc_collect_1 (&main_zone, true);
1886
1887 /* In order to keep the number of collections down, we don't
1888 collect other zones unless we are collecting the main zone. This
1889 gives us roughly the same number of collections as we used to
1890 have with the old gc. The number of collections is important
1891 because our main slowdown (according to profiling) is now in
1892 marking. So if we mark twice as often as we used to, we'll be
1893 twice as slow. Hopefully we'll avoid this cost when we mark
1894 zone-at-a-time. */
1895 /* NOTE drow/2004-07-28: We now always collect the main zone, but
1896 keep this code in case the heuristics are further refined. */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
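
/* For example, SCALE (9000) is 9000 with LABEL ' ' (plain bytes);
   SCALE (15360) is 15 with LABEL 'k'; SCALE (20971520) is 20 with
   LABEL 'M'.  The 10k and 10M thresholds keep every printed value at
   two significant digits or more.  */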

void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
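
  /* Illustrative cost, with hypothetical numbers (PAGE_L1_SIZE and
     PAGE_L2_SIZE are defined earlier in this file): if PAGE_L2_SIZE
     were 1024 and pointers 8 bytes, every L1 slot in use would
     account for 1024 * 8 = 8192 bytes of second-level table here, on
     top of sizeof (G.lookup), or of sizeof (*table) per chained table
     on 64-bit hosts.  */
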
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead:                           %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated:                          %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B:                 %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B:                %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B:                 %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B:                %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B:                %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:               %10lld\n",
             all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS      (gt_types_enum_last + 3)

#define OTHER_BUCKET         (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET    (gt_types_enum_last + 1)
#define STRING_BUCKET        (gt_types_enum_last + 2)

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
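
/* A hedged sketch of how these hooks are driven (the caller lives in
   the PCH writer elsewhere in the compiler; this order is inferred
   from the functions in this file):

     d = init_ggc_pch ();
     ggc_pch_count_object (d, x, size, is_string, type);   (each object)
     total = ggc_pch_total_size (d);
     ggc_pch_this_base (d, base);
     newx = ggc_pch_alloc_object (d, x, size, is_string, type);
     ggc_pch_prepare_write (d, f);
     ggc_pch_write_object (d, f, x, newx, size, is_string);
     ggc_pch_finish (d, f);  */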

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}

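/* For example: an IDENTIFIER_NODE arrives with TYPE equal to
   gt_ggc_e_14lang_tree_node but is diverted to IDENTIFIER_BUCKET; a
   string arrives with TYPE equal to gt_types_enum_last and IS_STRING
   set, so it lands in STRING_BUCKET; any other object of known type
   uses its TYPE value itself as the bucket index, which is why the
   three special buckets are numbered upward from gt_types_enum_last.  */
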
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  enum gt_types_enum i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}

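/* Worked example, with illustrative values (GGC_PAGE_SIZE and
   BYTES_PER_ALLOC_BIT are defined earlier in this file; assume 4096
   and 8 here purely for the arithmetic): buckets holding 5000 and 100
   bytes round up to 8192 + 4096 = 12288 bytes of object data; the
   bitmap then needs CEIL (12288, 8 * 8) = 192 bytes, one bit per
   8-byte boundary; assuming 192 is already a multiple of
   MAX_ALIGNMENT, the function returns 12288 + 192 = 12480.  */
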
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = xcalloc (1, d->alloc_size);
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}

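/* The bit arithmetic above, with illustrative values: assume
   BYTES_PER_ALLOC_BIT is 8 and alloc_type is 64 bits wide.  An object
   starting 256 bytes past orig_base occupies allocation bit
   256 / 8 = 32, which lives in word 32 / 64 = 0 at bit position
   32 % 64 = 32.  */
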
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}

/* Finish writing out the PCH data: emit the allocation bitmap and the
   on-disk footer, then release D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

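/* Resulting file layout, relative to d->start_offset and using the
   sizes computed in ggc_pch_total_size:

     [0, d.total)                      object data, bucket by bucket
     [d.total, d.total + alloc_size)   allocation bitmap
     after that                        struct ggc_pch_ondisk footer

   ggc_pch_read below expects the mapped image to hold the first two
   pieces back to back, and reads the footer separately from the
   file.  */
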
/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = xcalloc (1, sizeof (struct page_entry));
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}
2376 }