1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 2, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to the Free
23 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
24 02111-1307, USA. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "tm_p.h"
33 #include "toplev.h"
34 #include "varray.h"
35 #include "flags.h"
36 #include "ggc.h"
37 #include "timevar.h"
38 #include "params.h"
39 #include "bitmap.h"
40
41 #ifdef ENABLE_VALGRIND_CHECKING
42 # ifdef HAVE_VALGRIND_MEMCHECK_H
43 # include <valgrind/memcheck.h>
44 # elif defined HAVE_MEMCHECK_H
45 # include <memcheck.h>
46 # else
47 # include <valgrind.h>
48 # endif
49 #else
50 /* Avoid #ifdefs when we can help it. */
51 #define VALGRIND_DISCARD(x)
52 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
53 #define VALGRIND_FREELIKE_BLOCK(x,y)
54 #endif
55
56 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
57 file open. Prefer either to valloc. */
58 #ifdef HAVE_MMAP_ANON
59 # undef HAVE_MMAP_DEV_ZERO
60
61 # include <sys/mman.h>
62 # ifndef MAP_FAILED
63 # define MAP_FAILED -1
64 # endif
65 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
66 # define MAP_ANONYMOUS MAP_ANON
67 # endif
68 # define USING_MMAP
69 #endif
70
71 #ifdef HAVE_MMAP_DEV_ZERO
72 # include <sys/mman.h>
73 # ifndef MAP_FAILED
74 # define MAP_FAILED -1
75 # endif
76 # define USING_MMAP
77 #endif
78
79 #ifndef USING_MMAP
80 #error Zone collector requires mmap
81 #endif
82
83 #if (GCC_VERSION < 3001)
84 #define prefetch(X) ((void) X)
85 #define prefetchw(X) ((void) X)
86 #else
87 #define prefetch(X) __builtin_prefetch (X)
88 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
89 #endif
90
91 /* FUTURE NOTES:
92
93 If we track inter-zone pointers, we can mark single zones at a
94 time.
95
96 If we have a zone where we guarantee no inter-zone pointers, we
97 could mark that zone separately.
98
99 The garbage zone should not be marked, and we should return 1 in
100 ggc_set_mark for any object in the garbage zone, which cuts off
101 marking quickly. */
102
103 /* Strategy:
104
105 This garbage-collecting allocator segregates objects into zones.
106 It also segregates objects into "large" and "small" bins. Large
107 objects are greater than page size.
108
109 Pages for small objects are broken up into chunks. The page has
110 a bitmap which marks the start position of each chunk (whether
111 allocated or free). Free chunks are on one of the zone's free
112 lists and contain a pointer to the next free chunk. Chunks in
113 most of the free lists have a fixed size determined by the
114 free list. Chunks in the "other" sized free list have their size
115 stored right after their chain pointer.
116
117 Empty pages (of all sizes) are kept on a single page cache list,
118 and are considered first when new pages are required; they are
119 deallocated at the start of the next collection if they haven't
120 been recycled by then. The free page list is currently per-zone. */
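/* Purely illustrative sketch of a small page (not used by the code; the
   4096-byte page size matches GGC_PAGE_SIZE below, and the 8-byte
   allocation unit is only an assumption for the picture):

       data:        [ obj A | obj B | free chunk | obj C | ...      ]
       alloc bits:    1       1       1            1       0 0 ...

   A bit is set at the first allocation unit of every chunk, whether the
   chunk is allocated or free; the run of zero bits that follows gives
   the extent of the chunk.  Mark bits use the same layout, but are only
   set on live objects during a collection.  */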
121
122 /* Define GGC_DEBUG_LEVEL to print debugging information.
123 0: No debugging output.
124 1: GC statistics only.
125 2: Page-entry allocations/deallocations as well.
126 3: Object allocations as well.
127 4: Object marks as well. */
128 #define GGC_DEBUG_LEVEL (0)
129
130 #ifndef HOST_BITS_PER_PTR
131 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
132 #endif
133
134 /* This structure manages small free chunks. The SIZE field is only
135 initialized if the chunk is in the "other" sized free list. Large
136 chunks are allocated one at a time to their own page, and so don't
137 come in here. */
138
139 struct alloc_chunk {
140 struct alloc_chunk *next_free;
141 unsigned int size;
142 };
143
144 /* The size of the fixed-size portion of a small page descriptor. */
145 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
146
147 /* The collector's idea of the page size. This must be a power of two
148 no larger than the system page size, because pages must be aligned
149 to this amount and are tracked at this granularity in the page
150 table. We choose a size at compile time for efficiency.
151
152 We could make a better guess at compile time if PAGE_SIZE is a
153 constant in system headers, and PAGE_SHIFT is defined... */
154 #define GGC_PAGE_SIZE 4096
155 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
156 #define GGC_PAGE_SHIFT 12
157
158 #if 0
159 /* Alternative definitions which use the runtime page size. */
160 #define GGC_PAGE_SIZE G.pagesize
161 #define GGC_PAGE_MASK G.page_mask
162 #define GGC_PAGE_SHIFT G.lg_pagesize
163 #endif
164
165 /* The size of a small page managed by the garbage collector. This
166 must currently be GGC_PAGE_SIZE, but with a few changes could
167 be any multiple of it to reduce certain kinds of overhead. */
168 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
169
170 /* Free bin information. These numbers may be in need of re-tuning.
171 In general, decreasing the number of free bins would seem to
172 increase the time it takes to allocate... */
173
174 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
175 today. */
176
177 #define NUM_FREE_BINS 64
178 #define FREE_BIN_DELTA MAX_ALIGNMENT
179 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
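/* Worked example of the bin arithmetic above, assuming MAX_ALIGNMENT is
   8 on this host: FREE_BIN_DELTA is then 8, bins 1 through 64 hold
   chunks of exactly 8, 16, ..., 512 bytes, and SIZE_BIN_DOWN (24) is 3.
   Chunks larger than 512 bytes land in the variable-sized bin 0.  */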
180
181 /* Allocation and marking parameters. */
182
183 /* The smallest allocatable unit to keep track of. */
184 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
185
186 /* The smallest markable unit. If we require each allocated object
187 to contain at least two allocatable units, we can use half as many
188 bits for the mark bitmap. But this adds considerable complexity
189 to sweeping. */
190 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
191
192 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
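/* Worked example, assuming MAX_ALIGNMENT is 8 and mark_type is a 32-bit
   unsigned int: BYTES_PER_MARK_WORD is 8 * 8 * 4 = 256 bytes, so a
   4096-byte page needs 4096 / 256 = 16 mark words, and likewise
   4096 / 8 = 512 allocation bits packed into 16 alloc_type words.  */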
193
194 /* We use this structure to determine the alignment required for
195 allocations.
196
197 There are several things wrong with this estimation of alignment.
198
199 The maximum alignment for a structure is often less than the
200 maximum alignment for a basic data type; for instance, on some
201 targets long long must be aligned to sizeof (int) in a structure
202 and sizeof (long long) in a variable. i386-linux is one example;
203 Darwin is another (sometimes, depending on the compiler in use).
204
205 Also, long double is not included. Nothing in GCC uses long
206 double, so we assume that this is OK. On powerpc-darwin, adding
207 long double would bring the maximum alignment up to 16 bytes,
208 and until we need long double (or to vectorize compiler operations)
209 that's painfully wasteful. This will need to change, some day. */
210
211 struct max_alignment {
212 char c;
213 union {
214 HOST_WIDEST_INT i;
215 double d;
216 } u;
217 };
218
219 /* The biggest alignment required. */
220
221 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
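/* Worked example of the offsetof trick above: the leading char forces
   the compiler to pad the structure so that U starts at the union's
   required alignment.  On a typical host where HOST_WIDEST_INT and
   double both need 8-byte alignment (an assumption, not something this
   file checks), offsetof (struct max_alignment, u) is 8, and so
   MAX_ALIGNMENT is 8.  */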
222
223 /* Compute the smallest multiple of F that is >= X. */
224
225 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
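/* For example, ROUND_UP (10, 8) is CEIL (10, 8) * 8 = 2 * 8 = 16;
   requests are padded up to the next multiple of F.  */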
226
227 /* Types to use for the allocation and mark bitmaps. It might be
228 a good idea to add ffsl to libiberty and use unsigned long
229 instead; that could speed us up where long is wider than int. */
230
231 typedef unsigned int alloc_type;
232 typedef unsigned int mark_type;
233 #define alloc_ffs(x) ffs(x)
234
235 /* A page_entry records the status of an allocation page. This is the
236 common data between all three kinds of pages - small, large, and
237 PCH. */
238 typedef struct page_entry
239 {
240 /* The address at which the memory is allocated. */
241 char *page;
242
243 /* The zone that this page entry belongs to. */
244 struct alloc_zone *zone;
245
246 #ifdef GATHER_STATISTICS
247 /* How many collections we've survived. */
248 size_t survived;
249 #endif
250
251 /* Does this page contain small objects, or one large object? */
252 bool large_p;
253
254 /* Is this page part of the loaded PCH? */
255 bool pch_p;
256 } page_entry;
257
258 /* Additional data needed for small pages. */
259 struct small_page_entry
260 {
261 struct page_entry common;
262
263 /* The next small page entry, or NULL if this is the last. */
264 struct small_page_entry *next;
265
266 /* If currently marking this zone, a pointer to the mark bits
267 for this page. If we aren't currently marking this zone,
268 this pointer may be stale (pointing to freed memory). */
269 mark_type *mark_bits;
270
271 /* The allocation bitmap. This array extends far enough to have
272 one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
273 alloc_type alloc_bits[1];
274 };
275
276 /* Additional data needed for large pages. */
277 struct large_page_entry
278 {
279 struct page_entry common;
280
281 /* The next large page entry, or NULL if this is the last. */
282 struct large_page_entry *next;
283
284 /* The number of bytes allocated, not including the page entry. */
285 size_t bytes;
286
287 /* The previous page in the list, so that we can unlink this one. */
288 struct large_page_entry *prev;
289
290 /* During marking, is this object marked? */
291 bool mark_p;
292 };
293
294 /* A two-level tree is used to look up the page-entry for a given
295 pointer. Two chunks of the pointer's bits are extracted to index
296 the first and second levels of the tree, as follows:
297
298                                        HOST_PAGE_SIZE_BITS
299                               32            |      |
300        msb +----------------+----+------+------+ lsb
301                               |     |
302                    PAGE_L1_BITS     |
303                                     |
304                                PAGE_L2_BITS
305
306 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
307 pages are aligned on system page boundaries. The next most
308 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
309 index values in the lookup table, respectively.
310
311 For 32-bit architectures and the settings below, there are no
312 leftover bits. For architectures with wider pointers, the lookup
313 tree points to a list of pages, which must be scanned to find the
314 correct one. */
315
316 #define PAGE_L1_BITS (8)
317 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
318 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
319 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
320
321 #define LOOKUP_L1(p) \
322 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
323
324 #define LOOKUP_L2(p) \
325 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
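/* Worked example, using the default PAGE_L1_BITS of 8 and GGC_PAGE_SHIFT
   of 12 (so PAGE_L2_BITS is 12): for the hypothetical address
   0x1234abcd, LOOKUP_L1 yields 0x12, LOOKUP_L2 yields 0x34a, and the low
   twelve bits, 0xbcd, are the offset within the 4096-byte page.  */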
326
327 #if HOST_BITS_PER_PTR <= 32
328
329 /* On 32-bit hosts, we use a two level page table, as pictured above. */
330 typedef page_entry **page_table[PAGE_L1_SIZE];
331
332 #else
333
334 /* On 64-bit hosts, we use the same two level page tables plus a linked
335 list that disambiguates the top 32-bits. There will almost always be
336 exactly one entry in the list. */
337 typedef struct page_table_chain
338 {
339 struct page_table_chain *next;
340 size_t high_bits;
341 page_entry **table[PAGE_L1_SIZE];
342 } *page_table;
343
344 #endif
345
346 /* The global variables. */
347 static struct globals
348 {
349 /* The linked list of zones. */
350 struct alloc_zone *zones;
351
352 /* Lookup table for associating allocation pages with object addresses. */
353 page_table lookup;
354
355 /* The system's page size, and related constants. */
356 size_t pagesize;
357 size_t lg_pagesize;
358 size_t page_mask;
359
360 /* The size to allocate for a small page entry. This includes
361 the size of the structure and the size of the allocation
362 bitmap. */
363 size_t small_page_overhead;
364
365 #if defined (HAVE_MMAP_DEV_ZERO)
366 /* A file descriptor open to /dev/zero for reading. */
367 int dev_zero_fd;
368 #endif
369
370 /* Allocate pages in chunks of this size, to throttle calls to memory
371 allocation routines. The first page is used, the rest go onto the
372 free list. */
373 size_t quire_size;
374
375 /* The file used for debugging output. */
376 FILE *debug_file;
377 } G;
378
379 /* A zone allocation structure. There is one of these for every
380 distinct allocation zone. */
381 struct alloc_zone
382 {
383 /* The most recent free chunk is saved here, instead of in the linked
384 free list, to decrease list manipulation. It is most likely that we
385 will want this one. */
386 char *cached_free;
387 size_t cached_free_size;
388
389 /* Linked lists of free storage. Slot I, for 1 <= I <= NUM_FREE_BINS, holds
390 chunks of size I * FREE_BIN_DELTA. All other chunks are in slot 0. */
391 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
392
393 /* The highest bin index which might be non-empty. It may turn out
394 to be empty, in which case we have to search downwards. */
395 size_t high_free_bin;
396
397 /* Bytes currently allocated in this zone. */
398 size_t allocated;
399
400 /* Linked list of the small pages in this zone. */
401 struct small_page_entry *pages;
402
403 /* Doubly linked list of large pages in this zone. */
404 struct large_page_entry *large_pages;
405
406 /* If we are currently marking this zone, a pointer to the mark bits. */
407 mark_type *mark_bits;
408
409 /* Name of the zone. */
410 const char *name;
411
412 /* The number of small pages currently allocated in this zone. */
413 size_t n_small_pages;
414
415 /* Bytes allocated at the end of the last collection. */
416 size_t allocated_last_gc;
417
418 /* Total amount of memory mapped. */
419 size_t bytes_mapped;
420
421 /* A cache of free system pages. */
422 struct small_page_entry *free_pages;
423
424 /* Next zone in the linked list of zones. */
425 struct alloc_zone *next_zone;
426
427 /* True if this zone was collected during this collection. */
428 bool was_collected;
429
430 /* True if this zone should be destroyed after the next collection. */
431 bool dead;
432
433 #ifdef GATHER_STATISTICS
434 struct
435 {
436 /* Total memory allocated with ggc_alloc. */
437 unsigned long long total_allocated;
438 /* Total overhead for memory to be allocated with ggc_alloc. */
439 unsigned long long total_overhead;
440
441 /* Total allocations and overhead for sizes less than 32, 64 and 128.
442 These sizes are interesting because they are typical cache line
443 sizes. */
444
445 unsigned long long total_allocated_under32;
446 unsigned long long total_overhead_under32;
447
448 unsigned long long total_allocated_under64;
449 unsigned long long total_overhead_under64;
450
451 unsigned long long total_allocated_under128;
452 unsigned long long total_overhead_under128;
453 } stats;
454 #endif
455 } main_zone;
456
457 /* Some default zones. */
458 struct alloc_zone rtl_zone;
459 struct alloc_zone tree_zone;
460 struct alloc_zone tree_id_zone;
461
462 /* The PCH zone does not need a normal zone structure, and it does
463 not live on the linked list of zones. */
464 struct pch_zone
465 {
466 /* The start of the PCH zone. NULL if there is none. */
467 char *page;
468
469 /* The end of the PCH zone. NULL if there is none. */
470 char *end;
471
472 /* The size of the PCH zone. 0 if there is none. */
473 size_t bytes;
474
475 /* The allocation bitmap for the PCH zone. */
476 alloc_type *alloc_bits;
477
478 /* If we are currently marking, the mark bitmap for the PCH zone.
479 When it is first read in, we could avoid marking the PCH,
480 because it will not contain any pointers to GC memory outside
481 of the PCH; however, the PCH is currently mapped as writable,
482 so we must mark it in case new pointers are added. */
483 mark_type *mark_bits;
484 } pch_zone;
485
486 #ifdef USING_MMAP
487 static char *alloc_anon (char *, size_t, struct alloc_zone *);
488 #endif
489 static struct small_page_entry * alloc_small_page (struct alloc_zone *);
490 static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
491 static void free_chunk (char *, size_t, struct alloc_zone *);
492 static void free_small_page (struct small_page_entry *);
493 static void free_large_page (struct large_page_entry *);
494 static void release_pages (struct alloc_zone *);
495 static void sweep_pages (struct alloc_zone *);
496 static bool ggc_collect_1 (struct alloc_zone *, bool);
497 static void new_ggc_zone_1 (struct alloc_zone *, const char *);
498
499 /* Traverse the page table and find the entry for a page.
500 Die (probably) if the object wasn't allocated via GC. */
501
502 static inline page_entry *
503 lookup_page_table_entry (const void *p)
504 {
505 page_entry ***base;
506 size_t L1, L2;
507
508 #if HOST_BITS_PER_PTR <= 32
509 base = &G.lookup[0];
510 #else
511 page_table table = G.lookup;
512 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
513 while (table->high_bits != high_bits)
514 table = table->next;
515 base = &table->table[0];
516 #endif
517
518 /* Extract the level 1 and 2 indices. */
519 L1 = LOOKUP_L1 (p);
520 L2 = LOOKUP_L2 (p);
521
522 return base[L1][L2];
523 }
524
525 /* Set the page table entry for the page that starts at P. If ENTRY
526 is NULL, clear the entry. */
527
528 static void
529 set_page_table_entry (void *p, page_entry *entry)
530 {
531 page_entry ***base;
532 size_t L1, L2;
533
534 #if HOST_BITS_PER_PTR <= 32
535 base = &G.lookup[0];
536 #else
537 page_table table;
538 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
539 for (table = G.lookup; table; table = table->next)
540 if (table->high_bits == high_bits)
541 goto found;
542
543 /* Not found -- allocate a new table. */
544 table = xcalloc (1, sizeof(*table));
545 table->next = G.lookup;
546 table->high_bits = high_bits;
547 G.lookup = table;
548 found:
549 base = &table->table[0];
550 #endif
551
552 /* Extract the level 1 and 2 indices. */
553 L1 = LOOKUP_L1 (p);
554 L2 = LOOKUP_L2 (p);
555
556 if (base[L1] == NULL)
557 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
558
559 base[L1][L2] = entry;
560 }
561
562 /* Find the page table entry associated with OBJECT. */
563
564 static inline struct page_entry *
565 zone_get_object_page (const void *object)
566 {
567 return lookup_page_table_entry (object);
568 }
569
570 /* Find which element of the alloc_bits array OBJECT should be
571 recorded in. */
572 static inline unsigned int
573 zone_get_object_alloc_word (const void *object)
574 {
575 return (((size_t) object & (GGC_PAGE_SIZE - 1))
576 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
577 }
578
579 /* Find which bit of the appropriate word in the alloc_bits array
580 OBJECT should be recorded in. */
581 static inline unsigned int
582 zone_get_object_alloc_bit (const void *object)
583 {
584 return (((size_t) object / BYTES_PER_ALLOC_BIT)
585 % (8 * sizeof (alloc_type)));
586 }
587
588 /* Find which element of the mark_bits array OBJECT should be recorded
589 in. */
590 static inline unsigned int
591 zone_get_object_mark_word (const void *object)
592 {
593 return (((size_t) object & (GGC_PAGE_SIZE - 1))
594 / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
595 }
596
597 /* Find which bit of the appropriate word in the mark_bits array
598 OBJECT should be recorded in. */
599 static inline unsigned int
600 zone_get_object_mark_bit (const void *object)
601 {
602 return (((size_t) object / BYTES_PER_MARK_BIT)
603 % (8 * sizeof (mark_type)));
604 }
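/* Worked example of the bit arithmetic above, assuming 8-byte allocation
   units and 32-bit alloc_type/mark_type words: an object at page offset
   0x130 corresponds to allocation-bit index 0x130 / 8 = 38, which lives
   in word 38 / 32 = 1 at bit position 38 % 32 = 6.  Because
   BYTES_PER_MARK_BIT equals BYTES_PER_ALLOC_BIT here, the mark-bit
   lookups give the same word and bit.  */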
605
606 /* Set the allocation bit corresponding to OBJECT in its page's
607 bitmap. Used to split this object from the preceding one. */
608 static inline void
609 zone_set_object_alloc_bit (const void *object)
610 {
611 struct small_page_entry *page
612 = (struct small_page_entry *) zone_get_object_page (object);
613 unsigned int start_word = zone_get_object_alloc_word (object);
614 unsigned int start_bit = zone_get_object_alloc_bit (object);
615
616 page->alloc_bits[start_word] |= 1L << start_bit;
617 }
618
619 /* Clear the allocation bit corresponding to OBJECT in PAGE's
620 bitmap. Used to coalesce this object with the preceding
621 one. */
622 static inline void
623 zone_clear_object_alloc_bit (struct small_page_entry *page,
624 const void *object)
625 {
626 unsigned int start_word = zone_get_object_alloc_word (object);
627 unsigned int start_bit = zone_get_object_alloc_bit (object);
628
629 /* Would xor be quicker? */
630 page->alloc_bits[start_word] &= ~(1L << start_bit);
631 }
632
633 /* Find the size of the object which starts at START_WORD and
634 START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
635 Helper function for ggc_get_size and zone_find_object_size. */
636
637 static inline size_t
638 zone_object_size_1 (alloc_type *alloc_bits,
639 size_t start_word, size_t start_bit,
640 size_t max_size)
641 {
642 size_t size;
643 alloc_type alloc_word;
644 int indx;
645
646 /* Load the first word. */
647 alloc_word = alloc_bits[start_word++];
648
649 /* If that was the last bit in this word, we'll want to continue
650 with the next word. Otherwise, handle the rest of this word. */
651 if (start_bit)
652 {
653 indx = alloc_ffs (alloc_word >> start_bit);
654 if (indx)
655 /* indx is 1-based. We started at the bit after the object's
656 start, but we also ended at the bit after the object's end.
657 It cancels out. */
658 return indx * BYTES_PER_ALLOC_BIT;
659
660 /* The extra 1 accounts for the starting unit, before start_bit. */
661 size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
662
663 if (size >= max_size)
664 return max_size;
665
666 alloc_word = alloc_bits[start_word++];
667 }
668 else
669 size = BYTES_PER_ALLOC_BIT;
670
671 while (alloc_word == 0)
672 {
673 size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
674 if (size >= max_size)
675 return max_size;
676 alloc_word = alloc_bits[start_word++];
677 }
678
679 indx = alloc_ffs (alloc_word);
680 return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
681 }
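/* Illustration of the scan above: the callers pass the position of the
   bit just after the object's own allocation bit, and the loop looks for
   the next set bit, which marks the start of the following chunk.  If
   the following chunk begins 5 allocation units after this object does,
   the object is 5 * BYTES_PER_ALLOC_BIT bytes (40 bytes with the 8-byte
   units assumed for this example).  MAX_SIZE caps the result for the
   last object on a page, which has no successor bit.  */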
682
683 /* Find the size of OBJECT on small page PAGE. */
684
685 static inline size_t
686 zone_find_object_size (struct small_page_entry *page,
687 const void *object)
688 {
689 const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
690 unsigned int start_word = zone_get_object_alloc_word (object_midptr);
691 unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
692 size_t max_size = (page->common.page + SMALL_PAGE_SIZE
693 - (char *) object);
694
695 return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
696 max_size);
697 }
698
699 /* Allocate the mark bits for every zone, and set the pointers on each
700 page. */
701 static void
702 zone_allocate_marks (void)
703 {
704 struct alloc_zone *zone;
705
706 for (zone = G.zones; zone; zone = zone->next_zone)
707 {
708 struct small_page_entry *page;
709 mark_type *cur_marks;
710 size_t mark_words, mark_words_per_page;
711 #ifdef ENABLE_CHECKING
712 size_t n = 0;
713 #endif
714
715 mark_words_per_page
716 = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
717 mark_words = zone->n_small_pages * mark_words_per_page;
718 zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
719 mark_words);
720 cur_marks = zone->mark_bits;
721 for (page = zone->pages; page; page = page->next)
722 {
723 page->mark_bits = cur_marks;
724 cur_marks += mark_words_per_page;
725 #ifdef ENABLE_CHECKING
726 n++;
727 #endif
728 }
729 #ifdef ENABLE_CHECKING
730 gcc_assert (n == zone->n_small_pages);
731 #endif
732 }
733
734 /* We don't collect the PCH zone, but we do have to mark it
735 (for now). */
736 if (pch_zone.bytes)
737 pch_zone.mark_bits
738 = (mark_type *) xcalloc (sizeof (mark_type),
739 CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
740 }
741
742 /* After marking and sweeping, release the memory used for mark bits. */
743 static void
744 zone_free_marks (void)
745 {
746 struct alloc_zone *zone;
747
748 for (zone = G.zones; zone; zone = zone->next_zone)
749 if (zone->mark_bits)
750 {
751 free (zone->mark_bits);
752 zone->mark_bits = NULL;
753 }
754
755 if (pch_zone.bytes)
756 {
757 free (pch_zone.mark_bits);
758 pch_zone.mark_bits = NULL;
759 }
760 }
761
762 #ifdef USING_MMAP
763 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
764 (if non-null). The ifdef structure here is intended to cause a
765 compile error unless exactly one of the HAVE_* is defined. */
766
767 static inline char *
768 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
769 {
770 #ifdef HAVE_MMAP_ANON
771 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
772 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
773 #endif
774 #ifdef HAVE_MMAP_DEV_ZERO
775 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
776 MAP_PRIVATE, G.dev_zero_fd, 0);
777 #endif
778
779 if (page == (char *) MAP_FAILED)
780 {
781 perror ("virtual memory exhausted");
782 exit (FATAL_EXIT_CODE);
783 }
784
785 /* Remember that we allocated this memory. */
786 zone->bytes_mapped += size;
787
788 /* Pretend we don't have access to the allocated pages. We'll enable
789 access to smaller pieces of the area in ggc_alloc. Discard the
790 handle to avoid handle leak. */
791 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
792
793 return page;
794 }
795 #endif
796
797 /* Allocate a new page for allocating small objects in ZONE, and
798 return an entry for it. */
799
800 static struct small_page_entry *
801 alloc_small_page (struct alloc_zone *zone)
802 {
803 struct small_page_entry *entry;
804
805 /* Check the list of free pages for one we can use. */
806 entry = zone->free_pages;
807 if (entry != NULL)
808 {
809 /* Recycle the allocated memory from this page ... */
810 zone->free_pages = entry->next;
811 }
812 else
813 {
814 /* We want just one page. Allocate a bunch of them and put the
815 extras on the freelist. (Can only do this optimization with
816 mmap for backing store.) */
817 struct small_page_entry *e, *f = zone->free_pages;
818 int i;
819 char *page;
820
821 page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
822
823 /* This loop counts down so that the chain will be in ascending
824 memory order. */
825 for (i = G.quire_size - 1; i >= 1; i--)
826 {
827 e = xcalloc (1, G.small_page_overhead);
828 e->common.page = page + (i << GGC_PAGE_SHIFT);
829 e->common.zone = zone;
830 e->next = f;
831 f = e;
832 set_page_table_entry (e->common.page, &e->common);
833 }
834
835 zone->free_pages = f;
836
837 entry = xcalloc (1, G.small_page_overhead);
838 entry->common.page = page;
839 entry->common.zone = zone;
840 set_page_table_entry (page, &entry->common);
841 }
842
843 zone->n_small_pages++;
844
845 if (GGC_DEBUG_LEVEL >= 2)
846 fprintf (G.debug_file,
847 "Allocating %s page at %p, data %p-%p\n",
848 entry->common.zone->name, (PTR) entry, entry->common.page,
849 entry->common.page + SMALL_PAGE_SIZE - 1);
850
851 return entry;
852 }
853
854 /* Allocate a large page of size SIZE in ZONE. */
855
856 static struct large_page_entry *
857 alloc_large_page (size_t size, struct alloc_zone *zone)
858 {
859 struct large_page_entry *entry;
860 char *page;
861 size_t needed_size;
862
863 needed_size = size + sizeof (struct large_page_entry);
864 page = xmalloc (needed_size);
865
866 entry = (struct large_page_entry *) page;
867
868 entry->next = NULL;
869 entry->common.page = page + sizeof (struct large_page_entry);
870 entry->common.large_p = true;
871 entry->common.pch_p = false;
872 entry->common.zone = zone;
873 #ifdef GATHER_STATISTICS
874 entry->common.survived = 0;
875 #endif
876 entry->mark_p = false;
877 entry->bytes = size;
878 entry->prev = NULL;
879
880 set_page_table_entry (entry->common.page, &entry->common);
881
882 if (GGC_DEBUG_LEVEL >= 2)
883 fprintf (G.debug_file,
884 "Allocating %s large page at %p, data %p-%p\n",
885 entry->common.zone->name, (PTR) entry, entry->common.page,
886 entry->common.page + size - 1);
887
888 return entry;
889 }
890
891
892 /* For a page that is no longer needed, put it on the free page list. */
893
894 static inline void
895 free_small_page (struct small_page_entry *entry)
896 {
897 if (GGC_DEBUG_LEVEL >= 2)
898 fprintf (G.debug_file,
899 "Deallocating %s page at %p, data %p-%p\n",
900 entry->common.zone->name, (PTR) entry,
901 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
902
903 gcc_assert (!entry->common.large_p);
904
905 /* Mark the page as inaccessible. Discard the handle to
906 avoid handle leak. */
907 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
908 SMALL_PAGE_SIZE));
909
910 entry->next = entry->common.zone->free_pages;
911 entry->common.zone->free_pages = entry;
912 entry->common.zone->n_small_pages--;
913 }
914
915 /* Release a large page that is no longer needed. */
916
917 static inline void
918 free_large_page (struct large_page_entry *entry)
919 {
920 if (GGC_DEBUG_LEVEL >= 2)
921 fprintf (G.debug_file,
922 "Deallocating %s page at %p, data %p-%p\n",
923 entry->common.zone->name, (PTR) entry,
924 entry->common.page, entry->common.page + entry->bytes - 1);
925
926 gcc_assert (entry->common.large_p);
927
928 set_page_table_entry (entry->common.page, NULL);
929 free (entry);
930 }
931
932 /* Release the free page cache to the system. */
933
934 static void
935 release_pages (struct alloc_zone *zone)
936 {
937 #ifdef USING_MMAP
938 struct small_page_entry *p, *next;
939 char *start;
940 size_t len;
941
942 /* Gather up adjacent pages so they are unmapped together. */
943 p = zone->free_pages;
944
945 while (p)
946 {
947 start = p->common.page;
948 next = p->next;
949 len = SMALL_PAGE_SIZE;
950 set_page_table_entry (p->common.page, NULL);
951 p = next;
952
953 while (p && p->common.page == start + len)
954 {
955 next = p->next;
956 len += SMALL_PAGE_SIZE;
957 set_page_table_entry (p->common.page, NULL);
958 p = next;
959 }
960
961 munmap (start, len);
962 zone->bytes_mapped -= len;
963 }
964
965 zone->free_pages = NULL;
966 #endif
967 }
968
969 /* Place the block at PTR of size SIZE on the free list for ZONE. */
970
971 static inline void
972 free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
973 {
974 struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
975 size_t bin = 0;
976
977 bin = SIZE_BIN_DOWN (size);
978 gcc_assert (bin != 0);
979 if (bin > NUM_FREE_BINS)
980 {
981 bin = 0;
982 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
983 chunk->size = size;
984 chunk->next_free = zone->free_chunks[bin];
985 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
986 size - sizeof (struct alloc_chunk)));
987 }
988 else
989 {
990 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
991 chunk->next_free = zone->free_chunks[bin];
992 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
993 size - sizeof (struct alloc_chunk *)));
994 }
995
996 zone->free_chunks[bin] = chunk;
997 if (bin > zone->high_free_bin)
998 zone->high_free_bin = bin;
999 if (GGC_DEBUG_LEVEL >= 3)
1000 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
1001 }
1002
1003 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1004
1005 void *
1006 ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1007 MEM_STAT_DECL)
1008 {
1009 size_t bin;
1010 size_t csize;
1011 struct small_page_entry *entry;
1012 struct alloc_chunk *chunk, **pp;
1013 void *result;
1014 size_t size = orig_size;
1015
1016 /* Make sure that zero-sized allocations get a unique and freeable
1017 pointer. */
1018 if (size == 0)
1019 size = MAX_ALIGNMENT;
1020 else
1021 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
1022
1023 /* Try to allocate the object from several different sources. Each
1024 of these cases is responsible for setting RESULT and SIZE to
1025 describe the allocated block, before jumping to FOUND. If a
1026 chunk is split, the allocate bit for the new chunk should also be
1027 set.
1028
1029 Large objects are handled specially. However, they'll just fail
1030 the next couple of conditions, so we can wait to check for them
1031 below. The large object case is relatively rare (< 1%), so this
1032 is a win. */
1033
1034 /* First try to split the last chunk we allocated. For best
1035 fragmentation behavior it would be better to look for a
1036 free bin of the appropriate size for a small object. However,
1037 we're unlikely (1% - 7%) to find one, and this gives better
1038 locality behavior anyway. This case handles the lion's share
1039 of all calls to this function. */
1040 if (size <= zone->cached_free_size)
1041 {
1042 result = zone->cached_free;
1043
1044 zone->cached_free_size -= size;
1045 if (zone->cached_free_size)
1046 {
1047 zone->cached_free += size;
1048 zone_set_object_alloc_bit (zone->cached_free);
1049 }
1050
1051 goto found;
1052 }
1053
1054 /* Next, try to find a free bin of the exactly correct size. */
1055
1056 /* We want to round SIZE up, rather than down, but we know it's
1057 already aligned to at least FREE_BIN_DELTA, so we can just
1058 shift. */
1059 bin = SIZE_BIN_DOWN (size);
1060
1061 if (bin <= NUM_FREE_BINS
1062 && (chunk = zone->free_chunks[bin]) != NULL)
1063 {
1064 /* We have a chunk of the right size. Pull it off the free list
1065 and use it. */
1066
1067 zone->free_chunks[bin] = chunk->next_free;
1068
1069 /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1070 == FREE_BIN_DELTA. */
1071 result = chunk;
1072
1073 /* The allocation bits are already set correctly. HIGH_FREE_BIN
1074 may now be wrong, if this was the last chunk in the high bin.
1075 Rather than fixing it up now, wait until we need to search
1076 the free bins. */
1077
1078 goto found;
1079 }
1080
1081 /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1082 to split. We can find one in the too-big bin, or in the largest
1083 sized bin with a chunk in it. Try the largest normal-sized bin
1084 first. */
1085
1086 if (zone->high_free_bin > bin)
1087 {
1088 /* Find the highest numbered free bin. It will be at or below
1089 the watermark. */
1090 while (zone->high_free_bin > bin
1091 && zone->free_chunks[zone->high_free_bin] == NULL)
1092 zone->high_free_bin--;
1093
1094 if (zone->high_free_bin > bin)
1095 {
1096 size_t tbin = zone->high_free_bin;
1097 chunk = zone->free_chunks[tbin];
1098
1099 /* Remove the chunk from its previous bin. */
1100 zone->free_chunks[tbin] = chunk->next_free;
1101
1102 result = (char *) chunk;
1103
1104 /* Save the rest of the chunk for future allocation. */
1105 if (zone->cached_free_size)
1106 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1107
1108 chunk = (struct alloc_chunk *) ((char *) result + size);
1109 zone->cached_free = (char *) chunk;
1110 zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1111
1112 /* Mark the new free chunk as an object, so that we can
1113 find the size of the newly allocated object. */
1114 zone_set_object_alloc_bit (chunk);
1115
1116 /* HIGH_FREE_BIN may now be wrong, if this was the last
1117 chunk in the high bin. Rather than fixing it up now,
1118 wait until we need to search the free bins. */
1119
1120 goto found;
1121 }
1122 }
1123
1124 /* Failing that, look through the "other" bucket for a chunk
1125 that is large enough. */
1126 pp = &(zone->free_chunks[0]);
1127 chunk = *pp;
1128 while (chunk && chunk->size < size)
1129 {
1130 pp = &chunk->next_free;
1131 chunk = *pp;
1132 }
1133
1134 if (chunk)
1135 {
1136 /* Remove the chunk from its previous bin. */
1137 *pp = chunk->next_free;
1138
1139 result = (char *) chunk;
1140
1141 /* Save the rest of the chunk for future allocation, if there's any
1142 left over. */
1143 csize = chunk->size;
1144 if (csize > size)
1145 {
1146 if (zone->cached_free_size)
1147 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1148
1149 chunk = (struct alloc_chunk *) ((char *) result + size);
1150 zone->cached_free = (char *) chunk;
1151 zone->cached_free_size = csize - size;
1152
1153 /* Mark the new free chunk as an object. */
1154 zone_set_object_alloc_bit (chunk);
1155 }
1156
1157 goto found;
1158 }
1159
1160 /* Handle large allocations. We could choose any threshold between
1161 GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1162 GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
1163 be guaranteed to have a unique entry in the lookup table. Large
1164 allocations will always fall through to here. */
1165 if (size > GGC_PAGE_SIZE)
1166 {
1167 struct large_page_entry *entry = alloc_large_page (size, zone);
1168
1169 #ifdef GATHER_STATISTICS
1170 entry->common.survived = 0;
1171 #endif
1172
1173 entry->next = zone->large_pages;
1174 if (zone->large_pages)
1175 zone->large_pages->prev = entry;
1176 zone->large_pages = entry;
1177
1178 result = entry->common.page;
1179
1180 goto found;
1181 }
1182
1183 /* Failing everything above, allocate a new small page. */
1184
1185 entry = alloc_small_page (zone);
1186 entry->next = zone->pages;
1187 zone->pages = entry;
1188
1189 /* Mark the first chunk in the new page. */
1190 entry->alloc_bits[0] = 1;
1191
1192 result = entry->common.page;
1193 if (size < SMALL_PAGE_SIZE)
1194 {
1195 if (zone->cached_free_size)
1196 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1197
1198 zone->cached_free = (char *) result + size;
1199 zone->cached_free_size = SMALL_PAGE_SIZE - size;
1200
1201 /* Mark the new free chunk as an object. */
1202 zone_set_object_alloc_bit (zone->cached_free);
1203 }
1204
1205 found:
1206
1207 /* We could save TYPE in the chunk, but we don't use that for
1208 anything yet. If we wanted to, we could do it by adding it
1209 either before the beginning of the chunk or after its end,
1210 and adjusting the size and pointer appropriately. */
1211
1212 /* We'll probably write to this after we return. */
1213 prefetchw (result);
1214
1215 #ifdef ENABLE_GC_CHECKING
1216 /* `Poison' the entire allocated object. */
1217 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1218 memset (result, 0xaf, size);
1219 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
1220 size - orig_size));
1221 #endif
1222
1223 /* Tell Valgrind that the memory is there, but its content isn't
1224 defined. The bytes at the end of the object are still marked
1225 inaccessible. */
1226 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
1227
1228 /* Keep track of how many bytes are being allocated. This
1229 information is used in deciding when to collect. */
1230 zone->allocated += size;
1231
1232 #ifdef GATHER_STATISTICS
1233 ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
1234
1235 {
1236 size_t object_size = size;
1237 size_t overhead = object_size - orig_size;
1238
1239 zone->stats.total_overhead += overhead;
1240 zone->stats.total_allocated += object_size;
1241
1242 if (orig_size <= 32)
1243 {
1244 zone->stats.total_overhead_under32 += overhead;
1245 zone->stats.total_allocated_under32 += object_size;
1246 }
1247 if (orig_size <= 64)
1248 {
1249 zone->stats.total_overhead_under64 += overhead;
1250 zone->stats.total_allocated_under64 += object_size;
1251 }
1252 if (orig_size <= 128)
1253 {
1254 zone->stats.total_overhead_under128 += overhead;
1255 zone->stats.total_allocated_under128 += object_size;
1256 }
1257 }
1258 #endif
1259
1260 if (GGC_DEBUG_LEVEL >= 3)
1261 fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
1262 (unsigned long) size, result);
1263
1264 return result;
1265 }
1266
1267 /* Allocate SIZE bytes of memory whose type is GTE, in the zone
1268 appropriate for that type. */
1269
1270 void *
1271 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1272 MEM_STAT_DECL)
1273 {
1274 switch (gte)
1275 {
1276 case gt_ggc_e_14lang_tree_node:
1277 return ggc_alloc_zone_stat (size, &tree_zone PASS_MEM_STAT);
1278
1279 case gt_ggc_e_7rtx_def:
1280 return ggc_alloc_zone_stat (size, &rtl_zone PASS_MEM_STAT);
1281
1282 case gt_ggc_e_9rtvec_def:
1283 return ggc_alloc_zone_stat (size, &rtl_zone PASS_MEM_STAT);
1284
1285 default:
1286 return ggc_alloc_zone_stat (size, &main_zone PASS_MEM_STAT);
1287 }
1288 }
1289
1290 /* Normal ggc_alloc simply allocates into the main zone. */
1291
1292 void *
1293 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1294 {
1295 return ggc_alloc_zone_stat (size, &main_zone PASS_MEM_STAT);
1296 }
1297
1298 /* Poison the chunk. */
1299 #ifdef ENABLE_GC_CHECKING
1300 #define poison_region(PTR, SIZE) \
1301 memset ((PTR), 0xa5, (SIZE))
1302 #else
1303 #define poison_region(PTR, SIZE)
1304 #endif
1305
1306 /* Free the object at P. */
1307
1308 void
1309 ggc_free (void *p)
1310 {
1311 struct page_entry *page;
1312
1313 #ifdef GATHER_STATISTICS
1314 ggc_free_overhead (p);
1315 #endif
1316
1317 poison_region (p, ggc_get_size (p));
1318
1319 page = zone_get_object_page (p);
1320
1321 if (page->large_p)
1322 {
1323 struct large_page_entry *large_page
1324 = (struct large_page_entry *) page;
1325
1326 /* Remove the page from the linked list. */
1327 if (large_page->prev)
1328 large_page->prev->next = large_page->next;
1329 else
1330 {
1331 gcc_assert (large_page->common.zone->large_pages == large_page);
1332 large_page->common.zone->large_pages = large_page->next;
1333 }
1334 if (large_page->next)
1335 large_page->next->prev = large_page->prev;
1336
1337 large_page->common.zone->allocated -= large_page->bytes;
1338
1339 /* Release the memory associated with this object. */
1340 free_large_page (large_page);
1341 }
1342 else if (page->pch_p)
1343 /* Don't do anything. We won't allocate a new object from the
1344 PCH zone so there's no point in releasing anything. */
1345 ;
1346 else
1347 {
1348 size_t size = ggc_get_size (p);
1349
1350 page->zone->allocated -= size;
1351
1352 /* Add the chunk to the free list. We don't bother with coalescing,
1353 since we are likely to want a chunk of this size again. */
1354 free_chunk (p, size, page->zone);
1355 }
1356 }
1357
1358 /* If P is not marked, mark it and return false. Otherwise return true.
1359 P must have been allocated by the GC allocator; it mustn't point to
1360 static objects, stack variables, or memory allocated with malloc. */
1361
1362 int
1363 ggc_set_mark (const void *p)
1364 {
1365 struct page_entry *page;
1366 const char *ptr = (const char *) p;
1367
1368 page = zone_get_object_page (p);
1369
1370 if (page->pch_p)
1371 {
1372 size_t mark_word, mark_bit, offset;
1373 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1374 mark_word = offset / (8 * sizeof (mark_type));
1375 mark_bit = offset % (8 * sizeof (mark_type));
1376
1377 if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1378 return 1;
1379 pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1380 }
1381 else if (page->large_p)
1382 {
1383 struct large_page_entry *large_page
1384 = (struct large_page_entry *) page;
1385
1386 if (large_page->mark_p)
1387 return 1;
1388 large_page->mark_p = true;
1389 }
1390 else
1391 {
1392 struct small_page_entry *small_page
1393 = (struct small_page_entry *) page;
1394
1395 if (small_page->mark_bits[zone_get_object_mark_word (p)]
1396 & (1 << zone_get_object_mark_bit (p)))
1397 return 1;
1398 small_page->mark_bits[zone_get_object_mark_word (p)]
1399 |= (1 << zone_get_object_mark_bit (p));
1400 }
1401
1402 if (GGC_DEBUG_LEVEL >= 4)
1403 fprintf (G.debug_file, "Marking %p\n", p);
1404
1405 return 0;
1406 }
1407
1408 /* Return 1 if P has been marked, zero otherwise.
1409 P must have been allocated by the GC allocator; it mustn't point to
1410 static objects, stack variables, or memory allocated with malloc. */
1411
1412 int
1413 ggc_marked_p (const void *p)
1414 {
1415 struct page_entry *page;
1416 const char *ptr = p;
1417
1418 page = zone_get_object_page (p);
1419
1420 if (page->pch_p)
1421 {
1422 size_t mark_word, mark_bit, offset;
1423 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1424 mark_word = offset / (8 * sizeof (mark_type));
1425 mark_bit = offset % (8 * sizeof (mark_type));
1426
1427 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1428 }
1429
1430 if (page->large_p)
1431 {
1432 struct large_page_entry *large_page
1433 = (struct large_page_entry *) page;
1434
1435 return large_page->mark_p;
1436 }
1437 else
1438 {
1439 struct small_page_entry *small_page
1440 = (struct small_page_entry *) page;
1441
1442 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1443 & (1 << zone_get_object_mark_bit (p)));
1444 }
1445 }
1446
1447 /* Return the size of the gc-able object P. */
1448
1449 size_t
1450 ggc_get_size (const void *p)
1451 {
1452 struct page_entry *page;
1453 const char *ptr = (const char *) p;
1454
1455 page = zone_get_object_page (p);
1456
1457 if (page->pch_p)
1458 {
1459 size_t alloc_word, alloc_bit, offset, max_size;
1460 offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1461 alloc_word = offset / (8 * sizeof (alloc_type));
1462 alloc_bit = offset % (8 * sizeof (alloc_type));
1463 max_size = pch_zone.bytes - (ptr - pch_zone.page);
1464 return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1465 max_size);
1466 }
1467
1468 if (page->large_p)
1469 return ((struct large_page_entry *)page)->bytes;
1470 else
1471 return zone_find_object_size ((struct small_page_entry *) page, p);
1472 }
1473
1474 /* Initialize the ggc-zone-mmap allocator. */
1475 void
1476 init_ggc (void)
1477 {
1478 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1479 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1480 the current assumptions to hold. */
1481
1482 gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1483
1484 /* Set up the main zone by hand. */
1485 main_zone.name = "Main zone";
1486 G.zones = &main_zone;
1487
1488 /* Allocate the default zones. */
1489 new_ggc_zone_1 (&rtl_zone, "RTL zone");
1490 new_ggc_zone_1 (&tree_zone, "Tree zone");
1491 new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1492
1493 G.pagesize = getpagesize();
1494 G.lg_pagesize = exact_log2 (G.pagesize);
1495 G.page_mask = ~(G.pagesize - 1);
1496
1497 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
1498 gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1499
1500 /* Allocate 16 system pages at a time. */
1501 G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
1502
1503 /* Calculate the size of the allocation bitmap and other overhead. */
1504 /* Right now we allocate bits for the page header and bitmap. These
1505 are wasted, but a little tricky to eliminate. */
1506 G.small_page_overhead
1507 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1508 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1509
1510 #ifdef HAVE_MMAP_DEV_ZERO
1511 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1512 gcc_assert (G.dev_zero_fd != -1);
1513 #endif
1514
1515 #if 0
1516 G.debug_file = fopen ("ggc-mmap.debug", "w");
1517 setlinebuf (G.debug_file);
1518 #else
1519 G.debug_file = stdout;
1520 #endif
1521
1522 #ifdef USING_MMAP
1523 /* StunOS has an amazing off-by-one error for the first mmap allocation
1524 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1525 believe, is an unaligned page allocation, which would cause us to
1526 hork badly if we tried to use it. */
1527 {
1528 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1529 struct small_page_entry *e;
1530 if ((size_t)p & (G.pagesize - 1))
1531 {
1532 /* How losing. Discard this one and try another. If we still
1533 can't get something useful, give up. */
1534
1535 p = alloc_anon (NULL, G.pagesize, &main_zone);
1536 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1537 }
1538
1539 if (GGC_PAGE_SIZE == G.pagesize)
1540 {
1541 /* We have a good page, might as well hold onto it... */
1542 e = xcalloc (1, G.small_page_overhead);
1543 e->common.page = p;
1544 e->common.zone = &main_zone;
1545 e->next = main_zone.free_pages;
1546 set_page_table_entry (e->common.page, &e->common);
1547 main_zone.free_pages = e;
1548 }
1549 else
1550 {
1551 munmap (p, G.pagesize);
1552 }
1553 }
1554 #endif
1555 }
1556
1557 /* Start a new GGC zone. */
1558
1559 static void
1560 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1561 {
1562 new_zone->name = name;
1563 new_zone->next_zone = G.zones->next_zone;
1564 G.zones->next_zone = new_zone;
1565 }
1566
1567 struct alloc_zone *
1568 new_ggc_zone (const char * name)
1569 {
1570 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1571 new_ggc_zone_1 (new_zone, name);
1572 return new_zone;
1573 }
1574
1575 /* Destroy a GGC zone. */
1576 void
1577 destroy_ggc_zone (struct alloc_zone * dead_zone)
1578 {
1579 struct alloc_zone *z;
1580
1581 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1582 /* Just find that zone. */
1583 continue;
1584
1585 /* We should have found the zone in the list. Anything else is fatal. */
1586 gcc_assert (z);
1587
1588 /* z is dead, baby. z is dead. */
1589 z->dead = true;
1590 }
1591
1592 /* Free all empty pages and objects within a page for a given zone. */
1593
1594 static void
1595 sweep_pages (struct alloc_zone *zone)
1596 {
1597 struct large_page_entry **lpp, *lp, *lnext;
1598 struct small_page_entry **spp, *sp, *snext;
1599 char *last_free;
1600 size_t allocated = 0;
1601 bool nomarksinpage;
1602
1603 /* First, reset the free_chunks lists, since we are going to
1604 re-free free chunks in hopes of coalescing them into large chunks. */
1605 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1606 zone->high_free_bin = 0;
1607 zone->cached_free = NULL;
1608 zone->cached_free_size = 0;
1609
1610 /* Large pages are all or none affairs. Either they are completely
1611 empty, or they are completely full. */
1612 lpp = &zone->large_pages;
1613 for (lp = zone->large_pages; lp != NULL; lp = lnext)
1614 {
1615 gcc_assert (lp->common.large_p);
1616
1617 lnext = lp->next;
1618
1619 #ifdef GATHER_STATISTICS
1620 /* This page has now survived another collection. */
1621 lp->common.survived++;
1622 #endif
1623
1624 if (lp->mark_p)
1625 {
1626 lp->mark_p = false;
1627 allocated += lp->bytes;
1628 lpp = &lp->next;
1629 }
1630 else
1631 {
1632 *lpp = lnext;
1633 #ifdef ENABLE_GC_CHECKING
1634 /* Poison the page. */
1635 memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1636 #endif
1637 if (lp->prev)
1638 lp->prev->next = lp->next;
1639 if (lp->next)
1640 lp->next->prev = lp->prev;
1641 free_large_page (lp);
1642 }
1643 }
1644
1645 spp = &zone->pages;
1646 for (sp = zone->pages; sp != NULL; sp = snext)
1647 {
1648 char *object, *last_object;
1649 char *end;
1650 alloc_type *alloc_word_p;
1651 mark_type *mark_word_p;
1652
1653 gcc_assert (!sp->common.large_p);
1654
1655 snext = sp->next;
1656
1657 #ifdef GATHER_STATISTICS
1658 /* This page has now survived another collection. */
1659 sp->common.survived++;
1660 #endif
1661
1662 /* Step through all chunks, consolidate those that are free and
1663 insert them into the free lists. Note that consolidation
1664 slows down collection slightly. */
1665
1666 last_object = object = sp->common.page;
1667 end = sp->common.page + SMALL_PAGE_SIZE;
1668 last_free = NULL;
1669 nomarksinpage = true;
1670 mark_word_p = sp->mark_bits;
1671 alloc_word_p = sp->alloc_bits;
1672
1673 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1674
1675 object = sp->common.page;
1676 do
1677 {
1678 unsigned int i, n;
1679 alloc_type alloc_word;
1680 mark_type mark_word;
1681
1682 alloc_word = *alloc_word_p++;
1683 mark_word = *mark_word_p++;
1684
1685 if (mark_word)
1686 nomarksinpage = false;
1687
1688 /* There ought to be some way to do this without looping... */
1689 i = 0;
1690 while ((n = alloc_ffs (alloc_word)) != 0)
1691 {
1692 /* Extend the current state for n - 1 bits. We can't
1693 shift alloc_word by n, even though it isn't used in the
1694 loop, in case only the highest bit was set. */
1695 alloc_word >>= n - 1;
1696 mark_word >>= n - 1;
1697 object += BYTES_PER_MARK_BIT * (n - 1);
1698
1699 if (mark_word & 1)
1700 {
1701 if (last_free)
1702 {
1703 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1704 object
1705 - last_free));
1706 poison_region (last_free, object - last_free);
1707 free_chunk (last_free, object - last_free, zone);
1708 last_free = NULL;
1709 }
1710 else
1711 allocated += object - last_object;
1712 last_object = object;
1713 }
1714 else
1715 {
1716 if (last_free == NULL)
1717 {
1718 last_free = object;
1719 allocated += object - last_object;
1720 }
1721 else
1722 zone_clear_object_alloc_bit (sp, object);
1723 }
1724
1725 /* Shift to just after the alloc bit we handled. */
1726 alloc_word >>= 1;
1727 mark_word >>= 1;
1728 object += BYTES_PER_MARK_BIT;
1729
1730 i += n;
1731 }
1732
1733 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1734 }
1735 while (object < end);
1736
1737 if (nomarksinpage)
1738 {
1739 *spp = snext;
1740 #ifdef ENABLE_GC_CHECKING
1741 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
1742 /* Poison the page. */
1743 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1744 #endif
1745 free_small_page (sp);
1746 continue;
1747 }
1748 else if (last_free)
1749 {
1750 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1751 object - last_free));
1752 poison_region (last_free, object - last_free);
1753 free_chunk (last_free, object - last_free, zone);
1754 }
1755 else
1756 allocated += object - last_object;
1757
1758 spp = &sp->next;
1759 }
1760
1761 zone->allocated = allocated;
1762 }
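/* Small illustration of the sweep loop above (bit patterns invented for
   the example): suppose a page starts with three chunks whose allocation
   bits are set at units 0, 3 and 5, and only the first chunk's mark bit
   is set.  The first chunk survives and its bytes stay counted in the
   zone's allocated total; the second chunk starts a free run; the third
   has its allocation bit cleared so that it coalesces with the second;
   and the combined free block is handed to free_chunk when the next live
   object, or the end of the page, is reached.  */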
1763
1764 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1765 is true if we need to mark before sweeping, false if some other
1766 zone collection has already performed marking for us. Returns true
1767 if we collected, false otherwise. */
1768
1769 static bool
1770 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1771 {
1772 #if 0
1773 /* */
1774 {
1775 int i;
1776 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1777 {
1778 struct alloc_chunk *chunk;
1779 int n, tot;
1780
1781 n = 0;
1782 tot = 0;
1783 chunk = zone->free_chunks[i];
1784 while (chunk)
1785 {
1786 n++;
1787 tot += chunk->size;
1788 chunk = chunk->next_free;
1789 }
1790 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1791 i, n, tot);
1792 }
1793 }
1794 /* */
1795 #endif
1796
1797 if (!quiet_flag)
1798 fprintf (stderr, " {%s GC %luk -> ",
1799 zone->name, (unsigned long) zone->allocated / 1024);
1800
1801 /* Zero the total allocated bytes. This will be recalculated in the
1802 sweep phase. */
1803 zone->allocated = 0;
1804
1805 /* Release the pages we freed the last time we collected, but didn't
1806 reuse in the interim. */
1807 release_pages (zone);
1808
1809 if (need_marking)
1810 {
1811 zone_allocate_marks ();
1812 ggc_mark_roots ();
1813 #ifdef GATHER_STATISTICS
1814 ggc_prune_overhead_list ();
1815 #endif
1816 }
1817
1818 sweep_pages (zone);
1819 zone->was_collected = true;
1820 zone->allocated_last_gc = zone->allocated;
1821
1822 if (!quiet_flag)
1823 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1824 return true;
1825 }
1826
1827 #ifdef GATHER_STATISTICS
1828 /* Calculate the average page survival rate in terms of number of
1829 collections. */
1830
1831 static float
1832 calculate_average_page_survival (struct alloc_zone *zone)
1833 {
1834 float count = 0.0;
1835 float survival = 0.0;
1836 struct small_page_entry *p;
1837 struct large_page_entry *lp;
1838 for (p = zone->pages; p; p = p->next)
1839 {
1840 count += 1.0;
1841 survival += p->common.survived;
1842 }
1843 for (lp = zone->large_pages; lp; lp = lp->next)
1844 {
1845 count += 1.0;
1846 survival += lp->common.survived;
1847 }
1848 return survival/count;
1849 }
1850 #endif
1851
1852 /* Top level collection routine. */
1853
1854 void
1855 ggc_collect (void)
1856 {
1857 struct alloc_zone *zone;
1858 bool marked = false;
1859
1860 timevar_push (TV_GC);
1861
1862 if (!ggc_force_collect)
1863 {
1864 float allocated_last_gc = 0, allocated = 0, min_expand;
1865
1866 for (zone = G.zones; zone; zone = zone->next_zone)
1867 {
1868 allocated_last_gc += zone->allocated_last_gc;
1869 allocated += zone->allocated;
1870 }
1871
1872 allocated_last_gc =
1873 MAX (allocated_last_gc,
1874 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1875 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1876
1877 if (allocated < allocated_last_gc + min_expand)
1878 {
1879 timevar_pop (TV_GC);
1880 return;
1881 }
1882 }
1883
1884 /* Start by possibly collecting the main zone. */
1885 main_zone.was_collected = false;
1886 marked |= ggc_collect_1 (&main_zone, true);
1887
1888 /* In order to keep the number of collections down, we don't
1889 collect other zones unless we are collecting the main zone. This
1890 gives us roughly the same number of collections as we used to
1891 have with the old gc. The number of collections is important
1892 because our main slowdown (according to profiling) is now in
1893 marking. So if we mark twice as often as we used to, we'll be
1894 twice as slow. Hopefully we'll avoid this cost when we mark
1895 zone-at-a-time. */
1896 /* NOTE drow/2004-07-28: We now always collect the main zone, but
1897 keep this code in case the heuristics are further refined. */
1898
1899 if (main_zone.was_collected)
1900 {
1901 struct alloc_zone *zone;
1902
1903 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1904 {
1905 zone->was_collected = false;
1906 marked |= ggc_collect_1 (zone, !marked);
1907 }
1908 }
1909
1910 #ifdef GATHER_STATISTICS
1911 /* Print page survival stats, if someone wants them. */
1912 if (GGC_DEBUG_LEVEL >= 2)
1913 {
1914 for (zone = G.zones; zone; zone = zone->next_zone)
1915 {
1916 if (zone->was_collected)
1917 {
1918 float f = calculate_average_page_survival (zone);
1919 printf ("Average page survival in zone `%s' is %f\n",
1920 zone->name, f);
1921 }
1922 }
1923 }
1924 #endif
1925
1926 if (marked)
1927 zone_free_marks ();
1928
1929 /* Free dead zones. */
1930 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1931 {
1932 if (zone->next_zone->dead)
1933 {
1934 struct alloc_zone *dead_zone = zone->next_zone;
1935
1936 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1937
1938 /* The zone must be empty. */
1939 gcc_assert (!dead_zone->allocated);
1940
1941 /* Unchain the dead zone, release all its pages and free it. */
1942 zone->next_zone = zone->next_zone->next_zone;
1943 release_pages (dead_zone);
1944 free (dead_zone);
1945 }
1946 }
1947
1948 timevar_pop (TV_GC);
1949 }
1950
1951 /* Print allocation statistics. */
1952 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1953 ? (x) \
1954 : ((x) < 1024*1024*10 \
1955 ? (x) / 1024 \
1956 : (x) / (1024*1024))))
1957 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
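/* For illustration: SCALE (5000) is 5000 and LABEL (5000) is ' ';
   SCALE (2097152) is 2048 and LABEL (2097152) is 'k';
   SCALE (31457280) is 30 and LABEL (31457280) is 'M'.  Together they
   print a byte count in the most readable unit.  */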
1958
1959 void
1960 ggc_print_statistics (void)
1961 {
1962 struct alloc_zone *zone;
1963 struct ggc_statistics stats;
1964 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1965 size_t pte_overhead, i;
1966
1967 /* Clear the statistics. */
1968 memset (&stats, 0, sizeof (stats));
1969
1970 /* Make sure collection will really occur. */
1971 ggc_force_collect = true;
1972
1973 /* Collect and print the statistics common across collectors. */
1974 ggc_print_common_statistics (stderr, &stats);
1975
1976 ggc_force_collect = false;
1977
1978 /* Release free pages so that we will not count the bytes allocated
1979 there as part of the total allocated memory. */
1980 for (zone = G.zones; zone; zone = zone->next_zone)
1981 release_pages (zone);
1982
1983 /* Collect some information about the various sizes of
1984 allocation. */
1985 fprintf (stderr,
1986 "Memory still allocated at the end of the compilation process\n");
1987
1988 fprintf (stderr, "%20s %10s %10s %10s\n",
1989 "Zone", "Allocated", "Used", "Overhead");
1990 for (zone = G.zones; zone; zone = zone->next_zone)
1991 {
1992 struct large_page_entry *large_page;
1993 size_t overhead, allocated, in_use;
1994
1995 /* Skip empty zones. */
1996 if (!zone->pages && !zone->large_pages)
1997 continue;
1998
1999 allocated = in_use = 0;
2000
2001 overhead = sizeof (struct alloc_zone);
2002
2003 for (large_page = zone->large_pages; large_page != NULL;
2004 large_page = large_page->next)
2005 {
2006 allocated += large_page->bytes;
2007 in_use += large_page->bytes;
2008 overhead += sizeof (struct large_page_entry);
2009 }
2010
2011 /* There's no easy way to walk through the small pages finding
2012 used and unused objects. Instead, add all the pages, and
2013 subtract out the free list. */
2014
2015 allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2016 in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2017 overhead += G.small_page_overhead * zone->n_small_pages;
2018
2019 for (i = 0; i <= NUM_FREE_BINS; i++)
2020 {
2021 struct alloc_chunk *chunk = zone->free_chunks[i];
2022 while (chunk)
2023 {
2024 in_use -= ggc_get_size (chunk);
2025 chunk = chunk->next_free;
2026 }
2027 }
2028
2029 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2030 zone->name,
2031 SCALE (allocated), LABEL (allocated),
2032 SCALE (in_use), LABEL (in_use),
2033 SCALE (overhead), LABEL (overhead));
2034
2035 gcc_assert (in_use == zone->allocated);
2036
2037 total_overhead += overhead;
2038 total_allocated += zone->allocated;
2039 total_bytes_mapped += zone->bytes_mapped;
2040 }
2041
2042 /* Count the size of the page table as best we can. */
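  /* For illustration (the exact constants depend on the host): on a
     host with 32-bit pointers, each populated first-level slot adds
     PAGE_L2_SIZE * sizeof (struct page_entry *) bytes of overhead,
     e.g. 4 kB if PAGE_L2_SIZE is 1024 and pointers are 4 bytes.  */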
2043 #if HOST_BITS_PER_PTR <= 32
2044 pte_overhead = sizeof (G.lookup);
2045 for (i = 0; i < PAGE_L1_SIZE; i++)
2046 if (G.lookup[i])
2047 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2048 #else
2049 {
2050 struct page_table_chain *table = G.lookup;
2051 pte_overhead = 0;
2052 while (table)
2053 {
2054 pte_overhead += sizeof (*table);
2055 for (i = 0; i < PAGE_L1_SIZE; i++)
2056 if (table->table[i])
2057 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2058 table = table->next;
2059 }
2060 }
2061 #endif
2062 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2063 "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2064 total_overhead += pte_overhead;
2065
2066 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2067 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2068 SCALE (total_allocated), LABEL(total_allocated),
2069 SCALE (total_overhead), LABEL (total_overhead));
2070
2071 #ifdef GATHER_STATISTICS
2072 {
2073 unsigned long long all_overhead = 0, all_allocated = 0;
2074 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2075 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2076 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2077
2078 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2079
2080 for (zone = G.zones; zone; zone = zone->next_zone)
2081 {
2082 all_overhead += zone->stats.total_overhead;
2083 all_allocated += zone->stats.total_allocated;
2084
2085 all_allocated_under32 += zone->stats.total_allocated_under32;
2086 all_overhead_under32 += zone->stats.total_overhead_under32;
2087
2088 all_allocated_under64 += zone->stats.total_allocated_under64;
2089 all_overhead_under64 += zone->stats.total_overhead_under64;
2090
2091 all_allocated_under128 += zone->stats.total_allocated_under128;
2092 all_overhead_under128 += zone->stats.total_overhead_under128;
2093
2094 fprintf (stderr, "%20s: %10lld\n",
2095 zone->name, zone->stats.total_allocated);
2096 }
2097
2098 fprintf (stderr, "\n");
2099
2100 fprintf (stderr, "Total Overhead: %10lld\n",
2101 all_overhead);
2102 fprintf (stderr, "Total Allocated: %10lld\n",
2103 all_allocated);
2104
2105 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2106 all_overhead_under32);
2107 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2108 all_allocated_under32);
2109 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2110 all_overhead_under64);
2111 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2112 all_allocated_under64);
2113 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2114 all_overhead_under128);
2115 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2116 all_allocated_under128);
2117 }
2118 #endif
2119 }
2120
2121 /* Precompiled header support. */
2122
2123 /* For precompiled headers, we sort objects based on their type. We
2124 also sort various objects into their own buckets; currently this
2125 covers strings and IDENTIFIER_NODE trees. The choices of how
2126 to sort buckets have not yet been tuned. */
2127
2128 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2129
2130 #define OTHER_BUCKET (gt_types_enum_last + 0)
2131 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2132 #define STRING_BUCKET (gt_types_enum_last + 2)
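/* Buckets 0 .. gt_types_enum_last - 1 correspond directly to the GTY
   type enumeration; the three extra buckets collect identifiers,
   strings, and objects of unknown type (see pch_bucket below).  */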
2133
2134 struct ggc_pch_ondisk
2135 {
2136 size_t total;
2137 size_t type_totals[NUM_PCH_BUCKETS];
2138 };
2139
2140 struct ggc_pch_data
2141 {
2142 struct ggc_pch_ondisk d;
2143 size_t base;
2144 size_t orig_base;
2145 size_t alloc_size;
2146 alloc_type *alloc_bits;
2147 size_t type_bases[NUM_PCH_BUCKETS];
2148 size_t start_offset;
2149 };
2150
2151 /* Initialize the PCH data structure. */
2152
2153 struct ggc_pch_data *
2154 init_ggc_pch (void)
2155 {
2156 return xcalloc (sizeof (struct ggc_pch_data), 1);
2157 }
2158
2159 /* Return which of the page-aligned buckets the object at X, with type
2160 TYPE, should be sorted into in the PCH. Strings will have
2161 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2162 of unknown type will also have TYPE equal to gt_types_enum_last. */
2163
2164 static int
2165 pch_bucket (void *x, enum gt_types_enum type,
2166 bool is_string)
2167 {
2168 /* Sort identifiers into their own bucket, to improve locality
2169 when searching the identifier hash table. */
2170 if (type == gt_ggc_e_14lang_tree_node
2171 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2172 return IDENTIFIER_BUCKET;
2173 else if (type == gt_types_enum_last)
2174 {
2175 if (is_string)
2176 return STRING_BUCKET;
2177 return OTHER_BUCKET;
2178 }
2179 return type;
2180 }
2181
2182 /* Add the size of object X to the size of the PCH data. */
2183
2184 void
2185 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2186 size_t size, bool is_string, enum gt_types_enum type)
2187 {
2188 /* NOTE: Right now we don't need to align up the size of any objects.
2189 Strings can be unaligned, and everything else is allocated to a
2190 MAX_ALIGNMENT boundary already. */
2191
2192 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2193 }
2194
2195 /* Return the total size of the PCH data. */
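/* Each bucket is rounded up to a whole number of GGC pages and the
   allocation bitmap is appended after the object data.  As a sketch,
   assuming GGC_PAGE_SIZE is 4096 and BYTES_PER_ALLOC_BIT is 8: bucket
   totals of 5000 and 100 bytes round up to 8192 and 4096, giving
   d.total == 12288, and the bitmap then needs CEIL (12288, 64) == 192
   bytes (before MAX_ALIGNMENT rounding).  */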
2196
2197 size_t
2198 ggc_pch_total_size (struct ggc_pch_data *d)
2199 {
2200 enum gt_types_enum i;
2201 size_t alloc_size, total_size;
2202
2203 total_size = 0;
2204 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2205 {
2206 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2207 total_size += d->d.type_totals[i];
2208 }
2209 d->d.total = total_size;
2210
2211 /* Include the size of the allocation bitmap. */
2212 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2213 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2214 d->alloc_size = alloc_size;
2215
2216 return d->d.total + alloc_size;
2217 }
2218
2219 /* Set the base address for the objects in the PCH file. */
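/* For instance, if BASE_ were 0x1000 and the first two bucket totals
   were 8192 and 4096 bytes, the buckets would start at 0x1000, 0x3000
   and 0x4000 respectively.  */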
2220
2221 void
2222 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2223 {
2224 int i;
2225 size_t base = (size_t) base_;
2226
2227 d->base = d->orig_base = base;
2228 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2229 {
2230 d->type_bases[i] = base;
2231 base += d->d.type_totals[i];
2232 }
2233
2234 if (d->alloc_bits == NULL)
2235 d->alloc_bits = xcalloc (1, d->alloc_size);
2236 }
2237
2238 /* Allocate a place for object X of size SIZE in the PCH file. */
2239
2240 char *
2241 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2242 size_t size, bool is_string,
2243 enum gt_types_enum type)
2244 {
2245 size_t alloc_word, alloc_bit;
2246 char *result;
2247 int bucket = pch_bucket (x, type, is_string);
2248
2249 /* Record the start of the object in the allocation bitmap. We
2250 can't assert that the allocation bit is previously clear, because
2251 strings may violate the invariant that they are at least
2252 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
2253 should not be called for strings. */
2254 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2255 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2256 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2257 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2258 d->alloc_bits[alloc_word] |= 1L << alloc_bit;
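  /* Worked example (illustrative sizes): with BYTES_PER_ALLOC_BIT == 8
     and a 32-bit alloc_type, an object starting 800 bytes past
     ORIG_BASE corresponds to allocation bit 100, i.e. bit 100 % 32 == 4
     of word 100 / 32 == 3.  */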
2259
2260 /* Place the object at the current pointer for this bucket. */
2261 result = (char *) d->type_bases[bucket];
2262 d->type_bases[bucket] += size;
2263 return result;
2264 }
2265
2266 /* Prepare to write out the PCH data to file F. */
2267
2268 void
2269 ggc_pch_prepare_write (struct ggc_pch_data *d,
2270 FILE *f)
2271 {
2272 /* We seek around a lot while writing.  Record where the padding
2273    that precedes the PCH data ends, so that we can locate each
2274    object's offset relative to it.  */
2275 d->start_offset = ftell (f);
2276 }
2277
2278 /* Write out object X of SIZE to file F. */
2279
2280 void
2281 ggc_pch_write_object (struct ggc_pch_data *d,
2282 FILE *f, void *x, void *newx,
2283 size_t size, bool is_string ATTRIBUTE_UNUSED)
2284 {
2285 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2286 fatal_error ("can't seek PCH file: %m");
2287
2288 if (fwrite (x, size, 1, f) != 1)
2289 fatal_error ("can't write PCH file: %m");
2290 }
2291
2292 void
2293 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2294 {
2295 /* Write out the allocation bitmap. */
2296 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2297 fatal_error ("can't seek PCH file: %m");
2298
2299 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2300 fatal_error ("can't write PCH file: %m");
2301
2302 /* Done with the PCH, so write out our footer. */
2303 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2304 fatal_error ("can't write PCH file: %m");
2305
2306 free (d->alloc_bits);
2307 free (d);
2308 }
2309
2310 /* The PCH file from F has been mapped at ADDR. Read in any
2311 additional data from the file and set up the GC state. */
2312
2313 void
2314 ggc_pch_read (FILE *f, void *addr)
2315 {
2316 struct ggc_pch_ondisk d;
2317 size_t alloc_size;
2318 struct alloc_zone *zone;
2319 struct page_entry *pch_page;
2320 char *p;
2321
2322 if (fread (&d, sizeof (d), 1, f) != 1)
2323 fatal_error ("can't read PCH file: %m");
2324
2325 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2326 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2327
2328 pch_zone.bytes = d.total;
2329 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2330 pch_zone.page = (char *) addr;
2331 pch_zone.end = (char *) pch_zone.alloc_bits;
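  /* The mapped region at ADDR thus looks like:
       [ object data: d.total bytes ][ allocation bitmap ]
     with pch_zone.page and pch_zone.end delimiting the object data.  */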
2332
2333 /* We've just read in a PCH file. So, every object that used to be
2334 allocated is now free. */
2335 for (zone = G.zones; zone; zone = zone->next_zone)
2336 {
2337 struct small_page_entry *page, *next_page;
2338 struct large_page_entry *large_page, *next_large_page;
2339
2340 zone->allocated = 0;
2341
2342 /* Clear the zone's free chunk list. */
2343 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2344 zone->high_free_bin = 0;
2345 zone->cached_free = NULL;
2346 zone->cached_free_size = 0;
2347
2348 /* Move all the small pages onto the free list. */
2349 for (page = zone->pages; page != NULL; page = next_page)
2350 {
2351 next_page = page->next;
2352 memset (page->alloc_bits, 0,
2353 G.small_page_overhead - PAGE_OVERHEAD);
2354 free_small_page (page);
2355 }
2356
2357 /* Discard all the large pages. */
2358 for (large_page = zone->large_pages; large_page != NULL;
2359 large_page = next_large_page)
2360 {
2361 next_large_page = large_page->next;
2362 free_large_page (large_page);
2363 }
2364
2365 zone->pages = NULL;
2366 zone->large_pages = NULL;
2367 }
2368
2369 /* Allocate the dummy page entry for the PCH, and set all pages
2370 mapped into the PCH to reference it. */
2371 pch_page = xcalloc (1, sizeof (struct page_entry));
2372 pch_page->page = pch_zone.page;
2373 pch_page->pch_p = true;
2374
2375 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2376 set_page_table_entry (p, pch_page);
2377 }