1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
4
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "toplev.h"
31 #include "varray.h"
32 #include "flags.h"
33 #include "ggc.h"
34 #include "timevar.h"
35 #include "params.h"
36 #include "bitmap.h"
37
38 #ifdef ENABLE_VALGRIND_CHECKING
39 # ifdef HAVE_VALGRIND_MEMCHECK_H
40 # include <valgrind/memcheck.h>
41 # elif defined HAVE_MEMCHECK_H
42 # include <memcheck.h>
43 # else
44 # include <valgrind.h>
45 # endif
46 #else
47 /* Avoid #ifdefs when we can help it.  */
48 #define VALGRIND_DISCARD(x)
49 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
50 #define VALGRIND_FREELIKE_BLOCK(x,y)
51 #endif
52 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
53 file open. Prefer either to valloc. */
54 #ifdef HAVE_MMAP_ANON
55 # undef HAVE_MMAP_DEV_ZERO
56
57 # include <sys/mman.h>
58 # ifndef MAP_FAILED
59 # define MAP_FAILED -1
60 # endif
61 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
62 # define MAP_ANONYMOUS MAP_ANON
63 # endif
64 # define USING_MMAP
65
66 #endif
67
68 #ifdef HAVE_MMAP_DEV_ZERO
69
70 # include <sys/mman.h>
71 # ifndef MAP_FAILED
72 # define MAP_FAILED -1
73 # endif
74 # define USING_MMAP
75
76 #endif
77
78 #ifndef USING_MMAP
79 #define USING_MALLOC_PAGE_GROUPS
80 #endif
81
82 #if (GCC_VERSION < 3001)
83 #define prefetch(X) ((void) X)
84 #else
85 #define prefetch(X) __builtin_prefetch (X)
86 #endif
87
88 /* NOTES:
89 If we track inter-zone pointers, we can mark single zones at a
90 time.
91 If we have a zone where we guarantee no inter-zone pointers, we
92 could mark that zone separately.
93 The garbage zone should not be marked, and we should return 1 in
94 ggc_set_mark for any object in the garbage zone, which cuts off
95 marking quickly. */
96 /* Strategy:
97
98 This garbage-collecting allocator segregates objects into zones.
99 It also segregates objects into "large" and "small" bins. Large
100 objects are greater than or equal to the page size.
101
102 Pages for small objects are broken up into chunks, each of which
103 are described by a struct alloc_chunk. One can walk over all
104 chunks on the page by adding the chunk size to the chunk's data
105 address. The free space for a page exists in the free chunk bins.
106
107 Each page-entry also has a context depth, which is used to track
108 pushing and popping of allocation contexts. Only objects allocated
109 in the current (highest-numbered) context may be collected.
110
111 Empty pages (of all sizes) are kept on a single page cache list,
112 and are considered first when new pages are required; they are
113 deallocated at the start of the next collection if they haven't
114 been recycled by then. */
115
116 /* Define GGC_DEBUG_LEVEL to print debugging information.
117 0: No debugging output.
118 1: GC statistics only.
119 2: Page-entry allocations/deallocations as well.
120 3: Object allocations as well.
121 4: Object marks as well. */
122 #define GGC_DEBUG_LEVEL (0)
123
124 #ifndef HOST_BITS_PER_PTR
125 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
126 #endif
127 #ifdef COOKIE_CHECKING
128 #define CHUNK_MAGIC 0x95321123
129 #define DEADCHUNK_MAGIC 0x12817317
130 #endif
131
132 /* This structure manages small chunks. When the chunk is free, it's
133 linked with other chunks via free_next. When the chunk is allocated,
134 the data starts at u. Large chunks are allocated one at a time to
135 their own page, and so don't come in here.
136
137 The "type" field is a placeholder for a future change to do
138 generational collection. At present it is 0 when free and
139 1 when allocated. */
140
141 struct alloc_chunk {
142 #ifdef COOKIE_CHECKING
143 unsigned int magic;
144 #endif
145 unsigned int type:1;
146 unsigned int typecode:15;
147 unsigned int size:15;
148 unsigned int mark:1;
149 union {
150 struct alloc_chunk *next_free;
151 char data[1];
152
153 /* Make sure the data is sufficiently aligned. */
154 HOST_WIDEST_INT align_i;
155 #ifdef HAVE_LONG_DOUBLE
156 long double align_d;
157 #else
158 double align_d;
159 #endif
160 } u;
161 } __attribute__ ((packed));
162
163 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
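/* An allocated chunk therefore looks like this in memory (a sketch,
   assuming COOKIE_CHECKING is not defined):

       |<-- CHUNK_OVERHEAD -->|<-------- size bytes -------->|
       | type/typecode/size/  |  object data (u.data)        |
       | mark bitfields       |                              |

   and all chunks on a small-object page can be walked with the same
   idiom used by sweep_pages and check_cookies below:

       struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
       struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
       do
         chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
       while (chunk < end);  */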
164
165 /* We maintain several bins of free lists for chunks for very small
166 objects. We never exhaustively search other bins -- if we don't
167 find one of the proper size, we allocate from the "larger" bin. */
168
169 /* Decreasing the number of free bins increases the time it takes to allocate.
170 The same is true of increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS.
171
172 After much histogramming of allocation sizes and time spent on GC,
173 on a PowerPC G4 7450 (667 MHz) and a Pentium 4 (2.8 GHz),
174 these were determined to be the optimal values. */
175 #define NUM_FREE_BINS 64
176 #define MAX_FREE_BIN_SIZE 256
177 #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
178 #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
179 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
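/* Worked example (illustrative): with NUM_FREE_BINS == 64 and
   MAX_FREE_BIN_SIZE == 256, FREE_BIN_DELTA is 4, so SIZE_BIN_UP (9) == 3
   and SIZE_BIN_DOWN (9) == 2.  Allocation rounds up, so the chunk taken
   from a bin is always big enough, while freeing rounds down, so a chunk
   is never placed in a bin that promises more bytes than it has.  Note
   that ggc_alloc_zone_1 below aligns the requested size to MAX_ALIGNMENT
   before the bin is computed.  */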
180
181 /* Marker used as chunk->size for a large object. Should correspond
182 to the size of the bitfield above. */
183 #define LARGE_OBJECT_SIZE 0x7fff
184
185 /* We use this structure to determine the alignment required for
186 allocations. For power-of-two sized allocations, that's not a
187 problem, but it does matter for odd-sized allocations. */
188
189 struct max_alignment {
190 char c;
191 union {
192 HOST_WIDEST_INT i;
193 #ifdef HAVE_LONG_DOUBLE
194 long double d;
195 #else
196 double d;
197 #endif
198 } u;
199 };
200
201 /* The biggest alignment required. */
202
203 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
204
205 /* Compute the smallest nonnegative number which when added to X gives
206 a multiple of F. */
207
208 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
209
210 /* Compute the smallest multiple of F that is >= X. */
211
212 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
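/* For example, ROUND_UP_VALUE (10, 8) is 6 and ROUND_UP (10, 8) is 16.
   Both macros evaluate F more than once, so the arguments should be
   free of side effects.  */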
213
214 /* A two-level tree is used to look up the page-entry for a given
215 pointer. Two chunks of the pointer's bits are extracted to index
216 the first and second levels of the tree, as follows:
217
218 HOST_PAGE_SIZE_BITS
219 32 | |
220 msb +----------------+----+------+------+ lsb
221 | | |
222 PAGE_L1_BITS |
223 | |
224 PAGE_L2_BITS
225
226 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
227 pages are aligned on system page boundaries. The next most
228 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
229 index values in the lookup table, respectively.
230
231 For 32-bit architectures and the settings below, there are no
232 leftover bits. For architectures with wider pointers, the lookup
233 tree points to a list of pages, which must be scanned to find the
234 correct one. */
235
236 #define PAGE_L1_BITS (8)
237 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
238 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
239 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
240
241 #define LOOKUP_L1(p) \
242 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
243
244 #define LOOKUP_L2(p) \
245 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
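/* Worked example (illustrative, assuming 4096-byte pages so that
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12): for p == 0xdeadb000,
   LOOKUP_L1 (p) == 0xde (bits 31-24) and LOOKUP_L2 (p) == 0xadb
   (bits 23-12); the low 12 bits select the byte within the page and
   are not needed to find its page_entry.  */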
246
247 struct alloc_zone;
248 /* A page_entry records the status of an allocation page. */
249 typedef struct page_entry
250 {
251 /* The next page-entry with objects of the same size, or NULL if
252 this is the last page-entry. */
253 struct page_entry *next;
254
255 /* The number of bytes allocated. (This will always be a multiple
256 of the host system page size.) */
257 size_t bytes;
258
259 /* How many collections we've survived. */
260 size_t survived;
261
262 /* The address at which the memory is allocated. */
263 char *page;
264
265 #ifdef USING_MALLOC_PAGE_GROUPS
266 /* Back pointer to the page group this page came from. */
267 struct page_group *group;
268 #endif
269
270 /* Number of bytes on the page unallocated. Only used during
271 collection, and even then large pages merely set this non-zero. */
272 size_t bytes_free;
273
274 /* Context depth of this page. */
275 unsigned short context_depth;
276
277 /* Does this page contain small objects, or one large object? */
278 bool large_p;
279
280 /* The zone that this page entry belongs to. */
281 struct alloc_zone *zone;
282 } page_entry;
283
284 #ifdef USING_MALLOC_PAGE_GROUPS
285 /* A page_group describes a large allocation from malloc, from which
286 we parcel out aligned pages. */
287 typedef struct page_group
288 {
289 /* A linked list of all extant page groups. */
290 struct page_group *next;
291
292 /* The address we received from malloc. */
293 char *allocation;
294
295 /* The size of the block. */
296 size_t alloc_size;
297
298 /* A bitmask of pages in use. */
299 unsigned int in_use;
300 } page_group;
301 #endif
302
303 #if HOST_BITS_PER_PTR <= 32
304
305 /* On 32-bit hosts, we use a two level page table, as pictured above. */
306 typedef page_entry **page_table[PAGE_L1_SIZE];
307
308 #else
309
310 /* On 64-bit hosts, we use the same two level page tables plus a linked
311 list that disambiguates the top 32-bits. There will almost always be
312 exactly one entry in the list. */
313 typedef struct page_table_chain
314 {
315 struct page_table_chain *next;
316 size_t high_bits;
317 page_entry **table[PAGE_L1_SIZE];
318 } *page_table;
319
320 #endif
321
322 /* The global variables. */
323 static struct globals
324 {
325 /* The page lookup table. A single page can only belong to one
326 zone. This means free pages are zone-specific ATM. */
327 page_table lookup;
328 /* The linked list of zones. */
329 struct alloc_zone *zones;
330
331 /* The system's page size. */
332 size_t pagesize;
333 size_t lg_pagesize;
334
335 /* A file descriptor open to /dev/zero for reading. */
336 #if defined (HAVE_MMAP_DEV_ZERO)
337 int dev_zero_fd;
338 #endif
339
340 /* The file descriptor for debugging output. */
341 FILE *debug_file;
342 } G;
343
344 /* The zone allocation structure. */
345 struct alloc_zone
346 {
347 /* Name of the zone. */
348 const char *name;
349
350 /* Linked list of pages in a zone. */
351 page_entry *pages;
352
353 /* Linked lists of free storage.  Slot N (1 <= N <= NUM_FREE_BINS) holds chunks of
354 size at least N * FREE_BIN_DELTA but less than the next multiple; all others are in slot 0. */
355 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
356
357 /* Bytes currently allocated. */
358 size_t allocated;
359
360 /* Bytes currently allocated at the end of the last collection. */
361 size_t allocated_last_gc;
362
363 /* Total amount of memory mapped. */
364 size_t bytes_mapped;
365
366 /* Bit N set if any allocations have been done at context depth N. */
367 unsigned long context_depth_allocations;
368
369 /* Bit N set if any collections have been done at context depth N. */
370 unsigned long context_depth_collections;
371
372 /* The current depth in the context stack. */
373 unsigned short context_depth;
374
375 /* A cache of free system pages. */
376 page_entry *free_pages;
377
378 #ifdef USING_MALLOC_PAGE_GROUPS
379 page_group *page_groups;
380 #endif
381
382 /* Next zone in the linked list of zones. */
383 struct alloc_zone *next_zone;
384
385 /* Return true if this zone was collected during this collection. */
386 bool was_collected;
387 } main_zone;
388
389 struct alloc_zone *rtl_zone;
390 struct alloc_zone *garbage_zone;
391 struct alloc_zone *tree_zone;
392
393 /* Allocate pages in chunks of this size, to throttle calls to memory
394 allocation routines. The first page is used, the rest go onto the
395 free list. This cannot be larger than HOST_BITS_PER_INT for the
396 in_use bitmask for page_group. */
397 #define GGC_QUIRE_SIZE 16
398
399 static int ggc_allocated_p (const void *);
400 static page_entry *lookup_page_table_entry (const void *);
401 static void set_page_table_entry (void *, page_entry *);
402 #ifdef USING_MMAP
403 static char *alloc_anon (char *, size_t, struct alloc_zone *);
404 #endif
405 #ifdef USING_MALLOC_PAGE_GROUPS
406 static size_t page_group_index (char *, char *);
407 static void set_page_group_in_use (page_group *, char *);
408 static void clear_page_group_in_use (page_group *, char *);
409 #endif
410 static struct page_entry * alloc_small_page (struct alloc_zone *);
411 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
412 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
413 static void free_page (struct page_entry *);
414 static void release_pages (struct alloc_zone *);
415 static void sweep_pages (struct alloc_zone *);
416 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
417 static bool ggc_collect_1 (struct alloc_zone *, bool);
418 static void check_cookies (void);
419
420
421 /* Returns nonzero if P was allocated in GC'able memory. */
422
423 static inline int
424 ggc_allocated_p (const void *p)
425 {
426 page_entry ***base;
427 size_t L1, L2;
428
429 #if HOST_BITS_PER_PTR <= 32
430 base = &G.lookup[0];
431 #else
432 page_table table = G.lookup;
433 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
434 while (1)
435 {
436 if (table == NULL)
437 return 0;
438 if (table->high_bits == high_bits)
439 break;
440 table = table->next;
441 }
442 base = &table->table[0];
443 #endif
444
445 /* Extract the level 1 and 2 indices. */
446 L1 = LOOKUP_L1 (p);
447 L2 = LOOKUP_L2 (p);
448
449 return base[L1] && base[L1][L2];
450 }
451
452 /* Traverse the page table and find the entry for a page.
453 Die (probably) if the object wasn't allocated via GC. */
454
455 static inline page_entry *
456 lookup_page_table_entry (const void *p)
457 {
458 page_entry ***base;
459 size_t L1, L2;
460
461 #if HOST_BITS_PER_PTR <= 32
462 base = &G.lookup[0];
463 #else
464 page_table table = G.lookup;
465 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
466 while (table->high_bits != high_bits)
467 table = table->next;
468 base = &table->table[0];
469 #endif
470
471 /* Extract the level 1 and 2 indices. */
472 L1 = LOOKUP_L1 (p);
473 L2 = LOOKUP_L2 (p);
474
475 return base[L1][L2];
476
477 }
478
479 /* Set the page table entry for a page. */
480
481 static void
482 set_page_table_entry (void *p, page_entry *entry)
483 {
484 page_entry ***base;
485 size_t L1, L2;
486
487 #if HOST_BITS_PER_PTR <= 32
488 base = &G.lookup[0];
489 #else
490 page_table table;
491 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
492 for (table = G.lookup; table; table = table->next)
493 if (table->high_bits == high_bits)
494 goto found;
495
496 /* Not found -- allocate a new table. */
497 table = (page_table) xcalloc (1, sizeof(*table));
498 table->next = G.lookup;
499 table->high_bits = high_bits;
500 G.lookup = table;
501 found:
502 base = &table->table[0];
503 #endif
504
505 /* Extract the level 1 and 2 indices. */
506 L1 = LOOKUP_L1 (p);
507 L2 = LOOKUP_L2 (p);
508
509 if (base[L1] == NULL)
510 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
511
512 base[L1][L2] = entry;
513 }
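/* Taken together, set_page_table_entry and lookup_page_table_entry act as
   a sparse map from page addresses to their page_entry descriptors; the
   allocation routines below register every page they create and free_page
   removes it again.  A minimal round-trip sketch, never compiled:  */
#if 0
  {
    struct page_entry *e = alloc_small_page (&main_zone);

    if (lookup_page_table_entry (e->page) != e)
      abort ();
    free_page (e);              /* Clears the lookup table entry again.  */
  }
#endif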
514
515 #ifdef USING_MMAP
516 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
517 (if non-null). The ifdef structure here is intended to cause a
518 compile error unless exactly one of the HAVE_* is defined. */
519
520 static inline char *
521 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
522 {
523 #ifdef HAVE_MMAP_ANON
524 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
525 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
526 #endif
527 #ifdef HAVE_MMAP_DEV_ZERO
528 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
529 MAP_PRIVATE, G.dev_zero_fd, 0);
530 #endif
531 VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
532
533 if (page == (char *) MAP_FAILED)
534 {
535 perror ("virtual memory exhausted");
536 exit (FATAL_EXIT_CODE);
537 }
538
539 /* Remember that we allocated this memory. */
540 zone->bytes_mapped += size;
541 /* Pretend we don't have access to the allocated pages. We'll enable
542 access to smaller pieces of the area in ggc_alloc. Discard the
543 handle to avoid handle leak. */
544 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
545 return page;
546 }
547 #endif
548 #ifdef USING_MALLOC_PAGE_GROUPS
549 /* Compute the index for this page into the page group. */
550
551 static inline size_t
552 page_group_index (char *allocation, char *page)
553 {
554 return (size_t) (page - allocation) >> G.lg_pagesize;
555 }
556
557 /* Set and clear the in_use bit for this page in the page group. */
558
559 static inline void
560 set_page_group_in_use (page_group *group, char *page)
561 {
562 group->in_use |= 1 << page_group_index (group->allocation, page);
563 }
564
565 static inline void
566 clear_page_group_in_use (page_group *group, char *page)
567 {
568 group->in_use &= ~(1 << page_group_index (group->allocation, page));
569 }
570 #endif
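/* Worked example for the malloc fallback (illustrative, assuming
   4096-byte pages): the page starting at allocation + 3 * 4096 has
   page_group_index == 3, so set_page_group_in_use sets bit 1 << 3 of
   group->in_use and clear_page_group_in_use clears it; a group whose
   in_use mask drops back to zero can be returned to malloc by
   release_pages.  */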
571
572 /* Allocate a new page for small objects in ZONE, and return an entry
573 for it.  The page is registered in the page lookup table, but it is
574 not added to ZONE's page list; the caller does that. */
575
576 static inline struct page_entry *
577 alloc_small_page (struct alloc_zone *zone)
578 {
579 struct page_entry *entry;
580 char *page;
581 #ifdef USING_MALLOC_PAGE_GROUPS
582 page_group *group;
583 #endif
584
585 page = NULL;
586
587 /* Check the list of free pages for one we can use. */
588 entry = zone->free_pages;
589 if (entry != NULL)
590 {
591 /* Recycle the allocated memory from this page ... */
592 zone->free_pages = entry->next;
593 page = entry->page;
594
595 #ifdef USING_MALLOC_PAGE_GROUPS
596 group = entry->group;
597 #endif
598 }
599 #ifdef USING_MMAP
600 else
601 {
602 /* We want just one page. Allocate a bunch of them and put the
603 extras on the freelist. (Can only do this optimization with
604 mmap for backing store.) */
605 struct page_entry *e, *f = zone->free_pages;
606 int i;
607
608 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
609
610 /* This loop counts down so that the chain will be in ascending
611 memory order. */
612 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
613 {
614 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
615 e->bytes = G.pagesize;
616 e->page = page + (i << G.lg_pagesize);
617 e->next = f;
618 f = e;
619 }
620
621 zone->free_pages = f;
622 }
623 #endif
624 #ifdef USING_MALLOC_PAGE_GROUPS
625 else
626 {
627 /* Allocate a large block of memory and serve out the aligned
628 pages therein. This results in much less memory wastage
629 than the traditional implementation of valloc. */
630
631 char *allocation, *a, *enda;
632 size_t alloc_size, head_slop, tail_slop;
633 int multiple_pages = 1;  /* Small pages here are always exactly G.pagesize.  */
634
635 if (multiple_pages)
636 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
637 else
638 alloc_size = G.pagesize + G.pagesize - 1;
639 allocation = xmalloc (alloc_size);
640 VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0);
641
642 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
643 head_slop = page - allocation;
644 if (multiple_pages)
645 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
646 else
647 tail_slop = alloc_size - G.pagesize - head_slop;
648 enda = allocation + alloc_size - tail_slop;
649
650 /* We allocated N pages, which are likely not aligned, leaving
651 us with N-1 usable pages. We plan to place the page_group
652 structure somewhere in the slop. */
653 if (head_slop >= sizeof (page_group))
654 group = (page_group *)page - 1;
655 else
656 {
657 /* We magically got an aligned allocation. Too bad, we have
658 to waste a page anyway. */
659 if (tail_slop == 0)
660 {
661 enda -= G.pagesize;
662 tail_slop += G.pagesize;
663 }
664 if (tail_slop < sizeof (page_group))
665 abort ();
666 group = (page_group *)enda;
667 tail_slop -= sizeof (page_group);
668 }
669
670 /* Remember that we allocated this memory. */
671 group->next = zone->page_groups;
672 group->allocation = allocation;
673 group->alloc_size = alloc_size;
674 group->in_use = 0;
675 zone->page_groups = group;
676 zone->bytes_mapped += alloc_size;
677
678 /* If we allocated multiple pages, put the rest on the free list. */
679 if (multiple_pages)
680 {
681 struct page_entry *e, *f = zone->free_pages;
682 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
683 {
684 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
685 e->bytes = G.pagesize;
686 e->page = a;
687 e->group = group;
688 e->next = f;
689 f = e;
690 }
691 zone->free_pages = f;
692 }
693 }
694 #endif
695
696 if (entry == NULL)
697 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
698
699 entry->next = 0;
700 entry->bytes = G.pagesize;
701 entry->bytes_free = G.pagesize;
702 entry->page = page;
703 entry->context_depth = zone->context_depth;
704 entry->large_p = false;
705 entry->zone = zone;
706 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
707
708 #ifdef USING_MALLOC_PAGE_GROUPS
709 entry->group = group;
710 set_page_group_in_use (group, page);
711 #endif
712
713 set_page_table_entry (page, entry);
714
715 if (GGC_DEBUG_LEVEL >= 2)
716 fprintf (G.debug_file,
717 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
718 (PTR) entry, page, page + G.pagesize - 1);
719
720 return entry;
721 }
722
723 /* Allocate a large page of size SIZE in ZONE. */
724
725 static inline struct page_entry *
726 alloc_large_page (size_t size, struct alloc_zone *zone)
727 {
728 struct page_entry *entry;
729 char *page;
730
731 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
732 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
733
734 entry->next = 0;
735 entry->bytes = size;
736 entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
737 entry->page = page;
738 entry->context_depth = zone->context_depth;
739 entry->large_p = true;
740 entry->zone = zone;
741 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
742
743 #ifdef USING_MALLOC_PAGE_GROUPS
744 entry->group = NULL;
745 #endif
746 set_page_table_entry (page, entry);
747
748 if (GGC_DEBUG_LEVEL >= 2)
749 fprintf (G.debug_file,
750 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
751 (PTR) entry, page, page + size - 1);
752
753 return entry;
754 }
755
756
757 /* For a page that is no longer needed, put it on the free page list. */
758
759 static inline void
760 free_page (page_entry *entry)
761 {
762 if (GGC_DEBUG_LEVEL >= 2)
763 fprintf (G.debug_file,
764 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
765 entry->page, entry->page + entry->bytes - 1);
766
767 set_page_table_entry (entry->page, NULL);
768
769 if (entry->large_p)
770 {
771 free (entry->page);
772 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
773 }
774 else
775 {
776 /* Mark the page as inaccessible. Discard the handle to
777 avoid handle leak. */
778 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
779
780 #ifdef USING_MALLOC_PAGE_GROUPS
781 clear_page_group_in_use (entry->group, entry->page);
782 #endif
783
784 entry->next = entry->zone->free_pages;
785 entry->zone->free_pages = entry;
786 }
787 }
788
789 /* Release the free page cache to the system. */
790
791 static void
792 release_pages (struct alloc_zone *zone)
793 {
794 #ifdef USING_MMAP
795 page_entry *p, *next;
796 char *start;
797 size_t len;
798
799 /* Gather up adjacent pages so they are unmapped together. */
800 p = zone->free_pages;
801
802 while (p)
803 {
804 start = p->page;
805 next = p->next;
806 len = p->bytes;
807 free (p);
808 p = next;
809
810 while (p && p->page == start + len)
811 {
812 next = p->next;
813 len += p->bytes;
814 free (p);
815 p = next;
816 }
817
818 munmap (start, len);
819 zone->bytes_mapped -= len;
820 }
821
822 zone->free_pages = NULL;
823 #endif
824 #ifdef USING_MALLOC_PAGE_GROUPS
825 page_entry **pp, *p;
826 page_group **gp, *g;
827
828 /* Remove all pages from free page groups from the list. */
829 pp = &(zone->free_pages);
830 while ((p = *pp) != NULL)
831 if (p->group->in_use == 0)
832 {
833 *pp = p->next;
834 free (p);
835 }
836 else
837 pp = &p->next;
838
839 /* Remove all free page groups, and release the storage. */
840 gp = &(zone->page_groups);
841 while ((g = *gp) != NULL)
842 if (g->in_use == 0)
843 {
844 *gp = g->next;
845 zone->bytes_mapped -= g->alloc_size;
846 free (g->allocation);
847 VALGRIND_FREELIKE_BLOCK(g->allocation, 0);
848 }
849 else
850 gp = &g->next;
851 #endif
852 }
853
854 /* Place CHUNK of size SIZE on the free list for ZONE. */
855
856 static inline void
857 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
858 {
859 size_t bin = 0;
860
861 bin = SIZE_BIN_DOWN (size);
862 if (bin == 0)
863 abort ();
864 if (bin > NUM_FREE_BINS)
865 bin = 0;
866 #ifdef COOKIE_CHECKING
867 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
868 abort ();
869 chunk->magic = DEADCHUNK_MAGIC;
870 #endif
871 chunk->u.next_free = zone->free_chunks[bin];
872 zone->free_chunks[bin] = chunk;
873 if (GGC_DEBUG_LEVEL >= 3)
874 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
875 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
876 }
877
878 /* Allocate a chunk of memory of SIZE bytes. */
879
880 static void *
881 ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
882 {
883 size_t bin = 0;
884 size_t lsize = 0;
885 struct page_entry *entry;
886 struct alloc_chunk *chunk, *lchunk, **pp;
887 void *result;
888
889 /* Align size, so that we're assured of aligned allocations. */
890 if (size < FREE_BIN_DELTA)
891 size = FREE_BIN_DELTA;
892 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
893
894 /* Large objects are handled specially. */
895 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
896 {
897 entry = alloc_large_page (size, zone);
898 entry->survived = 0;
899 entry->next = entry->zone->pages;
900 entry->zone->pages = entry;
901
902
903 chunk = (struct alloc_chunk *) entry->page;
904 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
905 chunk->size = LARGE_OBJECT_SIZE;
906
907 goto found;
908 }
909
910 /* First look for a tiny object already segregated into its own
911 size bucket. */
912 bin = SIZE_BIN_UP (size);
913 if (bin <= NUM_FREE_BINS)
914 {
915 chunk = zone->free_chunks[bin];
916 if (chunk)
917 {
918 zone->free_chunks[bin] = chunk->u.next_free;
919 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
920 goto found;
921 }
922 }
923
924 /* Failing that, look through the "other" bucket for a chunk
925 that is large enough. */
926 pp = &(zone->free_chunks[0]);
927 chunk = *pp;
928 while (chunk && chunk->size < size)
929 {
930 pp = &chunk->u.next_free;
931 chunk = *pp;
932 }
933
934 /* Failing that, allocate new storage. */
935 if (!chunk)
936 {
937 entry = alloc_small_page (zone);
938 entry->next = entry->zone->pages;
939 entry->zone->pages = entry;
940
941 chunk = (struct alloc_chunk *) entry->page;
942 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
943 chunk->size = G.pagesize - CHUNK_OVERHEAD;
944 }
945 else
946 {
947 *pp = chunk->u.next_free;
948 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
949 }
950 /* Release extra memory from a chunk that's too big. */
951 lsize = chunk->size - size;
952 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
953 {
954 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
955 chunk->size = size;
956
957 lsize -= CHUNK_OVERHEAD;
958 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
959 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
960 #ifdef COOKIE_CHECKING
961 lchunk->magic = CHUNK_MAGIC;
962 #endif
963 lchunk->type = 0;
964 lchunk->mark = 0;
965 lchunk->size = lsize;
966 free_chunk (lchunk, lsize, zone);
967 }
968 /* Calculate the object's address. */
969 found:
970 #ifdef COOKIE_CHECKING
971 chunk->magic = CHUNK_MAGIC;
972 #endif
973 chunk->type = 1;
974 chunk->mark = 0;
975 chunk->typecode = type;
976 result = chunk->u.data;
977
978 #ifdef ENABLE_GC_CHECKING
979 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
980 exact same semantics in presence of memory bugs, regardless of
981 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
982 handle to avoid handle leak. */
983 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
984
985 /* `Poison' the entire allocated object. */
986 memset (result, 0xaf, size);
987 #endif
988
989 /* Tell Valgrind that the memory is there, but its content isn't
990 defined. The bytes at the end of the object are still marked
991 inaccessible. */
992 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
993
994 /* Keep track of how many bytes are being allocated. This
995 information is used in deciding when to collect. */
996 zone->allocated += size + CHUNK_OVERHEAD;
997
998 if (GGC_DEBUG_LEVEL >= 3)
999 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
1000 (void *)chunk, (unsigned long) size, result);
1001
1002 return result;
1003 }
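/* Worked example for ggc_alloc_zone_1 (illustrative, assuming
   MAX_ALIGNMENT is 8 and 4096-byte pages): a request for 9 bytes is
   first rounded up to 16, which maps to bin SIZE_BIN_UP (16) == 4.  If
   that bin is empty, bin 0 is searched for any chunk of at least 16
   bytes, and only then is a fresh small page carved up.  A chunk that
   is larger than needed by at least CHUNK_OVERHEAD + FREE_BIN_DELTA
   bytes has its tail split off and re-freed.  Requests of
   G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA bytes or more bypass
   the bins entirely and go through alloc_large_page.  */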
1004
1005 /* Allocate SIZE bytes of memory of type GTE, in the zone appropriate
1006 for that type. */
1007
1008 void *
1009 ggc_alloc_typed (enum gt_types_enum gte, size_t size)
1010 {
1011 switch (gte)
1012 {
1013 case gt_ggc_e_14lang_tree_node:
1014 return ggc_alloc_zone_1 (size, tree_zone, gte);
1015
1016 case gt_ggc_e_7rtx_def:
1017 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1018
1019 case gt_ggc_e_9rtvec_def:
1020 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1021
1022 default:
1023 return ggc_alloc_zone_1 (size, &main_zone, gte);
1024 }
1025 }
1026
1027 /* Normal ggc_alloc simply allocates into the main zone. */
1028
1029 void *
1030 ggc_alloc (size_t size)
1031 {
1032 return ggc_alloc_zone_1 (size, &main_zone, -1);
1033 }
1034
1035 /* Zone allocation allocates into the specified zone. */
1036
1037 void *
1038 ggc_alloc_zone (size_t size, struct alloc_zone *zone)
1039 {
1040 return ggc_alloc_zone_1 (size, zone, -1);
1041 }
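/* The three entry points above differ only in how the target zone is
   chosen.  A minimal usage sketch, never compiled:  */
#if 0
  {
    void *obj1, *obj2, *obj3;

    obj1 = ggc_alloc (64);                           /* Goes to main_zone.  */
    obj2 = ggc_alloc_zone (64, tree_zone);           /* Explicit zone.  */
    obj3 = ggc_alloc_typed (gt_ggc_e_7rtx_def, 64);  /* Routed to rtl_zone.  */
  }
#endif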
1042
1043 /* If P is not marked, mark it and return false. Otherwise return true.
1044 P must have been allocated by the GC allocator; it mustn't point to
1045 static objects, stack variables, or memory allocated with malloc. */
1046
1047 int
1048 ggc_set_mark (const void *p)
1049 {
1050 page_entry *entry;
1051 struct alloc_chunk *chunk;
1052
1053 #ifdef ENABLE_CHECKING
1054 /* Look up the page on which the object is alloced. If the object
1055 wasn't allocated by the collector, we'll probably die. */
1056 entry = lookup_page_table_entry (p);
1057 if (entry == NULL)
1058 abort ();
1059 #endif
1060 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1061 #ifdef COOKIE_CHECKING
1062 if (chunk->magic != CHUNK_MAGIC)
1063 abort ();
1064 #endif
1065 if (chunk->mark)
1066 return 1;
1067 chunk->mark = 1;
1068
1069 #ifndef ENABLE_CHECKING
1070 entry = lookup_page_table_entry (p);
1071 #endif
1072
1073 /* Large pages are either completely full or completely empty. So if
1074 they are marked, they are completely full. */
1075 if (entry->large_p)
1076 entry->bytes_free = 0;
1077 else
1078 entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;
1079
1080 if (GGC_DEBUG_LEVEL >= 4)
1081 fprintf (G.debug_file, "Marking %p\n", p);
1082
1083 return 0;
1084 }
1085
1086 /* Return 1 if P has been marked, zero otherwise.
1087 P must have been allocated by the GC allocator; it mustn't point to
1088 static objects, stack variables, or memory allocated with malloc. */
1089
1090 int
1091 ggc_marked_p (const void *p)
1092 {
1093 struct alloc_chunk *chunk;
1094
1095 #ifdef ENABLE_CHECKING
1096 {
1097 page_entry *entry = lookup_page_table_entry (p);
1098 if (entry == NULL)
1099 abort ();
1100 }
1101 #endif
1102
1103 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1104 #ifdef COOKIE_CHECKING
1105 if (chunk->magic != CHUNK_MAGIC)
1106 abort ();
1107 #endif
1108 return chunk->mark;
1109 }
1110
1111 /* Return the size of the gc-able object P. */
1112
1113 size_t
1114 ggc_get_size (const void *p)
1115 {
1116 struct alloc_chunk *chunk;
1117 struct page_entry *entry;
1118
1119 #ifdef ENABLE_CHECKING
1120 entry = lookup_page_table_entry (p);
1121 if (entry == NULL)
1122 abort ();
1123 #endif
1124
1125 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1126 #ifdef COOKIE_CHECKING
1127 if (chunk->magic != CHUNK_MAGIC)
1128 abort ();
1129 #endif
1130 if (chunk->size == LARGE_OBJECT_SIZE)
1131 {
1132 #ifndef ENABLE_CHECKING
1133 entry = lookup_page_table_entry (p);
1134 #endif
1135 return entry->bytes;
1136 }
1137
1138 return chunk->size;
1139 }
1140
1141 /* Initialize the ggc-zone allocator. */
1142 void
1143 init_ggc (void)
1144 {
1145 /* Set up the main zone by hand. */
1146 main_zone.name = "Main zone";
1147 G.zones = &main_zone;
1148
1149 /* Allocate the default zones. */
1150 rtl_zone = new_ggc_zone ("RTL zone");
1151 tree_zone = new_ggc_zone ("Tree zone");
1152 garbage_zone = new_ggc_zone ("Garbage zone");
1153
1154 G.pagesize = getpagesize();
1155 G.lg_pagesize = exact_log2 (G.pagesize);
1156 #ifdef HAVE_MMAP_DEV_ZERO
1157 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1158 if (G.dev_zero_fd == -1)
1159 abort ();
1160 #endif
1161
1162 #if 0
1163 G.debug_file = fopen ("ggc-mmap.debug", "w");
1164 setlinebuf (G.debug_file);
1165 #else
1166 G.debug_file = stdout;
1167 #endif
1168
1169 #ifdef USING_MMAP
1170 /* StunOS has an amazing off-by-one error for the first mmap allocation
1171 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1172 believe, is an unaligned page allocation, which would cause us to
1173 hork badly if we tried to use it. */
1174 {
1175 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1176 struct page_entry *e;
1177 if ((size_t)p & (G.pagesize - 1))
1178 {
1179 /* How losing. Discard this one and try another. If we still
1180 can't get something useful, give up. */
1181
1182 p = alloc_anon (NULL, G.pagesize, &main_zone);
1183 if ((size_t)p & (G.pagesize - 1))
1184 abort ();
1185 }
1186
1187 /* We have a good page, might as well hold onto it... */
1188 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
1189 e->bytes = G.pagesize;
1190 e->page = p;
1191 e->next = main_zone.free_pages;
1192 main_zone.free_pages = e;
1193 }
1194 #endif
1195 }
1196
1197 /* Start a new GGC zone. */
1198
1199 struct alloc_zone *
1200 new_ggc_zone (const char * name)
1201 {
1202 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1203 new_zone->name = name;
1204 new_zone->next_zone = G.zones->next_zone;
1205 G.zones->next_zone = new_zone;
1206 return new_zone;
1207 }
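/* Typical life cycle of a caller-created zone, as a sketch that is never
   compiled.  Note that destroy_ggc_zone below aborts unless a collection
   has already emptied the zone.  */
#if 0
  {
    struct alloc_zone *z = new_ggc_zone ("scratch zone");
    void *p = ggc_alloc_zone (128, z);

    /* ... use P, then drop every reference to it ...  */
    ggc_collect ();             /* Must actually sweep Z and empty it.  */
    destroy_ggc_zone (z);
  }
#endif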
1208
1209 /* Destroy a GGC zone. */
1210 void
1211 destroy_ggc_zone (struct alloc_zone * dead_zone)
1212 {
1213 struct alloc_zone *z;
1214
1215 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1216 /* Just find that zone. */ ;
1217
1218 /* We should have found the zone in the list. Anything else is
1219 fatal.
1220 If we did find the zone, we expect this zone to be empty.
1221 A ggc_collect should have emptied it before we can destroy it. */
1222 if (!z || dead_zone->allocated != 0)
1223 abort ();
1224
1225 /* Unchain the dead zone, release all its pages and free it. */
1226 z->next_zone = z->next_zone->next_zone;
1227 release_pages (dead_zone);
1228 free (dead_zone);
1229 }
1230
1231 /* Increment the `GC context'. Objects allocated in an outer context
1232 are never freed, eliminating the need to register their roots. */
1233
1234 void
1235 ggc_push_context (void)
1236 {
1237 struct alloc_zone *zone;
1238 for (zone = G.zones; zone; zone = zone->next_zone)
1239 ++(zone->context_depth);
1240 /* Die on wrap. */
1241 if (main_zone.context_depth >= HOST_BITS_PER_LONG)
1242 abort ();
1243 }
1244
1245 /* Decrement the `GC context'. All objects allocated since the
1246 previous ggc_push_context are migrated to the outer context. */
1247
1248 static void
1249 ggc_pop_context_1 (struct alloc_zone *zone)
1250 {
1251 unsigned long omask;
1252 unsigned depth;
1253 page_entry *p;
1254
1255 depth = --(zone->context_depth);
1256 omask = (unsigned long)1 << (depth + 1);
1257
1258 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
1259 return;
1260
1261 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
1262 zone->context_depth_allocations &= omask - 1;
1263 zone->context_depth_collections &= omask - 1;
1264
1265 /* Any remaining pages in the popped context are lowered to the new
1266 current context; i.e. objects allocated in the popped context and
1267 left over are imported into the previous context. */
1268 for (p = zone->pages; p != NULL; p = p->next)
1269 if (p->context_depth > depth)
1270 p->context_depth = depth;
1271 }
1272
1273 /* Pop all the zone contexts. */
1274
1275 void
1276 ggc_pop_context (void)
1277 {
1278 struct alloc_zone *zone;
1279 for (zone = G.zones; zone; zone = zone->next_zone)
1280 ggc_pop_context_1 (zone);
1281 }
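/* A sketch of the intended pairing, never compiled.  Objects allocated
   between the push and the pop are migrated into the enclosing context
   by ggc_pop_context, as described above.  */
#if 0
  {
    void *tmp;

    ggc_push_context ();
    tmp = ggc_alloc (32);       /* Allocated at the inner context depth.  */
    ggc_pop_context ();         /* TMP now belongs to the outer depth.  */
  }
#endif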
1282
1283
1284 /* Poison the chunk. */
1285 #ifdef ENABLE_GC_CHECKING
1286 #define poison_chunk(CHUNK, SIZE) \
1287 memset ((CHUNK)->u.data, 0xa5, (SIZE))
1288 #else
1289 #define poison_chunk(CHUNK, SIZE)
1290 #endif
1291
1292 /* Free all empty pages and objects within a page for a given zone */
1293
1294 static void
1295 sweep_pages (struct alloc_zone *zone)
1296 {
1297 page_entry **pp, *p, *next;
1298 struct alloc_chunk *chunk, *last_free, *end;
1299 size_t last_free_size, allocated = 0;
1300
1301 /* First, reset the free_chunks lists, since we are going to
1302 re-free free chunks in hopes of coalescing them into large chunks. */
1303 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1304 pp = &zone->pages;
1305 for (p = zone->pages; p ; p = next)
1306 {
1307 next = p->next;
1308
1309 /* For empty pages, just free the page. */
1310 if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
1311 {
1312 *pp = next;
1313 #ifdef ENABLE_GC_CHECKING
1314 /* Poison the page. */
1315 memset (p->page, 0xb5, p->bytes);
1316 #endif
1317 free_page (p);
1318 continue;
1319 }
1320
1321 /* Large pages are all or none affairs. Either they are
1322 completely empty, or they are completely full.
1323 Thus, if the above didn't catch it, we need not do anything
1324 except remove the mark and reset the bytes_free.
1325
1326 XXX: Should we bother to increment allocated?  */
1327 else if (p->large_p)
1328 {
1329 p->bytes_free = p->bytes;
1330 ((struct alloc_chunk *)p->page)->mark = 0;
1331 continue;
1332 }
1333 pp = &p->next;
1334
1335 /* This page has now survived another collection. */
1336 p->survived++;
1337
1338 /* Which leaves full and partial pages. Step through all chunks,
1339 consolidate those that are free and insert them into the free
1340 lists. Note that consolidation slows down collection
1341 slightly. */
1342
1343 chunk = (struct alloc_chunk *)p->page;
1344 end = (struct alloc_chunk *)(p->page + G.pagesize);
1345 last_free = NULL;
1346 last_free_size = 0;
1347
1348 do
1349 {
1350 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1351 if (chunk->mark || p->context_depth < zone->context_depth)
1352 {
1353 if (last_free)
1354 {
1355 last_free->type = 0;
1356 last_free->size = last_free_size;
1357 last_free->mark = 0;
1358 poison_chunk (last_free, last_free_size);
1359 free_chunk (last_free, last_free_size, zone);
1360 last_free = NULL;
1361 }
1362 if (chunk->mark)
1363 {
1364 allocated += chunk->size + CHUNK_OVERHEAD;
1365 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1366 }
1367 chunk->mark = 0;
1368 #ifdef ENABLE_CHECKING
1369 if (p->bytes_free > p->bytes)
1370 abort ();
1371 #endif
1372 }
1373 else
1374 {
1375 if (last_free)
1376 {
1377 last_free_size += CHUNK_OVERHEAD + chunk->size;
1378 }
1379 else
1380 {
1381 last_free = chunk;
1382 last_free_size = chunk->size;
1383 }
1384 }
1385
1386 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1387 }
1388 while (chunk < end);
1389
1390 if (last_free)
1391 {
1392 last_free->type = 0;
1393 last_free->size = last_free_size;
1394 last_free->mark = 0;
1395 poison_chunk (last_free, last_free_size);
1396 free_chunk (last_free, last_free_size, zone);
1397 }
1398 }
1399
1400 zone->allocated = allocated;
1401 }
1402
1403 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1404 is true if we need to mark before sweeping, false if some other
1405 zone collection has already performed marking for us. Returns true
1406 if we collected, false otherwise. */
1407
1408 static bool
1409 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1410 {
1411 /* Avoid frequent unnecessary work by skipping collection if the
1412 total allocations haven't expanded much since the last
1413 collection. */
1414 float allocated_last_gc =
1415 MAX (zone->allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1416
1417 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1418
1419 if (zone->allocated < allocated_last_gc + min_expand)
1420 return false;
1421
1422 if (!quiet_flag)
1423 fprintf (stderr, " {%s GC %luk -> ", zone->name, (unsigned long) zone->allocated / 1024);
1424
1425 /* Zero the total allocated bytes. This will be recalculated in the
1426 sweep phase. */
1427 zone->allocated = 0;
1428
1429 /* Release the pages we freed the last time we collected, but didn't
1430 reuse in the interim. */
1431 release_pages (zone);
1432
1433 /* Indicate that we've seen collections at this context depth. */
1434 zone->context_depth_collections
1435 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1436 if (need_marking)
1437 ggc_mark_roots ();
1438 sweep_pages (zone);
1439 zone->was_collected = true;
1440 zone->allocated_last_gc = zone->allocated;
1441
1442
1443 if (!quiet_flag)
1444 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1445 return true;
1446 }
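/* Worked example of the trigger above (illustrative, assuming the
   documented parameter defaults of 30 for --param ggc-min-expand and
   4096 kB for --param ggc-min-heapsize): allocated_last_gc is clamped
   to at least 4 MB, min_expand is then 1.2 MB, so the zone is skipped
   until its live allocation exceeds about 5.2 MB.  */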
1447
1448 /* Calculate the average page survival rate in terms of number of
1449 collections. */
1450
1451 static float
1452 calculate_average_page_survival (struct alloc_zone *zone)
1453 {
1454 float count = 0.0;
1455 float survival = 0.0;
1456 page_entry *p;
1457 for (p = zone->pages; p; p = p->next)
1458 {
1459 count += 1.0;
1460 survival += p->survived;
1461 }
1462 return survival/count;
1463 }
1464
1465 /* Check the magic cookies all of the chunks contain, to make sure we
1466 aren't doing anything stupid, like stomping on alloc_chunk
1467 structures. */
1468
1469 static inline void
1470 check_cookies (void)
1471 {
1472 #ifdef COOKIE_CHECKING
1473 page_entry *p;
1474 struct alloc_zone *zone;
1475
1476 for (zone = G.zones; zone; zone = zone->next_zone)
1477 {
1478 for (p = zone->pages; p; p = p->next)
1479 {
1480 if (!p->large_p)
1481 {
1482 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1483 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1484 do
1485 {
1486 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
1487 abort ();
1488 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1489 }
1490 while (chunk < end);
1491 }
1492 }
1493 }
1494 #endif
1495 }
1496
1497
1498 /* Top level collection routine. */
1499
1500 void
1501 ggc_collect (void)
1502 {
1503 struct alloc_zone *zone;
1504 bool marked = false;
1505 float f;
1506
1507 timevar_push (TV_GC);
1508 check_cookies ();
1509 /* Start by possibly collecting the main zone. */
1510 main_zone.was_collected = false;
1511 marked |= ggc_collect_1 (&main_zone, true);
1512
1513 /* In order to keep the number of collections down, we don't
1514 collect other zones unless we are collecting the main zone. This
1515 gives us roughly the same number of collections as we used to
1516 have with the old GC. The number of collections is important
1517 because our main slowdown (according to profiling) is now in
1518 marking. So if we mark twice as often as we used to, we'll be
1519 twice as slow. Hopefully we'll avoid this cost when we mark
1520 zone-at-a-time. */
1521
1522 if (main_zone.was_collected)
1523 {
1524 struct alloc_zone *zone;
1525
1526 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1527 {
1528 check_cookies ();
1529 zone->was_collected = false;
1530 marked |= ggc_collect_1 (zone, !marked);
1531 }
1532 }
1533
1534 /* Print page survival stats, if someone wants them. */
1535 if (GGC_DEBUG_LEVEL >= 2)
1536 {
1537 for (zone = G.zones; zone; zone = zone->next_zone)
1538 {
1539 if (zone->was_collected)
1540 {
1541 f = calculate_average_page_survival (zone);
1542 printf ("Average page survival in zone `%s' is %f\n",
1543 zone->name, f);
1544 }
1545 }
1546 }
1547
1548 /* Since we don't mark zone at a time right now, marking in any
1549 zone means marking in every zone. So we have to clear all the
1550 marks in all the zones that weren't collected already. */
1551 if (marked)
1552 {
1553 page_entry *p;
1554 for (zone = G.zones; zone; zone = zone->next_zone)
1555 {
1556 if (zone->was_collected)
1557 continue;
1558 for (p = zone->pages; p; p = p->next)
1559 {
1560 if (!p->large_p)
1561 {
1562 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1563 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1564 do
1565 {
1566 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1567 if (chunk->mark || p->context_depth < zone->context_depth)
1568 {
1569 if (chunk->mark)
1570 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1571 #ifdef ENABLE_CHECKING
1572 if (p->bytes_free > p->bytes)
1573 abort ();
1574 #endif
1575 chunk->mark = 0;
1576 }
1577 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1578 }
1579 while (chunk < end);
1580 }
1581 else
1582 {
1583 p->bytes_free = p->bytes;
1584 ((struct alloc_chunk *)p->page)->mark = 0;
1585 }
1586 }
1587 }
1588 }
1589 timevar_pop (TV_GC);
1590 }
1591
1592 /* Print allocation statistics. */
1593
1594 void
1595 ggc_print_statistics (void)
1596 {
1597 }
1598
1599 struct ggc_pch_data
1600 {
1601 struct ggc_pch_ondisk
1602 {
1603 unsigned total;
1604 } d;
1605 size_t base;
1606 size_t written;
1607 };
1608
1609 /* Initialize the PCH datastructure. */
1610
1611 struct ggc_pch_data *
1612 init_ggc_pch (void)
1613 {
1614 return xcalloc (sizeof (struct ggc_pch_data), 1);
1615 }
1616
1617 /* Add the size of object X to the size of the PCH data. */
1618
1619 void
1620 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1621 size_t size, bool is_string)
1622 {
1623 if (!is_string)
1624 {
1625 d->d.total += size + CHUNK_OVERHEAD;
1626 }
1627 else
1628 d->d.total += size;
1629 }
1630
1631 /* Return the total size of the PCH data. */
1632
1633 size_t
1634 ggc_pch_total_size (struct ggc_pch_data *d)
1635 {
1636 return d->d.total;
1637 }
1638
1639 /* Set the base address for the objects in the PCH file. */
1640
1641 void
1642 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1643 {
1644 d->base = (size_t) base;
1645 }
1646
1647 /* Allocate a place for object X of size SIZE in the PCH file. */
1648
1649 char *
1650 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1651 size_t size, bool is_string)
1652 {
1653 char *result;
1654 result = (char *)d->base;
1655 if (!is_string)
1656 {
1657 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1658 if (chunk->size == LARGE_OBJECT_SIZE)
1659 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1660 else
1661 d->base += chunk->size + CHUNK_OVERHEAD;
1662 return result + CHUNK_OVERHEAD;
1663 }
1664 else
1665 {
1666 d->base += size;
1667 return result;
1668 }
1669
1670 }
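/* Worked example (illustrative): for a non-string object whose chunk
   header records size == 32, the header and data together occupy
   CHUNK_OVERHEAD + 32 bytes of the PCH image.  d->base therefore
   advances by that amount, while the relocated address handed back to
   the caller is the old base plus CHUNK_OVERHEAD, mirroring the
   in-memory layout in which u.data follows the header.
   ggc_pch_write_object below writes the header and data as one block
   of exactly that size.  */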
1671
1672 /* Prepare to write out the PCH data to file F. */
1673
1674 void
1675 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1676 FILE *f ATTRIBUTE_UNUSED)
1677 {
1678 /* Nothing to do. */
1679 }
1680
1681 /* Write out object X of SIZE to file F. */
1682
1683 void
1684 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1685 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1686 size_t size, bool is_string)
1687 {
1688 if (!is_string)
1689 {
1690 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1691 size = ggc_get_size (x);
1692 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1693 fatal_error ("can't write PCH file: %m");
1694 d->written += size + CHUNK_OVERHEAD;
1695 }
1696 else
1697 {
1698 if (fwrite (x, size, 1, f) != 1)
1699 fatal_error ("can't write PCH file: %m");
1700 d->written += size;
1701 }
1702 if (d->written == d->d.total
1703 && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
1704 fatal_error ("can't write PCH file: %m");
1705 }
1706
1707 void
1708 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1709 {
1710 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1711 fatal_error ("can't write PCH file: %m");
1712 free (d);
1713 }
1714
1715
1716 void
1717 ggc_pch_read (FILE *f, void *addr)
1718 {
1719 struct ggc_pch_ondisk d;
1720 struct page_entry *entry;
1721 char *pte;
1722 if (fread (&d, sizeof (d), 1, f) != 1)
1723 fatal_error ("can't read PCH file: %m");
1724 entry = xcalloc (1, sizeof (struct page_entry));
1725 entry->bytes = d.total;
1726 entry->page = addr;
1727 entry->context_depth = 0;
1728 entry->zone = &main_zone;
1729 for (pte = entry->page;
1730 pte < entry->page + entry->bytes;
1731 pte += G.pagesize)
1732 set_page_table_entry (pte, entry);
1733
1734 }