1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31
32 #ifdef HAVE_MMAP_ANYWHERE
33 #include <sys/mman.h>
34 #endif
35
36 #ifndef MAP_FAILED
37 #define MAP_FAILED -1
38 #endif
39
40 #if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
41 #define MAP_ANONYMOUS MAP_ANON
42 #endif
43
44 /* Strategy:
45
46 This garbage-collecting allocator allocates objects on one of a set
47 of pages. Each page can allocate objects of a single size only;
48 available sizes are powers of two starting at four bytes. The size
49 of an allocation request is rounded up to the next power of two
50 (`order'), and satisfied from the appropriate page.
51
52 Each page is recorded in a page-entry, which also maintains an
53 in-use bitmap of object positions on the page. This allows the
54 allocation state of a particular object to be flipped without
55 touching the page itself.
56
57 Each page-entry also has a context depth, which is used to track
58 pushing and popping of allocation contexts. Only objects allocated
59 in the current (highest-numbered) context may be collected.
60
61 Page entries are arranged in an array of singly-linked lists. The
62 array is indexed by the allocation size, in bits, of the pages on
63 it; i.e. all pages on a list allocate objects of the same size.
64 Pages are ordered on the list such that all non-full pages precede
65 all full pages, with non-full pages arranged in order of decreasing
66 context depth.
67
68 Empty pages (of all orders) are kept on a single page cache list,
69 and are considered first when new pages are required; they are
70 deallocated at the start of the next collection if they haven't
71 been recycled by then. */
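/* Illustrative example: a 10-byte ggc_alloc request is rounded up to
   16 bytes (order 4) and satisfied from a page on the order-4 list;
   afterwards the object's allocation state is tracked purely by its
   bit in that page's in_use_p bitmap. */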
72
73
74 /* Define GGC_POISON to poison memory marked unused by the collector. */
75 #undef GGC_POISON
76
77 /* Define GGC_ALWAYS_COLLECT to perform collection every time
78 ggc_collect is invoked. Otherwise, collection is performed only
79 when a significant amount of memory has been allocated since the
80 last collection. */
81 #undef GGC_ALWAYS_COLLECT
82
83 #ifdef ENABLE_GC_CHECKING
84 #define GGC_POISON
85 #endif
86 #ifdef ENABLE_GC_ALWAYS_COLLECT
87 #define GGC_ALWAYS_COLLECT
88 #endif
89
90 /* Define GGC_DEBUG_LEVEL to print debugging information.
91 0: No debugging output.
92 1: GC statistics only.
93 2: Page-entry allocations/deallocations as well.
94 3: Object allocations as well.
95 4: Object marks as well. */
96 #define GGC_DEBUG_LEVEL (0)
97 \f
98 #ifndef HOST_BITS_PER_PTR
99 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
100 #endif
101
102 /* The GC-allocated empty string, "". */
103 char *empty_string;
104 \f
105 /* A two-level tree is used to look up the page-entry for a given
106 pointer. Two chunks of the pointer's bits are extracted to index
107 the first and second levels of the tree, as follows:
108
109                                    HOST_PAGE_SIZE_BITS
110                            32           |      |
111        msb +----------------+----+------+------+ lsb
112                             |      |
113                          PAGE_L1_BITS
114                                    |
115                                PAGE_L2_BITS
116
117 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
118 pages are aligned on system page boundaries. The next most
119 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
120 index values in the lookup table, respectively.
121
122 For 32-bit architectures and the settings below, there are no
123 leftover bits. For architectures with wider pointers, the lookup
124 tree points to a list of pages, which must be scanned to find the
125 correct one. */
126
127 #define PAGE_L1_BITS (8)
128 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
129 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
130 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
131
132 #define LOOKUP_L1(p) \
133 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
134
135 #define LOOKUP_L2(p) \
136 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
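/* For instance, assuming a 4096-byte system page (G.lg_pagesize == 12):
   PAGE_L2_BITS is 32 - 8 - 12 = 12, so LOOKUP_L1 extracts bits 31..24 of
   the pointer, LOOKUP_L2 extracts bits 23..12, and the low 12 bits are
   the offset within the page, which is ignored. */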
137
138
139 /* A page_entry records the status of an allocation page. This
140 structure is dynamically sized to fit the bitmap in_use_p. */
141 typedef struct page_entry
142 {
143 /* The next page-entry with objects of the same size, or NULL if
144 this is the last page-entry. */
145 struct page_entry *next;
146
147 /* The number of bytes allocated. (This will always be a multiple
148 of the host system page size.) */
149 size_t bytes;
150
151 /* The address at which the memory is allocated. */
152 char *page;
153
154 /* Saved in-use bit vector for pages that aren't in the topmost
155 context during collection. */
156 unsigned long *save_in_use_p;
157
158 /* Context depth of this page. */
159 unsigned short context_depth;
160
161 /* The number of free objects remaining on this page. */
162 unsigned short num_free_objects;
163
164 /* A likely candidate for the bit position of a free object for the
165 next allocation from this page. */
166 unsigned short next_bit_hint;
167
168 /* The lg of the size of objects allocated from this page. */
169 unsigned char order;
170
171 /* A bit vector indicating whether or not objects are in use. The
172 Nth bit is one if the Nth object on this page is allocated. This
173 array is dynamically sized. */
174 unsigned long in_use_p[1];
175 } page_entry;
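/* Note: a page_entry is allocated with room for the whole bitmap, i.e.
   sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
   bytes, so in_use_p extends past its declared single element; see
   alloc_page below. */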
176
177
178 #if HOST_BITS_PER_PTR <= 32
179
180 /* On 32-bit hosts, we use a two level page table, as pictured above. */
181 typedef page_entry **page_table[PAGE_L1_SIZE];
182
183 #else
184
185 /* On 64-bit hosts, we use the same two level page tables plus a linked
186 list that disambiguates the top 32-bits. There will almost always be
187 exactly one entry in the list. */
188 typedef struct page_table_chain
189 {
190 struct page_table_chain *next;
191 size_t high_bits;
192 page_entry **table[PAGE_L1_SIZE];
193 } *page_table;
194
195 #endif
196
197 /* The rest of the global variables. */
198 static struct globals
199 {
200 /* The Nth element in this array is a page with objects of size 2^N.
201 If there are any pages with free objects, they will be at the
202 head of the list. NULL if there are no page-entries for this
203 object size. */
204 page_entry *pages[HOST_BITS_PER_PTR];
205
206 /* The Nth element in this array is the last page with objects of
207 size 2^N. NULL if there are no page-entries for this object
208 size. */
209 page_entry *page_tails[HOST_BITS_PER_PTR];
210
211 /* Lookup table for associating allocation pages with object addresses. */
212 page_table lookup;
213
214 /* The system's page size. */
215 size_t pagesize;
216 size_t lg_pagesize;
217
218 /* Bytes currently allocated. */
219 size_t allocated;
220
221 /* Bytes allocated at the end of the last collection. */
222 size_t allocated_last_gc;
223
224 /* Total amount of memory mapped. */
225 size_t bytes_mapped;
226
227 /* The current depth in the context stack. */
228 unsigned short context_depth;
229
230 /* A file descriptor open to /dev/zero for reading. */
231 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
232 int dev_zero_fd;
233 #endif
234
235 /* A cache of free system pages. */
236 page_entry *free_pages;
237
238 /* The stream used for debugging output. */
239 FILE *debug_file;
240 } G;
241
242
243 /* Compute DIVIDEND / DIVISOR, rounded up. */
244 #define DIV_ROUND_UP(Dividend, Divisor) \
245 (((Dividend) + (Divisor) - 1) / (Divisor))
246
247 /* The number of objects per allocation page, for objects of size
248 2^ORDER. */
249 #define OBJECTS_PER_PAGE(Order) \
250 ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))
251
252 /* The size in bytes required to maintain a bitmap for the objects
253 on a page-entry. */
254 #define BITMAP_SIZE(Num_objects) \
255 (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
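/* Worked example, assuming 4096-byte pages and 32-bit longs: for order 5
   (32-byte objects), OBJECTS_PER_PAGE (5) is 4096 / 32 = 128, and
   BITMAP_SIZE (128 + 1) is DIV_ROUND_UP (129, 32) * 4 = 20 bytes; the
   extra object accounts for the one-past-the-end sentinel bit. */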
256
257 /* Skip garbage collection if the current allocation is not at least
258 this factor times the allocation at the end of the last collection.
259 In other words, total allocation must expand by (this factor minus
260 one) before collection is performed. */
261 #define GGC_MIN_EXPAND_FOR_GC (1.3)
262
263 /* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
264 test from triggering too often when the heap is small. */
265 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
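/* Taken together: with the 4MB floor and the 1.3 expansion factor, no
   collection is attempted until allocation reaches 1.3 * 4MB = 5.2MB;
   if a collection leaves 10MB live, the next one waits for 13MB. */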
266
267 /* Allocate pages in chunks of this size, to throttle calls to mmap.
268 The first page is used, the rest go onto the free list. */
269 #define GGC_QUIRE_SIZE 16
270
271 \f
272 static int ggc_allocated_p PARAMS ((const void *));
273 static page_entry *lookup_page_table_entry PARAMS ((const void *));
274 static void set_page_table_entry PARAMS ((void *, page_entry *));
275 static char *alloc_anon PARAMS ((char *, size_t));
276 static struct page_entry * alloc_page PARAMS ((unsigned));
277 static void free_page PARAMS ((struct page_entry *));
278 static void release_pages PARAMS ((void));
279 static void clear_marks PARAMS ((void));
280 static void sweep_pages PARAMS ((void));
281 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
282
283 #ifdef GGC_POISON
284 static void poison_pages PARAMS ((void));
285 #endif
286
287 void debug_print_page_list PARAMS ((int));
288 \f
289 /* Returns non-zero if P was allocated in GC'able memory. */
290
291 static inline int
292 ggc_allocated_p (p)
293 const void *p;
294 {
295 page_entry ***base;
296 size_t L1, L2;
297
298 #if HOST_BITS_PER_PTR <= 32
299 base = &G.lookup[0];
300 #else
301 page_table table = G.lookup;
302 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
303 while (1)
304 {
305 if (table == NULL)
306 return 0;
307 if (table->high_bits == high_bits)
308 break;
309 table = table->next;
310 }
311 base = &table->table[0];
312 #endif
313
314 /* Extract the level 1 and 2 indices. */
315 L1 = LOOKUP_L1 (p);
316 L2 = LOOKUP_L2 (p);
317
318 return base[L1] && base[L1][L2];
319 }
320
321 /* Traverse the page table and find the entry for a page.
322 Die (probably) if the object wasn't allocated via GC. */
323
324 static inline page_entry *
325 lookup_page_table_entry (p)
326 const void *p;
327 {
328 page_entry ***base;
329 size_t L1, L2;
330
331 #if HOST_BITS_PER_PTR <= 32
332 base = &G.lookup[0];
333 #else
334 page_table table = G.lookup;
335 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
336 while (table->high_bits != high_bits)
337 table = table->next;
338 base = &table->table[0];
339 #endif
340
341 /* Extract the level 1 and 2 indices. */
342 L1 = LOOKUP_L1 (p);
343 L2 = LOOKUP_L2 (p);
344
345 return base[L1][L2];
346 }
347
348 /* Set the page table entry for a page. */
349
350 static void
351 set_page_table_entry (p, entry)
352 void *p;
353 page_entry *entry;
354 {
355 page_entry ***base;
356 size_t L1, L2;
357
358 #if HOST_BITS_PER_PTR <= 32
359 base = &G.lookup[0];
360 #else
361 page_table table;
362 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
363 for (table = G.lookup; table; table = table->next)
364 if (table->high_bits == high_bits)
365 goto found;
366
367 /* Not found -- allocate a new table. */
368 table = (page_table) xcalloc (1, sizeof(*table));
369 table->next = G.lookup;
370 table->high_bits = high_bits;
371 G.lookup = table;
372 found:
373 base = &table->table[0];
374 #endif
375
376 /* Extract the level 1 and 2 indices. */
377 L1 = LOOKUP_L1 (p);
378 L2 = LOOKUP_L2 (p);
379
380 if (base[L1] == NULL)
381 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
382
383 base[L1][L2] = entry;
384 }
385
386 /* Prints the page-entry for object size ORDER, for debugging. */
387
388 void
389 debug_print_page_list (order)
390 int order;
391 {
392 page_entry *p;
393 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
394 (PTR) G.page_tails[order]);
395 p = G.pages[order];
396 while (p != NULL)
397 {
398 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
399 p->num_free_objects);
400 p = p->next;
401 }
402 printf ("NULL\n");
403 fflush (stdout);
404 }
405
406 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
407 (if non-null). */
408
409 static inline char *
410 alloc_anon (pref, size)
411 char *pref ATTRIBUTE_UNUSED;
412 size_t size;
413 {
414 char *page;
415
416 #ifdef HAVE_MMAP_ANYWHERE
417 #ifdef MAP_ANONYMOUS
418 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
419 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
420 #else
421 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
422 MAP_PRIVATE, G.dev_zero_fd, 0);
423 #endif
424 if (page == (char *) MAP_FAILED)
425 {
426 fputs ("Virtual memory exhausted!\n", stderr);
427 exit(1);
428 }
429 #else
430 #ifdef HAVE_VALLOC
431 page = (char *) valloc (size);
432 if (!page)
433 {
434 fputs ("Virtual memory exhausted!\n", stderr);
435 exit(1);
436 }
437 #endif /* HAVE_VALLOC */
438 #endif /* HAVE_MMAP_ANYWHERE */
439
440 /* Remember that we allocated this memory. */
441 G.bytes_mapped += size;
442
443 return page;
444 }
445
446 /* Allocate a new page for allocating objects of size 2^ORDER,
447 and return an entry for it. The entry is not added to the
448 appropriate page_table list. */
449
450 static inline struct page_entry *
451 alloc_page (order)
452 unsigned order;
453 {
454 struct page_entry *entry, *p, **pp;
455 char *page;
456 size_t num_objects;
457 size_t bitmap_size;
458 size_t page_entry_size;
459 size_t entry_size;
460
461 num_objects = OBJECTS_PER_PAGE (order);
462 bitmap_size = BITMAP_SIZE (num_objects + 1);
463 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
464 entry_size = num_objects * (1 << order);
465
466 entry = NULL;
467 page = NULL;
468
469 /* Check the list of free pages for one we can use. */
470 for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
471 if (p->bytes == entry_size)
472 break;
473
474 if (p != NULL)
475 {
476 /* Recycle the allocated memory from this page ... */
477 *pp = p->next;
478 page = p->page;
479 /* ... and, if possible, the page entry itself. */
480 if (p->order == order)
481 {
482 entry = p;
483 memset (entry, 0, page_entry_size);
484 }
485 else
486 free (p);
487 }
488 #ifdef HAVE_MMAP_ANYWHERE
489 else if (entry_size == G.pagesize)
490 {
491 /* We want just one page. Allocate a bunch of them and put the
492 extras on the freelist. (Can only do this optimization with
493 mmap for backing store.) */
494 struct page_entry *e, *f = G.free_pages;
495 int i;
496
497 page = alloc_anon (NULL, entry_size * GGC_QUIRE_SIZE);
498 /* This loop counts down so that the chain will be in ascending
499 memory order. */
500 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
501 {
502 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
503 e->bytes = entry_size;
504 e->page = page + i*entry_size;
505 e->next = f;
506 f = e;
507 }
508 G.free_pages = f;
509 }
510 #endif
511 else
512 page = alloc_anon (NULL, entry_size);
513
514 if (entry == NULL)
515 entry = (struct page_entry *) xcalloc (1, page_entry_size);
516
517 entry->bytes = entry_size;
518 entry->page = page;
519 entry->context_depth = G.context_depth;
520 entry->order = order;
521 entry->num_free_objects = num_objects;
522 entry->next_bit_hint = 1;
523
524 /* Set the one-past-the-end in-use bit. This acts as a sentinel as we
525 increment the hint. */
526 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
527 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
528
529 set_page_table_entry (page, entry);
530
531 if (GGC_DEBUG_LEVEL >= 2)
532 fprintf (G.debug_file,
533 "Allocating page at %p, object size=%d, data %p-%p\n",
534 (PTR) entry, 1 << order, page, page + entry_size - 1);
535
536 return entry;
537 }
538
539 /* For a page that is no longer needed, put it on the free page list. */
540
541 static inline void
542 free_page (entry)
543 page_entry *entry;
544 {
545 if (GGC_DEBUG_LEVEL >= 2)
546 fprintf (G.debug_file,
547 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
548 entry->page, entry->page + entry->bytes - 1);
549
550 set_page_table_entry (entry->page, NULL);
551
552 entry->next = G.free_pages;
553 G.free_pages = entry;
554 }
555
556 /* Release the free page cache to the system. */
557
558 static void
559 release_pages ()
560 {
561 page_entry *p, *next;
562
563 #ifdef HAVE_MMAP_ANYWHERE
564 char *start;
565 size_t len;
566
567 /* Gather up adjacent pages so they are unmapped together. */
568 p = G.free_pages;
569
570 while (p)
571 {
572 start = p->page;
573 next = p->next;
574 len = p->bytes;
575 free (p);
576 p = next;
577
578 while (p && p->page == start + len)
579 {
580 next = p->next;
581 len += p->bytes;
582 free (p);
583 p = next;
584 }
585
586 munmap (start, len);
587 G.bytes_mapped -= len;
588 }
589 #else
590 #ifdef HAVE_VALLOC
591
592 for (p = G.free_pages; p; p = next)
593 {
594 next = p->next;
595 free (p->page);
596 G.bytes_mapped -= p->bytes;
597 free (p);
598 }
599 #endif /* HAVE_VALLOC */
600 #endif /* HAVE_MMAP_ANYWHERE */
601
602 G.free_pages = NULL;
603 }
604
605 /* This table provides a fast way to determine ceil(log_2(size)) for
606 allocation requests. The minimum allocation size is four bytes. */
607
608 static unsigned char const size_lookup[257] =
609 {
610 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
611 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
612 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
613 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
614 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
615 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
616 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
617 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
618 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
619 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
620 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
621 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
622 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
623 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
624 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
625 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
626 8
627 };
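/* For example, size_lookup[10] is 4, so a 10-byte request is served from
   16-byte (order 4) objects. Requests larger than 256 bytes fall through
   to the loop in ggc_alloc below, which searches upward from order 9. */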
628
629 /* Allocate a chunk of memory of SIZE bytes. Its contents are
630 undefined. */
631
632 void *
633 ggc_alloc (size)
634 size_t size;
635 {
636 unsigned order, word, bit, object_offset;
637 struct page_entry *entry;
638 void *result;
639
640 if (size <= 256)
641 order = size_lookup[size];
642 else
643 {
644 order = 9;
645 while (size > ((size_t) 1 << order))
646 order++;
647 }
648
649 /* If there are non-full pages for this size allocation, they are at
650 the head of the list. */
651 entry = G.pages[order];
652
653 /* If there is no page for this object size, or all pages in this
654 context are full, allocate a new page. */
655 if (entry == NULL || entry->num_free_objects == 0)
656 {
657 struct page_entry *new_entry;
658 new_entry = alloc_page (order);
659
660 /* If this is the only entry, it's also the tail. */
661 if (entry == NULL)
662 G.page_tails[order] = new_entry;
663
664 /* Put new pages at the head of the page list. */
665 new_entry->next = entry;
666 entry = new_entry;
667 G.pages[order] = new_entry;
668
669 /* For a new page, we know the word and bit positions (in the
670 in_use bitmap) of the first available object -- they're zero. */
671 new_entry->next_bit_hint = 1;
672 word = 0;
673 bit = 0;
674 object_offset = 0;
675 }
676 else
677 {
678 /* First try to use the hint left from the previous allocation
679 to locate a clear bit in the in-use bitmap. We've made sure
680 that the one-past-the-end bit is always set, so if the hint
681 has run over, this test will fail. */
682 unsigned hint = entry->next_bit_hint;
683 word = hint / HOST_BITS_PER_LONG;
684 bit = hint % HOST_BITS_PER_LONG;
685
686 /* If the hint didn't work, scan the bitmap from the beginning. */
687 if ((entry->in_use_p[word] >> bit) & 1)
688 {
689 word = bit = 0;
690 while (~entry->in_use_p[word] == 0)
691 ++word;
692 while ((entry->in_use_p[word] >> bit) & 1)
693 ++bit;
694 hint = word * HOST_BITS_PER_LONG + bit;
695 }
696
697 /* Next time, try the next bit. */
698 entry->next_bit_hint = hint + 1;
699
700 object_offset = hint << order;
701 }
702
703 /* Set the in-use bit. */
704 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
705
706 /* Keep a running total of the number of free objects. If this page
707 fills up, we may have to move it to the end of the list if the
708 next page isn't full. If the next page is full, all subsequent
709 pages are full, so there's no need to move it. */
710 if (--entry->num_free_objects == 0
711 && entry->next != NULL
712 && entry->next->num_free_objects > 0)
713 {
714 G.pages[order] = entry->next;
715 entry->next = NULL;
716 G.page_tails[order]->next = entry;
717 G.page_tails[order] = entry;
718 }
719
720 /* Calculate the object's address. */
721 result = entry->page + object_offset;
722
723 #ifdef GGC_POISON
724 /* `Poison' the entire allocated object, including any padding at
725 the end. */
726 memset (result, 0xaf, 1 << order);
727 #endif
728
729 /* Keep track of how many bytes are being allocated. This
730 information is used in deciding when to collect. */
731 G.allocated += (size_t) 1 << order;
732
733 if (GGC_DEBUG_LEVEL >= 3)
734 fprintf (G.debug_file,
735 "Allocating object, requested size=%d, actual=%d at %p on %p\n",
736 (int) size, 1 << order, result, (PTR) entry);
737
738 return result;
739 }
740
741 /* If P is not marked, mark it and return false. Otherwise return true.
742 P must have been allocated by the GC allocator; it mustn't point to
743 static objects, stack variables, or memory allocated with malloc. */
744
745 int
746 ggc_set_mark (p)
747 const void *p;
748 {
749 page_entry *entry;
750 unsigned bit, word;
751 unsigned long mask;
752
753 /* Look up the page on which the object is allocated. If the object
754 wasn't allocated by the collector, we'll probably die. */
755 entry = lookup_page_table_entry (p);
756 #ifdef ENABLE_CHECKING
757 if (entry == NULL)
758 abort ();
759 #endif
760
761 /* Calculate the index of the object on the page; this is its bit
762 position in the in_use_p bitmap. */
763 bit = (((const char *) p) - entry->page) >> entry->order;
764 word = bit / HOST_BITS_PER_LONG;
765 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
766
767 /* If the bit was previously set, skip it. */
768 if (entry->in_use_p[word] & mask)
769 return 1;
770
771 /* Otherwise set it, and decrement the free object count. */
772 entry->in_use_p[word] |= mask;
773 entry->num_free_objects -= 1;
774
775 if (GGC_DEBUG_LEVEL >= 4)
776 fprintf (G.debug_file, "Marking %p\n", p);
777
778 return 0;
779 }
780
781 /* Mark P, but check first that it was allocated by the collector. */
782
783 void
784 ggc_mark_if_gcable (p)
785 const void *p;
786 {
787 if (p && ggc_allocated_p (p))
788 ggc_set_mark (p);
789 }
790
791 /* Return the size of the gc-able object P. */
792
793 size_t
794 ggc_get_size (p)
795 const void *p;
796 {
797 page_entry *pe = lookup_page_table_entry (p);
798 return 1 << pe->order;
799 }
800 \f
801 /* Initialize the ggc-page allocator. */
802
803 void
804 init_ggc ()
805 {
806 G.pagesize = getpagesize();
807 G.lg_pagesize = exact_log2 (G.pagesize);
808
809 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
810 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
811 if (G.dev_zero_fd == -1)
812 abort ();
813 #endif
814
815 #if 0
816 G.debug_file = fopen ("ggc-mmap.debug", "w");
817 #else
818 G.debug_file = stdout;
819 #endif
820
821 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
822
823 #ifdef HAVE_MMAP_ANYWHERE
824 /* StunOS has an amazing off-by-one error for the first mmap allocation
825 after fiddling with RLIMIT_STACK. The result, as hard as it is to
826 believe, is an unaligned page allocation, which would cause us to
827 hork badly if we tried to use it. */
828 {
829 char *p = alloc_anon (NULL, G.pagesize);
830 if ((size_t)p & (G.pagesize - 1))
831 {
832 /* How losing. Discard this one and try another. If we still
833 can't get something useful, give up. */
834
835 p = alloc_anon (NULL, G.pagesize);
836 if ((size_t)p & (G.pagesize - 1))
837 abort ();
838 }
839 munmap (p, G.pagesize);
840 }
841 #endif
842
843 empty_string = ggc_alloc_string ("", 0);
844 ggc_add_string_root (&empty_string, 1);
845 }
846
847 /* Increment the `GC context'. Objects allocated in an outer context
848 are never freed, eliminating the need to register their roots. */
849
850 void
851 ggc_push_context ()
852 {
853 ++G.context_depth;
854
855 /* Die on wrap. */
856 if (G.context_depth == 0)
857 abort ();
858 }
859
860 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
861 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
862
863 static void
864 ggc_recalculate_in_use_p (p)
865 page_entry *p;
866 {
867 unsigned int i;
868 size_t num_objects;
869
870 /* Because the past-the-end bit in in_use_p is always set, we
871 pretend there is one additional object. */
872 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
873
874 /* Reset the free object count. */
875 p->num_free_objects = num_objects;
876
877 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
878 for (i = 0;
879 i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
880 sizeof (*p->in_use_p));
881 ++i)
882 {
883 unsigned long j;
884
885 /* Something is in use if it is marked, or if it was in use in a
886 context further down the context stack. */
887 p->in_use_p[i] |= p->save_in_use_p[i];
888
889 /* Decrement the free object count for every object allocated. */
890 for (j = p->in_use_p[i]; j; j >>= 1)
891 p->num_free_objects -= (j & 1);
892 }
893
894 if (p->num_free_objects >= num_objects)
895 abort ();
896 }
897
898 /* Decrement the `GC context'. All objects allocated since the
899 previous ggc_push_context are migrated to the outer context. */
900
901 void
902 ggc_pop_context ()
903 {
904 unsigned order, depth;
905
906 depth = --G.context_depth;
907
908 /* Any remaining pages in the popped context are lowered to the new
909 current context; i.e. objects allocated in the popped context and
910 left over are imported into the previous context. */
911 for (order = 2; order < HOST_BITS_PER_PTR; order++)
912 {
913 page_entry *p;
914
915 for (p = G.pages[order]; p != NULL; p = p->next)
916 {
917 if (p->context_depth > depth)
918 p->context_depth = depth;
919
920 /* If this page is now in the topmost context, and we'd
921 saved its allocation state, restore it. */
922 else if (p->context_depth == depth && p->save_in_use_p)
923 {
924 ggc_recalculate_in_use_p (p);
925 free (p->save_in_use_p);
926 p->save_in_use_p = 0;
927 }
928 }
929 }
930 }
931 \f
932 /* Unmark all objects. */
933
934 static inline void
935 clear_marks ()
936 {
937 unsigned order;
938
939 for (order = 2; order < HOST_BITS_PER_PTR; order++)
940 {
941 size_t num_objects = OBJECTS_PER_PAGE (order);
942 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
943 page_entry *p;
944
945 for (p = G.pages[order]; p != NULL; p = p->next)
946 {
947 #ifdef ENABLE_CHECKING
948 /* The data should be page-aligned. */
949 if ((size_t) p->page & (G.pagesize - 1))
950 abort ();
951 #endif
952
953 /* Pages that aren't in the topmost context are not collected;
954 nevertheless, we need their in-use bit vectors to store GC
955 marks. So, back them up first. */
956 if (p->context_depth < G.context_depth)
957 {
958 if (! p->save_in_use_p)
959 p->save_in_use_p = xmalloc (bitmap_size);
960 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
961 }
962
963 /* Reset the number of free objects and clear the in-use bits.
964 These will be adjusted by ggc_set_mark. */
965 p->num_free_objects = num_objects;
966 memset (p->in_use_p, 0, bitmap_size);
967
968 /* Make sure the one-past-the-end bit is always set. */
969 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
970 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
971 }
972 }
973 }
974
975 /* Free all empty pages. Partially empty pages need no attention
976 because the `mark' bit doubles as an `unused' bit. */
977
978 static inline void
979 sweep_pages ()
980 {
981 unsigned order;
982
983 for (order = 2; order < HOST_BITS_PER_PTR; order++)
984 {
985 /* The last page-entry to consider, regardless of entries
986 placed at the end of the list. */
987 page_entry * const last = G.page_tails[order];
988
989 size_t num_objects = OBJECTS_PER_PAGE (order);
990 size_t live_objects;
991 page_entry *p, *previous;
992 int done;
993
994 p = G.pages[order];
995 if (p == NULL)
996 continue;
997
998 previous = NULL;
999 do
1000 {
1001 page_entry *next = p->next;
1002
1003 /* Loop until all entries have been examined. */
1004 done = (p == last);
1005
1006 /* Add all live objects on this page to the count of
1007 allocated memory. */
1008 live_objects = num_objects - p->num_free_objects;
1009
1010 G.allocated += ((size_t) 1 << order) * live_objects;
1011
1012 /* Only objects on pages in the topmost context should get
1013 collected. */
1014 if (p->context_depth < G.context_depth)
1015 ;
1016
1017 /* Remove the page if it's empty. */
1018 else if (live_objects == 0)
1019 {
1020 if (! previous)
1021 G.pages[order] = next;
1022 else
1023 previous->next = next;
1024
1025 /* Are we removing the last element? */
1026 if (p == G.page_tails[order])
1027 G.page_tails[order] = previous;
1028 free_page (p);
1029 p = previous;
1030 }
1031
1032 /* If the page is full, move it to the end. */
1033 else if (p->num_free_objects == 0)
1034 {
1035 /* Don't move it if it's already at the end. */
1036 if (p != G.page_tails[order])
1037 {
1038 /* Move p to the end of the list. */
1039 p->next = NULL;
1040 G.page_tails[order]->next = p;
1041
1042 /* Update the tail pointer... */
1043 G.page_tails[order] = p;
1044
1045 /* ... and the head pointer, if necessary. */
1046 if (! previous)
1047 G.pages[order] = next;
1048 else
1049 previous->next = next;
1050 p = previous;
1051 }
1052 }
1053
1054 /* If we've fallen through to here, it's a page in the
1055 topmost context that is neither full nor empty. Such a
1056 page must precede pages at lesser context depth in the
1057 list, so move it to the head. */
1058 else if (p != G.pages[order])
1059 {
1060 previous->next = p->next;
1061 p->next = G.pages[order];
1062 G.pages[order] = p;
1063 /* Are we moving the last element? */
1064 if (G.page_tails[order] == p)
1065 G.page_tails[order] = previous;
1066 p = previous;
1067 }
1068
1069 previous = p;
1070 p = next;
1071 }
1072 while (! done);
1073
1074 /* Now, restore the in_use_p vectors for any pages from contexts
1075 other than the current one. */
1076 for (p = G.pages[order]; p; p = p->next)
1077 if (p->context_depth != G.context_depth)
1078 ggc_recalculate_in_use_p (p);
1079 }
1080 }
1081
1082 #ifdef GGC_POISON
1083 /* Clobber all free objects. */
1084
1085 static inline void
1086 poison_pages ()
1087 {
1088 unsigned order;
1089
1090 for (order = 2; order < HOST_BITS_PER_PTR; order++)
1091 {
1092 size_t num_objects = OBJECTS_PER_PAGE (order);
1093 size_t size = (size_t) 1 << order;
1094 page_entry *p;
1095
1096 for (p = G.pages[order]; p != NULL; p = p->next)
1097 {
1098 size_t i;
1099
1100 if (p->context_depth != G.context_depth)
1101 /* Since we don't do any collection for pages in pushed
1102 contexts, there's no need to do any poisoning. And
1103 besides, the IN_USE_P array isn't valid until we pop
1104 contexts. */
1105 continue;
1106
1107 for (i = 0; i < num_objects; i++)
1108 {
1109 size_t word, bit;
1110 word = i / HOST_BITS_PER_LONG;
1111 bit = i % HOST_BITS_PER_LONG;
1112 if (((p->in_use_p[word] >> bit) & 1) == 0)
1113 memset (p->page + i * size, 0xa5, size);
1114 }
1115 }
1116 }
1117 }
1118 #endif
1119
1120 /* Top level mark-and-sweep routine. */
1121
1122 void
1123 ggc_collect ()
1124 {
1125 /* Avoid frequent unnecessary work by skipping collection if the
1126 total allocations haven't expanded much since the last
1127 collection. */
1128 #ifndef GGC_ALWAYS_COLLECT
1129 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1130 return;
1131 #endif
1132
1133 timevar_push (TV_GC);
1134 if (!quiet_flag)
1135 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1136
1137 /* Zero the total allocated bytes. This will be recalculated in the
1138 sweep phase. */
1139 G.allocated = 0;
1140
1141 /* Release the pages we freed the last time we collected, but didn't
1142 reuse in the interim. */
1143 release_pages ();
1144
1145 clear_marks ();
1146 ggc_mark_roots ();
1147
1148 #ifdef GGC_POISON
1149 poison_pages ();
1150 #endif
1151
1152 sweep_pages ();
1153
1154 G.allocated_last_gc = G.allocated;
1155 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1156 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1157
1158 timevar_pop (TV_GC);
1159
1160 if (!quiet_flag)
1161 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1162 }
1163
1164 /* Print allocation statistics. */
1165 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1166 ? (x) \
1167 : ((x) < 1024*1024*10 \
1168 ? (x) / 1024 \
1169 : (x) / (1024*1024))))
1170 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
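/* For example, a value of 5000 prints as "5000" with a ' ' label,
   2000000 prints as "1953k", and 50000000 prints as "47M". */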
1171
1172 void
1173 ggc_print_statistics ()
1174 {
1175 struct ggc_statistics stats;
1176 unsigned int i;
1177 size_t total_overhead = 0;
1178
1179 /* Clear the statistics. */
1180 memset (&stats, 0, sizeof (stats));
1181
1182 /* Make sure collection will really occur. */
1183 G.allocated_last_gc = 0;
1184
1185 /* Collect and print the statistics common across collectors. */
1186 ggc_print_common_statistics (stderr, &stats);
1187
1188 /* Release free pages so that we will not count the bytes allocated
1189 there as part of the total allocated memory. */
1190 release_pages ();
1191
1192 /* Collect some information about the various sizes of
1193 allocation. */
1194 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1195 "Log", "Allocated", "Used", "Overhead");
1196 for (i = 0; i < HOST_BITS_PER_PTR; ++i)
1197 {
1198 page_entry *p;
1199 size_t allocated;
1200 size_t in_use;
1201 size_t overhead;
1202
1203 /* Skip empty entries. */
1204 if (!G.pages[i])
1205 continue;
1206
1207 overhead = allocated = in_use = 0;
1208
1209 /* Figure out the total number of bytes allocated for objects of
1210 this size, and how many of them are actually in use. Also figure
1211 out how much memory the page table is using. */
1212 for (p = G.pages[i]; p; p = p->next)
1213 {
1214 allocated += p->bytes;
1215 in_use +=
1216 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
1217
1218 overhead += (sizeof (page_entry) - sizeof (long)
1219 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1220 }
1221 fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
1222 SCALE (allocated), LABEL (allocated),
1223 SCALE (in_use), LABEL (in_use),
1224 SCALE (overhead), LABEL (overhead));
1225 total_overhead += overhead;
1226 }
1227 fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
1228 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1229 SCALE (G.allocated), LABEL(G.allocated),
1230 SCALE (total_overhead), LABEL (total_overhead));
1231 }