gcc/ggc-page.c
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31
32 #ifdef HAVE_MMAP_ANYWHERE
33 #include <sys/mman.h>
34 #endif
35
36 #ifndef MAP_FAILED
37 #define MAP_FAILED -1
38 #endif
39
40 #if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
41 #define MAP_ANONYMOUS MAP_ANON
42 #endif
43
44 /* Strategy:
45
46 This garbage-collecting allocator allocates objects on one of a set
47 of pages. Each page can allocate objects of a single size only;
48 available sizes are powers of two starting at four bytes. The size
49 of an allocation request is rounded up to the next power of two
50 (`order'), and satisfied from the appropriate page.
51
52 Each page is recorded in a page-entry, which also maintains an
53 in-use bitmap of object positions on the page. This allows the
54 allocation state of a particular object to be flipped without
55 touching the page itself.
56
57 Each page-entry also has a context depth, which is used to track
58 pushing and popping of allocation contexts. Only objects allocated
59 in the current (highest-numbered) context may be collected.
60
61 Page entries are arranged in an array of singly-linked lists. The
62 array is indexed by the allocation size, in bits, of the pages on
63 it; i.e. all pages on a list allocate objects of the same size.
64 Pages are ordered on the list such that all non-full pages precede
65 all full pages, with non-full pages arranged in order of decreasing
66 context depth.
67
68 Empty pages (of all orders) are kept on a single page cache list,
69 and are considered first when new pages are required; they are
70 deallocated at the start of the next collection if they haven't
71 been recycled by then. */
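/* A worked example of the strategy above (illustrative only, assuming a
   4096-byte system page):

     requested size = 10 bytes
     order          = 4                  -- 10 rounds up to 2^4 = 16
     objects/page   = 4096 >> 4 = 256    -- one 16-byte slot each
     bitmap bits    = 256 + 1            -- the extra bit is the
                                            one-past-the-end sentinel
                                            set by alloc_page

   Freeing happens only in ggc_collect: an object not reachable from any
   registered root simply has its in-use bit left clear, and its slot
   becomes available for reuse.  */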
72
73
74 /* Define GGC_POISON to poison memory marked unused by the collector. */
75 #undef GGC_POISON
76
77 /* Define GGC_ALWAYS_COLLECT to perform collection every time
78 ggc_collect is invoked. Otherwise, collection is performed only
79 when a significant amount of memory has been allocated since the
80 last collection. */
81 #undef GGC_ALWAYS_COLLECT
82
83 #ifdef ENABLE_GC_CHECKING
84 #define GGC_POISON
85 #endif
86 #ifdef ENABLE_GC_ALWAYS_COLLECT
87 #define GGC_ALWAYS_COLLECT
88 #endif
89
90 /* Define GGC_DEBUG_LEVEL to print debugging information.
91 0: No debugging output.
92 1: GC statistics only.
93 2: Page-entry allocations/deallocations as well.
94 3: Object allocations as well.
95 4: Object marks as well. */
96 #define GGC_DEBUG_LEVEL (0)
97 \f
98 #ifndef HOST_BITS_PER_PTR
99 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
100 #endif
101
102 /* The "" allocated string. */
103 char *empty_string;
104 \f
105 /* A two-level tree is used to look up the page-entry for a given
106 pointer. Two chunks of the pointer's bits are extracted to index
107 the first and second levels of the tree, as follows:
108
109                                   HOST_PAGE_SIZE_BITS
110                            32            |      |
111       msb +----------------+----+------+------+ lsb
112                               |     |
113                    PAGE_L1_BITS     |
114                                     |
115                              PAGE_L2_BITS
116
117 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
118 pages are aligned on system page boundaries. The next most
119 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
120 index values in the lookup table, respectively.
121
122 For 32-bit architectures and the settings below, there are no
123 leftover bits. For architectures with wider pointers, the lookup
124 tree points to a list of pages, which must be scanned to find the
125 correct one. */
126
127 #define PAGE_L1_BITS (8)
128 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
129 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
130 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
131
132 #define LOOKUP_L1(p) \
133 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
134
135 #define LOOKUP_L2(p) \
136 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
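/* A worked example of the decomposition (illustrative only, assuming a
   32-bit host with 4096-byte pages, so G.lg_pagesize == 12 and
   PAGE_L2_BITS == 12):

     p  = (void *) 0xa01f3468
     L1 = LOOKUP_L1 (p) = (0xa01f3468 >> 24) & 0xff  = 0xa0
     L2 = LOOKUP_L2 (p) = (0xa01f3468 >> 12) & 0xfff = 0x1f3

   so the page-entry for P lives at G.lookup[0xa0][0x1f3]; the low 12
   bits (0x468) are P's offset within that page and never index the
   tree.  */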
137
138
139 /* A page_entry records the status of an allocation page. This
140 structure is dynamically sized to fit the bitmap in_use_p. */
141 typedef struct page_entry
142 {
143 /* The next page-entry with objects of the same size, or NULL if
144 this is the last page-entry. */
145 struct page_entry *next;
146
147 /* The number of bytes allocated. (This will always be a multiple
148 of the host system page size.) */
149 size_t bytes;
150
151 /* The address at which the memory is allocated. */
152 char *page;
153
154 /* Saved in-use bit vector for pages that aren't in the topmost
155 context during collection. */
156 unsigned long *save_in_use_p;
157
158 /* Context depth of this page. */
159 unsigned short context_depth;
160
161 /* The number of free objects remaining on this page. */
162 unsigned short num_free_objects;
163
164 /* A likely candidate for the bit position of a free object for the
165 next allocation from this page. */
166 unsigned short next_bit_hint;
167
168 /* The lg (base-2 log) of the size of objects allocated from this page. */
169 unsigned char order;
170
171 /* A bit vector indicating whether or not objects are in use. The
172 Nth bit is one if the Nth object on this page is allocated. This
173 array is dynamically sized. */
174 unsigned long in_use_p[1];
175 } page_entry;
176
177
178 #if HOST_BITS_PER_PTR <= 32
179
180 /* On 32-bit hosts, we use a two level page table, as pictured above. */
181 typedef page_entry **page_table[PAGE_L1_SIZE];
182
183 #else
184
185 /* On 64-bit hosts, we use the same two level page tables plus a linked
186 list that disambiguates the top 32-bits. There will almost always be
187 exactly one entry in the list. */
188 typedef struct page_table_chain
189 {
190 struct page_table_chain *next;
191 size_t high_bits;
192 page_entry **table[PAGE_L1_SIZE];
193 } *page_table;
194
195 #endif
196
197 /* The rest of the global variables. */
198 static struct globals
199 {
200 /* The Nth element in this array is a page with objects of size 2^N.
201 If there are any pages with free objects, they will be at the
202 head of the list. NULL if there are no page-entries for this
203 object size. */
204 page_entry *pages[HOST_BITS_PER_PTR];
205
206 /* The Nth element in this array is the last page with objects of
207 size 2^N. NULL if there are no page-entries for this object
208 size. */
209 page_entry *page_tails[HOST_BITS_PER_PTR];
210
211 /* Lookup table for associating allocation pages with object addresses. */
212 page_table lookup;
213
214 /* The system's page size. */
215 size_t pagesize;
216 size_t lg_pagesize;
217
218 /* Bytes currently allocated. */
219 size_t allocated;
220
221 /* Bytes currently allocated at the end of the last collection. */
222 size_t allocated_last_gc;
223
224 /* Total amount of memory mapped. */
225 size_t bytes_mapped;
226
227 /* The current depth in the context stack. */
228 unsigned short context_depth;
229
230 /* A file descriptor open to /dev/zero for reading. */
231 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
232 int dev_zero_fd;
233 #endif
234
235 /* A cache of free system pages. */
236 page_entry *free_pages;
237
238 /* The stream used for debugging output. */
239 FILE *debug_file;
240 } G;
241
242
243 /* Compute DIVIDEND / DIVISOR, rounded up. */
244 #define DIV_ROUND_UP(Dividend, Divisor) \
245 (((Dividend) + (Divisor) - 1) / (Divisor))
246
247 /* The number of objects per allocation page, for objects of size
248 2^ORDER. */
249 #define OBJECTS_PER_PAGE(Order) \
250 ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))
251
252 /* The size in bytes required to maintain a bitmap for the objects
253 on a page-entry. */
254 #define BITMAP_SIZE(Num_objects) \
255 (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
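/* For illustration (assuming a 4096-byte page and 32-bit longs):
   OBJECTS_PER_PAGE (4) is 4096 / 16 = 256, and alloc_page then asks for
   BITMAP_SIZE (256 + 1) = DIV_ROUND_UP (257, 32) * sizeof (long)
   = 9 * 4 = 36 bytes of in_use_p, the extra bit holding the
   one-past-the-end sentinel.  For Order >= G.lg_pagesize a page holds
   exactly one object.  */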
256
257 /* Skip garbage collection if the current allocation is not at least
258 this factor times the allocation at the end of the last collection.
259 In other words, total allocation must expand by (this factor minus
260 one) before collection is performed. */
261 #define GGC_MIN_EXPAND_FOR_GC (1.3)
262
263 /* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
264 test from triggering too often when the heap is small. */
265 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
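/* For illustration: with the defaults above, allocated_last_gc starts
   at (and is never allowed to drop below) 4MB, so the first call to
   ggc_collect that actually collects is the one made after total
   allocation reaches 1.3 * 4MB = 5.2MB; if that collection leaves 6MB
   live, the next one triggers at 1.3 * 6MB = 7.8MB, and so on.  */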
266
267 \f
268 static int ggc_allocated_p PARAMS ((const void *));
269 static page_entry *lookup_page_table_entry PARAMS ((const void *));
270 static void set_page_table_entry PARAMS ((void *, page_entry *));
271 static char *alloc_anon PARAMS ((char *, size_t));
272 static struct page_entry * alloc_page PARAMS ((unsigned));
273 static void free_page PARAMS ((struct page_entry *));
274 static void release_pages PARAMS ((void));
275 static void clear_marks PARAMS ((void));
276 static void sweep_pages PARAMS ((void));
277 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
278
279 #ifdef GGC_POISON
280 static void poison_pages PARAMS ((void));
281 #endif
282
283 void debug_print_page_list PARAMS ((int));
284 \f
285 /* Returns non-zero if P was allocated in GC'able memory. */
286
287 static inline int
288 ggc_allocated_p (p)
289 const void *p;
290 {
291 page_entry ***base;
292 size_t L1, L2;
293
294 #if HOST_BITS_PER_PTR <= 32
295 base = &G.lookup[0];
296 #else
297 page_table table = G.lookup;
298 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
299 while (1)
300 {
301 if (table == NULL)
302 return 0;
303 if (table->high_bits == high_bits)
304 break;
305 table = table->next;
306 }
307 base = &table->table[0];
308 #endif
309
310 /* Extract the level 1 and 2 indices. */
311 L1 = LOOKUP_L1 (p);
312 L2 = LOOKUP_L2 (p);
313
314 return base[L1] && base[L1][L2];
315 }
316
317 /* Traverse the page table and find the entry for a page.
318 Die (probably) if the object wasn't allocated via GC. */
319
320 static inline page_entry *
321 lookup_page_table_entry (p)
322 const void *p;
323 {
324 page_entry ***base;
325 size_t L1, L2;
326
327 #if HOST_BITS_PER_PTR <= 32
328 base = &G.lookup[0];
329 #else
330 page_table table = G.lookup;
331 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
332 while (table->high_bits != high_bits)
333 table = table->next;
334 base = &table->table[0];
335 #endif
336
337 /* Extract the level 1 and 2 indices. */
338 L1 = LOOKUP_L1 (p);
339 L2 = LOOKUP_L2 (p);
340
341 return base[L1][L2];
342 }
343
344 /* Set the page table entry for a page. */
345
346 static void
347 set_page_table_entry (p, entry)
348 void *p;
349 page_entry *entry;
350 {
351 page_entry ***base;
352 size_t L1, L2;
353
354 #if HOST_BITS_PER_PTR <= 32
355 base = &G.lookup[0];
356 #else
357 page_table table;
358 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
359 for (table = G.lookup; table; table = table->next)
360 if (table->high_bits == high_bits)
361 goto found;
362
363 /* Not found -- allocate a new table. */
364 table = (page_table) xcalloc (1, sizeof(*table));
365 table->next = G.lookup;
366 table->high_bits = high_bits;
367 G.lookup = table;
368 found:
369 base = &table->table[0];
370 #endif
371
372 /* Extract the level 1 and 2 indices. */
373 L1 = LOOKUP_L1 (p);
374 L2 = LOOKUP_L2 (p);
375
376 if (base[L1] == NULL)
377 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
378
379 base[L1][L2] = entry;
380 }
381
382 /* Prints the page-entry for object size ORDER, for debugging. */
383
384 void
385 debug_print_page_list (order)
386 int order;
387 {
388 page_entry *p;
389 printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
390 p = G.pages[order];
391 while (p != NULL)
392 {
393 printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
394 p = p->next;
395 }
396 printf ("NULL\n");
397 fflush (stdout);
398 }
399
400 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
401 (if non-null). */
402
403 static inline char *
404 alloc_anon (pref, size)
405 char *pref ATTRIBUTE_UNUSED;
406 size_t size;
407 {
408 char *page;
409
410 #ifdef HAVE_MMAP_ANYWHERE
411 #ifdef MAP_ANONYMOUS
412 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
413 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
414 #else
415 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
416 MAP_PRIVATE, G.dev_zero_fd, 0);
417 #endif
418 if (page == (char *) MAP_FAILED)
419 {
420 fputs ("Virtual memory exhausted!\n", stderr);
421 exit(1);
422 }
423 #else
424 #ifdef HAVE_VALLOC
425 page = (char *) valloc (size);
426 if (!page)
427 {
428 fputs ("Virtual memory exhausted!\n", stderr);
429 exit(1);
430 }
431 #endif /* HAVE_VALLOC */
432 #endif /* HAVE_MMAP_ANYWHERE */
433
434 /* Remember that we allocated this memory. */
435 G.bytes_mapped += size;
436
437 return page;
438 }
439
440 /* Allocate a new page for allocating objects of size 2^ORDER,
441 and return an entry for it. The entry is not added to the
442 appropriate page_table list. */
443
444 static inline struct page_entry *
445 alloc_page (order)
446 unsigned order;
447 {
448 struct page_entry *entry, *p, **pp;
449 char *page;
450 size_t num_objects;
451 size_t bitmap_size;
452 size_t page_entry_size;
453 size_t entry_size;
454
455 num_objects = OBJECTS_PER_PAGE (order);
456 bitmap_size = BITMAP_SIZE (num_objects + 1);
457 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
458 entry_size = num_objects * (1 << order);
459
460 entry = NULL;
461 page = NULL;
462
463 /* Check the list of free pages for one we can use. */
464 for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
465 if (p->bytes == entry_size)
466 break;
467
468 if (p != NULL)
469 {
470 /* Recycle the allocated memory from this page ... */
471 *pp = p->next;
472 page = p->page;
473 /* ... and, if possible, the page entry itself. */
474 if (p->order == order)
475 {
476 entry = p;
477 memset (entry, 0, page_entry_size);
478 }
479 else
480 free (p);
481 }
482 else
483 {
484 /* Actually allocate the memory. */
485 page = alloc_anon (NULL, entry_size);
486 }
487
488 if (entry == NULL)
489 entry = (struct page_entry *) xcalloc (1, page_entry_size);
490
491 entry->bytes = entry_size;
492 entry->page = page;
493 entry->context_depth = G.context_depth;
494 entry->order = order;
495 entry->num_free_objects = num_objects;
496 entry->next_bit_hint = 1;
497
498 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
499 increment the hint. */
500 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
501 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
502
503 set_page_table_entry (page, entry);
504
505 if (GGC_DEBUG_LEVEL >= 2)
506 fprintf (G.debug_file,
507 "Allocating page at %p, object size=%d, data %p-%p\n", entry,
508 1 << order, page, page + entry_size - 1);
509
510 return entry;
511 }
512
513 /* For a page that is no longer needed, put it on the free page list. */
514
515 static inline void
516 free_page (entry)
517 page_entry *entry;
518 {
519 if (GGC_DEBUG_LEVEL >= 2)
520 fprintf (G.debug_file,
521 "Deallocating page at %p, data %p-%p\n", entry,
522 entry->page, entry->page + entry->bytes - 1);
523
524 set_page_table_entry (entry->page, NULL);
525
526 entry->next = G.free_pages;
527 G.free_pages = entry;
528 }
529
530 /* Release the free page cache to the system. */
531
532 static void
533 release_pages ()
534 {
535 #ifdef HAVE_MMAP_ANYWHERE
536 page_entry *p, *next;
537 char *start;
538 size_t len;
539
540 p = G.free_pages;
541 if (p == NULL)
542 return;
543
544 next = p->next;
545 start = p->page;
546 len = p->bytes;
547 free (p);
548 p = next;
549
550 while (p)
551 {
552 next = p->next;
553 /* Gather up adjacent pages so they are unmapped together. */
554 if (p->page == start + len)
555 len += p->bytes;
556 else
557 {
558 munmap (start, len);
559 G.bytes_mapped -= len;
560 start = p->page;
561 len = p->bytes;
562 }
563 free (p);
564 p = next;
565 }
566
567 munmap (start, len);
568 G.bytes_mapped -= len;
569 #else
570 #ifdef HAVE_VALLOC
571 page_entry *p, *next;
572
573 for (p = G.free_pages; p ; p = next)
574 {
575 next = p->next;
576 free (p->page);
577 G.bytes_mapped -= p->bytes;
578 free (p);
579 }
580 #endif /* HAVE_VALLOC */
581 #endif /* HAVE_MMAP_ANYWHERE */
582
583 G.free_pages = NULL;
584 }
585
586 /* This table provides a fast way to determine ceil(log_2(size)) for
587 allocation requests. The minimum allocation size is four bytes. */
588
589 static unsigned char const size_lookup[257] =
590 {
591 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
592 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
593 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
594 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
595 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
596 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
597 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
598 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
599 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
600 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
601 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
602 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
603 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
604 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
605 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
606 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
607 8
608 };
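/* For illustration, how a request size becomes an order in ggc_alloc
   below:

     size_lookup[1]   == 2   -- 1..4 bytes share a 4-byte (2^2) slot
     size_lookup[10]  == 4   -- 10 bytes get a 16-byte slot
     size_lookup[256] == 8   -- the table's last entry

   Requests larger than 256 bytes fall through to the loop in ggc_alloc,
   e.g. 300 bytes yields order 9 (a 512-byte slot).  */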
609
610 /* Allocate a chunk of memory of SIZE bytes. The memory is not
611 zeroed; its contents are undefined. */
612
613 void *
614 ggc_alloc (size)
615 size_t size;
616 {
617 unsigned order, word, bit, object_offset;
618 struct page_entry *entry;
619 void *result;
620
621 if (size <= 256)
622 order = size_lookup[size];
623 else
624 {
625 order = 9;
626 while (size > ((size_t) 1 << order))
627 order++;
628 }
629
630 /* If there are non-full pages for this size allocation, they are at
631 the head of the list. */
632 entry = G.pages[order];
633
634 /* If there is no page for this object size, or all pages in this
635 context are full, allocate a new page. */
636 if (entry == NULL || entry->num_free_objects == 0)
637 {
638 struct page_entry *new_entry;
639 new_entry = alloc_page (order);
640
641 /* If this is the only entry, it's also the tail. */
642 if (entry == NULL)
643 G.page_tails[order] = new_entry;
644
645 /* Put new pages at the head of the page list. */
646 new_entry->next = entry;
647 entry = new_entry;
648 G.pages[order] = new_entry;
649
650 /* For a new page, we know the word and bit positions (in the
651 in_use bitmap) of the first available object -- they're zero. */
652 new_entry->next_bit_hint = 1;
653 word = 0;
654 bit = 0;
655 object_offset = 0;
656 }
657 else
658 {
659 /* First try to use the hint left from the previous allocation
660 to locate a clear bit in the in-use bitmap. We've made sure
661 that the one-past-the-end bit is always set, so if the hint
662 has run over, this test will fail. */
663 unsigned hint = entry->next_bit_hint;
664 word = hint / HOST_BITS_PER_LONG;
665 bit = hint % HOST_BITS_PER_LONG;
666
667 /* If the hint didn't work, scan the bitmap from the beginning. */
668 if ((entry->in_use_p[word] >> bit) & 1)
669 {
670 word = bit = 0;
671 while (~entry->in_use_p[word] == 0)
672 ++word;
673 while ((entry->in_use_p[word] >> bit) & 1)
674 ++bit;
675 hint = word * HOST_BITS_PER_LONG + bit;
676 }
677
678 /* Next time, try the next bit. */
679 entry->next_bit_hint = hint + 1;
680
681 object_offset = hint << order;
682 }
683
684 /* Set the in-use bit. */
685 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
686
687 /* Keep a running total of the number of free objects. If this page
688 fills up, we may have to move it to the end of the list if the
689 next page isn't full. If the next page is full, all subsequent
690 pages are full, so there's no need to move it. */
691 if (--entry->num_free_objects == 0
692 && entry->next != NULL
693 && entry->next->num_free_objects > 0)
694 {
695 G.pages[order] = entry->next;
696 entry->next = NULL;
697 G.page_tails[order]->next = entry;
698 G.page_tails[order] = entry;
699 }
700
701 /* Calculate the object's address. */
702 result = entry->page + object_offset;
703
704 #ifdef GGC_POISON
705 /* `Poison' the entire allocated object, including any padding at
706 the end. */
707 memset (result, 0xaf, 1 << order);
708 #endif
709
710 /* Keep track of how many bytes are being allocated. This
711 information is used in deciding when to collect. */
712 G.allocated += (size_t) 1 << order;
713
714 if (GGC_DEBUG_LEVEL >= 3)
715 fprintf (G.debug_file,
716 "Allocating object, requested size=%d, actual=%d at %p on %p\n",
717 (int) size, 1 << order, result, entry);
718
719 return result;
720 }
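/* An illustrative (hypothetical) caller; the structure and its root
   registration are not part of this file.  Note that ggc_alloc does not
   clear the new object, and that anything meant to survive a collection
   must be reachable from a registered root (e.g. via ggc_add_root):

     struct foo { tree decl; struct foo *next; };
     static struct foo *foo_list;    -- registered as a root elsewhere

     struct foo *f = (struct foo *) ggc_alloc (sizeof (struct foo));
     memset (f, 0, sizeof (struct foo));
     f->next = foo_list;
     foo_list = f;
*/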
721
722 /* If P is not marked, mark it and return zero; otherwise return non-zero.
723 P must have been allocated by the GC allocator; it mustn't point to
724 static objects, stack variables, or memory allocated with malloc. */
725
726 int
727 ggc_set_mark (p)
728 const void *p;
729 {
730 page_entry *entry;
731 unsigned bit, word;
732 unsigned long mask;
733
734 /* Look up the page on which the object is allocated. If the object
735 wasn't allocated by the collector, we'll probably die. */
736 entry = lookup_page_table_entry (p);
737 #ifdef ENABLE_CHECKING
738 if (entry == NULL)
739 abort ();
740 #endif
741
742 /* Calculate the index of the object on the page; this is its bit
743 position in the in_use_p bitmap. */
744 bit = (((const char *) p) - entry->page) >> entry->order;
745 word = bit / HOST_BITS_PER_LONG;
746 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
747
748 /* If the bit was previously set, skip it. */
749 if (entry->in_use_p[word] & mask)
750 return 1;
751
752 /* Otherwise set it, and decrement the free object count. */
753 entry->in_use_p[word] |= mask;
754 entry->num_free_objects -= 1;
755
756 G.allocated += (size_t) 1 << entry->order;
757
758 if (GGC_DEBUG_LEVEL >= 4)
759 fprintf (G.debug_file, "Marking %p\n", p);
760
761 return 0;
762 }
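/* An illustrative (hypothetical) mark routine for a chained root; the
   node type and the ggc_mark_tree call on its sub-object are only
   examples.  The return value of ggc_set_mark both terminates cycles
   and stops the walk at structure that has already been visited:

     static void
     mark_foo_list (arg)
          void *arg;
     {
       struct foo *f = * (struct foo **) arg;

       while (f && ! ggc_set_mark (f))
         {
           ggc_mark_tree (f->decl);
           f = f->next;
         }
     }
*/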
763
764 /* Mark P, but check first that it was allocated by the collector. */
765
766 void
767 ggc_mark_if_gcable (p)
768 const void *p;
769 {
770 if (p && ggc_allocated_p (p))
771 ggc_set_mark (p);
772 }
773
774 /* Return the size of the gc-able object P. */
775
776 size_t
777 ggc_get_size (p)
778 const void *p;
779 {
780 page_entry *pe = lookup_page_table_entry (p);
781 return 1 << pe->order;
782 }
783 \f
784 /* Initialize the ggc-mmap allocator. */
785
786 void
787 init_ggc ()
788 {
789 G.pagesize = getpagesize();
790 G.lg_pagesize = exact_log2 (G.pagesize);
791
792 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
793 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
794 if (G.dev_zero_fd == -1)
795 abort ();
796 #endif
797
798 #if 0
799 G.debug_file = fopen ("ggc-mmap.debug", "w");
800 #else
801 G.debug_file = stdout;
802 #endif
803
804 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
805
806 #ifdef HAVE_MMAP_ANYWHERE
807 /* StunOS has an amazing off-by-one error for the first mmap allocation
808 after fiddling with RLIMIT_STACK. The result, as hard as it is to
809 believe, is an unaligned page allocation, which would cause us to
810 hork badly if we tried to use it. */
811 {
812 char *p = alloc_anon (NULL, G.pagesize);
813 if ((size_t)p & (G.pagesize - 1))
814 {
815 /* How losing. Discard this one and try another. If we still
816 can't get something useful, give up. */
817
818 p = alloc_anon (NULL, G.pagesize);
819 if ((size_t)p & (G.pagesize - 1))
820 abort ();
821 }
822 munmap (p, G.pagesize);
823 }
824 #endif
825
826 empty_string = ggc_alloc_string ("", 0);
827 ggc_add_string_root (&empty_string, 1);
828 }
829
830 /* Increment the `GC context'. Objects allocated in an outer context
831 are never freed, eliminating the need to register their roots. */
832
833 void
834 ggc_push_context ()
835 {
836 ++G.context_depth;
837
838 /* Die on wrap. */
839 if (G.context_depth == 0)
840 abort ();
841 }
842
843 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
844 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
845
846 static void
847 ggc_recalculate_in_use_p (p)
848 page_entry *p;
849 {
850 unsigned int i;
851 size_t num_objects;
852
853 /* Because the past-the-end bit in in_use_p is always set, we
854 pretend there is one additional object. */
855 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
856
857 /* Reset the free object count. */
858 p->num_free_objects = num_objects;
859
860 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
861 for (i = 0;
862 i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
863 sizeof (*p->in_use_p));
864 ++i)
865 {
866 unsigned long j;
867
868 /* Something is in use if it is marked, or if it was in use in a
869 context further down the context stack. */
870 p->in_use_p[i] |= p->save_in_use_p[i];
871
872 /* Decrement the free object count for every object allocated. */
873 for (j = p->in_use_p[i]; j; j >>= 1)
874 p->num_free_objects -= (j & 1);
875 }
876
877 if (p->num_free_objects >= num_objects)
878 abort ();
879 }
880
881 /* Decrement the `GC context'. All objects allocated since the
882 previous ggc_push_context are migrated to the outer context. */
883
884 void
885 ggc_pop_context ()
886 {
887 unsigned order, depth;
888
889 depth = --G.context_depth;
890
891 /* Any remaining pages in the popped context are lowered to the new
892 current context; i.e. objects allocated in the popped context and
893 left over are imported into the previous context. */
894 for (order = 2; order < HOST_BITS_PER_PTR; order++)
895 {
896 page_entry *p;
897
898 for (p = G.pages[order]; p != NULL; p = p->next)
899 {
900 if (p->context_depth > depth)
901 p->context_depth = depth;
902
903 /* If this page is now in the topmost context, and we'd
904 saved its allocation state, restore it. */
905 else if (p->context_depth == depth && p->save_in_use_p)
906 {
907 ggc_recalculate_in_use_p (p);
908 free (p->save_in_use_p);
909 p->save_in_use_p = 0;
910 }
911 }
912 }
913 }
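/* An illustrative use of the context machinery (hypothetical caller).
   Objects allocated before the push sit in an outer context while it is
   active, so an intervening collection cannot free them even if no root
   reaches them:

     tmp = (char *) ggc_alloc (128);
     ggc_push_context ();
     ... allocate and collect freely; TMP survives ...
     ggc_collect ();
     ggc_pop_context ();
     ... TMP is again collectable once it is unreachable ...
*/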
914 \f
915 /* Unmark all objects. */
916
917 static inline void
918 clear_marks ()
919 {
920 unsigned order;
921
922 for (order = 2; order < HOST_BITS_PER_PTR; order++)
923 {
924 size_t num_objects = OBJECTS_PER_PAGE (order);
925 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
926 page_entry *p;
927
928 for (p = G.pages[order]; p != NULL; p = p->next)
929 {
930 #ifdef ENABLE_CHECKING
931 /* The data should be page-aligned. */
932 if ((size_t) p->page & (G.pagesize - 1))
933 abort ();
934 #endif
935
936 /* Pages that aren't in the topmost context are not collected;
937 nevertheless, we need their in-use bit vectors to store GC
938 marks. So, back them up first. */
939 if (p->context_depth < G.context_depth)
940 {
941 if (! p->save_in_use_p)
942 p->save_in_use_p = xmalloc (bitmap_size);
943 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
944 }
945
946 /* Reset the number of free objects and clear the in-use bits.
947 These will be adjusted by ggc_set_mark. */
948 p->num_free_objects = num_objects;
949 memset (p->in_use_p, 0, bitmap_size);
950
951 /* Make sure the one-past-the-end bit is always set. */
952 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
953 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
954 }
955 }
956 }
957
958 /* Free all empty pages. Partially empty pages need no attention
959 because the `mark' bit doubles as an `unused' bit. */
960
961 static inline void
962 sweep_pages ()
963 {
964 unsigned order;
965
966 for (order = 2; order < HOST_BITS_PER_PTR; order++)
967 {
968 /* The last page-entry to consider, regardless of entries
969 placed at the end of the list. */
970 page_entry * const last = G.page_tails[order];
971
972 size_t num_objects = OBJECTS_PER_PAGE (order);
973 page_entry *p, *previous;
974 int done;
975
976 p = G.pages[order];
977 if (p == NULL)
978 continue;
979
980 previous = NULL;
981 do
982 {
983 page_entry *next = p->next;
984
985 /* Loop until all entries have been examined. */
986 done = (p == last);
987
988 /* Only objects on pages in the topmost context should get
989 collected. */
990 if (p->context_depth < G.context_depth)
991 ;
992
993 /* Remove the page if it's empty. */
994 else if (p->num_free_objects == num_objects)
995 {
996 if (! previous)
997 G.pages[order] = next;
998 else
999 previous->next = next;
1000
1001 /* Are we removing the last element? */
1002 if (p == G.page_tails[order])
1003 G.page_tails[order] = previous;
1004 free_page (p);
1005 p = previous;
1006 }
1007
1008 /* If the page is full, move it to the end. */
1009 else if (p->num_free_objects == 0)
1010 {
1011 /* Don't move it if it's already at the end. */
1012 if (p != G.page_tails[order])
1013 {
1014 /* Move p to the end of the list. */
1015 p->next = NULL;
1016 G.page_tails[order]->next = p;
1017
1018 /* Update the tail pointer... */
1019 G.page_tails[order] = p;
1020
1021 /* ... and the head pointer, if necessary. */
1022 if (! previous)
1023 G.pages[order] = next;
1024 else
1025 previous->next = next;
1026 p = previous;
1027 }
1028 }
1029
1030 /* If we've fallen through to here, it's a page in the
1031 topmost context that is neither full nor empty. Such a
1032 page must precede pages at lesser context depth in the
1033 list, so move it to the head. */
1034 else if (p != G.pages[order])
1035 {
1036 previous->next = p->next;
1037 p->next = G.pages[order];
1038 G.pages[order] = p;
1039 /* Are we moving the last element? */
1040 if (G.page_tails[order] == p)
1041 G.page_tails[order] = previous;
1042 p = previous;
1043 }
1044
1045 previous = p;
1046 p = next;
1047 }
1048 while (! done);
1049
1050 /* Now, restore the in_use_p vectors for any pages from contexts
1051 other than the current one. */
1052 for (p = G.pages[order]; p; p = p->next)
1053 if (p->context_depth != G.context_depth)
1054 ggc_recalculate_in_use_p (p);
1055 }
1056 }
1057
1058 #ifdef GGC_POISON
1059 /* Clobber all free objects. */
1060
1061 static inline void
1062 poison_pages ()
1063 {
1064 unsigned order;
1065
1066 for (order = 2; order < HOST_BITS_PER_PTR; order++)
1067 {
1068 size_t num_objects = OBJECTS_PER_PAGE (order);
1069 size_t size = (size_t) 1 << order;
1070 page_entry *p;
1071
1072 for (p = G.pages[order]; p != NULL; p = p->next)
1073 {
1074 size_t i;
1075
1076 if (p->context_depth != G.context_depth)
1077 /* Since we don't do any collection for pages in pushed
1078 contexts, there's no need to do any poisoning. And
1079 besides, the IN_USE_P array isn't valid until we pop
1080 contexts. */
1081 continue;
1082
1083 for (i = 0; i < num_objects; i++)
1084 {
1085 size_t word, bit;
1086 word = i / HOST_BITS_PER_LONG;
1087 bit = i % HOST_BITS_PER_LONG;
1088 if (((p->in_use_p[word] >> bit) & 1) == 0)
1089 memset (p->page + i * size, 0xa5, size);
1090 }
1091 }
1092 }
1093 }
1094 #endif
1095
1096 /* Top level mark-and-sweep routine. */
1097
1098 void
1099 ggc_collect ()
1100 {
1101 /* Avoid frequent unnecessary work by skipping collection if the
1102 total allocations haven't expanded much since the last
1103 collection. */
1104 #ifndef GGC_ALWAYS_COLLECT
1105 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1106 return;
1107 #endif
1108
1109 timevar_push (TV_GC);
1110 if (!quiet_flag)
1111 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1112
1113 /* Zero the total allocated bytes. We'll reaccumulate this while
1114 marking. */
1115 G.allocated = 0;
1116
1117 /* Release the pages we freed the last time we collected, but didn't
1118 reuse in the interim. */
1119 release_pages ();
1120
1121 clear_marks ();
1122 ggc_mark_roots ();
1123
1124 #ifdef GGC_POISON
1125 poison_pages ();
1126 #endif
1127
1128 sweep_pages ();
1129
1130 G.allocated_last_gc = G.allocated;
1131 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1132 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1133
1134 timevar_pop (TV_GC);
1135
1136 if (!quiet_flag)
1137 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1138 }
1139
1140 /* Print allocation statistics. */
1141
1142 void
1143 ggc_page_print_statistics ()
1144 {
1145 struct ggc_statistics stats;
1146 unsigned int i;
1147
1148 /* Clear the statistics. */
1149 memset (&stats, 0, sizeof (stats));
1150
1151 /* Make sure collection will really occur. */
1152 G.allocated_last_gc = 0;
1153
1154 /* Collect and print the statistics common across collectors. */
1155 ggc_print_statistics (stderr, &stats);
1156
1157 /* Release free pages so that we will not count the bytes allocated
1158 there as part of the total allocated memory. */
1159 release_pages ();
1160
1161 /* Collect some information about the various sizes of
1162 allocation. */
1163 fprintf (stderr, "\n%-4s%-16s%-16s\n", "Log", "Allocated", "Used");
1164 for (i = 0; i < HOST_BITS_PER_PTR; ++i)
1165 {
1166 page_entry *p;
1167 size_t allocated;
1168 size_t in_use;
1169
1170 /* Skip empty entries. */
1171 if (!G.pages[i])
1172 continue;
1173
1174 allocated = in_use = 0;
1175
1176 /* Figure out the total number of bytes allocated for objects of
1177 this size, and how many of them are actually in use. */
1178 for (p = G.pages[i]; p; p = p->next)
1179 {
1180 allocated += p->bytes;
1181 in_use +=
1182 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
1183 }
1184 fprintf (stderr, "%-3d %-15lu %-15lu\n", i,
1185 (unsigned long) allocated, (unsigned long) in_use);
1186 }
1187
1188 /* Print out some global information. */
1189 fprintf (stderr, "\nTotal bytes marked: %lu\n",
1190 (unsigned long) G.allocated);
1191 fprintf (stderr, "Total bytes mapped: %lu\n",
1192 (unsigned long) G.bytes_mapped);
1193 }