1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31
32 #ifdef HAVE_MMAP_ANYWHERE
33 #include <sys/mman.h>
34 #endif
35
36 #ifndef MAP_FAILED
37 #define MAP_FAILED -1
38 #endif
39
40 #if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
41 #define MAP_ANONYMOUS MAP_ANON
42 #endif
43
44 /* Strategy:
45
46 This garbage-collecting allocator allocates objects on one of a set
47 of pages. Each page can allocate objects of a single size only;
48 available sizes are powers of two starting at four bytes. The size
49 of an allocation request is rounded up to the next power of two
50 (`order'), and satisfied from the appropriate page.
51
52 Each page is recorded in a page-entry, which also maintains an
53 in-use bitmap of object positions on the page. This allows the
54 allocation state of a particular object to be flipped without
55 touching the page itself.
56
57 Each page-entry also has a context depth, which is used to track
58 pushing and popping of allocation contexts. Only objects allocated
59 in the current (highest-numbered) context may be collected.
60
61 Page entries are arranged in an array of singly-linked lists. The
62 array is indexed by the allocation order of the pages on
63 it; i.e. all pages on a list allocate objects of the same size.
64 Pages are ordered on the list such that all non-full pages precede
65 all full pages, with non-full pages arranged in order of decreasing
66 context depth.
67
68 Empty pages (of all orders) are kept on a single page cache list,
69 and are considered first when new pages are required; they are
70 deallocated at the start of the next collection if they haven't
71 been recycled by then. */
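/* Illustrative sketch only -- this helper is hypothetical and is not used
   by the allocator; it merely restates the rounding rule described above
   for the power-of-two orders (the extra orders defined further down are
   ignored here).  */
#if 0
static unsigned
example_order_for_size (size)
     size_t size;
{
  /* Round SIZE up to the next power of two, starting at four bytes
     (order 2); e.g. a 20-byte request yields order 5 (32 bytes).  */
  unsigned order = 2;
  while (((size_t) 1 << order) < size)
    ++order;
  return order;
}
#endif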
72
73
74 /* Define GGC_POISON to poison memory marked unused by the collector. */
75 #undef GGC_POISON
76
77 /* Define GGC_ALWAYS_COLLECT to perform collection every time
78 ggc_collect is invoked. Otherwise, collection is performed only
79 when a significant amount of memory has been allocated since the
80 last collection. */
81 #undef GGC_ALWAYS_COLLECT
82
83 #ifdef ENABLE_GC_CHECKING
84 #define GGC_POISON
85 #endif
86 #ifdef ENABLE_GC_ALWAYS_COLLECT
87 #define GGC_ALWAYS_COLLECT
88 #endif
89
90 /* Define GGC_DEBUG_LEVEL to print debugging information.
91 0: No debugging output.
92 1: GC statistics only.
93 2: Page-entry allocations/deallocations as well.
94 3: Object allocations as well.
95 4: Object marks as well. */
96 #define GGC_DEBUG_LEVEL (0)
97 \f
98 #ifndef HOST_BITS_PER_PTR
99 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
100 #endif
101
102 \f
103 /* A two-level tree is used to look up the page-entry for a given
104 pointer. Two chunks of the pointer's bits are extracted to index
105 the first and second levels of the tree, as follows:
106
107                                       HOST_PAGE_SIZE_BITS
108                           32          |      |
109        msb +----------------+----+------+------+ lsb
110                             |    |  |
111                  PAGE_L1_BITS    |  |
112                                     |
113                               PAGE_L2_BITS
114
115 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
116 pages are aligned on system page boundaries. The next most
117 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
118 index values in the lookup table, respectively.
119
120 For 32-bit architectures and the settings below, there are no
121 leftover bits. For architectures with wider pointers, the lookup
122 tree points to a list of pages, which must be scanned to find the
123 correct one. */
124
125 #define PAGE_L1_BITS (8)
126 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
127 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
128 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
129
130 #define LOOKUP_L1(p) \
131 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
132
133 #define LOOKUP_L2(p) \
134 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
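/* Worked example (assuming a 4096-byte system page, so G.lg_pagesize is 12
   and PAGE_L2_BITS is 32 - 8 - 12 = 12): for p == 0x0804a123,
   LOOKUP_L1 (p) == (0x0804a123 >> 24) & 0xff == 0x08 and
   LOOKUP_L2 (p) == (0x0804a123 >> 12) & 0xfff == 0x04a; the low twelve
   bits (0x123) are the offset within the page and are ignored.  */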
135
136 /* The number of objects per allocation page, for objects on a page of
137 the indicated ORDER. */
138 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
139
140 /* The size of an object on a page of the indicated ORDER. */
141 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
142
143 /* The number of extra orders, not corresponding to power-of-two sized
144 objects. */
145
146 #define NUM_EXTRA_ORDERS \
147 (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))
148
149 /* The Ith entry is the maximum size of an object to be stored in the
150 Ith extra order. Adding a new entry to this array is the *only*
151 thing you need to do to add a new special allocation size. */
152
153 static const size_t extra_order_size_table[] = {
154 sizeof (struct tree_decl),
155 sizeof (struct tree_list)
156 };
157
158 /* The total number of orders. */
159
160 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
161
162 /* We use this structure to determine the alignment required for
163 allocations. For power-of-two sized allocations, that's not a
164 problem, but it does matter for odd-sized allocations. */
165
166 struct max_alignment {
167 char c;
168 union {
169 HOST_WIDEST_INT i;
170 #ifdef HAVE_LONG_DOUBLE
171 long double d;
172 #else
173 double d;
174 #endif
175 } u;
176 };
177
178 /* The biggest alignment required. */
179
180 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
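/* Worked example (host-dependent, for illustration only): if the widest
   union member above needs 8-byte alignment, the compiler places U at
   offset 8 in struct max_alignment, so MAX_ALIGNMENT is 8 and init_ggc
   rounds each odd-sized extra order up to a multiple of 8, e.g.
   CEIL (20, 8) * 8 == 24.  */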
181
182 /* The Ith entry is the number of objects on a page of order I. */
183
184 static unsigned objects_per_page_table[NUM_ORDERS];
185
186 /* The Ith entry is the size of an object on a page of order I. */
187
188 static size_t object_size_table[NUM_ORDERS];
189
190 /* A page_entry records the status of an allocation page. This
191 structure is dynamically sized to fit the bitmap in_use_p. */
192 typedef struct page_entry
193 {
194 /* The next page-entry with objects of the same size, or NULL if
195 this is the last page-entry. */
196 struct page_entry *next;
197
198 /* The number of bytes allocated. (This will always be a multiple
199 of the host system page size.) */
200 size_t bytes;
201
202 /* The address at which the memory is allocated. */
203 char *page;
204
205 /* Saved in-use bit vector for pages that aren't in the topmost
206 context during collection. */
207 unsigned long *save_in_use_p;
208
209 /* Context depth of this page. */
210 unsigned short context_depth;
211
212 /* The number of free objects remaining on this page. */
213 unsigned short num_free_objects;
214
215 /* A likely candidate for the bit position of a free object for the
216 next allocation from this page. */
217 unsigned short next_bit_hint;
218
219 /* The order of the objects allocated from this page (see OBJECT_SIZE). */
220 unsigned char order;
221
222 /* A bit vector indicating whether or not objects are in use. The
223 Nth bit is one if the Nth object on this page is allocated. This
224 array is dynamically sized. */
225 unsigned long in_use_p[1];
226 } page_entry;
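/* Note: IN_USE_P above is declared with one element but is used as a
   variable-length trailer (the "struct hack"); alloc_page sizes each entry
   as sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1),
   so the bitmap -- including its one-past-the-end sentinel bit -- lives in
   the same allocation as the entry itself.  */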
227
228
229 #if HOST_BITS_PER_PTR <= 32
230
231 /* On 32-bit hosts, we use a two-level page table, as pictured above. */
232 typedef page_entry **page_table[PAGE_L1_SIZE];
233
234 #else
235
236 /* On 64-bit hosts, we use the same two-level page tables plus a linked
237 list that disambiguates the top 32 bits. There will almost always be
238 exactly one entry in the list. */
239 typedef struct page_table_chain
240 {
241 struct page_table_chain *next;
242 size_t high_bits;
243 page_entry **table[PAGE_L1_SIZE];
244 } *page_table;
245
246 #endif
247
248 /* The rest of the global variables. */
249 static struct globals
250 {
251 /* The Nth element in this array is a page with objects of size OBJECT_SIZE (N).
252 If there are any pages with free objects, they will be at the
253 head of the list. NULL if there are no page-entries for this
254 object size. */
255 page_entry *pages[NUM_ORDERS];
256
257 /* The Nth element in this array is the last page with objects of
258 size OBJECT_SIZE (N). NULL if there are no page-entries for this object
259 size. */
260 page_entry *page_tails[NUM_ORDERS];
261
262 /* Lookup table for associating allocation pages with object addresses. */
263 page_table lookup;
264
265 /* The system's page size. */
266 size_t pagesize;
267 size_t lg_pagesize;
268
269 /* Bytes currently allocated. */
270 size_t allocated;
271
272 /* Bytes currently allocated at the end of the last collection. */
273 size_t allocated_last_gc;
274
275 /* Total amount of memory mapped. */
276 size_t bytes_mapped;
277
278 /* The current depth in the context stack. */
279 unsigned short context_depth;
280
281 /* A file descriptor open to /dev/zero for reading. */
282 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
283 int dev_zero_fd;
284 #endif
285
286 /* A cache of free system pages. */
287 page_entry *free_pages;
288
289 /* The file descriptor for debugging output. */
290 FILE *debug_file;
291 } G;
292
293 /* The size in bytes required to maintain a bitmap for the objects
294 on a page-entry. */
295 #define BITMAP_SIZE(Num_objects) \
296 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
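/* Worked example (assuming 4096-byte pages and 32-bit longs): a page of
   32-byte objects holds 4096 / 32 == 128 of them, and alloc_page asks for
   BITMAP_SIZE (128 + 1) == CEIL (129, 32) * sizeof (long) == 5 * 4 == 20
   bytes of bitmap, the extra bit being the one-past-the-end sentinel.  */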
297
298 /* Skip garbage collection if the current allocation is not at least
299 this factor times the allocation at the end of the last collection.
300 In other words, total allocation must expand by (this factor minus
301 one) before collection is performed. */
302 #define GGC_MIN_EXPAND_FOR_GC (1.3)
303
304 /* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
305 test from triggering too often when the heap is small. */
306 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
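/* Example of how the two values above interact: allocated_last_gc starts at
   the 4MB floor and is clamped to it after each collection, so the first
   ggc_collect call does no work until roughly 1.3 * 4MB (about 5.2MB) is
   allocated; if a collection leaves 6MB live, the next one triggers near
   7.8MB.  */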
307
308 /* Allocate pages in chunks of this size, to throttle calls to mmap.
309 The first page is used, the rest go onto the free list. */
310 #define GGC_QUIRE_SIZE 16
311
312 \f
313 static int ggc_allocated_p PARAMS ((const void *));
314 static page_entry *lookup_page_table_entry PARAMS ((const void *));
315 static void set_page_table_entry PARAMS ((void *, page_entry *));
316 static char *alloc_anon PARAMS ((char *, size_t));
317 static struct page_entry * alloc_page PARAMS ((unsigned));
318 static void free_page PARAMS ((struct page_entry *));
319 static void release_pages PARAMS ((void));
320 static void clear_marks PARAMS ((void));
321 static void sweep_pages PARAMS ((void));
322 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
323
324 #ifdef GGC_POISON
325 static void poison_pages PARAMS ((void));
326 #endif
327
328 void debug_print_page_list PARAMS ((int));
329 \f
330 /* Returns non-zero if P was allocated in GC'able memory. */
331
332 static inline int
333 ggc_allocated_p (p)
334 const void *p;
335 {
336 page_entry ***base;
337 size_t L1, L2;
338
339 #if HOST_BITS_PER_PTR <= 32
340 base = &G.lookup[0];
341 #else
342 page_table table = G.lookup;
343 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
344 while (1)
345 {
346 if (table == NULL)
347 return 0;
348 if (table->high_bits == high_bits)
349 break;
350 table = table->next;
351 }
352 base = &table->table[0];
353 #endif
354
355 /* Extract the level 1 and 2 indices. */
356 L1 = LOOKUP_L1 (p);
357 L2 = LOOKUP_L2 (p);
358
359 return base[L1] && base[L1][L2];
360 }
361
362 /* Traverse the page table and find the entry for a page.
363 Die (probably) if the object wasn't allocated via GC. */
364
365 static inline page_entry *
366 lookup_page_table_entry(p)
367 const void *p;
368 {
369 page_entry ***base;
370 size_t L1, L2;
371
372 #if HOST_BITS_PER_PTR <= 32
373 base = &G.lookup[0];
374 #else
375 page_table table = G.lookup;
376 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
377 while (table->high_bits != high_bits)
378 table = table->next;
379 base = &table->table[0];
380 #endif
381
383 /* Extract the level 1 and 2 indices. */
383 L1 = LOOKUP_L1 (p);
384 L2 = LOOKUP_L2 (p);
385
386 return base[L1][L2];
387 }
388
389 /* Set the page table entry for a page. */
390
391 static void
392 set_page_table_entry(p, entry)
393 void *p;
394 page_entry *entry;
395 {
396 page_entry ***base;
397 size_t L1, L2;
398
399 #if HOST_BITS_PER_PTR <= 32
400 base = &G.lookup[0];
401 #else
402 page_table table;
403 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
404 for (table = G.lookup; table; table = table->next)
405 if (table->high_bits == high_bits)
406 goto found;
407
408 /* Not found -- allocate a new table. */
409 table = (page_table) xcalloc (1, sizeof(*table));
410 table->next = G.lookup;
411 table->high_bits = high_bits;
412 G.lookup = table;
413 found:
414 base = &table->table[0];
415 #endif
416
417 /* Extract the level 1 and 2 indices. */
418 L1 = LOOKUP_L1 (p);
419 L2 = LOOKUP_L2 (p);
420
421 if (base[L1] == NULL)
422 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
423
424 base[L1][L2] = entry;
425 }
426
427 /* Prints the page-entry for object size ORDER, for debugging. */
428
429 void
430 debug_print_page_list (order)
431 int order;
432 {
433 page_entry *p;
434 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
435 (PTR) G.page_tails[order]);
436 p = G.pages[order];
437 while (p != NULL)
438 {
439 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
440 p->num_free_objects);
441 p = p->next;
442 }
443 printf ("NULL\n");
444 fflush (stdout);
445 }
446
447 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
448 (if non-null). */
449
450 static inline char *
451 alloc_anon (pref, size)
452 char *pref ATTRIBUTE_UNUSED;
453 size_t size;
454 {
455 char *page;
456
457 #ifdef HAVE_MMAP_ANYWHERE
458 #ifdef MAP_ANONYMOUS
459 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
460 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
461 #else
462 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
463 MAP_PRIVATE, G.dev_zero_fd, 0);
464 #endif
465 if (page == (char *) MAP_FAILED)
466 {
467 fputs ("Virtual memory exhausted!\n", stderr);
468 exit(1);
469 }
470 #else
471 #ifdef HAVE_VALLOC
472 page = (char *) valloc (size);
473 if (!page)
474 {
475 fputs ("Virtual memory exhausted!\n", stderr);
476 exit(1);
477 }
478 #endif /* HAVE_VALLOC */
479 #endif /* HAVE_MMAP_ANYWHERE */
480
481 /* Remember that we allocated this memory. */
482 G.bytes_mapped += size;
483
484 return page;
485 }
486
487 /* Allocate a new page for allocating objects of size 2^ORDER,
488 and return an entry for it. The entry is not added to the
489 appropriate G.pages list; the caller does that. */
490
491 static inline struct page_entry *
492 alloc_page (order)
493 unsigned order;
494 {
495 struct page_entry *entry, *p, **pp;
496 char *page;
497 size_t num_objects;
498 size_t bitmap_size;
499 size_t page_entry_size;
500 size_t entry_size;
501
502 num_objects = OBJECTS_PER_PAGE (order);
503 bitmap_size = BITMAP_SIZE (num_objects + 1);
504 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
505 entry_size = num_objects * OBJECT_SIZE (order);
506
507 entry = NULL;
508 page = NULL;
509
510 /* Check the list of free pages for one we can use. */
511 for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
512 if (p->bytes == entry_size)
513 break;
514
515 if (p != NULL)
516 {
517 /* Recycle the allocated memory from this page ... */
518 *pp = p->next;
519 page = p->page;
520 /* ... and, if possible, the page entry itself. */
521 if (p->order == order)
522 {
523 entry = p;
524 memset (entry, 0, page_entry_size);
525 }
526 else
527 free (p);
528 }
529 #ifdef HAVE_MMAP_ANYWHERE
530 else if (entry_size == G.pagesize)
531 {
532 /* We want just one page. Allocate a bunch of them and put the
533 extras on the freelist. (Can only do this optimization with
534 mmap for backing store.) */
535 struct page_entry *e, *f = G.free_pages;
536 int i;
537
538 page = alloc_anon (NULL, entry_size * GGC_QUIRE_SIZE);
539 /* This loop counts down so that the chain will be in ascending
540 memory order. */
541 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
542 {
543 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
544 e->bytes = entry_size;
545 e->page = page + i*entry_size;
546 e->next = f;
547 f = e;
548 }
549 G.free_pages = f;
550 }
551 #endif
552 else
553 page = alloc_anon (NULL, entry_size);
554
555 if (entry == NULL)
556 entry = (struct page_entry *) xcalloc (1, page_entry_size);
557
558 entry->bytes = entry_size;
559 entry->page = page;
560 entry->context_depth = G.context_depth;
561 entry->order = order;
562 entry->num_free_objects = num_objects;
563 entry->next_bit_hint = 1;
564
565 /* Set the one-past-the-end in-use bit. This acts as a sentinel as we
566 increment the hint. */
567 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
568 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
569
570 set_page_table_entry (page, entry);
571
572 if (GGC_DEBUG_LEVEL >= 2)
573 fprintf (G.debug_file,
574 "Allocating page at %p, object size=%d, data %p-%p\n",
575 (PTR) entry, (int) OBJECT_SIZE (order), page, page + entry_size - 1);
576
577 return entry;
578 }
579
580 /* For a page that is no longer needed, put it on the free page list. */
581
582 static inline void
583 free_page (entry)
584 page_entry *entry;
585 {
586 if (GGC_DEBUG_LEVEL >= 2)
587 fprintf (G.debug_file,
588 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
589 entry->page, entry->page + entry->bytes - 1);
590
591 set_page_table_entry (entry->page, NULL);
592
593 entry->next = G.free_pages;
594 G.free_pages = entry;
595 }
596
597 /* Release the free page cache to the system. */
598
599 static void
600 release_pages ()
601 {
602 page_entry *p, *next;
603
604 #ifdef HAVE_MMAP_ANYWHERE
605 char *start;
606 size_t len;
607
608 /* Gather up adjacent pages so they are unmapped together. */
609 p = G.free_pages;
610
611 while (p)
612 {
613 start = p->page;
614 next = p->next;
615 len = p->bytes;
616 free (p);
617 p = next;
618
619 while (p && p->page == start + len)
620 {
621 next = p->next;
622 len += p->bytes;
623 free (p);
624 p = next;
625 }
626
627 munmap (start, len);
628 G.bytes_mapped -= len;
629 }
630 #else
631 #ifdef HAVE_VALLOC
632
633 for (p = G.free_pages; p; p = next)
634 {
635 next = p->next;
636 free (p->page);
637 G.bytes_mapped -= p->bytes;
638 free (p);
639 }
640 #endif /* HAVE_VALLOC */
641 #endif /* HAVE_MMAP_ANYWHERE */
642
643 G.free_pages = NULL;
644 }
645
646 /* This table provides a fast way to determine ceil(log_2(size)) for
647 allocation requests. The minimum allocation size is four bytes. */
648
649 static unsigned char size_lookup[257] =
650 {
651 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
652 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
653 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
654 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
655 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
656 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
657 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
658 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
659 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
660 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
661 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
662 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
663 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
664 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
665 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
666 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
667 8
668 };
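/* Example: size_lookup[20] is 5, so a 20-byte request is served from a page
   of 32-byte objects -- until init_ggc redirects the entries just above a
   power of two to the extra orders.  Requests larger than 256 bytes bypass
   this table and use the search loop in ggc_alloc instead.  */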
669
670 /* Allocate a chunk of memory of SIZE bytes. Its contents are initially
671 undefined. */
672
673 void *
674 ggc_alloc (size)
675 size_t size;
676 {
677 unsigned order, word, bit, object_offset;
678 struct page_entry *entry;
679 void *result;
680
681 if (size <= 256)
682 order = size_lookup[size];
683 else
684 {
685 order = 9;
686 while (size > OBJECT_SIZE (order))
687 order++;
688 }
689
690 /* If there are non-full pages for this size allocation, they are at
691 the head of the list. */
692 entry = G.pages[order];
693
694 /* If there is no page for this object size, or all pages in this
695 context are full, allocate a new page. */
696 if (entry == NULL || entry->num_free_objects == 0)
697 {
698 struct page_entry *new_entry;
699 new_entry = alloc_page (order);
700
701 /* If this is the only entry, it's also the tail. */
702 if (entry == NULL)
703 G.page_tails[order] = new_entry;
704
705 /* Put new pages at the head of the page list. */
706 new_entry->next = entry;
707 entry = new_entry;
708 G.pages[order] = new_entry;
709
710 /* For a new page, we know the word and bit positions (in the
711 in_use bitmap) of the first available object -- they're zero. */
712 new_entry->next_bit_hint = 1;
713 word = 0;
714 bit = 0;
715 object_offset = 0;
716 }
717 else
718 {
719 /* First try to use the hint left from the previous allocation
720 to locate a clear bit in the in-use bitmap. We've made sure
721 that the one-past-the-end bit is always set, so if the hint
722 has run over, this test will fail. */
723 unsigned hint = entry->next_bit_hint;
724 word = hint / HOST_BITS_PER_LONG;
725 bit = hint % HOST_BITS_PER_LONG;
726
727 /* If the hint didn't work, scan the bitmap from the beginning. */
728 if ((entry->in_use_p[word] >> bit) & 1)
729 {
730 word = bit = 0;
731 while (~entry->in_use_p[word] == 0)
732 ++word;
733 while ((entry->in_use_p[word] >> bit) & 1)
734 ++bit;
735 hint = word * HOST_BITS_PER_LONG + bit;
736 }
737
738 /* Next time, try the next bit. */
739 entry->next_bit_hint = hint + 1;
740
741 object_offset = hint * OBJECT_SIZE (order);
742 }
743
744 /* Set the in-use bit. */
745 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
746
747 /* Keep a running total of the number of free objects. If this page
748 fills up, we may have to move it to the end of the list if the
749 next page isn't full. If the next page is full, all subsequent
750 pages are full, so there's no need to move it. */
751 if (--entry->num_free_objects == 0
752 && entry->next != NULL
753 && entry->next->num_free_objects > 0)
754 {
755 G.pages[order] = entry->next;
756 entry->next = NULL;
757 G.page_tails[order]->next = entry;
758 G.page_tails[order] = entry;
759 }
760
761 /* Calculate the object's address. */
762 result = entry->page + object_offset;
763
764 #ifdef GGC_POISON
765 /* `Poison' the entire allocated object, including any padding at
766 the end. */
767 memset (result, 0xaf, OBJECT_SIZE (order));
768 #endif
769
770 /* Keep track of how many bytes are being allocated. This
771 information is used in deciding when to collect. */
772 G.allocated += OBJECT_SIZE (order);
773
774 if (GGC_DEBUG_LEVEL >= 3)
775 fprintf (G.debug_file,
776 "Allocating object, requested size=%d, actual=%d at %p on %p\n",
777 (int) size, (int) OBJECT_SIZE (order), result, (PTR) entry);
778
779 return result;
780 }
781
782 /* If P is not marked, mark it and return false. Otherwise return true.
783 P must have been allocated by the GC allocator; it mustn't point to
784 static objects, stack variables, or memory allocated with malloc. */
785
786 int
787 ggc_set_mark (p)
788 const void *p;
789 {
790 page_entry *entry;
791 unsigned bit, word;
792 unsigned long mask;
793
794 /* Look up the page on which the object is allocated. If the object
795 wasn't allocated by the collector, we'll probably die. */
796 entry = lookup_page_table_entry (p);
797 #ifdef ENABLE_CHECKING
798 if (entry == NULL)
799 abort ();
800 #endif
801
802 /* Calculate the index of the object on the page; this is its bit
803 position in the in_use_p bitmap. */
804 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
805 word = bit / HOST_BITS_PER_LONG;
806 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
807
808 /* If the bit was previously set, skip it. */
809 if (entry->in_use_p[word] & mask)
810 return 1;
811
812 /* Otherwise set it, and decrement the free object count. */
813 entry->in_use_p[word] |= mask;
814 entry->num_free_objects -= 1;
815
816 if (GGC_DEBUG_LEVEL >= 4)
817 fprintf (G.debug_file, "Marking %p\n", p);
818
819 return 0;
820 }
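/* Illustrative use only; the type and function below are hypothetical.
   A typical marking routine calls ggc_set_mark first and returns early
   when the object was already marked, recursing into children only on
   the first visit.  */
#if 0
static void
example_mark_node (x)
     struct example_node *x;
{
  if (x == NULL || ggc_set_mark (x))
    return;                        /* Already marked -- nothing to do.  */
  example_mark_node (x->left);     /* First visit: mark the children.  */
  example_mark_node (x->right);
}
#endif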
821
822 /* Mark P, but check first that it was allocated by the collector. */
823
824 void
825 ggc_mark_if_gcable (p)
826 const void *p;
827 {
828 if (p && ggc_allocated_p (p))
829 ggc_set_mark (p);
830 }
831
832 /* Return the size of the gc-able object P. */
833
834 size_t
835 ggc_get_size (p)
836 const void *p;
837 {
838 page_entry *pe = lookup_page_table_entry (p);
839 return OBJECT_SIZE (pe->order);
840 }
841 \f
842 /* Initialize the ggc-mmap allocator. */
843
844 void
845 init_ggc ()
846 {
847 unsigned order;
848
849 G.pagesize = getpagesize();
850 G.lg_pagesize = exact_log2 (G.pagesize);
851
852 #if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
853 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
854 if (G.dev_zero_fd == -1)
855 abort ();
856 #endif
857
858 #if 0
859 G.debug_file = fopen ("ggc-mmap.debug", "w");
860 #else
861 G.debug_file = stdout;
862 #endif
863
864 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
865
866 #ifdef HAVE_MMAP_ANYWHERE
867 /* StunOS has an amazing off-by-one error for the first mmap allocation
868 after fiddling with RLIMIT_STACK. The result, as hard as it is to
869 believe, is an unaligned page allocation, which would cause us to
870 hork badly if we tried to use it. */
871 {
872 char *p = alloc_anon (NULL, G.pagesize);
873 if ((size_t)p & (G.pagesize - 1))
874 {
875 /* How losing. Discard this one and try another. If we still
876 can't get something useful, give up. */
877
878 p = alloc_anon (NULL, G.pagesize);
879 if ((size_t)p & (G.pagesize - 1))
880 abort ();
881 }
882 munmap (p, G.pagesize);
883 }
884 #endif
885
886 /* Initialize the object size table. */
887 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
888 object_size_table[order] = (size_t) 1 << order;
889 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
890 {
891 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
892
893 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
894 so that we're sure of getting aligned memory. */
895 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
896 object_size_table[order] = s;
897 }
898
899 /* Initialize the objects-per-page table. */
900 for (order = 0; order < NUM_ORDERS; ++order)
901 {
902 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
903 if (objects_per_page_table[order] == 0)
904 objects_per_page_table[order] = 1;
905 }
906
907 /* Reset the size_lookup array to put appropriately sized objects in
908 the special orders. All objects bigger than the previous power
909 of two, but no greater than the special size, should go in the
910 new order. */
911 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
912 {
913 int o;
914 int i;
915
916 o = size_lookup[OBJECT_SIZE (order)];
917 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
918 size_lookup[i] = order;
919 }
920 }
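/* Worked example of the loop above (the sizes are host-dependent and only
   illustrative): if sizeof (struct tree_list) is 20 and MAX_ALIGNMENT is 8,
   its extra order gets OBJECT_SIZE == 24, and every size_lookup entry from
   17 through 24 -- everything above the previous power of two, 16 -- is
   redirected from order 5 to that extra order.  */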
921
922 /* Increment the `GC context'. Objects allocated in an outer context
923 are never freed, eliminating the need to register their roots. */
924
925 void
926 ggc_push_context ()
927 {
928 ++G.context_depth;
929
930 /* Die on wrap. */
931 if (G.context_depth == 0)
932 abort ();
933 }
934
935 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
936 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
937
938 static void
939 ggc_recalculate_in_use_p (p)
940 page_entry *p;
941 {
942 unsigned int i;
943 size_t num_objects;
944
945 /* Because the past-the-end bit in in_use_p is always set, we
946 pretend there is one additional object. */
947 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
948
949 /* Reset the free object count. */
950 p->num_free_objects = num_objects;
951
952 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
953 for (i = 0;
954 i < CEIL (BITMAP_SIZE (num_objects),
955 sizeof (*p->in_use_p));
956 ++i)
957 {
958 unsigned long j;
959
960 /* Something is in use if it is marked, or if it was in use in a
961 context further down the context stack. */
962 p->in_use_p[i] |= p->save_in_use_p[i];
963
964 /* Decrement the free object count for every object allocated. */
965 for (j = p->in_use_p[i]; j; j >>= 1)
966 p->num_free_objects -= (j & 1);
967 }
968
969 if (p->num_free_objects >= num_objects)
970 abort ();
971 }
972
973 /* Decrement the `GC context'. All objects allocated since the
974 previous ggc_push_context are migrated to the outer context. */
975
976 void
977 ggc_pop_context ()
978 {
979 unsigned order, depth;
980
981 depth = --G.context_depth;
982
983 /* Any remaining pages in the popped context are lowered to the new
984 current context; i.e. objects allocated in the popped context and
985 left over are imported into the previous context. */
986 for (order = 2; order < NUM_ORDERS; order++)
987 {
988 page_entry *p;
989
990 for (p = G.pages[order]; p != NULL; p = p->next)
991 {
992 if (p->context_depth > depth)
993 p->context_depth = depth;
994
995 /* If this page is now in the topmost context, and we'd
996 saved its allocation state, restore it. */
997 else if (p->context_depth == depth && p->save_in_use_p)
998 {
999 ggc_recalculate_in_use_p (p);
1000 free (p->save_in_use_p);
1001 p->save_in_use_p = 0;
1002 }
1003 }
1004 }
1005 }
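/* Illustrative use only; the function names in this sketch are
   hypothetical.  A caller that builds short-lived objects can bracket the
   work in a GC context, so that a collection inside the bracket never
   frees anything allocated before the push.  */
#if 0
static void
example_with_context ()
{
  ggc_push_context ();
  build_short_lived_trees ();   /* May call ggc_collect internally.  */
  ggc_pop_context ();           /* Survivors migrate to the outer context.  */
}
#endif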
1006 \f
1007 /* Unmark all objects. */
1008
1009 static inline void
1010 clear_marks ()
1011 {
1012 unsigned order;
1013
1014 for (order = 2; order < NUM_ORDERS; order++)
1015 {
1016 size_t num_objects = OBJECTS_PER_PAGE (order);
1017 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1018 page_entry *p;
1019
1020 for (p = G.pages[order]; p != NULL; p = p->next)
1021 {
1022 #ifdef ENABLE_CHECKING
1023 /* The data should be page-aligned. */
1024 if ((size_t) p->page & (G.pagesize - 1))
1025 abort ();
1026 #endif
1027
1028 /* Pages that aren't in the topmost context are not collected;
1029 nevertheless, we need their in-use bit vectors to store GC
1030 marks. So, back them up first. */
1031 if (p->context_depth < G.context_depth)
1032 {
1033 if (! p->save_in_use_p)
1034 p->save_in_use_p = xmalloc (bitmap_size);
1035 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1036 }
1037
1038 /* Reset the number of free objects and clear the
1039 in-use bits. These will be adjusted by mark_obj. */
1040 p->num_free_objects = num_objects;
1041 memset (p->in_use_p, 0, bitmap_size);
1042
1043 /* Make sure the one-past-the-end bit is always set. */
1044 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1045 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1046 }
1047 }
1048 }
1049
1050 /* Free all empty pages. Partially empty pages need no attention
1051 because the `mark' bit doubles as an `unused' bit. */
1052
1053 static inline void
1054 sweep_pages ()
1055 {
1056 unsigned order;
1057
1058 for (order = 2; order < NUM_ORDERS; order++)
1059 {
1060 /* The last page-entry to consider, regardless of entries
1061 placed at the end of the list. */
1062 page_entry * const last = G.page_tails[order];
1063
1064 size_t num_objects = OBJECTS_PER_PAGE (order);
1065 size_t live_objects;
1066 page_entry *p, *previous;
1067 int done;
1068
1069 p = G.pages[order];
1070 if (p == NULL)
1071 continue;
1072
1073 previous = NULL;
1074 do
1075 {
1076 page_entry *next = p->next;
1077
1078 /* Loop until all entries have been examined. */
1079 done = (p == last);
1080
1081 /* Add all live objects on this page to the count of
1082 allocated memory. */
1083 live_objects = num_objects - p->num_free_objects;
1084
1085 G.allocated += OBJECT_SIZE (order) * live_objects;
1086
1087 /* Only objects on pages in the topmost context should get
1088 collected. */
1089 if (p->context_depth < G.context_depth)
1090 ;
1091
1092 /* Remove the page if it's empty. */
1093 else if (live_objects == 0)
1094 {
1095 if (! previous)
1096 G.pages[order] = next;
1097 else
1098 previous->next = next;
1099
1100 /* Are we removing the last element? */
1101 if (p == G.page_tails[order])
1102 G.page_tails[order] = previous;
1103 free_page (p);
1104 p = previous;
1105 }
1106
1107 /* If the page is full, move it to the end. */
1108 else if (p->num_free_objects == 0)
1109 {
1110 /* Don't move it if it's already at the end. */
1111 if (p != G.page_tails[order])
1112 {
1113 /* Move p to the end of the list. */
1114 p->next = NULL;
1115 G.page_tails[order]->next = p;
1116
1117 /* Update the tail pointer... */
1118 G.page_tails[order] = p;
1119
1120 /* ... and the head pointer, if necessary. */
1121 if (! previous)
1122 G.pages[order] = next;
1123 else
1124 previous->next = next;
1125 p = previous;
1126 }
1127 }
1128
1129 /* If we've fallen through to here, it's a page in the
1130 topmost context that is neither full nor empty. Such a
1131 page must precede pages at lesser context depth in the
1132 list, so move it to the head. */
1133 else if (p != G.pages[order])
1134 {
1135 previous->next = p->next;
1136 p->next = G.pages[order];
1137 G.pages[order] = p;
1138 /* Are we moving the last element? */
1139 if (G.page_tails[order] == p)
1140 G.page_tails[order] = previous;
1141 p = previous;
1142 }
1143
1144 previous = p;
1145 p = next;
1146 }
1147 while (! done);
1148
1149 /* Now, restore the in_use_p vectors for any pages from contexts
1150 other than the current one. */
1151 for (p = G.pages[order]; p; p = p->next)
1152 if (p->context_depth != G.context_depth)
1153 ggc_recalculate_in_use_p (p);
1154 }
1155 }
1156
1157 #ifdef GGC_POISON
1158 /* Clobber all free objects. */
1159
1160 static inline void
1161 poison_pages ()
1162 {
1163 unsigned order;
1164
1165 for (order = 2; order < NUM_ORDERS; order++)
1166 {
1167 size_t num_objects = OBJECTS_PER_PAGE (order);
1168 size_t size = OBJECT_SIZE (order);
1169 page_entry *p;
1170
1171 for (p = G.pages[order]; p != NULL; p = p->next)
1172 {
1173 size_t i;
1174
1175 if (p->context_depth != G.context_depth)
1176 /* Since we don't do any collection for pages in pushed
1177 contexts, there's no need to do any poisoning. And
1178 besides, the IN_USE_P array isn't valid until we pop
1179 contexts. */
1180 continue;
1181
1182 for (i = 0; i < num_objects; i++)
1183 {
1184 size_t word, bit;
1185 word = i / HOST_BITS_PER_LONG;
1186 bit = i % HOST_BITS_PER_LONG;
1187 if (((p->in_use_p[word] >> bit) & 1) == 0)
1188 memset (p->page + i * size, 0xa5, size);
1189 }
1190 }
1191 }
1192 }
1193 #endif
1194
1195 /* Top level mark-and-sweep routine. */
1196
1197 void
1198 ggc_collect ()
1199 {
1200 /* Avoid frequent unnecessary work by skipping collection if the
1201 total allocations haven't expanded much since the last
1202 collection. */
1203 #ifndef GGC_ALWAYS_COLLECT
1204 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1205 return;
1206 #endif
1207
1208 timevar_push (TV_GC);
1209 if (!quiet_flag)
1210 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1211
1212 /* Zero the total allocated bytes. This will be recalculated in the
1213 sweep phase. */
1214 G.allocated = 0;
1215
1216 /* Release the pages we freed the last time we collected, but didn't
1217 reuse in the interim. */
1218 release_pages ();
1219
1220 clear_marks ();
1221 ggc_mark_roots ();
1222
1223 #ifdef GGC_POISON
1224 poison_pages ();
1225 #endif
1226
1227 sweep_pages ();
1228
1229 G.allocated_last_gc = G.allocated;
1230 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1231 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1232
1233 timevar_pop (TV_GC);
1234
1235 if (!quiet_flag)
1236 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1237 }
1238
1239 /* Print allocation statistics. */
1240 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1241 ? (x) \
1242 : ((x) < 1024*1024*10 \
1243 ? (x) / 1024 \
1244 : (x) / (1024*1024))))
1245 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
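/* Example: SCALE (15000) is 14 and LABEL (15000) is 'k' (printed as
   "14k"); SCALE (3000) is 3000 with LABEL ' '; SCALE (20971520) is 20
   with LABEL 'M'.  */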
1246
1247 void
1248 ggc_print_statistics ()
1249 {
1250 struct ggc_statistics stats;
1251 unsigned int i;
1252 size_t total_overhead = 0;
1253
1254 /* Clear the statistics. */
1255 memset (&stats, 0, sizeof (stats));
1256
1257 /* Make sure collection will really occur. */
1258 G.allocated_last_gc = 0;
1259
1260 /* Collect and print the statistics common across collectors. */
1261 ggc_print_common_statistics (stderr, &stats);
1262
1263 /* Release free pages so that we will not count the bytes allocated
1264 there as part of the total allocated memory. */
1265 release_pages ();
1266
1267 /* Collect some information about the various sizes of
1268 allocation. */
1269 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1270 "Log", "Allocated", "Used", "Overhead");
1271 for (i = 0; i < NUM_ORDERS; ++i)
1272 {
1273 page_entry *p;
1274 size_t allocated;
1275 size_t in_use;
1276 size_t overhead;
1277
1278 /* Skip empty entries. */
1279 if (!G.pages[i])
1280 continue;
1281
1282 overhead = allocated = in_use = 0;
1283
1284 /* Figure out the total number of bytes allocated for objects of
1285 this size, and how many of them are actually in use. Also figure
1286 out how much memory the page table is using. */
1287 for (p = G.pages[i]; p; p = p->next)
1288 {
1289 allocated += p->bytes;
1290 in_use +=
1291 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1292
1293 overhead += (sizeof (page_entry) - sizeof (long)
1294 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1295 }
1296 fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
1297 SCALE (allocated), LABEL (allocated),
1298 SCALE (in_use), LABEL (in_use),
1299 SCALE (overhead), LABEL (overhead));
1300 total_overhead += overhead;
1301 }
1302 fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
1303 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1304 SCALE (G.allocated), LABEL(G.allocated),
1305 SCALE (total_overhead), LABEL (total_overhead));
1306 }