* ggc-page.c (MAP_FAILED): Provide default.
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "varray.h"
27 #include "flags.h"
28 #include "ggc.h"
29
30 #ifdef HAVE_MMAP
31 #include <sys/mman.h>
32 #endif
33
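/* Some older <sys/mman.h> headers do not define MAP_FAILED; on such
   systems mmap signals failure by returning -1, so provide a
   fallback definition here.  */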
34 #ifndef MAP_FAILED
35 #define MAP_FAILED -1
36 #endif
37
38
39 /* Strategy:
40
41 This garbage-collecting allocator allocates objects on one of a set
42 of pages. Each page can allocate objects of a single size only;
43 available sizes are powers of two starting at four bytes. The size
44 of an allocation request is rounded up to the next power of two
45 (`order'), and satisfied from the appropriate page.
46
47 Each page is recorded in a page-entry, which also maintains an
48 in-use bitmap of object positions on the page. This allows the
49 allocation state of a particular object to be flipped without
50 touching the page itself.
51
52 Each page-entry also has a context depth, which is used to track
53 pushing and popping of allocation contexts. Only objects allocated
54 in the current (highest-numbered) context may be collected.
55
56 Page entries are arranged in an array of singly-linked lists. The
57 array is indexed by the allocation size, in bits, of the pages on
58 it; i.e. all pages on a list allocate objects of the same size.
59 Pages are ordered on the list such that all non-full pages precede
60 all full pages, with non-full pages arranged in order of decreasing
61 context depth.
62
63 Empty pages (of all orders) are kept on a single page cache list,
64 and are considered first when new pages are required; they are
65 deallocated at the start of the next collection if they haven't
66 been recycled by then. */
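/* For example, a 10-byte request is rounded up to 16 bytes (order 4)
   and is satisfied from the first non-full page on the order-4 list;
   the object's position on that page is then recorded in the page's
   in-use bitmap.  */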
67
68
69 /* Define GGC_POISON to poison memory marked unused by the collector. */
70 #undef GGC_POISON
71
72 /* Define GGC_ALWAYS_COLLECT to perform collection every time
73 ggc_collect is invoked. Otherwise, collection is performed only
74 when a significant amount of memory has been allocated since the
75 last collection. */
76 #undef GGC_ALWAYS_COLLECT
77
78 /* If ENABLE_CHECKING is defined, enable GGC_POISON and
79 GGC_ALWAYS_COLLECT automatically. */
80 #ifdef ENABLE_CHECKING
81 #define GGC_POISON
82 #define GGC_ALWAYS_COLLECT
83 #endif
84
85 /* Define GGC_DEBUG_LEVEL to print debugging information.
86 0: No debugging output.
87 1: GC statistics only.
88 2: Page-entry allocations/deallocations as well.
89 3: Object allocations as well.
90 4: Object marks as well. */
91 #define GGC_DEBUG_LEVEL (0)
92 \f
93 #ifndef HOST_BITS_PER_PTR
94 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
95 #endif
96
97 /* Timing information for collector execution goes here. */
98 extern int gc_time;
99
100 /* The empty string "", allocated in GC memory. */
101 char *empty_string;
102 \f
103 /* A two-level tree is used to look up the page-entry for a given
104 pointer. Two chunks of the pointer's bits are extracted to index
105 the first and second levels of the tree, as follows:
106
107                                    HOST_PAGE_SIZE_BITS
108                         32            |      |
109      msb +----------------+----+------+------+ lsb
110                             |    |
111                       PAGE_L1_BITS
112                                  |
113                             PAGE_L2_BITS
114
115 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
116 pages are aligned on system page boundaries. The next most
117 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
118 index values in the lookup table, respectively.
119
120 For 32-bit architectures and the settings below, there are no
121 leftover bits. For architectures with wider pointers, the lookup
122 tree points to a list of pages, which must be scanned to find the
123 correct one. */
124
125 #define PAGE_L1_BITS (8)
126 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
127 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
128 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
129
130 #define LOOKUP_L1(p) \
131 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
132
133 #define LOOKUP_L2(p) \
134 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
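/* For instance, with 4096-byte pages (lg_pagesize == 12) and
   PAGE_L1_BITS == 8, PAGE_L2_BITS is 12; for the pointer 0x0804a6f0,
   LOOKUP_L1 yields 0x08 and LOOKUP_L2 yields 0x04a.  */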
135
136
137 /* A page_entry records the status of an allocation page. This
138 structure is dynamically sized to fit the bitmap in_use_p. */
139 typedef struct page_entry
140 {
141 /* The next page-entry with objects of the same size, or NULL if
142 this is the last page-entry. */
143 struct page_entry *next;
144
145 /* The number of bytes allocated. (This will always be a multiple
146 of the host system page size.) */
147 size_t bytes;
148
149 /* The address at which the memory is allocated. */
150 char *page;
151
152 /* Saved in-use bit vector for pages that aren't in the topmost
153 context during collection. */
154 unsigned long *save_in_use_p;
155
156 /* Context depth of this page. */
157 unsigned char context_depth;
158
159   /* The lg of the size of objects allocated from this page. */
160 unsigned char order;
161
162 /* The number of free objects remaining on this page. */
163 unsigned short num_free_objects;
164
165 /* A likely candidate for the bit position of a free object for the
166 next allocation from this page. */
167 unsigned short next_bit_hint;
168
169 /* Saved number of free objects for pages that aren't in the topmost
170      context during collection. */
171 unsigned short save_num_free_objects;
172
173 /* A bit vector indicating whether or not objects are in use. The
174 Nth bit is one if the Nth object on this page is allocated. This
175 array is dynamically sized. */
176 unsigned long in_use_p[1];
177 } page_entry;
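/* A page_entry is allocated with enough trailing space that in_use_p
   can hold one bit per object on the page plus the one-past-the-end
   sentinel bit; the [1] declaration above is the usual trailing-array
   idiom.  */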
178
179
180 #if HOST_BITS_PER_PTR <= 32
181
182 /* On 32-bit hosts, we use a two level page table, as pictured above. */
183 typedef page_entry **page_table[PAGE_L1_SIZE];
184
185 #else
186
187 /* On 64-bit hosts, we use the same two level page tables plus a linked
188    list that disambiguates the top 32 bits.  There will almost always be
189 exactly one entry in the list. */
190 typedef struct page_table_chain
191 {
192 struct page_table_chain *next;
193 size_t high_bits;
194 page_entry **table[PAGE_L1_SIZE];
195 } *page_table;
196
197 #endif
198
199 /* The rest of the global variables. */
200 static struct globals
201 {
202 /* The Nth element in this array is a page with objects of size 2^N.
203 If there are any pages with free objects, they will be at the
204 head of the list. NULL if there are no page-entries for this
205 object size. */
206 page_entry *pages[HOST_BITS_PER_PTR];
207
208 /* The Nth element in this array is the last page with objects of
209 size 2^N. NULL if there are no page-entries for this object
210 size. */
211 page_entry *page_tails[HOST_BITS_PER_PTR];
212
213 /* Lookup table for associating allocation pages with object addresses. */
214 page_table lookup;
215
216 /* The system's page size. */
217 size_t pagesize;
218 size_t lg_pagesize;
219
220 /* Bytes currently allocated. */
221 size_t allocated;
222
223   /* Bytes allocated at the end of the last collection. */
224 size_t allocated_last_gc;
225
226 /* The current depth in the context stack. */
227 unsigned char context_depth;
228
229 /* A file descriptor open to /dev/zero for reading. */
230 #if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
231 int dev_zero_fd;
232 #endif
233
234 /* A cache of free system pages. */
235 page_entry *free_pages;
236
237   /* The stream used for debugging output. */
238 FILE *debug_file;
239 } G;
240
241
242 /* Compute DIVIDEND / DIVISOR, rounded up. */
243 #define DIV_ROUND_UP(Dividend, Divisor) \
244 ((Dividend + Divisor - 1) / Divisor)
245
246 /* The number of objects per allocation page, for objects of size
247 2^ORDER. */
248 #define OBJECTS_PER_PAGE(Order) \
249 ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))
250
251 /* The size in bytes required to maintain a bitmap for the objects
252 on a page-entry. */
253 #define BITMAP_SIZE(Num_objects) \
254 (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
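/* As an example, assuming 4096-byte pages and 32-bit longs, an
   order-4 page holds OBJECTS_PER_PAGE (4) == 256 sixteen-byte
   objects, and BITMAP_SIZE (256 + 1) == 36 bytes (nine longs) are
   needed for its in-use bitmap including the sentinel bit.  */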
255
256 /* Skip garbage collection if the current allocation is not at least
257 this factor times the allocation at the end of the last collection.
258 In other words, total allocation must expand by (this factor minus
259 one) before collection is performed. */
260 #define GGC_MIN_EXPAND_FOR_GC (1.3)
261
262 /* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
263 test from triggering too often when the heap is small. */
264 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
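/* With these defaults, the first collection is not attempted until at
   least 1.3 * 4MB (about 5.2MB) of GC memory is live.  */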
265
266 \f
267 static int ggc_allocated_p PROTO ((const void *));
268 static page_entry *lookup_page_table_entry PROTO ((const void *));
269 static void set_page_table_entry PROTO ((void *, page_entry *));
270 static char *alloc_anon PROTO ((char *, size_t));
271 static struct page_entry * alloc_page PROTO ((unsigned));
272 static void free_page PROTO ((struct page_entry *));
273 static void release_pages PROTO ((void));
274 static void clear_marks PROTO ((void));
275 static void sweep_pages PROTO ((void));
276
277 #ifdef GGC_POISON
278 static void poison PROTO ((void *, size_t));
279 static void poison_pages PROTO ((void));
280 #endif
281
282 void debug_print_page_list PROTO ((int));
283 \f
284 /* Returns non-zero if P was allocated in GC'able memory. */
285
286 static inline int
287 ggc_allocated_p (p)
288 const void *p;
289 {
290 page_entry ***base;
291 size_t L1, L2;
292
293 #if HOST_BITS_PER_PTR <= 32
294 base = &G.lookup[0];
295 #else
296 page_table table = G.lookup;
297 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
298 while (1)
299 {
300 if (table == NULL)
301 return 0;
302 if (table->high_bits == high_bits)
303 break;
304 table = table->next;
305 }
306 base = &table->table[0];
307 #endif
308
309   /* Extract the level 1 and 2 indices. */
310 L1 = LOOKUP_L1 (p);
311 L2 = LOOKUP_L2 (p);
312
313 return base[L1] && base[L1][L2];
314 }
315
316 /* Traverse the page table and find the entry for a page.
317 Die (probably) if the object wasn't allocated via GC. */
318
319 static inline page_entry *
320 lookup_page_table_entry(p)
321 const void *p;
322 {
323 page_entry ***base;
324 size_t L1, L2;
325
326 #if HOST_BITS_PER_PTR <= 32
327 base = &G.lookup[0];
328 #else
329 page_table table = G.lookup;
330 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
331 while (table->high_bits != high_bits)
332 table = table->next;
333 base = &table->table[0];
334 #endif
335
336   /* Extract the level 1 and 2 indices. */
337 L1 = LOOKUP_L1 (p);
338 L2 = LOOKUP_L2 (p);
339
340 return base[L1][L2];
341 }
342
343
344 /* Set the page table entry for a page. */
345 static void
346 set_page_table_entry(p, entry)
347 void *p;
348 page_entry *entry;
349 {
350 page_entry ***base;
351 size_t L1, L2;
352
353 #if HOST_BITS_PER_PTR <= 32
354 base = &G.lookup[0];
355 #else
356 page_table table;
357 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
358 for (table = G.lookup; table; table = table->next)
359 if (table->high_bits == high_bits)
360 goto found;
361
362 /* Not found -- allocate a new table. */
363 table = (page_table) xcalloc (1, sizeof(*table));
364 table->next = G.lookup;
365 table->high_bits = high_bits;
366 G.lookup = table;
367 found:
368 base = &table->table[0];
369 #endif
370
371   /* Extract the level 1 and 2 indices. */
372 L1 = LOOKUP_L1 (p);
373 L2 = LOOKUP_L2 (p);
374
375 if (base[L1] == NULL)
376 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
377
378 base[L1][L2] = entry;
379 }
380
381
382 /* Prints the page-entry for object size ORDER, for debugging. */
383 void
384 debug_print_page_list (order)
385 int order;
386 {
387 page_entry *p;
388 printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
389 p = G.pages[order];
390 while (p != NULL)
391 {
392 printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
393 p = p->next;
394 }
395 printf ("NULL\n");
396 fflush (stdout);
397 }
398
399 #ifdef GGC_POISON
400 /* `Poisons' the region of memory starting at START and extending for
401 LEN bytes. */
402 static inline void
403 poison (start, len)
404 void *start;
405 size_t len;
406 {
407 memset (start, 0xa5, len);
408 }
409 #endif
410
411 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
412 (if non-null). */
413 static inline char *
414 alloc_anon (pref, size)
415 char *pref ATTRIBUTE_UNUSED;
416 size_t size;
417 {
418 char *page;
419
420 #ifdef HAVE_MMAP
421 #ifdef MAP_ANONYMOUS
422 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
423 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
424 #else
425 page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
426 MAP_PRIVATE, G.dev_zero_fd, 0);
427 #endif
428 if (page == (char *) MAP_FAILED)
429 {
430 fputs ("Virtual memory exhausted!\n", stderr);
431 exit(1);
432 }
433 #else
434 #ifdef HAVE_VALLOC
435 page = (char *) valloc (size);
436 if (!page)
437 {
438 fputs ("Virtual memory exhausted!\n", stderr);
439 exit(1);
440 }
441 #endif /* HAVE_VALLOC */
442 #endif /* HAVE_MMAP */
443
444 return page;
445 }
446
447 /* Allocate a new page for allocating objects of size 2^ORDER,
448 and return an entry for it. The entry is not added to the
449 appropriate page_table list. */
450 static inline struct page_entry *
451 alloc_page (order)
452 unsigned order;
453 {
454 struct page_entry *entry, *p, **pp;
455 char *page;
456 size_t num_objects;
457 size_t bitmap_size;
458 size_t page_entry_size;
459 size_t entry_size;
460
461 num_objects = OBJECTS_PER_PAGE (order);
462 bitmap_size = BITMAP_SIZE (num_objects + 1);
463 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
464 entry_size = num_objects * (1 << order);
465
466 entry = NULL;
467 page = NULL;
468
469 /* Check the list of free pages for one we can use. */
470 for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
471 if (p->bytes == entry_size)
472 break;
473
474 if (p != NULL)
475 {
476 /* Recycle the allocated memory from this page ... */
477 *pp = p->next;
478 page = p->page;
479 /* ... and, if possible, the page entry itself. */
480 if (p->order == order)
481 {
482 entry = p;
483 memset (entry, 0, page_entry_size);
484 }
485 else
486 free (p);
487 }
488 else
489 {
490 /* Actually allocate the memory, using mmap. */
491 page = alloc_anon (NULL, entry_size);
492 }
493
494 if (entry == NULL)
495 entry = (struct page_entry *) xcalloc (1, page_entry_size);
496
497 entry->bytes = entry_size;
498 entry->page = page;
499 entry->context_depth = G.context_depth;
500 entry->order = order;
501 entry->num_free_objects = num_objects;
502 entry->next_bit_hint = 1;
503
504   /* Set the one-past-the-end in-use bit.  This acts as a sentinel as we
505 increment the hint. */
506 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
507 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
508
509 set_page_table_entry (page, entry);
510
511 if (GGC_DEBUG_LEVEL >= 2)
512 fprintf (G.debug_file,
513 "Allocating page at %p, object size=%d, data %p-%p\n", entry,
514 1 << order, page, page + entry_size - 1);
515
516 return entry;
517 }
518
519
520 /* Free a page when it's no longer needed. */
521 static inline void
522 free_page (entry)
523 page_entry *entry;
524 {
525 if (GGC_DEBUG_LEVEL >= 2)
526 fprintf (G.debug_file,
527 "Deallocating page at %p, data %p-%p\n", entry,
528 entry->page, entry->page + entry->bytes - 1);
529
530 set_page_table_entry (entry->page, NULL);
531
532 entry->next = G.free_pages;
533 G.free_pages = entry;
534 }
535
536
537 /* Release the page cache to the system. */
538 static inline void
539 release_pages ()
540 {
541 #ifdef HAVE_MMAP
542 page_entry *p, *next;
543 char *start;
544 size_t len;
545
546 p = G.free_pages;
547 if (p == NULL)
548 return;
549
550 next = p->next;
551 start = p->page;
552 len = p->bytes;
553 free (p);
554 p = next;
555
556 while (p)
557 {
558 next = p->next;
559 /* Gather up adjacent pages so they are unmapped together. */
560 if (p->page == start + len)
561 len += p->bytes;
562 else
563 {
564 munmap (start, len);
565 start = p->page;
566 len = p->bytes;
567 }
568 free (p);
569 p = next;
570 }
571
572 munmap (start, len);
573 #else
574 #ifdef HAVE_VALLOC
575 page_entry *p, *next;
576
577 for (p = G.free_pages; p ; p = next)
578 {
579 next = p->next;
580 free (p->page);
581 free (p);
582 }
583 #endif /* HAVE_VALLOC */
584 #endif /* HAVE_MMAP */
585
586 G.free_pages = NULL;
587 }
588
589
590 /* This table provides a fast way to determine ceil(log_2(size)) for
591 allocation requests. The minimum allocation size is four bytes. */
592 static unsigned char const size_lookup[257] =
593 {
594 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
595 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
596 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
597 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
598 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
599 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
600 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
601 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
602 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
603 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
604 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
605 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
606 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
607 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
608 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
609 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
610 8
611 };
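/* Thus size_lookup[5] is 3, so a 5-byte request receives an 8-byte
   (order 3) object; requests larger than 256 bytes fall through to
   the loop in ggc_alloc_obj, which starts searching at order 9.  */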
612
613 /* Allocate a chunk of memory of SIZE bytes. If ZERO is non-zero, the
614 memory is zeroed; otherwise, its contents are undefined. */
615 void *
616 ggc_alloc_obj (size, zero)
617 size_t size;
618 int zero;
619 {
620 unsigned order, word, bit, object_offset;
621 struct page_entry *entry;
622 void *result;
623
624 if (size <= 256)
625 order = size_lookup[size];
626 else
627 {
628 order = 9;
629 while (size > ((size_t) 1 << order))
630 order++;
631 }
632
633 /* If there are non-full pages for this size allocation, they are at
634 the head of the list. */
635 entry = G.pages[order];
636
637 /* If there is no page for this object size, or all pages in this
638 context are full, allocate a new page. */
639 if (entry == NULL
640 || entry->num_free_objects == 0
641 || entry->context_depth != G.context_depth)
642 {
643 struct page_entry *new_entry;
644 new_entry = alloc_page (order);
645
646 /* If this is the only entry, it's also the tail. */
647 if (entry == NULL)
648 G.page_tails[order] = new_entry;
649
650 /* Put new pages at the head of the page list. */
651 new_entry->next = entry;
652 entry = new_entry;
653 G.pages[order] = new_entry;
654
655 /* For a new page, we know the word and bit positions (in the
656 in_use bitmap) of the first available object -- they're zero. */
657 new_entry->next_bit_hint = 1;
658 word = 0;
659 bit = 0;
660 object_offset = 0;
661 }
662 else
663 {
664 /* First try to use the hint left from the previous allocation
665 to locate a clear bit in the in-use bitmap. We've made sure
666 that the one-past-the-end bit is always set, so if the hint
667 has run over, this test will fail. */
668 unsigned hint = entry->next_bit_hint;
669 word = hint / HOST_BITS_PER_LONG;
670 bit = hint % HOST_BITS_PER_LONG;
671
672 /* If the hint didn't work, scan the bitmap from the beginning. */
673 if ((entry->in_use_p[word] >> bit) & 1)
674 {
675 word = bit = 0;
676 while (~entry->in_use_p[word] == 0)
677 ++word;
678 while ((entry->in_use_p[word] >> bit) & 1)
679 ++bit;
680 hint = word * HOST_BITS_PER_LONG + bit;
681 }
682
683 /* Next time, try the next bit. */
684 entry->next_bit_hint = hint + 1;
685
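      /* The object's byte offset on the page is its index times the
	 object size, i.e. the index shifted left by the order.  */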
686 object_offset = hint << order;
687 }
688
689 /* Set the in-use bit. */
690 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
691
692 /* Keep a running total of the number of free objects. If this page
693 fills up, we may have to move it to the end of the list if the
694 next page isn't full. If the next page is full, all subsequent
695 pages are full, so there's no need to move it. */
696 if (--entry->num_free_objects == 0
697 && entry->next != NULL
698 && entry->next->num_free_objects > 0)
699 {
700 G.pages[order] = entry->next;
701 entry->next = NULL;
702 G.page_tails[order]->next = entry;
703 G.page_tails[order] = entry;
704 }
705
706 /* Calculate the object's address. */
707 result = entry->page + object_offset;
708
709 #ifdef GGC_POISON
710 /* `Poison' the entire allocated object before zeroing the requested area,
711 so that bytes beyond the end, if any, will not necessarily be zero. */
712 poison (result, 1 << order);
713 #endif
714 if (zero)
715 memset (result, 0, size);
716
717 /* Keep track of how many bytes are being allocated. This
718 information is used in deciding when to collect. */
719 G.allocated += (size_t) 1 << order;
720
721 if (GGC_DEBUG_LEVEL >= 3)
722 fprintf (G.debug_file,
723 "Allocating object, requested size=%d, actual=%d at %p on %p\n",
724 (int) size, 1 << order, result, entry);
725
726 return result;
727 }
728
729
730 /* If P is not marked, marks it and returns 0. Otherwise returns 1.
731 P must have been allocated by the GC allocator; it mustn't point to
732 static objects, stack variables, or memory allocated with malloc. */
733 int
734 ggc_set_mark (p)
735 void *p;
736 {
737 page_entry *entry;
738 unsigned bit, word;
739 unsigned long mask;
740
741   /* Look up the page on which the object is allocated.  If the object
742 wasn't allocated by the collector, we'll probably die. */
743 entry = lookup_page_table_entry (p);
744 #ifdef ENABLE_CHECKING
745 if (entry == NULL)
746 abort ();
747 #endif
748
749 /* Calculate the index of the object on the page; this is its bit
750 position in the in_use_p bitmap. */
751 bit = (((char *) p) - entry->page) >> entry->order;
752 word = bit / HOST_BITS_PER_LONG;
753 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
754
755 /* If the bit was previously set, skip it. */
756 if (entry->in_use_p[word] & mask)
757 return 1;
758
759 /* Otherwise set it, and decrement the free object count. */
760 entry->in_use_p[word] |= mask;
761 entry->num_free_objects -= 1;
762
763 G.allocated += (size_t) 1 << entry->order;
764
765 if (GGC_DEBUG_LEVEL >= 4)
766 fprintf (G.debug_file, "Marking %p\n", p);
767
768 return 0;
769 }
770
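/* Mark P, but only if it actually points into GC-allocated memory;
   used for roots that may or may not be GC-allocated.  */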
771 void
772 ggc_mark_if_gcable (p)
773 void *p;
774 {
775 if (p && ggc_allocated_p (p))
776 ggc_set_mark (p);
777 }
778 \f
779 /* Initialize the ggc-mmap allocator. */
780 void
781 init_ggc ()
782 {
783 G.pagesize = getpagesize();
784 G.lg_pagesize = exact_log2 (G.pagesize);
785
786 #if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
787 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
788 if (G.dev_zero_fd == -1)
789 abort ();
790 #endif
791
792 #if 0
793 G.debug_file = fopen ("ggc-mmap.debug", "w");
794 #else
795 G.debug_file = stdout;
796 #endif
797
798 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
799
800 empty_string = ggc_alloc_string ("", 0);
801 ggc_add_string_root (&empty_string, 1);
802 }
803
804
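/* Start a new GGC context.  Memory allocated in previous contexts
   will not be collected while the new context is active.  */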
805 void
806 ggc_push_context ()
807 {
808 ++G.context_depth;
809
810 /* Die on wrap. */
811 if (G.context_depth == 0)
812 abort ();
813 }
814
815
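/* Finish a GC context.  Any uncollected memory in the new context
   will be merged into the previous context.  */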
816 void
817 ggc_pop_context ()
818 {
819 unsigned order, depth;
820
821 depth = --G.context_depth;
822
823 /* Any remaining pages in the popped context are lowered to the new
824 current context; i.e. objects allocated in the popped context and
825 left over are imported into the previous context. */
826 for (order = 2; order < HOST_BITS_PER_PTR; order++)
827 {
828 size_t num_objects = OBJECTS_PER_PAGE (order);
829 size_t bitmap_size = BITMAP_SIZE (num_objects);
830
831 page_entry *p;
832
833 for (p = G.pages[order]; p != NULL; p = p->next)
834 {
835 if (p->context_depth > depth)
836 {
837 p->context_depth = depth;
838 }
839
840 /* If this page is now in the topmost context, and we'd
841 saved its allocation state, restore it. */
842 else if (p->context_depth == depth && p->save_in_use_p)
843 {
844 memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
845 free (p->save_in_use_p);
846 p->save_in_use_p = 0;
847 p->num_free_objects = p->save_num_free_objects;
848 }
849 }
850 }
851 }
852 \f
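/* Unmark all objects on all pages, first saving the allocation state
   of pages that are not in the topmost context so it can be restored
   after collection.  */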
853 static inline void
854 clear_marks ()
855 {
856 unsigned order;
857
858 for (order = 2; order < HOST_BITS_PER_PTR; order++)
859 {
860 size_t num_objects = OBJECTS_PER_PAGE (order);
861 size_t bitmap_size = BITMAP_SIZE (num_objects);
862 page_entry *p;
863
864 for (p = G.pages[order]; p != NULL; p = p->next)
865 {
866 #ifdef ENABLE_CHECKING
867 /* The data should be page-aligned. */
868 if ((size_t) p->page & (G.pagesize - 1))
869 abort ();
870 #endif
871
872 /* Pages that aren't in the topmost context are not collected;
873 nevertheless, we need their in-use bit vectors to store GC
874 marks. So, back them up first. */
875 if (p->context_depth < G.context_depth
876 && ! p->save_in_use_p)
877 {
878 p->save_in_use_p = xmalloc (bitmap_size);
879 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
880 p->save_num_free_objects = p->num_free_objects;
881 }
882
883 	  /* Reset the number of free objects and clear the
884 	     in-use bits.  These will be adjusted by ggc_set_mark. */
885 p->num_free_objects = num_objects;
886 memset (p->in_use_p, 0, bitmap_size);
887
888 /* Make sure the one-past-the-end bit is always set. */
889 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
890 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
891 }
892 }
893 }
894
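/* Free pages on which nothing was marked during the mark phase, and
   reorder the page lists so that non-full pages again precede full
   ones.  */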
895 static inline void
896 sweep_pages ()
897 {
898 unsigned order;
899
900 for (order = 2; order < HOST_BITS_PER_PTR; order++)
901 {
902 /* The last page-entry to consider, regardless of entries
903 placed at the end of the list. */
904 page_entry * const last = G.page_tails[order];
905
906 size_t num_objects = OBJECTS_PER_PAGE (order);
907 page_entry *p, *previous;
908 int done;
909
910 p = G.pages[order];
911 if (p == NULL)
912 continue;
913
914 previous = NULL;
915 do
916 {
917 page_entry *next = p->next;
918
919 /* Loop until all entries have been examined. */
920 done = (p == last);
921
922 /* Only objects on pages in the topmost context should get
923 collected. */
924 if (p->context_depth < G.context_depth)
925 ;
926
927 /* Remove the page if it's empty. */
928 else if (p->num_free_objects == num_objects)
929 {
930 if (! previous)
931 G.pages[order] = next;
932 else
933 previous->next = next;
934
935 /* Are we removing the last element? */
936 if (p == G.page_tails[order])
937 G.page_tails[order] = previous;
938 free_page (p);
939 p = previous;
940 }
941
942 /* If the page is full, move it to the end. */
943 else if (p->num_free_objects == 0)
944 {
945 /* Don't move it if it's already at the end. */
946 if (p != G.page_tails[order])
947 {
948 /* Move p to the end of the list. */
949 p->next = NULL;
950 G.page_tails[order]->next = p;
951
952 /* Update the tail pointer... */
953 G.page_tails[order] = p;
954
955 /* ... and the head pointer, if necessary. */
956 if (! previous)
957 G.pages[order] = next;
958 else
959 previous->next = next;
960 p = previous;
961 }
962 }
963
964 /* If we've fallen through to here, it's a page in the
965 topmost context that is neither full nor empty. Such a
966 page must precede pages at lesser context depth in the
967 list, so move it to the head. */
968 else if (p != G.pages[order])
969 {
970 previous->next = p->next;
971 p->next = G.pages[order];
972 G.pages[order] = p;
973 /* Are we moving the last element? */
974 if (G.page_tails[order] == p)
975 G.page_tails[order] = previous;
976 p = previous;
977 }
978
979 previous = p;
980 p = next;
981 }
982 while (! done);
983 }
984 }
985
986 #ifdef GGC_POISON
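/* Overwrite any object that was not marked, and is therefore free,
   with the poison pattern so that stale pointers into collected
   storage are caught quickly.  */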
987 static inline void
988 poison_pages ()
989 {
990 unsigned order;
991
992 for (order = 2; order < HOST_BITS_PER_PTR; order++)
993 {
994 size_t num_objects = OBJECTS_PER_PAGE (order);
995 size_t size = (size_t) 1 << order;
996 page_entry *p;
997
998 for (p = G.pages[order]; p != NULL; p = p->next)
999 {
1000 size_t i;
1001 for (i = 0; i < num_objects; i++)
1002 {
1003 size_t word, bit;
1004 word = i / HOST_BITS_PER_LONG;
1005 bit = i % HOST_BITS_PER_LONG;
1006 if (((p->in_use_p[word] >> bit) & 1) == 0)
1007 poison (p->page + i * size, size);
1008 }
1009 }
1010 }
1011 }
1012 #endif
1013
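/* Top level mark-and-sweep routine.  */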
1014 void
1015 ggc_collect ()
1016 {
1017 int time;
1018
1019 /* Avoid frequent unnecessary work by skipping collection if the
1020 total allocations haven't expanded much since the last
1021 collection. */
1022 #ifndef GGC_ALWAYS_COLLECT
1023 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1024 return;
1025 #endif
1026
1027 time = get_run_time ();
1028 if (!quiet_flag)
1029 fprintf (stderr, " {GC %luk -> ", (unsigned long)G.allocated / 1024);
1030
1031 /* Zero the total allocated bytes. We'll reaccumulate this while
1032 marking. */
1033 G.allocated = 0;
1034
1035 /* Release the pages we freed the last time we collected, but didn't
1036 reuse in the interim. */
1037 release_pages ();
1038
1039 clear_marks ();
1040 ggc_mark_roots ();
1041 sweep_pages ();
1042
1043 #ifdef GGC_POISON
1044 poison_pages ();
1045 #endif
1046
1047 G.allocated_last_gc = G.allocated;
1048 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1049 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1050
1051 time = get_run_time () - time;
1052 gc_time += time;
1053
1054 if (!quiet_flag)
1055 {
1056 fprintf (stderr, "%luk in %.3f}",
1057 (unsigned long) G.allocated / 1024, time * 1e-6);
1058 }
1059 }