/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"

#include <sys/mman.h>


/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
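
/* As a concrete illustration (figures are examples only, not part of
   the implementation): on a host with 4096-byte pages, a 20-byte
   request is rounded up to 32 bytes (order 5) and carved out of a
   page holding 4096 / 32 == 128 such objects; bit N of that page's
   in-use bitmap says whether object N is currently allocated.  */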


/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

/* If ENABLE_CHECKING is defined, enable GGC_POISON and
   GGC_ALWAYS_COLLECT automatically.  */
#ifdef ENABLE_CHECKING
#define GGC_POISON
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* Timing information for collect execution goes here.  */
extern int gc_time;

/* The "" (empty) allocated string.  */
char *empty_string;
\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
          32                           |          |
       msb +----------------+----------+----------+ lsb
                   |             |
                   |       PAGE_L2_BITS
                   |
              PAGE_L1_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   The topmost leftover bits, if any, are ignored.  For 32-bit
   architectures and the settings below, there are no leftover bits.
   For architectures with wider pointers, the lookup tree points to a
   list of pages, which must be scanned to find the correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
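
/* A worked example (illustrative only): with 4096-byte system pages
   (G.lg_pagesize == 12), PAGE_L2_BITS is 32 - 8 - 12 == 12, and the
   pointer 0x0804a123 decomposes as

     LOOKUP_L1 (0x0804a123) == 0x08      (top 8 bits)
     LOOKUP_L2 (0x0804a123) == 0x04a     (next 12 bits)

   while the low 12 bits (0x123) are the offset within the page and
   take no part in the lookup.  */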


/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned char context_depth;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* Saved number of free objects for pages that aren't in the topmost
     context during collection.  */
  unsigned short save_num_free_objects;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;


#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use two level page tables plus a linked list
   that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* The current depth in the context stack.  */
  unsigned char context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#ifndef MAP_ANONYMOUS
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;


/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t) 1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
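
/* As a worked example (assuming 4096-byte pages and 64-bit longs,
   purely for illustration): OBJECTS_PER_PAGE (5) is 4096 / 32 == 128,
   and the bitmap covering those objects plus the one-past-the-end bit
   set by alloc_page below is BITMAP_SIZE (128 + 1)
   == DIV_ROUND_UP (129, 64) * sizeof (long) == 3 * 8 == 24 bytes.  */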

/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
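
/* Taken together (figures for illustration only): right after a
   collection of a small heap, `allocated_last_gc' is clamped to 4MB,
   so subsequent ggc_collect calls return without collecting until
   total allocation reaches 1.3 * 4MB, roughly 5.2MB.  */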

\f
static page_entry *** ggc_lookup_page_table PROTO ((const void *));
static int ggc_allocated_p PROTO ((const void *));
static page_entry *lookup_page_table_entry PROTO ((void *));
static void set_page_table_entry PROTO ((void *, page_entry *));
static char *alloc_anon PROTO ((char *, size_t));
static struct page_entry * alloc_page PROTO ((unsigned));
static void free_page PROTO ((struct page_entry *));
static void release_pages PROTO ((void));
static void *alloc_obj PROTO ((size_t, int));
static int mark_obj PROTO ((void *));
static void clear_marks PROTO ((void));
static void sweep_pages PROTO ((void));

#ifdef GGC_POISON
static void poison PROTO ((void *, size_t));
static void poison_pages PROTO ((void));
#endif

void debug_print_page_list PROTO ((int));
\f
/* Returns the lookup table appropriate for looking up P.  */

static inline page_entry ***
ggc_lookup_page_table (p)
     const void *p;
{
  page_entry ***base;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  return base;
}

/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

  base = ggc_lookup_page_table (p);

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     void *p;
{
  page_entry ***base;
  size_t L1, L2;

  base = ggc_lookup_page_table (p);

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}


/* Set the page table entry for a page.  */
static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}


/* Prints the page-entry for object size ORDER, for debugging.  */
void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef GGC_POISON
/* `Poisons' the region of memory starting at START and extending for
   LEN bytes.  */
static inline void
poison (start, len)
     void *start;
     size_t len;
{
  memset (start, 0xa5, len);
}
#endif

/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */
static inline char *
alloc_anon (pref, size)
     char *pref;
     size_t size;
{
  char *page;

#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }

  return page;
}

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */
static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
  else
    {
      /* Actually allocate the memory, using mmap.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentinel as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
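
  /* For example (figures are illustrative): a page of 128 order-5
     objects gets its sentinel at bit 128, i.e. bit 0 of in_use_p[2]
     when HOST_BITS_PER_LONG is 64, or bit 0 of in_use_p[4] when it is
     32; this extra bit is why BITMAP_SIZE was given num_objects + 1
     above.  */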

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n", entry,
             1 << order, page, page + entry_size - 1);

  return entry;
}


/* Free a page when it's no longer needed.  */
static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}


/* Release the page cache to the system.  */
static inline void
release_pages ()
{
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p)
    {
      next = p->next;
      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
        len += p->bytes;
      else
        {
          munmap (start, len);
          start = p->page;
          len = p->bytes;
        }
      free (p);
      p = next;
    }

  munmap (start, len);
  G.free_pages = NULL;
}


/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */
static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
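
/* Thus, for example, a 20-byte request maps to size_lookup[20] == 5
   and is served from a page of 32-byte objects, while a 300-byte
   request falls through to the loop in alloc_obj below and is given
   order 9 (512 bytes).  The figures are illustrative only.  */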

/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */
static void *
alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL
      || entry->num_free_objects == 0
      || entry->context_depth != G.context_depth)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
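
      /* For instance (illustrative numbers): if the previous
         allocation on this page used bit 67 and HOST_BITS_PER_LONG is
         64, the hint is 68, giving word == 1 and bit == 4, and the new
         object starts at byte offset 68 << order into the page.  */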
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested area,
     so that bytes beyond the end, if any, will not necessarily be zero.  */
  poison (result, 1 << order);
#endif
  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, entry);

  return result;
}


/* If P is not marked, marks it and returns 0.  Otherwise returns 1.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */
static int
mark_obj (p)
     void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
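
/* By way of example (numbers are illustrative): for an object at
   offset 0x60 into an order-5 page, the bit index is 0x60 >> 5 == 3,
   so on a host with 64-bit longs the mark lives at bit 3 of
   in_use_p[0].  */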

\f
/* Initialize the ggc-mmap allocator.  */
void
init_ggc ()
{
  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifndef MAP_ANONYMOUS
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}


void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}


void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);

      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            {
              p->context_depth = depth;
            }

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
              p->num_free_objects = p->save_num_free_objects;
            }
        }
    }
}
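
/* A sketch of how the context machinery is intended to be used by
   callers elsewhere in the compiler (illustrative only):

     ggc_push_context ();
     ... allocate temporaries; an intervening ggc_collect () can only
         reclaim objects created since the push ...
     ggc_pop_context ();

   after which any surviving temporaries are imported into the
   enclosing (now current) context, as described above.  */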


struct rtx_def *
ggc_alloc_rtx (nslots)
     int nslots;
{
  return (struct rtx_def *)
    alloc_obj (sizeof (struct rtx_def) + (nslots - 1) * sizeof (rtunion), 1);
}


struct rtvec_def *
ggc_alloc_rtvec (nelt)
     int nelt;
{
  return (struct rtvec_def *)
    alloc_obj (sizeof (struct rtvec_def) + (nelt - 1) * sizeof (rtx), 1);
}


union tree_node *
ggc_alloc_tree (length)
     int length;
{
  return (union tree_node *) alloc_obj (length, 1);
}


char *
ggc_alloc_string (contents, length)
     const char *contents;
     int length;
{
  char *string;

  if (length < 0)
    {
      if (contents == NULL)
        return NULL;
      length = strlen (contents);
    }

  string = (char *) alloc_obj (length + 1, 0);
  if (contents != NULL)
    memcpy (string, contents, length);
  string[length] = 0;

  return string;
}
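
/* For instance, init_ggc above obtains the shared empty string with
   ggc_alloc_string ("", 0), and a NUL-terminated GC-allocated copy of
   an ordinary C string s can be had with ggc_alloc_string (s, -1),
   which takes the strlen path.  */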


void *
ggc_alloc (size)
     size_t size;
{
  return alloc_obj (size, 0);
}

\f
static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth
              && ! p->save_in_use_p)
            {
              p->save_in_use_p = (unsigned long *) xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
              p->save_num_free_objects = p->num_free_objects;
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (p->num_free_objects == num_objects)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);
    }
}

#ifdef GGC_POISON
static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                poison (p->page + i * size, size);
            }
        }
    }
}
#endif

void
ggc_collect ()
{
  int time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
  sweep_pages ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  time = (time + 500) / 1000;
  if (!quiet_flag)
    fprintf (stderr, "%luk in %d.%03d}",
             (unsigned long) G.allocated / 1024, time / 1000, time % 1000);
}
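
/* When quiet_flag is not set, the two fprintf calls above combine to
   produce progress output of the form

     {GC 5120k -> 3072k in 0.123}

   where the figures (made-up values here) are kilobytes of allocated
   data before and after the collection, and the elapsed time in
   seconds.  */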


int
ggc_set_mark_rtx (r)
     rtx r;
{
  return mark_obj (r);
}

int
ggc_set_mark_rtvec (v)
     rtvec v;
{
  return mark_obj (v);
}

int
ggc_set_mark_tree (t)
     tree t;
{
  return mark_obj (t);
}

void
ggc_mark_string (s)
     char *s;
{
  if (s)
    mark_obj (s);
}

void
ggc_mark_string_if_gcable (s)
     char *s;
{
  if (s && ggc_allocated_p (s))
    mark_obj (s);
}

void
ggc_mark (p)
     void *p;
{
  if (p)
    mark_obj (p);
}