1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31
32 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
33 file open. Prefer either to valloc. */
34 #ifdef HAVE_MMAP_ANON
35 # undef HAVE_MMAP_DEV_ZERO
36
37 # include <sys/mman.h>
38 # ifndef MAP_FAILED
39 # define MAP_FAILED -1
40 # endif
41 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
42 # define MAP_ANONYMOUS MAP_ANON
43 # endif
44 # define USING_MMAP
45
46 #endif
47
48 #ifdef HAVE_MMAP_DEV_ZERO
49
50 # include <sys/mman.h>
51 # ifndef MAP_FAILED
52 # define MAP_FAILED -1
53 # endif
54 # define USING_MMAP
55
56 #endif
57
58 #ifndef USING_MMAP
59 #define USING_MALLOC_PAGE_GROUPS
60 #endif
61
62 /* Strategy:
63
64 This garbage-collecting allocator allocates objects on one of a set
65 of pages. Each page can allocate objects of a single size only;
66 available sizes are powers of two starting at four bytes. The size
67 of an allocation request is rounded up to the next power of two
68 (`order'), and satisfied from the appropriate page.
69
70 Each page is recorded in a page-entry, which also maintains an
71 in-use bitmap of object positions on the page. This allows the
72 allocation state of a particular object to be flipped without
73 touching the page itself.
74
75 Each page-entry also has a context depth, which is used to track
76 pushing and popping of allocation contexts. Only objects allocated
77 in the current (highest-numbered) context may be collected.
78
79 Page entries are arranged in an array of singly-linked lists. The
80 array is indexed by the allocation size, in bits, of the pages on
81 it; i.e. all pages on a list allocate objects of the same size.
82 Pages are ordered on the list such that all non-full pages precede
83 all full pages, with non-full pages arranged in order of decreasing
84 context depth.
85
86 Empty pages (of all orders) are kept on a single page cache list,
87 and are considered first when new pages are required; they are
88 deallocated at the start of the next collection if they haven't
89 been recycled by then. */
90
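/* An illustrative walk-through of the strategy above (the figures are
   examples, not fixed constants): on a host with 4096-byte pages and
   32-bit longs, a 20-byte request is rounded up to the 32-byte order,
   so its page holds 4096 / 32 = 128 objects and needs
   CEIL (128 + 1, 32) = 5 longs of in-use bitmap; the extra bit is the
   one-past-the-end sentinel set up in alloc_page.  */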
91
92 /* Define GGC_POISON to poison memory marked unused by the collector. */
93 #undef GGC_POISON
94
95 /* Define GGC_ALWAYS_COLLECT to perform collection every time
96 ggc_collect is invoked. Otherwise, collection is performed only
97 when a significant amount of memory has been allocated since the
98 last collection. */
99 #undef GGC_ALWAYS_COLLECT
100
101 #ifdef ENABLE_GC_CHECKING
102 #define GGC_POISON
103 #endif
104 #ifdef ENABLE_GC_ALWAYS_COLLECT
105 #define GGC_ALWAYS_COLLECT
106 #endif
107
108 /* Define GGC_DEBUG_LEVEL to print debugging information.
109 0: No debugging output.
110 1: GC statistics only.
111 2: Page-entry allocations/deallocations as well.
112 3: Object allocations as well.
113 4: Object marks as well. */
114 #define GGC_DEBUG_LEVEL (0)
115 \f
116 #ifndef HOST_BITS_PER_PTR
117 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
118 #endif
119
120 \f
121 /* A two-level tree is used to look up the page-entry for a given
122 pointer. Two chunks of the pointer's bits are extracted to index
123 the first and second levels of the tree, as follows:
124
125                            HOST_PAGE_SIZE_BITS
126                    32            |      |
127 msb +----------------+----+------+------+ lsb
128                        |     |
129                  PAGE_L1_BITS
130                              |
131                      PAGE_L2_BITS
132
133 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
134 pages are aligned on system page boundaries. The next most
135 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
136 index values in the lookup table, respectively.
137
138 For 32-bit architectures and the settings below, there are no
139 leftover bits. For architectures with wider pointers, the lookup
140 tree points to a list of pages, which must be scanned to find the
141 correct one. */
142
143 #define PAGE_L1_BITS (8)
144 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
145 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
146 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
147
148 #define LOOKUP_L1(p) \
149 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
150
151 #define LOOKUP_L2(p) \
152 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
153
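/* A sketch of how these macros decompose a pointer, assuming a 32-bit
   host with 4096-byte pages (G.lg_pagesize == 12, so PAGE_L2_BITS is
   also 12); the address itself is hypothetical:

       p = (void *) 0xa01b3000
       LOOKUP_L1 (p) == 0xa0     top PAGE_L1_BITS bits
       LOOKUP_L2 (p) == 0x1b3    next PAGE_L2_BITS bits
                        0x000    low lg_pagesize bits, ignored

   so the page-entry for p lives at base[0xa0][0x1b3].  */
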
154 /* The number of objects per allocation page, for objects on a page of
155 the indicated ORDER. */
156 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
157
158 /* The size of an object on a page of the indicated ORDER. */
159 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160
161 /* The number of extra orders, not corresponding to power-of-two sized
162 objects. */
163
164 #define NUM_EXTRA_ORDERS \
165 (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))
166
167 /* The Ith entry is the maximum size of an object to be stored in the
168 Ith extra order. Adding a new entry to this array is the *only*
169 thing you need to do to add a new special allocation size. */
170
171 static const size_t extra_order_size_table[] = {
172 sizeof (struct tree_decl),
173 sizeof (struct tree_list)
174 };
175
176 /* The total number of orders. */
177
178 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
179
180 /* We use this structure to determine the alignment required for
181 allocations. For power-of-two sized allocations, that's not a
182 problem, but it does matter for odd-sized allocations. */
183
184 struct max_alignment {
185 char c;
186 union {
187 HOST_WIDEST_INT i;
188 #ifdef HAVE_LONG_DOUBLE
189 long double d;
190 #else
191 double d;
192 #endif
193 } u;
194 };
195
196 /* The biggest alignment required. */
197
198 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
199
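/* For example, if the widest union member is 8 bytes and requires
   8-byte alignment, the compiler pads the leading char out to that
   alignment and offsetof (struct max_alignment, u) evaluates to 8.
   (The figure is illustrative; the real value is host-dependent.)  */
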
200 /* The Ith entry is the number of objects on a page of order I. */
201
202 static unsigned objects_per_page_table[NUM_ORDERS];
203
204 /* The Ith entry is the size of an object on a page of order I. */
205
206 static size_t object_size_table[NUM_ORDERS];
207
208 /* A page_entry records the status of an allocation page. This
209 structure is dynamically sized to fit the bitmap in_use_p. */
210 typedef struct page_entry
211 {
212 /* The next page-entry with objects of the same size, or NULL if
213 this is the last page-entry. */
214 struct page_entry *next;
215
216 /* The number of bytes allocated. (This will always be a multiple
217 of the host system page size.) */
218 size_t bytes;
219
220 /* The address at which the memory is allocated. */
221 char *page;
222
223 #ifdef USING_MALLOC_PAGE_GROUPS
224 /* Back pointer to the page group this page came from. */
225 struct page_group *group;
226 #endif
227
228 /* Saved in-use bit vector for pages that aren't in the topmost
229 context during collection. */
230 unsigned long *save_in_use_p;
231
232 /* Context depth of this page. */
233 unsigned short context_depth;
234
235 /* The number of free objects remaining on this page. */
236 unsigned short num_free_objects;
237
238 /* A likely candidate for the bit position of a free object for the
239 next allocation from this page. */
240 unsigned short next_bit_hint;
241
242 /* The lg of the size of objects allocated from this page. */
243 unsigned char order;
244
245 /* A bit vector indicating whether or not objects are in use. The
246 Nth bit is one if the Nth object on this page is allocated. This
247 array is dynamically sized. */
248 unsigned long in_use_p[1];
249 } page_entry;
250
251 #ifdef USING_MALLOC_PAGE_GROUPS
252 /* A page_group describes a large allocation from malloc, from which
253 we parcel out aligned pages. */
254 typedef struct page_group
255 {
256 /* A linked list of all extant page groups. */
257 struct page_group *next;
258
259 /* The address we received from malloc. */
260 char *allocation;
261
262 /* The size of the block. */
263 size_t alloc_size;
264
265 /* A bitmask of pages in use. */
266 unsigned int in_use;
267 } page_group;
268 #endif
269
270 #if HOST_BITS_PER_PTR <= 32
271
272 /* On 32-bit hosts, we use a two-level page table, as pictured above. */
273 typedef page_entry **page_table[PAGE_L1_SIZE];
274
275 #else
276
277 /* On 64-bit hosts, we use the same two-level page tables plus a linked
278 list that disambiguates the top 32 bits. There will almost always be
279 exactly one entry in the list. */
280 typedef struct page_table_chain
281 {
282 struct page_table_chain *next;
283 size_t high_bits;
284 page_entry **table[PAGE_L1_SIZE];
285 } *page_table;
286
287 #endif
288
289 /* The rest of the global variables. */
290 static struct globals
291 {
292 /* The Nth element in this array is a page with objects of size 2^N.
293 If there are any pages with free objects, they will be at the
294 head of the list. NULL if there are no page-entries for this
295 object size. */
296 page_entry *pages[NUM_ORDERS];
297
298 /* The Nth element in this array is the last page with objects of
299 size 2^N. NULL if there are no page-entries for this object
300 size. */
301 page_entry *page_tails[NUM_ORDERS];
302
303 /* Lookup table for associating allocation pages with object addresses. */
304 page_table lookup;
305
306 /* The system's page size. */
307 size_t pagesize;
308 size_t lg_pagesize;
309
310 /* Bytes currently allocated. */
311 size_t allocated;
312
313 /* Bytes currently allocated at the end of the last collection. */
314 size_t allocated_last_gc;
315
316 /* Total amount of memory mapped. */
317 size_t bytes_mapped;
318
319 /* The current depth in the context stack. */
320 unsigned short context_depth;
321
322 /* A file descriptor open to /dev/zero for reading. */
323 #if defined (HAVE_MMAP_DEV_ZERO)
324 int dev_zero_fd;
325 #endif
326
327 /* A cache of free system pages. */
328 page_entry *free_pages;
329
330 #ifdef USING_MALLOC_PAGE_GROUPS
331 page_group *page_groups;
332 #endif
333
334 /* The stream used for debugging output. */
335 FILE *debug_file;
336 } G;
337
338 /* The size in bytes required to maintain a bitmap for the objects
339 on a page-entry. */
340 #define BITMAP_SIZE(Num_objects) \
341 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
342
343 /* Skip garbage collection if the current allocation is not at least
344 this factor times the allocation at the end of the last collection.
345 In other words, total allocation must expand by (this factor minus
346 one) before collection is performed. */
347 #define GGC_MIN_EXPAND_FOR_GC (1.3)
348
349 /* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
350 test from triggering too often when the heap is small. */
351 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
352
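/* As a worked example of these two knobs: with allocated_last_gc
   clamped to the 4MB floor below, the first collection cannot trigger
   before total allocation reaches 1.3 * 4MB, i.e. roughly 5.2MB.  */
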
353 /* Allocate pages in chunks of this size, to throttle calls to memory
354 allocation routines. The first page is used, the rest go onto the
355 free list. This cannot be larger than HOST_BITS_PER_INT for the
356 in_use bitmask for page_group. */
357 #define GGC_QUIRE_SIZE 16
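
/* For instance, with the default quire of 16 and 4096-byte pages, a
   request for a single fresh page under mmap maps 64K at once:
   alloc_page returns the first page and chains the other fifteen
   onto G.free_pages, throttling mmap traffic for small orders.  */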
358 \f
359 static int ggc_allocated_p PARAMS ((const void *));
360 static page_entry *lookup_page_table_entry PARAMS ((const void *));
361 static void set_page_table_entry PARAMS ((void *, page_entry *));
362 #ifdef USING_MMAP
363 static char *alloc_anon PARAMS ((char *, size_t));
364 #endif
365 #ifdef USING_MALLOC_PAGE_GROUPS
366 static size_t page_group_index PARAMS ((char *, char *));
367 static void set_page_group_in_use PARAMS ((page_group *, char *));
368 static void clear_page_group_in_use PARAMS ((page_group *, char *));
369 #endif
370 static struct page_entry * alloc_page PARAMS ((unsigned));
371 static void free_page PARAMS ((struct page_entry *));
372 static void release_pages PARAMS ((void));
373 static void clear_marks PARAMS ((void));
374 static void sweep_pages PARAMS ((void));
375 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
376
377 #ifdef GGC_POISON
378 static void poison_pages PARAMS ((void));
379 #endif
380
381 void debug_print_page_list PARAMS ((int));
382 \f
383 /* Returns non-zero if P was allocated in GC'able memory. */
384
385 static inline int
386 ggc_allocated_p (p)
387 const void *p;
388 {
389 page_entry ***base;
390 size_t L1, L2;
391
392 #if HOST_BITS_PER_PTR <= 32
393 base = &G.lookup[0];
394 #else
395 page_table table = G.lookup;
396 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
397 while (1)
398 {
399 if (table == NULL)
400 return 0;
401 if (table->high_bits == high_bits)
402 break;
403 table = table->next;
404 }
405 base = &table->table[0];
406 #endif
407
408 /* Extract the level 1 and 2 indices. */
409 L1 = LOOKUP_L1 (p);
410 L2 = LOOKUP_L2 (p);
411
412 return base[L1] && base[L1][L2];
413 }
414
415 /* Traverse the page table and find the entry for a page.
416 Die (probably) if the object wasn't allocated via GC. */
417
418 static inline page_entry *
419 lookup_page_table_entry (p)
420 const void *p;
421 {
422 page_entry ***base;
423 size_t L1, L2;
424
425 #if HOST_BITS_PER_PTR <= 32
426 base = &G.lookup[0];
427 #else
428 page_table table = G.lookup;
429 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
430 while (table->high_bits != high_bits)
431 table = table->next;
432 base = &table->table[0];
433 #endif
434
435 /* Extract the level 1 and 2 indices. */
436 L1 = LOOKUP_L1 (p);
437 L2 = LOOKUP_L2 (p);
438
439 return base[L1][L2];
440 }
441
442 /* Set the page table entry for a page. */
443
444 static void
445 set_page_table_entry (p, entry)
446 void *p;
447 page_entry *entry;
448 {
449 page_entry ***base;
450 size_t L1, L2;
451
452 #if HOST_BITS_PER_PTR <= 32
453 base = &G.lookup[0];
454 #else
455 page_table table;
456 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
457 for (table = G.lookup; table; table = table->next)
458 if (table->high_bits == high_bits)
459 goto found;
460
461 /* Not found -- allocate a new table. */
462 table = (page_table) xcalloc (1, sizeof(*table));
463 table->next = G.lookup;
464 table->high_bits = high_bits;
465 G.lookup = table;
466 found:
467 base = &table->table[0];
468 #endif
469
470 /* Extract the level 1 and 2 indices. */
471 L1 = LOOKUP_L1 (p);
472 L2 = LOOKUP_L2 (p);
473
474 if (base[L1] == NULL)
475 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
476
477 base[L1][L2] = entry;
478 }
479
480 /* Prints the page-entry for object size ORDER, for debugging. */
481
482 void
483 debug_print_page_list (order)
484 int order;
485 {
486 page_entry *p;
487 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
488 (PTR) G.page_tails[order]);
489 p = G.pages[order];
490 while (p != NULL)
491 {
492 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
493 p->num_free_objects);
494 p = p->next;
495 }
496 printf ("NULL\n");
497 fflush (stdout);
498 }
499
500 #ifdef USING_MMAP
501 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
502 (if non-null). The ifdef structure here is intended to cause a
503 compile error unless exactly one of the HAVE_* macros is defined. */
504
505 static inline char *
506 alloc_anon (pref, size)
507 char *pref ATTRIBUTE_UNUSED;
508 size_t size;
509 {
510 #ifdef HAVE_MMAP_ANON
511 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
512 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
513 #endif
514 #ifdef HAVE_MMAP_DEV_ZERO
515 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
516 MAP_PRIVATE, G.dev_zero_fd, 0);
517 #endif
518
519 if (page == (char *) MAP_FAILED)
520 {
521 perror ("Virtual memory exhausted");
522 exit (FATAL_EXIT_CODE);
523 }
524
525 /* Remember that we allocated this memory. */
526 G.bytes_mapped += size;
527
528 return page;
529 }
530 #endif
531 #ifdef USING_MALLOC_PAGE_GROUPS
532 /* Compute the index for this page into the page group. */
533
534 static inline size_t
535 page_group_index (allocation, page)
536 char *allocation, *page;
537 {
538 return (size_t)(page - allocation) >> G.lg_pagesize;
539 }
540
541 /* Set and clear the in_use bit for this page in the page group. */
542
543 static inline void
544 set_page_group_in_use (group, page)
545 page_group *group;
546 char *page;
547 {
548 group->in_use |= 1 << page_group_index (group->allocation, page);
549 }
550
551 static inline void
552 clear_page_group_in_use (group, page)
553 page_group *group;
554 char *page;
555 {
556 group->in_use &= ~(1 << page_group_index (group->allocation, page));
557 }
558 #endif
559
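/* A sketch of the group bookkeeping above, assuming 4096-byte pages:
   a page starting at allocation + 0x3000 has index 0x3000 >> 12 == 3,
   so set_page_group_in_use sets bit 3 of group->in_use, and the
   backing malloc block becomes releasable once all such bits are
   clear again (see release_pages).  */
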
560 /* Allocate a new page for allocating objects of size 2^ORDER,
561 and return an entry for it. The entry is not added to the
562 appropriate page_table list. */
563
564 static inline struct page_entry *
565 alloc_page (order)
566 unsigned order;
567 {
568 struct page_entry *entry, *p, **pp;
569 char *page;
570 size_t num_objects;
571 size_t bitmap_size;
572 size_t page_entry_size;
573 size_t entry_size;
574 #ifdef USING_MALLOC_PAGE_GROUPS
575 page_group *group;
576 #endif
577
578 num_objects = OBJECTS_PER_PAGE (order);
579 bitmap_size = BITMAP_SIZE (num_objects + 1);
580 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
581 entry_size = num_objects * OBJECT_SIZE (order);
582 if (entry_size < G.pagesize)
583 entry_size = G.pagesize;
584
585 entry = NULL;
586 page = NULL;
587
588 /* Check the list of free pages for one we can use. */
589 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
590 if (p->bytes == entry_size)
591 break;
592
593 if (p != NULL)
594 {
595 /* Recycle the allocated memory from this page ... */
596 *pp = p->next;
597 page = p->page;
598
599 #ifdef USING_MALLOC_PAGE_GROUPS
600 group = p->group;
601 #endif
602
603 /* ... and, if possible, the page entry itself. */
604 if (p->order == order)
605 {
606 entry = p;
607 memset (entry, 0, page_entry_size);
608 }
609 else
610 free (p);
611 }
612 #ifdef USING_MMAP
613 else if (entry_size == G.pagesize)
614 {
615 /* We want just one page. Allocate a bunch of them and put the
616 extras on the freelist. (Can only do this optimization with
617 mmap for backing store.) */
618 struct page_entry *e, *f = G.free_pages;
619 int i;
620
621 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
622
623 /* This loop counts down so that the chain will be in ascending
624 memory order. */
625 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
626 {
627 e = (struct page_entry *) xcalloc (1, page_entry_size);
628 e->order = order;
629 e->bytes = G.pagesize;
630 e->page = page + (i << G.lg_pagesize);
631 e->next = f;
632 f = e;
633 }
634
635 G.free_pages = f;
636 }
637 else
638 page = alloc_anon (NULL, entry_size);
639 #endif
640 #ifdef USING_MALLOC_PAGE_GROUPS
641 else
642 {
643 /* Allocate a large block of memory and serve out the aligned
644 pages therein. This results in much less memory wastage
645 than the traditional implementation of valloc. */
646
647 char *allocation, *a, *enda;
648 size_t alloc_size, head_slop, tail_slop;
649 int multiple_pages = (entry_size == G.pagesize);
650
651 if (multiple_pages)
652 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
653 else
654 alloc_size = entry_size + G.pagesize - 1;
655 allocation = xmalloc (alloc_size);
656
657 page = (char *)(((size_t) allocation + G.pagesize - 1) & -G.pagesize);
658 head_slop = page - allocation;
659 if (multiple_pages)
660 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
661 else
662 tail_slop = alloc_size - entry_size - head_slop;
663 enda = allocation + alloc_size - tail_slop;
664
665 /* We allocated N pages, which are likely not aligned, leaving
666 us with N-1 usable pages. We plan to place the page_group
667 structure somewhere in the slop. */
668 if (head_slop >= sizeof (page_group))
669 group = (page_group *)page - 1;
670 else
671 {
672 /* We magically got an aligned allocation. Too bad, we have
673 to waste a page anyway. */
674 if (tail_slop == 0)
675 {
676 enda -= G.pagesize;
677 tail_slop += G.pagesize;
678 }
679 if (tail_slop < sizeof (page_group))
680 abort ();
681 group = (page_group *)enda;
682 tail_slop -= sizeof (page_group);
683 }
684
685 /* Remember that we allocated this memory. */
686 group->next = G.page_groups;
687 group->allocation = allocation;
688 group->alloc_size = alloc_size;
689 group->in_use = 0;
690 G.page_groups = group;
691 G.bytes_mapped += alloc_size;
692
693 /* If we allocated multiple pages, put the rest on the free list. */
694 if (multiple_pages)
695 {
696 struct page_entry *e, *f = G.free_pages;
697 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
698 {
699 e = (struct page_entry *) xcalloc (1, page_entry_size);
700 e->order = order;
701 e->bytes = G.pagesize;
702 e->page = a;
703 e->group = group;
704 e->next = f;
705 f = e;
706 }
707 G.free_pages = f;
708 }
709 }
710 #endif
711
712 if (entry == NULL)
713 entry = (struct page_entry *) xcalloc (1, page_entry_size);
714
715 entry->bytes = entry_size;
716 entry->page = page;
717 entry->context_depth = G.context_depth;
718 entry->order = order;
719 entry->num_free_objects = num_objects;
720 entry->next_bit_hint = 1;
721
722 #ifdef USING_MALLOC_PAGE_GROUPS
723 entry->group = group;
724 set_page_group_in_use (group, page);
725 #endif
726
727 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
728 increment the hint. */
729 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
730 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
731
732 set_page_table_entry (page, entry);
733
734 if (GGC_DEBUG_LEVEL >= 2)
735 fprintf (G.debug_file,
736 "Allocating page at %p, object size=%ld, data %p-%p\n",
737 (PTR) entry, (long) OBJECT_SIZE (order), page,
738 page + entry_size - 1);
739
740 return entry;
741 }
742
743 /* For a page that is no longer needed, put it on the free page list. */
744
745 static inline void
746 free_page (entry)
747 page_entry *entry;
748 {
749 if (GGC_DEBUG_LEVEL >= 2)
750 fprintf (G.debug_file,
751 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
752 entry->page, entry->page + entry->bytes - 1);
753
754 set_page_table_entry (entry->page, NULL);
755
756 #ifdef USING_MALLOC_PAGE_GROUPS
757 clear_page_group_in_use (entry->group, entry->page);
758 #endif
759
760 entry->next = G.free_pages;
761 G.free_pages = entry;
762 }
763
764 /* Release the free page cache to the system. */
765
766 static void
767 release_pages ()
768 {
769 #ifdef USING_MMAP
770 page_entry *p, *next;
771 char *start;
772 size_t len;
773
774 /* Gather up adjacent pages so they are unmapped together. */
775 p = G.free_pages;
776
777 while (p)
778 {
779 start = p->page;
780 next = p->next;
781 len = p->bytes;
782 free (p);
783 p = next;
784
785 while (p && p->page == start + len)
786 {
787 next = p->next;
788 len += p->bytes;
789 free (p);
790 p = next;
791 }
792
793 munmap (start, len);
794 G.bytes_mapped -= len;
795 }
796
797 G.free_pages = NULL;
798 #endif
799 #ifdef USING_MALLOC_PAGE_GROUPS
800 page_entry **pp, *p;
801 page_group **gp, *g;
802
803 /* Remove all pages from free page groups from the list. */
804 pp = &G.free_pages;
805 while ((p = *pp) != NULL)
806 if (p->group->in_use == 0)
807 {
808 *pp = p->next;
809 free (p);
810 }
811 else
812 pp = &p->next;
813
814 /* Remove all free page groups, and release the storage. */
815 gp = &G.page_groups;
816 while ((g = *gp) != NULL)
817 if (g->in_use == 0)
818 {
819 *gp = g->next;
820 G.bytes_mapped -= g->alloc_size;
821 free (g->allocation);
822 }
823 else
824 gp = &g->next;
825 #endif
826 }
827
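/* For example, if the free list holds three pages at addresses P,
   P + 4K and P + 8K, in that order, the mmap branch above releases
   them with a single munmap of 12K starting at P.  */
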
828 /* This table provides a fast way to determine ceil(log_2(size)) for
829 allocation requests. The minimum allocation size is eight bytes. */
830
831 static unsigned char size_lookup[257] =
832 {
833 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
834 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
835 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
836 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
837 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
838 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
839 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
840 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
841 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
842 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
843 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
844 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
845 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
846 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
847 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
848 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
849 8
850 };
851
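/* In the initial table, for instance, size_lookup[24] is 5, so a
   24-byte request is served from the 32-byte (order 5) pages.  Note
   that init_ggc later rewrites slices of this table so that sizes
   just above a power of two can map to one of the extra orders
   instead.  */
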
852 /* Allocate a chunk of memory of SIZE bytes. Its contents are
853 undefined. */
854
855 void *
856 ggc_alloc (size)
857 size_t size;
858 {
859 unsigned order, word, bit, object_offset;
860 struct page_entry *entry;
861 void *result;
862
863 if (size <= 256)
864 order = size_lookup[size];
865 else
866 {
867 order = 9;
868 while (size > OBJECT_SIZE (order))
869 order++;
870 }
871
872 /* If there are non-full pages for this size allocation, they are at
873 the head of the list. */
874 entry = G.pages[order];
875
876 /* If there is no page for this object size, or all pages in this
877 context are full, allocate a new page. */
878 if (entry == NULL || entry->num_free_objects == 0)
879 {
880 struct page_entry *new_entry;
881 new_entry = alloc_page (order);
882
883 /* If this is the only entry, it's also the tail. */
884 if (entry == NULL)
885 G.page_tails[order] = new_entry;
886
887 /* Put new pages at the head of the page list. */
888 new_entry->next = entry;
889 entry = new_entry;
890 G.pages[order] = new_entry;
891
892 /* For a new page, we know the word and bit positions (in the
893 in_use bitmap) of the first available object -- they're zero. */
894 new_entry->next_bit_hint = 1;
895 word = 0;
896 bit = 0;
897 object_offset = 0;
898 }
899 else
900 {
901 /* First try to use the hint left from the previous allocation
902 to locate a clear bit in the in-use bitmap. We've made sure
903 that the one-past-the-end bit is always set, so if the hint
904 has run over, this test will fail. */
905 unsigned hint = entry->next_bit_hint;
906 word = hint / HOST_BITS_PER_LONG;
907 bit = hint % HOST_BITS_PER_LONG;
908
909 /* If the hint didn't work, scan the bitmap from the beginning. */
910 if ((entry->in_use_p[word] >> bit) & 1)
911 {
912 word = bit = 0;
913 while (~entry->in_use_p[word] == 0)
914 ++word;
915 while ((entry->in_use_p[word] >> bit) & 1)
916 ++bit;
917 hint = word * HOST_BITS_PER_LONG + bit;
918 }
919
920 /* Next time, try the next bit. */
921 entry->next_bit_hint = hint + 1;
922
923 object_offset = hint * OBJECT_SIZE (order);
924 }
925
926 /* Set the in-use bit. */
927 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
928
929 /* Keep a running total of the number of free objects. If this page
930 fills up, we may have to move it to the end of the list if the
931 next page isn't full. If the next page is full, all subsequent
932 pages are full, so there's no need to move it. */
933 if (--entry->num_free_objects == 0
934 && entry->next != NULL
935 && entry->next->num_free_objects > 0)
936 {
937 G.pages[order] = entry->next;
938 entry->next = NULL;
939 G.page_tails[order]->next = entry;
940 G.page_tails[order] = entry;
941 }
942
943 /* Calculate the object's address. */
944 result = entry->page + object_offset;
945
946 #ifdef GGC_POISON
947 /* `Poison' the entire allocated object, including any padding at
948 the end. */
949 memset (result, 0xaf, OBJECT_SIZE (order));
950 #endif
951
952 /* Keep track of how many bytes are being allocated. This
953 information is used in deciding when to collect. */
954 G.allocated += OBJECT_SIZE (order);
955
956 if (GGC_DEBUG_LEVEL >= 3)
957 fprintf (G.debug_file,
958 "Allocating object, requested size=%ld, actual=%ld at %p on %p\n",
959 (long) size, (long) OBJECT_SIZE (order), result, (PTR) entry);
960
961 return result;
962 }
963
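/* A minimal usage sketch; the caller and request size here are
   hypothetical, and much code goes through wrappers such as
   ggc_alloc_cleared in ggc-common.c rather than calling ggc_alloc
   directly.  */
#if 0
{
  /* A 40-byte request falls in the 64-byte order (absent an extra
     order covering it), so 64 bytes are charged to G.allocated.  */
  char *buf = (char *) ggc_alloc (40);
}
#endif
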
964 /* If P is not marked, mark it and return false. Otherwise return true.
965 P must have been allocated by the GC allocator; it mustn't point to
966 static objects, stack variables, or memory allocated with malloc. */
967
968 int
969 ggc_set_mark (p)
970 const void *p;
971 {
972 page_entry *entry;
973 unsigned bit, word;
974 unsigned long mask;
975
976 /* Look up the page on which the object is allocated. If the object
977 wasn't allocated by the collector, we'll probably die. */
978 entry = lookup_page_table_entry (p);
979 #ifdef ENABLE_CHECKING
980 if (entry == NULL)
981 abort ();
982 #endif
983
984 /* Calculate the index of the object on the page; this is its bit
985 position in the in_use_p bitmap. */
986 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
987 word = bit / HOST_BITS_PER_LONG;
988 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
989
990 /* If the bit was previously set, skip it. */
991 if (entry->in_use_p[word] & mask)
992 return 1;
993
994 /* Otherwise set it, and decrement the free object count. */
995 entry->in_use_p[word] |= mask;
996 entry->num_free_objects -= 1;
997
998 if (GGC_DEBUG_LEVEL >= 4)
999 fprintf (G.debug_file, "Marking %p\n", p);
1000
1001 return 0;
1002 }
1003
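/* An example of the index arithmetic above, assuming 32-byte objects
   and 32-bit longs: an object 0x260 bytes into its page is object
   number 0x260 / 32 == 19, so its mark is bit 19 of in_use_p[0];
   object number 40 would instead be bit 8 of in_use_p[1].  */
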
1004 /* Return the size of the gc-able object P. */
1005
1006 size_t
1007 ggc_get_size (p)
1008 const void *p;
1009 {
1010 page_entry *pe = lookup_page_table_entry (p);
1011 return OBJECT_SIZE (pe->order);
1012 }
1013 \f
1014 /* Initialize the ggc-mmap allocator. */
1015
1016 void
1017 init_ggc ()
1018 {
1019 unsigned order;
1020
1021 G.pagesize = getpagesize();
1022 G.lg_pagesize = exact_log2 (G.pagesize);
1023
1024 #ifdef HAVE_MMAP_DEV_ZERO
1025 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1026 if (G.dev_zero_fd == -1)
1027 abort ();
1028 #endif
1029
1030 #if 0
1031 G.debug_file = fopen ("ggc-mmap.debug", "w");
1032 #else
1033 G.debug_file = stdout;
1034 #endif
1035
1036 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1037
1038 #ifdef USING_MMAP
1039 /* StunOS has an amazing off-by-one error for the first mmap allocation
1040 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1041 believe, is an unaligned page allocation, which would cause us to
1042 hork badly if we tried to use it. */
1043 {
1044 char *p = alloc_anon (NULL, G.pagesize);
1045 struct page_entry *e;
1046 if ((size_t)p & (G.pagesize - 1))
1047 {
1048 /* How losing. Discard this one and try another. If we still
1049 can't get something useful, give up. */
1050
1051 p = alloc_anon (NULL, G.pagesize);
1052 if ((size_t)p & (G.pagesize - 1))
1053 abort ();
1054 }
1055
1056 /* We have a good page, might as well hold onto it... */
1057 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1058 e->bytes = G.pagesize;
1059 e->page = p;
1060 e->next = G.free_pages;
1061 G.free_pages = e;
1062 }
1063 #endif
1064
1065 /* Initialize the object size table. */
1066 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1067 object_size_table[order] = (size_t) 1 << order;
1068 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1069 {
1070 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1071
1072 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1073 so that we're sure of getting aligned memory. */
1074 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
1075 object_size_table[order] = s;
1076 }
1077
1078 /* Initialize the objects-per-page table. */
1079 for (order = 0; order < NUM_ORDERS; ++order)
1080 {
1081 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1082 if (objects_per_page_table[order] == 0)
1083 objects_per_page_table[order] = 1;
1084 }
1085
1086 /* Reset the size_lookup array to put appropriately sized objects in
1087 the special orders. All objects bigger than the previous power
1088 of two, but no greater than the special size, should go in the
1089 new order. */
1090 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1091 {
1092 int o;
1093 int i;
1094
1095 o = size_lookup[OBJECT_SIZE (order)];
1096 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1097 size_lookup[i] = order;
1098 }
1099 }
1100
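/* To illustrate the rewriting loop above, suppose a (hypothetical)
   extra order has object size 24: then o = size_lookup[24] == 5, and
   the loop walks i = 24, 23, ..., 17 redirecting those entries to the
   extra order, stopping at 16 where size_lookup[16] == 4.  Requests
   of 17 through 24 bytes are then served by 24-byte objects rather
   than 32-byte ones.  */
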
1101 /* Increment the `GC context'. Objects allocated in an outer context
1102 are never freed, eliminating the need to register their roots. */
1103
1104 void
1105 ggc_push_context ()
1106 {
1107 ++G.context_depth;
1108
1109 /* Die on wrap. */
1110 if (G.context_depth == 0)
1111 abort ();
1112 }
1113
1114 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1115 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1116
1117 static void
1118 ggc_recalculate_in_use_p (p)
1119 page_entry *p;
1120 {
1121 unsigned int i;
1122 size_t num_objects;
1123
1124 /* Because the past-the-end bit in in_use_p is always set, we
1125 pretend there is one additional object. */
1126 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1127
1128 /* Reset the free object count. */
1129 p->num_free_objects = num_objects;
1130
1131 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1132 for (i = 0;
1133 i < CEIL (BITMAP_SIZE (num_objects),
1134 sizeof (*p->in_use_p));
1135 ++i)
1136 {
1137 unsigned long j;
1138
1139 /* Something is in use if it is marked, or if it was in use in a
1140 context further down the context stack. */
1141 p->in_use_p[i] |= p->save_in_use_p[i];
1142
1143 /* Decrement the free object count for every object allocated. */
1144 for (j = p->in_use_p[i]; j; j >>= 1)
1145 p->num_free_objects -= (j & 1);
1146 }
1147
1148 if (p->num_free_objects >= num_objects)
1149 abort ();
1150 }
1151
1152 /* Decrement the `GC context'. All objects allocated since the
1153 previous ggc_push_context are migrated to the outer context. */
1154
1155 void
1156 ggc_pop_context ()
1157 {
1158 unsigned order, depth;
1159
1160 depth = --G.context_depth;
1161
1162 /* Any remaining pages in the popped context are lowered to the new
1163 current context; i.e. objects allocated in the popped context and
1164 left over are imported into the previous context. */
1165 for (order = 2; order < NUM_ORDERS; order++)
1166 {
1167 page_entry *p;
1168
1169 for (p = G.pages[order]; p != NULL; p = p->next)
1170 {
1171 if (p->context_depth > depth)
1172 p->context_depth = depth;
1173
1174 /* If this page is now in the topmost context, and we'd
1175 saved its allocation state, restore it. */
1176 else if (p->context_depth == depth && p->save_in_use_p)
1177 {
1178 ggc_recalculate_in_use_p (p);
1179 free (p->save_in_use_p);
1180 p->save_in_use_p = 0;
1181 }
1182 }
1183 }
1184 }
1185 \f
1186 /* Unmark all objects. */
1187
1188 static inline void
1189 clear_marks ()
1190 {
1191 unsigned order;
1192
1193 for (order = 2; order < NUM_ORDERS; order++)
1194 {
1195 size_t num_objects = OBJECTS_PER_PAGE (order);
1196 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1197 page_entry *p;
1198
1199 for (p = G.pages[order]; p != NULL; p = p->next)
1200 {
1201 #ifdef ENABLE_CHECKING
1202 /* The data should be page-aligned. */
1203 if ((size_t) p->page & (G.pagesize - 1))
1204 abort ();
1205 #endif
1206
1207 /* Pages that aren't in the topmost context are not collected;
1208 nevertheless, we need their in-use bit vectors to store GC
1209 marks. So, back them up first. */
1210 if (p->context_depth < G.context_depth)
1211 {
1212 if (! p->save_in_use_p)
1213 p->save_in_use_p = xmalloc (bitmap_size);
1214 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1215 }
1216
1217 /* Reset the number of free objects and clear the
1218 in-use bits. These will be adjusted by ggc_set_mark. */
1219 p->num_free_objects = num_objects;
1220 memset (p->in_use_p, 0, bitmap_size);
1221
1222 /* Make sure the one-past-the-end bit is always set. */
1223 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1224 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1225 }
1226 }
1227 }
1228
1229 /* Free all empty pages. Partially empty pages need no attention
1230 because the `mark' bit doubles as an `unused' bit. */
1231
1232 static inline void
1233 sweep_pages ()
1234 {
1235 unsigned order;
1236
1237 for (order = 2; order < NUM_ORDERS; order++)
1238 {
1239 /* The last page-entry to consider, regardless of entries
1240 placed at the end of the list. */
1241 page_entry * const last = G.page_tails[order];
1242
1243 size_t num_objects = OBJECTS_PER_PAGE (order);
1244 size_t live_objects;
1245 page_entry *p, *previous;
1246 int done;
1247
1248 p = G.pages[order];
1249 if (p == NULL)
1250 continue;
1251
1252 previous = NULL;
1253 do
1254 {
1255 page_entry *next = p->next;
1256
1257 /* Loop until all entries have been examined. */
1258 done = (p == last);
1259
1260 /* Add all live objects on this page to the count of
1261 allocated memory. */
1262 live_objects = num_objects - p->num_free_objects;
1263
1264 G.allocated += OBJECT_SIZE (order) * live_objects;
1265
1266 /* Only objects on pages in the topmost context should get
1267 collected. */
1268 if (p->context_depth < G.context_depth)
1269 ;
1270
1271 /* Remove the page if it's empty. */
1272 else if (live_objects == 0)
1273 {
1274 if (! previous)
1275 G.pages[order] = next;
1276 else
1277 previous->next = next;
1278
1279 /* Are we removing the last element? */
1280 if (p == G.page_tails[order])
1281 G.page_tails[order] = previous;
1282 free_page (p);
1283 p = previous;
1284 }
1285
1286 /* If the page is full, move it to the end. */
1287 else if (p->num_free_objects == 0)
1288 {
1289 /* Don't move it if it's already at the end. */
1290 if (p != G.page_tails[order])
1291 {
1292 /* Move p to the end of the list. */
1293 p->next = NULL;
1294 G.page_tails[order]->next = p;
1295
1296 /* Update the tail pointer... */
1297 G.page_tails[order] = p;
1298
1299 /* ... and the head pointer, if necessary. */
1300 if (! previous)
1301 G.pages[order] = next;
1302 else
1303 previous->next = next;
1304 p = previous;
1305 }
1306 }
1307
1308 /* If we've fallen through to here, it's a page in the
1309 topmost context that is neither full nor empty. Such a
1310 page must precede pages at lesser context depth in the
1311 list, so move it to the head. */
1312 else if (p != G.pages[order])
1313 {
1314 previous->next = p->next;
1315 p->next = G.pages[order];
1316 G.pages[order] = p;
1317 /* Are we moving the last element? */
1318 if (G.page_tails[order] == p)
1319 G.page_tails[order] = previous;
1320 p = previous;
1321 }
1322
1323 previous = p;
1324 p = next;
1325 }
1326 while (! done);
1327
1328 /* Now, restore the in_use_p vectors for any pages from contexts
1329 other than the current one. */
1330 for (p = G.pages[order]; p; p = p->next)
1331 if (p->context_depth != G.context_depth)
1332 ggc_recalculate_in_use_p (p);
1333 }
1334 }
1335
1336 #ifdef GGC_POISON
1337 /* Clobber all free objects. */
1338
1339 static inline void
1340 poison_pages ()
1341 {
1342 unsigned order;
1343
1344 for (order = 2; order < NUM_ORDERS; order++)
1345 {
1346 size_t num_objects = OBJECTS_PER_PAGE (order);
1347 size_t size = OBJECT_SIZE (order);
1348 page_entry *p;
1349
1350 for (p = G.pages[order]; p != NULL; p = p->next)
1351 {
1352 size_t i;
1353
1354 if (p->context_depth != G.context_depth)
1355 /* Since we don't do any collection for pages in pushed
1356 contexts, there's no need to do any poisoning. And
1357 besides, the IN_USE_P array isn't valid until we pop
1358 contexts. */
1359 continue;
1360
1361 for (i = 0; i < num_objects; i++)
1362 {
1363 size_t word, bit;
1364 word = i / HOST_BITS_PER_LONG;
1365 bit = i % HOST_BITS_PER_LONG;
1366 if (((p->in_use_p[word] >> bit) & 1) == 0)
1367 memset (p->page + i * size, 0xa5, size);
1368 }
1369 }
1370 }
1371 }
1372 #endif
1373
1374 /* Top level mark-and-sweep routine. */
1375
1376 void
1377 ggc_collect ()
1378 {
1379 /* Avoid frequent unnecessary work by skipping collection if the
1380 total allocations haven't expanded much since the last
1381 collection. */
1382 #ifndef GGC_ALWAYS_COLLECT
1383 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1384 return;
1385 #endif
1386
1387 timevar_push (TV_GC);
1388 if (!quiet_flag)
1389 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1390
1391 /* Zero the total allocated bytes. This will be recalculated in the
1392 sweep phase. */
1393 G.allocated = 0;
1394
1395 /* Release the pages we freed the last time we collected, but didn't
1396 reuse in the interim. */
1397 release_pages ();
1398
1399 clear_marks ();
1400 ggc_mark_roots ();
1401
1402 #ifdef GGC_POISON
1403 poison_pages ();
1404 #endif
1405
1406 sweep_pages ();
1407
1408 G.allocated_last_gc = G.allocated;
1409 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1410 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1411
1412 timevar_pop (TV_GC);
1413
1414 if (!quiet_flag)
1415 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1416 }
1417
1418 /* Print allocation statistics. */
1419 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1420 ? (x) \
1421 : ((x) < 1024*1024*10 \
1422 ? (x) / 1024 \
1423 : (x) / (1024*1024))))
1424 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1425
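/* Worked examples of the scaling above: 5000 prints as "5000 ",
   2000000 prints as "1953k", and 50000000 prints as "47M".  */
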
1426 void
1427 ggc_print_statistics ()
1428 {
1429 struct ggc_statistics stats;
1430 unsigned int i;
1431 size_t total_overhead = 0;
1432
1433 /* Clear the statistics. */
1434 memset (&stats, 0, sizeof (stats));
1435
1436 /* Make sure collection will really occur. */
1437 G.allocated_last_gc = 0;
1438
1439 /* Collect and print the statistics common across collectors. */
1440 ggc_print_common_statistics (stderr, &stats);
1441
1442 /* Release free pages so that we will not count the bytes allocated
1443 there as part of the total allocated memory. */
1444 release_pages ();
1445
1446 /* Collect some information about the various sizes of
1447 allocation. */
1448 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1449 "Size", "Allocated", "Used", "Overhead");
1450 for (i = 0; i < NUM_ORDERS; ++i)
1451 {
1452 page_entry *p;
1453 size_t allocated;
1454 size_t in_use;
1455 size_t overhead;
1456
1457 /* Skip empty entries. */
1458 if (!G.pages[i])
1459 continue;
1460
1461 overhead = allocated = in_use = 0;
1462
1463 /* Figure out the total number of bytes allocated for objects of
1464 this size, and how many of them are actually in use. Also figure
1465 out how much memory the page table is using. */
1466 for (p = G.pages[i]; p; p = p->next)
1467 {
1468 allocated += p->bytes;
1469 in_use +=
1470 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1471
1472 overhead += (sizeof (page_entry) - sizeof (long)
1473 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1474 }
1475 fprintf (stderr, "%-5ld %10ld%c %10ld%c %10ld%c\n", (long) OBJECT_SIZE (i),
1476 SCALE (allocated), LABEL (allocated),
1477 SCALE (in_use), LABEL (in_use),
1478 SCALE (overhead), LABEL (overhead));
1479 total_overhead += overhead;
1480 }
1481 fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
1482 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1483 SCALE (G.allocated), LABEL(G.allocated),
1484 SCALE (total_overhead), LABEL (total_overhead));
1485 }