1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31
32 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
33 file open. Prefer either to valloc. */
34 #ifdef HAVE_MMAP_ANON
35 # undef HAVE_MMAP_DEV_ZERO
36
37 # include <sys/mman.h>
38 # ifndef MAP_FAILED
39 # define MAP_FAILED -1
40 # endif
41 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
42 # define MAP_ANONYMOUS MAP_ANON
43 # endif
44 # define USING_MMAP
45
46 #endif
47
48 #ifdef HAVE_MMAP_DEV_ZERO
49
50 # include <sys/mman.h>
51 # ifndef MAP_FAILED
52 # define MAP_FAILED -1
53 # endif
54 # define USING_MMAP
55
56 #endif
57
58 #ifndef USING_MMAP
59 #define USING_MALLOC_PAGE_GROUPS
60 #endif
61
62 /* Strategy:
63
64 This garbage-collecting allocator allocates objects on one of a set
65 of pages. Each page can allocate objects of a single size only;
66 available sizes are powers of two starting at four bytes. The size
67 of an allocation request is rounded up to the next power of two
68 (`order'), and satisfied from the appropriate page.
69
70 Each page is recorded in a page-entry, which also maintains an
71 in-use bitmap of object positions on the page. This allows the
72 allocation state of a particular object to be flipped without
73 touching the page itself.
74
75 Each page-entry also has a context depth, which is used to track
76 pushing and popping of allocation contexts. Only objects allocated
77 in the current (highest-numbered) context may be collected.
78
79 Page entries are arranged in an array of singly-linked lists. The
80 array is indexed by the allocation order of the pages on it;
81 i.e. all pages on a list allocate objects of the same size.
82 Pages are ordered on the list such that all non-full pages precede
83 all full pages, with non-full pages arranged in order of decreasing
84 context depth.
85
86 Empty pages (of all orders) are kept on a single page cache list,
87 and are considered first when new pages are required; they are
88 deallocated at the start of the next collection if they haven't
89 been recycled by then. */
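/* Illustrative, host-dependent example: a 20-byte request is rounded
   up to 32 bytes (order 5); assuming 4K pages, a page of that order
   holds 4096 / 32 == 128 objects, each tracked by one bit in the
   page-entry's in-use bitmap.  */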
90
91
92 /* Define GGC_POISON to poison memory marked unused by the collector. */
93 #undef GGC_POISON
94
95 /* Define GGC_ALWAYS_COLLECT to perform collection every time
96 ggc_collect is invoked. Otherwise, collection is performed only
97 when a significant amount of memory has been allocated since the
98 last collection. */
99 #undef GGC_ALWAYS_COLLECT
100
101 #ifdef ENABLE_GC_CHECKING
102 #define GGC_POISON
103 #endif
104 #ifdef ENABLE_GC_ALWAYS_COLLECT
105 #define GGC_ALWAYS_COLLECT
106 #endif
107
108 /* Define GGC_DEBUG_LEVEL to print debugging information.
109 0: No debugging output.
110 1: GC statistics only.
111 2: Page-entry allocations/deallocations as well.
112 3: Object allocations as well.
113 4: Object marks as well. */
114 #define GGC_DEBUG_LEVEL (0)
115 \f
116 #ifndef HOST_BITS_PER_PTR
117 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
118 #endif
119
120 \f
121 /* A two-level tree is used to look up the page-entry for a given
122 pointer. Two chunks of the pointer's bits are extracted to index
123 the first and second levels of the tree, as follows:
124
125                                32 bits
126        msb                                                       lsb
127            +--------------+----------------+---------------------+
128            | PAGE_L1_BITS |  PAGE_L2_BITS  | HOST_PAGE_SIZE_BITS |
129            +--------------+----------------+---------------------+
130             (first index)   (second index)        (ignored)
131
132
133 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
134 pages are aligned on system page boundaries. The next most
135 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
136 index values in the lookup table, respectively.
137
138 For 32-bit architectures and the settings below, there are no
139 leftover bits. For architectures with wider pointers, the lookup
140 tree points to a list of pages, which must be scanned to find the
141 correct one. */
142
143 #define PAGE_L1_BITS (8)
144 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
145 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
146 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
147
148 #define LOOKUP_L1(p) \
149 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
150
151 #define LOOKUP_L2(p) \
152 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
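/* Illustrative example, assuming 4K pages (G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12): for p == 0x0804a123, LOOKUP_L1 (p) == 0x08 and
   LOOKUP_L2 (p) == 0x04a, so the page-entry is found at
   base[0x08][0x04a].  */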
153
154 /* The number of objects per allocation page, for objects on a page of
155 the indicated ORDER. */
156 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
157
158 /* The size of an object on a page of the indicated ORDER. */
159 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160
161 /* The number of extra orders, not corresponding to power-of-two sized
162 objects. */
163
164 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
165
166 #define RTL_SIZE(NSLOTS) \
167 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
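/* The `- 1' above accounts for the one rtunion operand slot already
   included in sizeof (struct rtx_def), so RTL_SIZE (NSLOTS) is the
   size of an RTX with NSLOTS operand slots.  */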
168
169 /* The Ith entry is the maximum size of an object to be stored in the
170 Ith extra order. Adding a new entry to this array is the *only*
171 thing you need to do to add a new special allocation size. */
172
173 static const size_t extra_order_size_table[] = {
174 sizeof (struct tree_decl),
175 sizeof (struct tree_list),
176 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
177 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
178 };
179
180 /* The total number of orders. */
181
182 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
183
184 /* We use this structure to determine the alignment required for
185 allocations. For power-of-two sized allocations, that's not a
186 problem, but it does matter for odd-sized allocations. */
187
188 struct max_alignment {
189 char c;
190 union {
191 HOST_WIDEST_INT i;
192 #ifdef HAVE_LONG_DOUBLE
193 long double d;
194 #else
195 double d;
196 #endif
197 } u;
198 };
199
200 /* The biggest alignment required. */
201
202 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
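/* The offset of `u' equals the padding the compiler inserts after the
   leading char, i.e. the strictest alignment required by the widest
   integer and floating-point types in the union above.  */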
203
204 /* The Ith entry is the number of objects on a page of order I. */
205
206 static unsigned objects_per_page_table[NUM_ORDERS];
207
208 /* The Ith entry is the size of an object on a page of order I. */
209
210 static size_t object_size_table[NUM_ORDERS];
211
212 /* A page_entry records the status of an allocation page. This
213 structure is dynamically sized to fit the bitmap in_use_p. */
214 typedef struct page_entry
215 {
216 /* The next page-entry with objects of the same size, or NULL if
217 this is the last page-entry. */
218 struct page_entry *next;
219
220 /* The number of bytes allocated. (This will always be a multiple
221 of the host system page size.) */
222 size_t bytes;
223
224 /* The address at which the memory is allocated. */
225 char *page;
226
227 #ifdef USING_MALLOC_PAGE_GROUPS
228 /* Back pointer to the page group this page came from. */
229 struct page_group *group;
230 #endif
231
232 /* Saved in-use bit vector for pages that aren't in the topmost
233 context during collection. */
234 unsigned long *save_in_use_p;
235
236 /* Context depth of this page. */
237 unsigned short context_depth;
238
239 /* The number of free objects remaining on this page. */
240 unsigned short num_free_objects;
241
242 /* A likely candidate for the bit position of a free object for the
243 next allocation from this page. */
244 unsigned short next_bit_hint;
245
246 /* The lg of the size of objects allocated from this page. */
247 unsigned char order;
248
249 /* A bit vector indicating whether or not objects are in use. The
250 Nth bit is one if the Nth object on this page is allocated. This
251 array is dynamically sized. */
252 unsigned long in_use_p[1];
253 } page_entry;
254
255 #ifdef USING_MALLOC_PAGE_GROUPS
256 /* A page_group describes a large allocation from malloc, from which
257 we parcel out aligned pages. */
258 typedef struct page_group
259 {
260 /* A linked list of all extant page groups. */
261 struct page_group *next;
262
263 /* The address we received from malloc. */
264 char *allocation;
265
266 /* The size of the block. */
267 size_t alloc_size;
268
269 /* A bitmask of pages in use. */
270 unsigned int in_use;
271 } page_group;
272 #endif
273
274 #if HOST_BITS_PER_PTR <= 32
275
276 /* On 32-bit hosts, we use a two-level page table, as pictured above. */
277 typedef page_entry **page_table[PAGE_L1_SIZE];
278
279 #else
280
281 /* On 64-bit hosts, we use the same two-level page table plus a linked
282 list that disambiguates the top 32 bits. There will almost always be
283 exactly one entry in the list. */
284 typedef struct page_table_chain
285 {
286 struct page_table_chain *next;
287 size_t high_bits;
288 page_entry **table[PAGE_L1_SIZE];
289 } *page_table;
290
291 #endif
292
293 /* The rest of the global variables. */
294 static struct globals
295 {
296 /* The Nth element in this array is a page with objects of size 2^N.
297 If there are any pages with free objects, they will be at the
298 head of the list. NULL if there are no page-entries for this
299 object size. */
300 page_entry *pages[NUM_ORDERS];
301
302 /* The Nth element in this array is the last page with objects of
303 size 2^N. NULL if there are no page-entries for this object
304 size. */
305 page_entry *page_tails[NUM_ORDERS];
306
307 /* Lookup table for associating allocation pages with object addresses. */
308 page_table lookup;
309
310 /* The system's page size. */
311 size_t pagesize;
312 size_t lg_pagesize;
313
314 /* Bytes currently allocated. */
315 size_t allocated;
316
317 /* Bytes currently allocated at the end of the last collection. */
318 size_t allocated_last_gc;
319
320 /* Total amount of memory mapped. */
321 size_t bytes_mapped;
322
323 /* The current depth in the context stack. */
324 unsigned short context_depth;
325
326 /* A file descriptor open to /dev/zero for reading. */
327 #if defined (HAVE_MMAP_DEV_ZERO)
328 int dev_zero_fd;
329 #endif
330
331 /* A cache of free system pages. */
332 page_entry *free_pages;
333
334 #ifdef USING_MALLOC_PAGE_GROUPS
335 page_group *page_groups;
336 #endif
337
338 /* The stream used for debugging output. */
339 FILE *debug_file;
340 } G;
341
342 /* The size in bytes required to maintain a bitmap for the objects
343 on a page-entry. */
344 #define BITMAP_SIZE(Num_objects) \
345 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
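/* Host-dependent example: with 32-bit longs, a page holding 1023
   objects needs BITMAP_SIZE (1023 + 1) == CEIL (1024, 32) * 4 == 128
   bytes (the extra bit is the one-past-the-end sentinel set by
   alloc_page).  */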
346
347 /* Skip garbage collection if the current allocation is not at least
348 this factor times the allocation at the end of the last collection.
349 In other words, total allocation must expand by (this factor minus
350 one) before collection is performed. */
351 #define GGC_MIN_EXPAND_FOR_GC (1.3)
352
353 /* Keep `allocated_last_gc' at no less than 4MB, so that the memory
354 expansion test doesn't trigger too often when the heap is small. */
355 #define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
356
357 /* Allocate pages in chunks of this size, to throttle calls to memory
358 allocation routines. The first page is used, the rest go onto the
359 free list. This cannot be larger than HOST_BITS_PER_INT for the
360 in_use bitmask for page_group. */
361 #define GGC_QUIRE_SIZE 16
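/* E.g. with 4K pages, each batch allocation requests
   16 * 4096 == 64K of backing store at a time.  */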
362 \f
363 static int ggc_allocated_p PARAMS ((const void *));
364 static page_entry *lookup_page_table_entry PARAMS ((const void *));
365 static void set_page_table_entry PARAMS ((void *, page_entry *));
366 #ifdef USING_MMAP
367 static char *alloc_anon PARAMS ((char *, size_t));
368 #endif
369 #ifdef USING_MALLOC_PAGE_GROUPS
370 static size_t page_group_index PARAMS ((char *, char *));
371 static void set_page_group_in_use PARAMS ((page_group *, char *));
372 static void clear_page_group_in_use PARAMS ((page_group *, char *));
373 #endif
374 static struct page_entry * alloc_page PARAMS ((unsigned));
375 static void free_page PARAMS ((struct page_entry *));
376 static void release_pages PARAMS ((void));
377 static void clear_marks PARAMS ((void));
378 static void sweep_pages PARAMS ((void));
379 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
380
381 #ifdef GGC_POISON
382 static void poison_pages PARAMS ((void));
383 #endif
384
385 void debug_print_page_list PARAMS ((int));
386 \f
387 /* Returns non-zero if P was allocated in GC'able memory. */
388
389 static inline int
390 ggc_allocated_p (p)
391 const void *p;
392 {
393 page_entry ***base;
394 size_t L1, L2;
395
396 #if HOST_BITS_PER_PTR <= 32
397 base = &G.lookup[0];
398 #else
399 page_table table = G.lookup;
400 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
401 while (1)
402 {
403 if (table == NULL)
404 return 0;
405 if (table->high_bits == high_bits)
406 break;
407 table = table->next;
408 }
409 base = &table->table[0];
410 #endif
411
412 /* Extract the level 1 and 2 indices. */
413 L1 = LOOKUP_L1 (p);
414 L2 = LOOKUP_L2 (p);
415
416 return base[L1] && base[L1][L2];
417 }
418
419 /* Traverse the page table and find the entry for a page.
420 Die (probably) if the object wasn't allocated via GC. */
421
422 static inline page_entry *
423 lookup_page_table_entry (p)
424 const void *p;
425 {
426 page_entry ***base;
427 size_t L1, L2;
428
429 #if HOST_BITS_PER_PTR <= 32
430 base = &G.lookup[0];
431 #else
432 page_table table = G.lookup;
433 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
434 while (table->high_bits != high_bits)
435 table = table->next;
436 base = &table->table[0];
437 #endif
438
439 /* Extract the level 1 and 2 indices. */
440 L1 = LOOKUP_L1 (p);
441 L2 = LOOKUP_L2 (p);
442
443 return base[L1][L2];
444 }
445
446 /* Set the page table entry for a page. */
447
448 static void
449 set_page_table_entry (p, entry)
450 void *p;
451 page_entry *entry;
452 {
453 page_entry ***base;
454 size_t L1, L2;
455
456 #if HOST_BITS_PER_PTR <= 32
457 base = &G.lookup[0];
458 #else
459 page_table table;
460 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
461 for (table = G.lookup; table; table = table->next)
462 if (table->high_bits == high_bits)
463 goto found;
464
465 /* Not found -- allocate a new table. */
466 table = (page_table) xcalloc (1, sizeof(*table));
467 table->next = G.lookup;
468 table->high_bits = high_bits;
469 G.lookup = table;
470 found:
471 base = &table->table[0];
472 #endif
473
474 /* Extract the level 1 and 2 indices. */
475 L1 = LOOKUP_L1 (p);
476 L2 = LOOKUP_L2 (p);
477
478 if (base[L1] == NULL)
479 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
480
481 base[L1][L2] = entry;
482 }
483
484 /* Prints the page-entry for object size ORDER, for debugging. */
485
486 void
487 debug_print_page_list (order)
488 int order;
489 {
490 page_entry *p;
491 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
492 (PTR) G.page_tails[order]);
493 p = G.pages[order];
494 while (p != NULL)
495 {
496 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
497 p->num_free_objects);
498 p = p->next;
499 }
500 printf ("NULL\n");
501 fflush (stdout);
502 }
503
504 #ifdef USING_MMAP
505 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
506 (if non-null). The ifdef structure here is intended to cause a
507 compile error unless exactly one of the HAVE_* macros is defined. */
508
509 static inline char *
510 alloc_anon (pref, size)
511 char *pref ATTRIBUTE_UNUSED;
512 size_t size;
513 {
514 #ifdef HAVE_MMAP_ANON
515 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
516 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
517 #endif
518 #ifdef HAVE_MMAP_DEV_ZERO
519 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
520 MAP_PRIVATE, G.dev_zero_fd, 0);
521 #endif
522
523 if (page == (char *) MAP_FAILED)
524 {
525 perror ("virtual memory exhausted");
526 exit (FATAL_EXIT_CODE);
527 }
528
529 /* Remember that we allocated this memory. */
530 G.bytes_mapped += size;
531
532 return page;
533 }
534 #endif
535 #ifdef USING_MALLOC_PAGE_GROUPS
536 /* Compute the index for this page into the page group. */
537
538 static inline size_t
539 page_group_index (allocation, page)
540 char *allocation, *page;
541 {
542 return (size_t) (page - allocation) >> G.lg_pagesize;
543 }
544
545 /* Set and clear the in_use bit for this page in the page group. */
546
547 static inline void
548 set_page_group_in_use (group, page)
549 page_group *group;
550 char *page;
551 {
552 group->in_use |= 1 << page_group_index (group->allocation, page);
553 }
554
555 static inline void
556 clear_page_group_in_use (group, page)
557 page_group *group;
558 char *page;
559 {
560 group->in_use &= ~(1 << page_group_index (group->allocation, page));
561 }
562 #endif
563
564 /* Allocate a new page for allocating objects of size 2^ORDER,
565 and return an entry for it. The entry is not added to the
566 appropriate page_table list. */
567
568 static inline struct page_entry *
569 alloc_page (order)
570 unsigned order;
571 {
572 struct page_entry *entry, *p, **pp;
573 char *page;
574 size_t num_objects;
575 size_t bitmap_size;
576 size_t page_entry_size;
577 size_t entry_size;
578 #ifdef USING_MALLOC_PAGE_GROUPS
579 page_group *group;
580 #endif
581
582 num_objects = OBJECTS_PER_PAGE (order);
583 bitmap_size = BITMAP_SIZE (num_objects + 1);
584 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
585 entry_size = num_objects * OBJECT_SIZE (order);
586 if (entry_size < G.pagesize)
587 entry_size = G.pagesize;
588
589 entry = NULL;
590 page = NULL;
591
592 /* Check the list of free pages for one we can use. */
593 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
594 if (p->bytes == entry_size)
595 break;
596
597 if (p != NULL)
598 {
599 /* Recycle the allocated memory from this page ... */
600 *pp = p->next;
601 page = p->page;
602
603 #ifdef USING_MALLOC_PAGE_GROUPS
604 group = p->group;
605 #endif
606
607 /* ... and, if possible, the page entry itself. */
608 if (p->order == order)
609 {
610 entry = p;
611 memset (entry, 0, page_entry_size);
612 }
613 else
614 free (p);
615 }
616 #ifdef USING_MMAP
617 else if (entry_size == G.pagesize)
618 {
619 /* We want just one page. Allocate a bunch of them and put the
620 extras on the freelist. (Can only do this optimization with
621 mmap for backing store.) */
622 struct page_entry *e, *f = G.free_pages;
623 int i;
624
625 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
626
627 /* This loop counts down so that the chain will be in ascending
628 memory order. */
629 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
630 {
631 e = (struct page_entry *) xcalloc (1, page_entry_size);
632 e->order = order;
633 e->bytes = G.pagesize;
634 e->page = page + (i << G.lg_pagesize);
635 e->next = f;
636 f = e;
637 }
638
639 G.free_pages = f;
640 }
641 else
642 page = alloc_anon (NULL, entry_size);
643 #endif
644 #ifdef USING_MALLOC_PAGE_GROUPS
645 else
646 {
647 /* Allocate a large block of memory and serve out the aligned
648 pages therein. This results in much less memory wastage
649 than the traditional implementation of valloc. */
650
651 char *allocation, *a, *enda;
652 size_t alloc_size, head_slop, tail_slop;
653 int multiple_pages = (entry_size == G.pagesize);
654
655 if (multiple_pages)
656 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
657 else
658 alloc_size = entry_size + G.pagesize - 1;
659 allocation = xmalloc (alloc_size);
660
661 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
662 head_slop = page - allocation;
663 if (multiple_pages)
664 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
665 else
666 tail_slop = alloc_size - entry_size - head_slop;
667 enda = allocation + alloc_size - tail_slop;
668
669 /* We allocated N pages, which are likely not aligned, leaving
670 us with N-1 usable pages. We plan to place the page_group
671 structure somewhere in the slop. */
672 if (head_slop >= sizeof (page_group))
673 group = (page_group *)page - 1;
674 else
675 {
676 /* We magically got an aligned allocation. Too bad, we have
677 to waste a page anyway. */
678 if (tail_slop == 0)
679 {
680 enda -= G.pagesize;
681 tail_slop += G.pagesize;
682 }
683 if (tail_slop < sizeof (page_group))
684 abort ();
685 group = (page_group *)enda;
686 tail_slop -= sizeof (page_group);
687 }
688
689 /* Remember that we allocated this memory. */
690 group->next = G.page_groups;
691 group->allocation = allocation;
692 group->alloc_size = alloc_size;
693 group->in_use = 0;
694 G.page_groups = group;
695 G.bytes_mapped += alloc_size;
696
697 /* If we allocated multiple pages, put the rest on the free list. */
698 if (multiple_pages)
699 {
700 struct page_entry *e, *f = G.free_pages;
701 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
702 {
703 e = (struct page_entry *) xcalloc (1, page_entry_size);
704 e->order = order;
705 e->bytes = G.pagesize;
706 e->page = a;
707 e->group = group;
708 e->next = f;
709 f = e;
710 }
711 G.free_pages = f;
712 }
713 }
714 #endif
715
716 if (entry == NULL)
717 entry = (struct page_entry *) xcalloc (1, page_entry_size);
718
719 entry->bytes = entry_size;
720 entry->page = page;
721 entry->context_depth = G.context_depth;
722 entry->order = order;
723 entry->num_free_objects = num_objects;
724 entry->next_bit_hint = 1;
725
726 #ifdef USING_MALLOC_PAGE_GROUPS
727 entry->group = group;
728 set_page_group_in_use (group, page);
729 #endif
730
731 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
732 increment the hint. */
733 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
734 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
735
736 set_page_table_entry (page, entry);
737
738 if (GGC_DEBUG_LEVEL >= 2)
739 fprintf (G.debug_file,
740 "Allocating page at %p, object size=%lu, data %p-%p\n",
741 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
742 page + entry_size - 1);
743
744 return entry;
745 }
746
747 /* For a page that is no longer needed, put it on the free page list. */
748
749 static inline void
750 free_page (entry)
751 page_entry *entry;
752 {
753 if (GGC_DEBUG_LEVEL >= 2)
754 fprintf (G.debug_file,
755 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
756 entry->page, entry->page + entry->bytes - 1);
757
758 set_page_table_entry (entry->page, NULL);
759
760 #ifdef USING_MALLOC_PAGE_GROUPS
761 clear_page_group_in_use (entry->group, entry->page);
762 #endif
763
764 entry->next = G.free_pages;
765 G.free_pages = entry;
766 }
767
768 /* Release the free page cache to the system. */
769
770 static void
771 release_pages ()
772 {
773 #ifdef USING_MMAP
774 page_entry *p, *next;
775 char *start;
776 size_t len;
777
778 /* Gather up adjacent pages so they are unmapped together. */
779 p = G.free_pages;
780
781 while (p)
782 {
783 start = p->page;
784 next = p->next;
785 len = p->bytes;
786 free (p);
787 p = next;
788
789 while (p && p->page == start + len)
790 {
791 next = p->next;
792 len += p->bytes;
793 free (p);
794 p = next;
795 }
796
797 munmap (start, len);
798 G.bytes_mapped -= len;
799 }
800
801 G.free_pages = NULL;
802 #endif
803 #ifdef USING_MALLOC_PAGE_GROUPS
804 page_entry **pp, *p;
805 page_group **gp, *g;
806
807 /* Remove all pages from free page groups from the list. */
808 pp = &G.free_pages;
809 while ((p = *pp) != NULL)
810 if (p->group->in_use == 0)
811 {
812 *pp = p->next;
813 free (p);
814 }
815 else
816 pp = &p->next;
817
818 /* Remove all free page groups, and release the storage. */
819 gp = &G.page_groups;
820 while ((g = *gp) != NULL)
821 if (g->in_use == 0)
822 {
823 *gp = g->next;
824 G.bytes_mapped -= g->alloc_size;
825 free (g->allocation);
826 }
827 else
828 gp = &g->next;
829 #endif
830 }
831
832 /* This table provides a fast way to determine ceil(log_2(size)) for
833 allocation requests. The minimum allocation size is eight bytes. */
834
835 static unsigned char size_lookup[257] =
836 {
837 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
838 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
839 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
840 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
841 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
842 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
843 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
844 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
845 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
846 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
847 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
848 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
849 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
850 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
851 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
852 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
853 8
854 };
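/* For example, size_lookup[24] == 5, so a 24-byte request is initially
   served from the 32-byte order; init_ggc later retargets some entries
   at the extra orders from extra_order_size_table.  */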
855
856 /* Allocate a chunk of memory of SIZE bytes and return a pointer to it.
857 The contents of the returned memory are undefined. */
858
859 void *
860 ggc_alloc (size)
861 size_t size;
862 {
863 unsigned order, word, bit, object_offset;
864 struct page_entry *entry;
865 void *result;
866
867 if (size <= 256)
868 order = size_lookup[size];
869 else
870 {
871 order = 9;
872 while (size > OBJECT_SIZE (order))
873 order++;
874 }
875
876 /* If there are non-full pages for this size allocation, they are at
877 the head of the list. */
878 entry = G.pages[order];
879
880 /* If there is no page for this object size, or all pages in this
881 context are full, allocate a new page. */
882 if (entry == NULL || entry->num_free_objects == 0)
883 {
884 struct page_entry *new_entry;
885 new_entry = alloc_page (order);
886
887 /* If this is the only entry, it's also the tail. */
888 if (entry == NULL)
889 G.page_tails[order] = new_entry;
890
891 /* Put new pages at the head of the page list. */
892 new_entry->next = entry;
893 entry = new_entry;
894 G.pages[order] = new_entry;
895
896 /* For a new page, we know the word and bit positions (in the
897 in_use bitmap) of the first available object -- they're zero. */
898 new_entry->next_bit_hint = 1;
899 word = 0;
900 bit = 0;
901 object_offset = 0;
902 }
903 else
904 {
905 /* First try to use the hint left from the previous allocation
906 to locate a clear bit in the in-use bitmap. We've made sure
907 that the one-past-the-end bit is always set, so if the hint
908 has run over, this test will fail. */
909 unsigned hint = entry->next_bit_hint;
910 word = hint / HOST_BITS_PER_LONG;
911 bit = hint % HOST_BITS_PER_LONG;
912
913 /* If the hint didn't work, scan the bitmap from the beginning. */
914 if ((entry->in_use_p[word] >> bit) & 1)
915 {
916 word = bit = 0;
917 while (~entry->in_use_p[word] == 0)
918 ++word;
919 while ((entry->in_use_p[word] >> bit) & 1)
920 ++bit;
921 hint = word * HOST_BITS_PER_LONG + bit;
922 }
923
924 /* Next time, try the next bit. */
925 entry->next_bit_hint = hint + 1;
926
927 object_offset = hint * OBJECT_SIZE (order);
928 }
929
930 /* Set the in-use bit. */
931 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
932
933 /* Keep a running total of the number of free objects. If this page
934 fills up, we may have to move it to the end of the list if the
935 next page isn't full. If the next page is full, all subsequent
936 pages are full, so there's no need to move it. */
937 if (--entry->num_free_objects == 0
938 && entry->next != NULL
939 && entry->next->num_free_objects > 0)
940 {
941 G.pages[order] = entry->next;
942 entry->next = NULL;
943 G.page_tails[order]->next = entry;
944 G.page_tails[order] = entry;
945 }
946
947 /* Calculate the object's address. */
948 result = entry->page + object_offset;
949
950 #ifdef GGC_POISON
951 /* `Poison' the entire allocated object, including any padding at
952 the end. */
953 memset (result, 0xaf, OBJECT_SIZE (order));
954 #endif
955
956 /* Keep track of how many bytes are being allocated. This
957 information is used in deciding when to collect. */
958 G.allocated += OBJECT_SIZE (order);
959
960 if (GGC_DEBUG_LEVEL >= 3)
961 fprintf (G.debug_file,
962 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
963 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
964 (PTR) entry);
965
966 return result;
967 }
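/* Illustrative flow: clients only ever call ggc_alloc; memory is
   reclaimed when ggc_collect clears all marks, re-marks everything
   reachable from the registered roots (ggc_mark_roots / ggc_set_mark),
   and then sweeps: unmarked objects become available for reuse and
   completely empty pages are returned to the free-page cache.  */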
968
969 /* If P is not marked, mark it and return zero. Otherwise return nonzero.
970 P must have been allocated by the GC allocator; it mustn't point to
971 static objects, stack variables, or memory allocated with malloc. */
972
973 int
974 ggc_set_mark (p)
975 const void *p;
976 {
977 page_entry *entry;
978 unsigned bit, word;
979 unsigned long mask;
980
981 /* Look up the page on which the object is allocated. If the object
982 wasn't allocated by the collector, we'll probably die. */
983 entry = lookup_page_table_entry (p);
984 #ifdef ENABLE_CHECKING
985 if (entry == NULL)
986 abort ();
987 #endif
988
989 /* Calculate the index of the object on the page; this is its bit
990 position in the in_use_p bitmap. */
991 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
992 word = bit / HOST_BITS_PER_LONG;
993 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
994
995 /* If the bit was previously set, skip it. */
996 if (entry->in_use_p[word] & mask)
997 return 1;
998
999 /* Otherwise set it, and decrement the free object count. */
1000 entry->in_use_p[word] |= mask;
1001 entry->num_free_objects -= 1;
1002
1003 if (GGC_DEBUG_LEVEL >= 4)
1004 fprintf (G.debug_file, "Marking %p\n", p);
1005
1006 return 0;
1007 }
1008
1009 /* Return 1 if P has been marked, zero otherwise.
1010 P must have been allocated by the GC allocator; it mustn't point to
1011 static objects, stack variables, or memory allocated with malloc. */
1012
1013 int
1014 ggc_marked_p (p)
1015 const void *p;
1016 {
1017 page_entry *entry;
1018 unsigned bit, word;
1019 unsigned long mask;
1020
1021 /* Look up the page on which the object is allocated. If the object
1022 wasn't allocated by the collector, we'll probably die. */
1023 entry = lookup_page_table_entry (p);
1024 #ifdef ENABLE_CHECKING
1025 if (entry == NULL)
1026 abort ();
1027 #endif
1028
1029 /* Calculate the index of the object on the page; this is its bit
1030 position in the in_use_p bitmap. */
1031 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
1032 word = bit / HOST_BITS_PER_LONG;
1033 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1034
1035 return (entry->in_use_p[word] & mask) != 0;
1036 }
1037
1038 /* Return the size of the gc-able object P. */
1039
1040 size_t
1041 ggc_get_size (p)
1042 const void *p;
1043 {
1044 page_entry *pe = lookup_page_table_entry (p);
1045 return OBJECT_SIZE (pe->order);
1046 }
1047 \f
1048 /* Initialize the ggc-mmap allocator. */
1049
1050 void
1051 init_ggc ()
1052 {
1053 unsigned order;
1054
1055 G.pagesize = getpagesize ();
1056 G.lg_pagesize = exact_log2 (G.pagesize);
1057
1058 #ifdef HAVE_MMAP_DEV_ZERO
1059 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1060 if (G.dev_zero_fd == -1)
1061 abort ();
1062 #endif
1063
1064 #if 0
1065 G.debug_file = fopen ("ggc-mmap.debug", "w");
1066 #else
1067 G.debug_file = stdout;
1068 #endif
1069
1070 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1071
1072 #ifdef USING_MMAP
1073 /* StunOS has an amazing off-by-one error for the first mmap allocation
1074 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1075 believe, is an unaligned page allocation, which would cause us to
1076 hork badly if we tried to use it. */
1077 {
1078 char *p = alloc_anon (NULL, G.pagesize);
1079 struct page_entry *e;
1080 if ((size_t)p & (G.pagesize - 1))
1081 {
1082 /* How losing. Discard this one and try another. If we still
1083 can't get something useful, give up. */
1084
1085 p = alloc_anon (NULL, G.pagesize);
1086 if ((size_t)p & (G.pagesize - 1))
1087 abort ();
1088 }
1089
1090 /* We have a good page, might as well hold onto it... */
1091 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1092 e->bytes = G.pagesize;
1093 e->page = p;
1094 e->next = G.free_pages;
1095 G.free_pages = e;
1096 }
1097 #endif
1098
1099 /* Initialize the object size table. */
1100 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1101 object_size_table[order] = (size_t) 1 << order;
1102 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1103 {
1104 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1105
1106 /* If S is not a multiple of MAX_ALIGNMENT, round it up
1107 so that we're sure of getting aligned memory. */
1108 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
1109 object_size_table[order] = s;
1110 }
1111
1112 /* Initialize the objects-per-page table. */
1113 for (order = 0; order < NUM_ORDERS; ++order)
1114 {
1115 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1116 if (objects_per_page_table[order] == 0)
1117 objects_per_page_table[order] = 1;
1118 }
1119
1120 /* Reset the size_lookup array to put appropriately sized objects in
1121 the special orders. All objects bigger than the previous power
1122 of two, but no greater than the special size, should go in the
1123 new order. */
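/* For instance, if an extra order holds 24-byte objects (after rounding
   to MAX_ALIGNMENT), the loop below redirects size_lookup entries
   17..24, which previously mapped to the 32-byte order, to that new
   order.  */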
1124 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1125 {
1126 int o;
1127 int i;
1128
1129 o = size_lookup[OBJECT_SIZE (order)];
1130 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1131 size_lookup[i] = order;
1132 }
1133 }
1134
1135 /* Increment the `GC context'. Objects allocated in an outer context
1136 are never freed, eliminating the need to register their roots. */
1137
1138 void
1139 ggc_push_context ()
1140 {
1141 ++G.context_depth;
1142
1143 /* Die on wrap. */
1144 if (G.context_depth == 0)
1145 abort ();
1146 }
1147
1148 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1149 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1150
1151 static void
1152 ggc_recalculate_in_use_p (p)
1153 page_entry *p;
1154 {
1155 unsigned int i;
1156 size_t num_objects;
1157
1158 /* Because the past-the-end bit in in_use_p is always set, we
1159 pretend there is one additional object. */
1160 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1161
1162 /* Reset the free object count. */
1163 p->num_free_objects = num_objects;
1164
1165 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1166 for (i = 0;
1167 i < CEIL (BITMAP_SIZE (num_objects),
1168 sizeof (*p->in_use_p));
1169 ++i)
1170 {
1171 unsigned long j;
1172
1173 /* Something is in use if it is marked, or if it was in use in a
1174 context further down the context stack. */
1175 p->in_use_p[i] |= p->save_in_use_p[i];
1176
1177 /* Decrement the free object count for every object allocated. */
1178 for (j = p->in_use_p[i]; j; j >>= 1)
1179 p->num_free_objects -= (j & 1);
1180 }
1181
1182 if (p->num_free_objects >= num_objects)
1183 abort ();
1184 }
1185
1186 /* Decrement the `GC context'. All objects allocated since the
1187 previous ggc_push_context are migrated to the outer context. */
1188
1189 void
1190 ggc_pop_context ()
1191 {
1192 unsigned order, depth;
1193
1194 depth = --G.context_depth;
1195
1196 /* Any remaining pages in the popped context are lowered to the new
1197 current context; i.e. objects allocated in the popped context and
1198 left over are imported into the previous context. */
1199 for (order = 2; order < NUM_ORDERS; order++)
1200 {
1201 page_entry *p;
1202
1203 for (p = G.pages[order]; p != NULL; p = p->next)
1204 {
1205 if (p->context_depth > depth)
1206 p->context_depth = depth;
1207
1208 /* If this page is now in the topmost context, and we'd
1209 saved its allocation state, restore it. */
1210 else if (p->context_depth == depth && p->save_in_use_p)
1211 {
1212 ggc_recalculate_in_use_p (p);
1213 free (p->save_in_use_p);
1214 p->save_in_use_p = 0;
1215 }
1216 }
1217 }
1218 }
1219 \f
1220 /* Unmark all objects. */
1221
1222 static inline void
1223 clear_marks ()
1224 {
1225 unsigned order;
1226
1227 for (order = 2; order < NUM_ORDERS; order++)
1228 {
1229 size_t num_objects = OBJECTS_PER_PAGE (order);
1230 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1231 page_entry *p;
1232
1233 for (p = G.pages[order]; p != NULL; p = p->next)
1234 {
1235 #ifdef ENABLE_CHECKING
1236 /* The data should be page-aligned. */
1237 if ((size_t) p->page & (G.pagesize - 1))
1238 abort ();
1239 #endif
1240
1241 /* Pages that aren't in the topmost context are not collected;
1242 nevertheless, we need their in-use bit vectors to store GC
1243 marks. So, back them up first. */
1244 if (p->context_depth < G.context_depth)
1245 {
1246 if (! p->save_in_use_p)
1247 p->save_in_use_p = xmalloc (bitmap_size);
1248 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1249 }
1250
1251 /* Reset the number of free objects and clear the in-use bits.
1252 These will be adjusted as objects are marked via ggc_set_mark. */
1253 p->num_free_objects = num_objects;
1254 memset (p->in_use_p, 0, bitmap_size);
1255
1256 /* Make sure the one-past-the-end bit is always set. */
1257 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1258 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1259 }
1260 }
1261 }
1262
1263 /* Free all empty pages. Partially empty pages need no attention
1264 because the `mark' bit doubles as an `unused' bit. */
1265
1266 static inline void
1267 sweep_pages ()
1268 {
1269 unsigned order;
1270
1271 for (order = 2; order < NUM_ORDERS; order++)
1272 {
1273 /* The last page-entry to consider, regardless of entries
1274 placed at the end of the list. */
1275 page_entry * const last = G.page_tails[order];
1276
1277 size_t num_objects = OBJECTS_PER_PAGE (order);
1278 size_t live_objects;
1279 page_entry *p, *previous;
1280 int done;
1281
1282 p = G.pages[order];
1283 if (p == NULL)
1284 continue;
1285
1286 previous = NULL;
1287 do
1288 {
1289 page_entry *next = p->next;
1290
1291 /* Loop until all entries have been examined. */
1292 done = (p == last);
1293
1294 /* Add all live objects on this page to the count of
1295 allocated memory. */
1296 live_objects = num_objects - p->num_free_objects;
1297
1298 G.allocated += OBJECT_SIZE (order) * live_objects;
1299
1300 /* Only objects on pages in the topmost context should get
1301 collected. */
1302 if (p->context_depth < G.context_depth)
1303 ;
1304
1305 /* Remove the page if it's empty. */
1306 else if (live_objects == 0)
1307 {
1308 if (! previous)
1309 G.pages[order] = next;
1310 else
1311 previous->next = next;
1312
1313 /* Are we removing the last element? */
1314 if (p == G.page_tails[order])
1315 G.page_tails[order] = previous;
1316 free_page (p);
1317 p = previous;
1318 }
1319
1320 /* If the page is full, move it to the end. */
1321 else if (p->num_free_objects == 0)
1322 {
1323 /* Don't move it if it's already at the end. */
1324 if (p != G.page_tails[order])
1325 {
1326 /* Move p to the end of the list. */
1327 p->next = NULL;
1328 G.page_tails[order]->next = p;
1329
1330 /* Update the tail pointer... */
1331 G.page_tails[order] = p;
1332
1333 /* ... and the head pointer, if necessary. */
1334 if (! previous)
1335 G.pages[order] = next;
1336 else
1337 previous->next = next;
1338 p = previous;
1339 }
1340 }
1341
1342 /* If we've fallen through to here, it's a page in the
1343 topmost context that is neither full nor empty. Such a
1344 page must precede pages at lesser context depth in the
1345 list, so move it to the head. */
1346 else if (p != G.pages[order])
1347 {
1348 previous->next = p->next;
1349 p->next = G.pages[order];
1350 G.pages[order] = p;
1351 /* Are we moving the last element? */
1352 if (G.page_tails[order] == p)
1353 G.page_tails[order] = previous;
1354 p = previous;
1355 }
1356
1357 previous = p;
1358 p = next;
1359 }
1360 while (! done);
1361
1362 /* Now, restore the in_use_p vectors for any pages from contexts
1363 other than the current one. */
1364 for (p = G.pages[order]; p; p = p->next)
1365 if (p->context_depth != G.context_depth)
1366 ggc_recalculate_in_use_p (p);
1367 }
1368 }
1369
1370 #ifdef GGC_POISON
1371 /* Clobber all free objects. */
1372
1373 static inline void
1374 poison_pages ()
1375 {
1376 unsigned order;
1377
1378 for (order = 2; order < NUM_ORDERS; order++)
1379 {
1380 size_t num_objects = OBJECTS_PER_PAGE (order);
1381 size_t size = OBJECT_SIZE (order);
1382 page_entry *p;
1383
1384 for (p = G.pages[order]; p != NULL; p = p->next)
1385 {
1386 size_t i;
1387
1388 if (p->context_depth != G.context_depth)
1389 /* Since we don't do any collection for pages in pushed
1390 contexts, there's no need to do any poisoning. And
1391 besides, the IN_USE_P array isn't valid until we pop
1392 contexts. */
1393 continue;
1394
1395 for (i = 0; i < num_objects; i++)
1396 {
1397 size_t word, bit;
1398 word = i / HOST_BITS_PER_LONG;
1399 bit = i % HOST_BITS_PER_LONG;
1400 if (((p->in_use_p[word] >> bit) & 1) == 0)
1401 memset (p->page + i * size, 0xa5, size);
1402 }
1403 }
1404 }
1405 }
1406 #endif
1407
1408 /* Top level mark-and-sweep routine. */
1409
1410 void
1411 ggc_collect ()
1412 {
1413 /* Avoid frequent unnecessary work by skipping collection if the
1414 total allocations haven't expanded much since the last
1415 collection. */
1416 #ifndef GGC_ALWAYS_COLLECT
1417 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1418 return;
1419 #endif
1420
1421 timevar_push (TV_GC);
1422 if (!quiet_flag)
1423 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1424
1425 /* Zero the total allocated bytes. This will be recalculated in the
1426 sweep phase. */
1427 G.allocated = 0;
1428
1429 /* Release the pages we freed the last time we collected, but didn't
1430 reuse in the interim. */
1431 release_pages ();
1432
1433 clear_marks ();
1434 ggc_mark_roots ();
1435
1436 #ifdef GGC_POISON
1437 poison_pages ();
1438 #endif
1439
1440 sweep_pages ();
1441
1442 G.allocated_last_gc = G.allocated;
1443 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1444 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1445
1446 timevar_pop (TV_GC);
1447
1448 if (!quiet_flag)
1449 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1450 }
1451
1452 /* Print allocation statistics. */
1453 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1454 ? (x) \
1455 : ((x) < 1024*1024*10 \
1456 ? (x) / 1024 \
1457 : (x) / (1024*1024))))
1458 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
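/* E.g. 3000 bytes print as "3000 ", 5MB prints as "5120k", and 50MB
   prints as "50M".  */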
1459
1460 void
1461 ggc_print_statistics ()
1462 {
1463 struct ggc_statistics stats;
1464 unsigned int i;
1465 size_t total_overhead = 0;
1466
1467 /* Clear the statistics. */
1468 memset (&stats, 0, sizeof (stats));
1469
1470 /* Make sure collection will really occur. */
1471 G.allocated_last_gc = 0;
1472
1473 /* Collect and print the statistics common across collectors. */
1474 ggc_print_common_statistics (stderr, &stats);
1475
1476 /* Release free pages so that we will not count the bytes allocated
1477 there as part of the total allocated memory. */
1478 release_pages ();
1479
1480 /* Collect some information about the various sizes of
1481 allocation. */
1482 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1483 "Size", "Allocated", "Used", "Overhead");
1484 for (i = 0; i < NUM_ORDERS; ++i)
1485 {
1486 page_entry *p;
1487 size_t allocated;
1488 size_t in_use;
1489 size_t overhead;
1490
1491 /* Skip empty entries. */
1492 if (!G.pages[i])
1493 continue;
1494
1495 overhead = allocated = in_use = 0;
1496
1497 /* Figure out the total number of bytes allocated for objects of
1498 this size, and how many of them are actually in use. Also figure
1499 out how much memory the page table is using. */
1500 for (p = G.pages[i]; p; p = p->next)
1501 {
1502 allocated += p->bytes;
1503 in_use +=
1504 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1505
1506 overhead += (sizeof (page_entry) - sizeof (long)
1507 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1508 }
1509 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1510 (unsigned long) OBJECT_SIZE (i),
1511 SCALE (allocated), LABEL (allocated),
1512 SCALE (in_use), LABEL (in_use),
1513 SCALE (overhead), LABEL (overhead));
1514 total_overhead += overhead;
1515 }
1516 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1517 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1518 SCALE (G.allocated), LABEL(G.allocated),
1519 SCALE (total_overhead), LABEL (total_overhead));
1520 }