/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
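/* For example (ignoring the extra orders set up in init_ggc below): a
   20-byte request is rounded up to 32 bytes and served from an
   order-5 page, while a 64-byte request is already a power of two and
   maps directly to order 6.  */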
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
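/* Worked example: on a 32-bit host with 4K pages (G.lg_pagesize == 12),
   PAGE_L2_BITS is 12.  For p == 0x12345678,
   LOOKUP_L1 (p) == 0x12345678 >> 24 == 0x12, and
   LOOKUP_L2 (p) == (0x12345678 >> 12) & 0xfff == 0x345.  */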
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
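/* Illustration, assuming an object size Z == 24: Z == 3 * 2^3, so
   DIV_SHIFT is 3 and DIV_MULT is the inverse of 3 mod 2^32, which is
   0xaaaaaaab (3 * 0xaaaaaaab == 2^33 + 1 == 1 mod 2^32).  Then for
   OFFSET == 48: (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2
   == 48 / 24.  See compute_inverse below.  */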
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
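/* E.g. with the five entries above, NUM_EXTRA_ORDERS is 5, so a host
   with 32-bit pointers has NUM_ORDERS == 37: orders 0-31 for the
   powers of two and orders 32-36 for the special sizes.  */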
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
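/* The offsetof trick: the union U is placed immediately after a
   single char, so the compiler pads C out to U's alignment; e.g. if
   the most-aligned union member needs 8-byte alignment, then
   offsetof (struct max_alignment, u) is 8.  */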
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
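/* E.g. ROUND_UP (13, 8) == 16 and ROUND_UP_VALUE (13, 8) == 3, since
   (8 - 1 - ((8 - 1 + 13) % 8)) == 7 - 4 == 3.  */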
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  It is indexed by the depth we are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
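/* E.g. a page holding 511 objects on a host with 64-bit longs needs
   CEIL (511, 64) == 8 longs, i.e. BITMAP_SIZE (511) == 64 bytes.  */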
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

#ifdef ENABLE_GC_CHECKING
static void poison_pages (void);
#endif

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
struct alloc_zone *rtl_zone = NULL;
struct alloc_zone *tree_zone = NULL;
struct alloc_zone *garbage_zone = NULL;
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = xrealloc (G.by_depth,
			     G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = xrealloc (G.save_in_use,
				G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;

      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];

      /* If they are at the same depth, put top element into freed
	 slot.  */
      if (entry->context_depth == top->context_depth)
	{
	  int i = entry->index_by_depth;
	  G.by_depth[i] = top;
	  G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
	  top->index_by_depth = i;
	}
      else
	{
	  /* We cannot free a page from a context deeper than the
	     current one.  */
	  abort ();
	}
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
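/* Before init_ggc remaps the special orders, a request of SIZE <= 256
   indexes this table directly.  E.g. size_lookup[10] == 4, so a
   10-byte request is served from order 4, whose objects are
   OBJECT_SIZE (4) == 16 bytes.  */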
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
{
  return ggc_alloc (size);
}

/* Zone allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
  return ggc_alloc (size);
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc (size_t size)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 9;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
					    object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }

    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }

    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = xmalloc (sizeof (struct free_object));
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.  */

	for (q = NULL, p = G.pages[order]; ; q = p, p = p->next)
	  if (p == pe)
	    break;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;
	    q->next = p;
	    if (!p)
	      G.page_tails[order] = q;
	    pe->next = G.pages[order];
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
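/* Sketch of the iteration for an (assumed) OBJECT_SIZE of 40:
   40 == 5 * 2^3, so E becomes 3 and SIZE becomes 5.  The Newton step
   inv = inv * (2 - inv*size) doubles the number of correct low-order
   bits each round and converges on 0xcccccccd, the inverse of 5 mod
   2^32 (5 * 0xcccccccd == 2^34 + 1 == 1 mod 2^32).  The resulting
   pair is DIV_MULT == 0xcccccccd, DIV_SHIFT == 3.  */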
/* Initialize the ggc-mmap allocator.  */

void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = xmalloc (G.depth_max * sizeof (unsigned int));

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context (void)
{
  unsigned long omask;
  unsigned int depth, i, e;
#ifdef ENABLE_CHECKING
  unsigned int order;
#endif

  depth = --G.context_depth;
  omask = (unsigned long)1 << (depth + 1);

  if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
    return;

  G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
  G.context_depth_allocations &= omask - 1;
  G.context_depth_collections &= omask - 1;

  /* The G.depth array is shortened so that the last index is the
     context_depth of the top element of by_depth.  */
  if (depth+1 < G.depth_in_use)
    e = G.depth[depth+1];
  else
    e = G.by_depth_in_use;

  /* We might not have any PTEs of depth depth.  */
  if (depth < G.depth_in_use)
    {
      /* First we go through all the pages at depth depth to
	 recalculate the in use bits.  */
      for (i = G.depth[depth]; i < e; ++i)
	{
	  page_entry *p;

#ifdef ENABLE_CHECKING
	  p = G.by_depth[i];

	  /* Check that all of the pages really are at the depth that
	     we expect.  */
	  if (p->context_depth != depth)
	    abort ();
	  if (p->index_by_depth != i)
	    abort ();
#endif

	  prefetch (&save_in_use_p_i (i+8));
	  prefetch (&save_in_use_p_i (i+16));
	  if (save_in_use_p_i (i))
	    {
	      p = G.by_depth[i];
	      ggc_recalculate_in_use_p (p);
	      free (save_in_use_p_i (i));
	      save_in_use_p_i (i) = 0;
	    }
	}
    }

  /* Then, we reset all page_entries with a depth greater than depth
     to be at depth.  */
  for (i = e; i < G.by_depth_in_use; ++i)
    {
      page_entry *p = G.by_depth[i];

      /* Check that all of the pages really are at the depth we
	 expect.  */
#ifdef ENABLE_CHECKING
      if (p->context_depth <= depth)
	abort ();
      if (p->index_by_depth != i)
	abort ();
#endif
      p->context_depth = depth;
    }

  adjust_depth ();

#ifdef ENABLE_CHECKING
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    abort ();
	  else if (p->context_depth == depth && save_in_use_p (p))
	    abort ();
	}
    }
#endif
}
/* Unmark all objects.  */

static inline void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = xmalloc (bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static inline void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();

#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  sweep_pages ();

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* Validate that the reportedly free objects actually are.  */
  {
    struct free_object *f, *n;
    for (f = G.free_object_list; f ; f = n)
      {
	page_entry *pe = lookup_page_table_entry (f->object);

	/* If the page entry is null, that means the entire page is free.
	   Otherwise, we have to examine the in-use bit for the object.  */
	if (pe != NULL)
	  {
	    size_t bit, word;
	    bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
	    word = bit / HOST_BITS_PER_LONG;
	    bit = bit % HOST_BITS_PER_LONG;

	    if (pe->in_use_p[word] & (1UL << bit))
	      abort ();
	  }

	n = f->next;
	free (f);
      }
    G.free_object_list = NULL;
  }
#endif

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
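/* E.g. for x == 5*1024*1024 the pair prints as "5120k": x is below
   the 10M threshold, so SCALE yields x / 1024 == 5120 and LABEL
   yields 'k'; anything at or above 10M is reported in megabytes.  */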
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL(G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
	     G.stats.total_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
	     G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
	     G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
	     G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
	     G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
	     G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
	     G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	     G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
	{
	  fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
		   OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
	  fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
		   OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
	}
  }
#endif
}
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE(order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance that the OS may try to flush any outstanding
	 writes.  */
      if (padding <= sizeof(emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can't write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can't write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  if (G.context_depth != 0)
    abort ();
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
			   - sizeof (long)
			   + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}