1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "diagnostic-core.h"
33 #include "flags.h"
34 #include "ggc.h"
35 #include "ggc-internal.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39 #include "plugin.h"
40
41 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
42 file open. Prefer either to valloc. */
43 #ifdef HAVE_MMAP_ANON
44 # undef HAVE_MMAP_DEV_ZERO
45 # define USING_MMAP
46 #endif
47
48 #ifdef HAVE_MMAP_DEV_ZERO
49 # define USING_MMAP
50 #endif
51
52 #ifndef USING_MMAP
53 #error Zone collector requires mmap
54 #endif
55
56 #if (GCC_VERSION < 3001)
57 #define prefetch(X) ((void) X)
58 #define prefetchw(X) ((void) X)
59 #else
60 #define prefetch(X) __builtin_prefetch (X)
61 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
62 #endif
63
64 /* FUTURE NOTES:
65
66 If we track inter-zone pointers, we can mark single zones at a
67 time.
68
69 If we have a zone where we guarantee no inter-zone pointers, we
70 could mark that zone separately.
71
72 The garbage zone should not be marked, and we should return 1 in
73 ggc_set_mark for any object in the garbage zone, which cuts off
74 marking quickly. */
75
76 /* Strategy:
77
78 This garbage-collecting allocator segregates objects into zones.
79 It also segregates objects into "large" and "small" bins. Large
80 objects are greater than page size.
81
82 Pages for small objects are broken up into chunks. The page has
83 a bitmap which marks the start position of each chunk (whether
84 allocated or free). Free chunks are on one of the zone's free
85 lists and contain a pointer to the next free chunk. Chunks in
86 most of the free lists have a fixed size determined by the
87 free list. Chunks in the "other" sized free list have their size
88 stored right after their chain pointer.
89
90 Empty pages (of all sizes) are kept on a single page cache list,
91 and are considered first when new pages are required; they are
92 deallocated at the start of the next collection if they haven't
93 been recycled by then. The free page list is currently per-zone. */
94
95 /* Define GGC_DEBUG_LEVEL to print debugging information.
96 0: No debugging output.
97 1: GC statistics only.
98 2: Page-entry allocations/deallocations as well.
99 3: Object allocations as well.
100 4: Object marks as well. */
101 #define GGC_DEBUG_LEVEL (0)
102
103 #ifndef HOST_BITS_PER_PTR
104 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
105 #endif
106
107 /* This structure manages small free chunks. The SIZE field is only
108 initialized if the chunk is in the "other" sized free list. Large
109 chunks are allocated one at a time to their own page, and so don't
110 come in here. */
111
112 struct alloc_chunk {
113 struct alloc_chunk *next_free;
114 unsigned int size;
115 };
116
117 /* The size of the fixed-size portion of a small page descriptor. */
118 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
119
120 /* The collector's idea of the page size. This must be a power of two
121 no larger than the system page size, because pages must be aligned
122 to this amount and are tracked at this granularity in the page
123 table. We choose a size at compile time for efficiency.
124
125 We could make a better guess at compile time if PAGE_SIZE is a
126 constant in system headers, and PAGE_SHIFT is defined... */
127 #define GGC_PAGE_SIZE 4096
128 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
129 #define GGC_PAGE_SHIFT 12
130
131 #if 0
132 /* Alternative definitions which use the runtime page size. */
133 #define GGC_PAGE_SIZE G.pagesize
134 #define GGC_PAGE_MASK G.page_mask
135 #define GGC_PAGE_SHIFT G.lg_pagesize
136 #endif
137
138 /* The size of a small page managed by the garbage collector. This
139 must currently be GGC_PAGE_SIZE, but with a few changes could
140 be any multiple of it to reduce certain kinds of overhead. */
141 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
142
143 /* Free bin information. These numbers may be in need of re-tuning.
144 In general, decreasing the number of free bins would seem to
145 increase the time it takes to allocate... */
146
147 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
148 today. */
149
150 #define NUM_FREE_BINS 64
151 #define FREE_BIN_DELTA MAX_ALIGNMENT
152 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
153
154 /* Allocation and marking parameters. */
155
156 /* The smallest allocatable unit to keep track of. */
157 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
158
159 /* The smallest markable unit. If we require each allocated object
160 to contain at least two allocatable units, we can use half as many
161 bits for the mark bitmap. But this adds considerable complexity
162 to sweeping. */
163 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
164
165 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
166
167 /* We use this structure to determine the alignment required for
168 allocations.
169
170 There are several things wrong with this estimation of alignment.
171
172 The maximum alignment for a structure is often less than the
173 maximum alignment for a basic data type; for instance, on some
174 targets long long must be aligned to sizeof (int) in a structure
175 and sizeof (long long) in a variable. i386-linux is one example;
176 Darwin is another (sometimes, depending on the compiler in use).
177
178 Also, long double is not included. Nothing in GCC uses long
179 double, so we assume that this is OK. On powerpc-darwin, adding
180 long double would bring the maximum alignment up to 16 bytes,
181 and until we need long double (or to vectorize compiler operations)
182 that's painfully wasteful. This will need to change, some day. */
183
184 struct max_alignment {
185 char c;
186 union {
187 HOST_WIDEST_INT i;
188 double d;
189 } u;
190 };
191
192 /* The biggest alignment required. */
193
194 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
195
196 /* Compute the smallest multiple of F that is >= X. */
197
198 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
199
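/* Illustrative sketch (editorial, not built): how the offsetof trick
   above yields an alignment value, and what ROUND_UP does with it.
   The concrete numbers assume a typical 64-bit host where both
   HOST_WIDEST_INT and double need 8-byte alignment; the real values
   are host-dependent.  */
#if 0
static void
max_alignment_example (void)
{
  /* The leading char forces the union U to the next aligned offset,
     so offsetof (struct max_alignment, u) is the union's alignment
     requirement -- 8 under the assumption above.  */
  size_t align = MAX_ALIGNMENT;

  /* ROUND_UP (x, f) is the smallest multiple of F that is >= X:
     ROUND_UP (1, 8) == 8, ROUND_UP (8, 8) == 8, ROUND_UP (9, 8) == 16.  */
  size_t rounded = ROUND_UP (9, align);

  (void) align;
  (void) rounded;
}
#endif
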
200 /* Types to use for the allocation and mark bitmaps. It might be
201 a good idea to add ffsl to libiberty and use unsigned long
202 instead; that could speed us up where long is wider than int. */
203
204 typedef unsigned int alloc_type;
205 typedef unsigned int mark_type;
206 #define alloc_ffs(x) ffs(x)
207
208 /* A page_entry records the status of an allocation page. This is the
209 common data between all three kinds of pages - small, large, and
210 PCH. */
211 typedef struct page_entry
212 {
213 /* The address at which the memory is allocated. */
214 char *page;
215
216 /* The zone that this page entry belongs to. */
217 struct alloc_zone *zone;
218
219 /* How many collections we've survived. */
220 size_t survived;
221
222 /* Does this page contain small objects, or one large object? */
223 bool large_p;
224
225 /* Is this page part of the loaded PCH? */
226 bool pch_p;
227 } page_entry;
228
229 /* Additional data needed for small pages. */
230 struct small_page_entry
231 {
232 struct page_entry common;
233
234 /* The next small page entry, or NULL if this is the last. */
235 struct small_page_entry *next;
236
237 /* If currently marking this zone, a pointer to the mark bits
238 for this page. If we aren't currently marking this zone,
239 this pointer may be stale (pointing to freed memory). */
240 mark_type *mark_bits;
241
242 /* The allocation bitmap. This array extends far enough to have
243 one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
244 alloc_type alloc_bits[1];
245 };
246
247 /* Additional data needed for large pages. */
248 struct large_page_entry
249 {
250 struct page_entry common;
251
252 /* The next large page entry, or NULL if this is the last. */
253 struct large_page_entry *next;
254
255 /* The number of bytes allocated, not including the page entry. */
256 size_t bytes;
257
258 /* The previous page in the list, so that we can unlink this one. */
259 struct large_page_entry *prev;
260
261 /* During marking, is this object marked? */
262 bool mark_p;
263 };
264
265 /* A two-level tree is used to look up the page-entry for a given
266 pointer. Two chunks of the pointer's bits are extracted to index
267 the first and second levels of the tree, as follows:
268
269                                   HOST_PAGE_SIZE_BITS
270                            32           |      |
271        msb +----------------+----+------+------+ lsb
272                              |    |
273                   PAGE_L1_BITS    |
274                                   |
275                         PAGE_L2_BITS
276
277 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
278 pages are aligned on system page boundaries. The next most
279 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
280 index values in the lookup table, respectively.
281
282 For 32-bit architectures and the settings below, there are no
283 leftover bits. For architectures with wider pointers, the lookup
284 tree points to a list of pages, which must be scanned to find the
285 correct one. */
286
287 #define PAGE_L1_BITS (8)
288 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
289 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
290 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
291
292 #define LOOKUP_L1(p) \
293 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
294
295 #define LOOKUP_L2(p) \
296 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
297
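/* Illustrative sketch (editorial, not built): decomposing an address
   with the lookup macros above.  The address is made up for the
   example; with GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8,
   PAGE_L2_BITS is 12 on a 32-bit host (on 64-bit hosts the high
   32 bits additionally select a page_table_chain entry).  */
#if 0
static void
page_table_lookup_example (void)
{
  const void *p = (const void *) 0x0804a123;

  /* Bits [31:24] select the first-level slot...  */
  size_t l1 = LOOKUP_L1 (p);	/* 0x08 */
  /* ...bits [23:12] select the second-level slot...  */
  size_t l2 = LOOKUP_L2 (p);	/* 0x04a */
  /* ...and the low GGC_PAGE_SHIFT bits (0x123) are the offset within
     the page, which the table does not need to record.  */

  (void) l1;
  (void) l2;
}
#endif
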
298 #if HOST_BITS_PER_PTR <= 32
299
300 /* On 32-bit hosts, we use a two level page table, as pictured above. */
301 typedef page_entry **page_table[PAGE_L1_SIZE];
302
303 #else
304
305 /* On 64-bit hosts, we use the same two level page tables plus a linked
306 list that disambiguates the top 32-bits. There will almost always be
307 exactly one entry in the list. */
308 typedef struct page_table_chain
309 {
310 struct page_table_chain *next;
311 size_t high_bits;
312 page_entry **table[PAGE_L1_SIZE];
313 } *page_table;
314
315 #endif
316
317 /* The global variables. */
318 static struct globals
319 {
320 /* The linked list of zones. */
321 struct alloc_zone *zones;
322
323 /* Lookup table for associating allocation pages with object addresses. */
324 page_table lookup;
325
326 /* The system's page size, and related constants. */
327 size_t pagesize;
328 size_t lg_pagesize;
329 size_t page_mask;
330
331 /* The size to allocate for a small page entry. This includes
332 the size of the structure and the size of the allocation
333 bitmap. */
334 size_t small_page_overhead;
335
336 #if defined (HAVE_MMAP_DEV_ZERO)
337 /* A file descriptor open to /dev/zero for reading. */
338 int dev_zero_fd;
339 #endif
340
341 /* Allocate pages in chunks of this size, to throttle calls to memory
342 allocation routines. The first page is used, the rest go onto the
343 free list. */
344 size_t quire_size;
345
346 /* The file descriptor for debugging output. */
347 FILE *debug_file;
348 } G;
349
350 /* A zone allocation structure. There is one of these for every
351 distinct allocation zone. */
352 struct alloc_zone
353 {
354 /* The most recent free chunk is saved here, instead of in the linked
355 free list, to decrease list manipulation. It is most likely that we
356 will want this one. */
357 char *cached_free;
358 size_t cached_free_size;
359
360 /* Linked lists of free storage. Slot I, for 1 <= I <= NUM_FREE_BINS,
361 holds chunks of size I * FREE_BIN_DELTA. All other chunks are in slot 0. */
362 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
363
364 /* The highest bin index which might be non-empty. It may turn out
365 to be empty, in which case we have to search downwards. */
366 size_t high_free_bin;
367
368 /* Bytes currently allocated in this zone. */
369 size_t allocated;
370
371 /* Linked list of the small pages in this zone. */
372 struct small_page_entry *pages;
373
374 /* Doubly linked list of large pages in this zone. */
375 struct large_page_entry *large_pages;
376
377 /* If we are currently marking this zone, a pointer to the mark bits. */
378 mark_type *mark_bits;
379
380 /* Name of the zone. */
381 const char *name;
382
383 /* The number of small pages currently allocated in this zone. */
384 size_t n_small_pages;
385
386 /* Bytes allocated at the end of the last collection. */
387 size_t allocated_last_gc;
388
389 /* Total amount of memory mapped. */
390 size_t bytes_mapped;
391
392 /* A cache of free system pages. */
393 struct small_page_entry *free_pages;
394
395 /* Next zone in the linked list of zones. */
396 struct alloc_zone *next_zone;
397
398 /* True if this zone was collected during this collection. */
399 bool was_collected;
400
401 /* True if this zone should be destroyed after the next collection. */
402 bool dead;
403
404 struct
405 {
406 /* Total GC-allocated memory. */
407 unsigned long long total_allocated;
408 /* Total overhead for GC-allocated memory. */
409 unsigned long long total_overhead;
410
411 /* Total allocations and overhead for sizes less than 32, 64 and 128.
412 These sizes are interesting because they are typical cache line
413 sizes. */
414
415 unsigned long long total_allocated_under32;
416 unsigned long long total_overhead_under32;
417
418 unsigned long long total_allocated_under64;
419 unsigned long long total_overhead_under64;
420
421 unsigned long long total_allocated_under128;
422 unsigned long long total_overhead_under128;
423 } stats;
424 } main_zone;
425
426 /* Some default zones. */
427 struct alloc_zone rtl_zone;
428 struct alloc_zone tree_zone;
429 struct alloc_zone tree_id_zone;
430
431 /* The PCH zone does not need a normal zone structure, and it does
432 not live on the linked list of zones. */
433 struct pch_zone
434 {
435 /* The start of the PCH zone. NULL if there is none. */
436 char *page;
437
438 /* The end of the PCH zone. NULL if there is none. */
439 char *end;
440
441 /* The size of the PCH zone. 0 if there is none. */
442 size_t bytes;
443
444 /* The allocation bitmap for the PCH zone. */
445 alloc_type *alloc_bits;
446
447 /* If we are currently marking, the mark bitmap for the PCH zone.
448 When it is first read in, we could avoid marking the PCH,
449 because it will not contain any pointers to GC memory outside
450 of the PCH; however, the PCH is currently mapped as writable,
451 so we must mark it in case new pointers are added. */
452 mark_type *mark_bits;
453 } pch_zone;
454
455 #ifdef USING_MMAP
456 static char *alloc_anon (char *, size_t, struct alloc_zone *);
457 #endif
458 static struct small_page_entry * alloc_small_page (struct alloc_zone *);
459 static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
460 static void free_chunk (char *, size_t, struct alloc_zone *);
461 static void free_small_page (struct small_page_entry *);
462 static void free_large_page (struct large_page_entry *);
463 static void release_pages (struct alloc_zone *);
464 static void sweep_pages (struct alloc_zone *);
465 static bool ggc_collect_1 (struct alloc_zone *, bool);
466 static void new_ggc_zone_1 (struct alloc_zone *, const char *);
467
468 /* Traverse the page table and find the entry for a page.
469 Die (probably) if the object wasn't allocated via GC. */
470
471 static inline page_entry *
472 lookup_page_table_entry (const void *p)
473 {
474 page_entry ***base;
475 size_t L1, L2;
476
477 #if HOST_BITS_PER_PTR <= 32
478 base = &G.lookup[0];
479 #else
480 page_table table = G.lookup;
481 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
482 while (table->high_bits != high_bits)
483 table = table->next;
484 base = &table->table[0];
485 #endif
486
487 /* Extract the level 1 and 2 indices. */
488 L1 = LOOKUP_L1 (p);
489 L2 = LOOKUP_L2 (p);
490
491 return base[L1][L2];
492 }
493
494 /* Traverse the page table and find the entry for a page.
495 Return NULL if the object wasn't allocated via the GC. */
496
497 static inline page_entry *
498 lookup_page_table_if_allocated (const void *p)
499 {
500 page_entry ***base;
501 size_t L1, L2;
502
503 #if HOST_BITS_PER_PTR <= 32
504 base = &G.lookup[0];
505 #else
506 page_table table = G.lookup;
507 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
508 while (1)
509 {
510 if (table == NULL)
511 return NULL;
512 if (table->high_bits == high_bits)
513 break;
514 table = table->next;
515 }
516 base = &table->table[0];
517 #endif
518
519 /* Extract the level 1 and 2 indices. */
520 L1 = LOOKUP_L1 (p);
521 if (! base[L1])
522 return NULL;
523
524 L2 = LOOKUP_L2 (p);
525 if (L2 >= PAGE_L2_SIZE)
526 return NULL;
527 /* We might have a page entry which does not correspond exactly to a
528 system page. */
529 if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
530 return NULL;
531
532 return base[L1][L2];
533 }
534
535 /* Set the page table entry for the page that starts at P. If ENTRY
536 is NULL, clear the entry. */
537
538 static void
539 set_page_table_entry (void *p, page_entry *entry)
540 {
541 page_entry ***base;
542 size_t L1, L2;
543
544 #if HOST_BITS_PER_PTR <= 32
545 base = &G.lookup[0];
546 #else
547 page_table table;
548 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
549 for (table = G.lookup; table; table = table->next)
550 if (table->high_bits == high_bits)
551 goto found;
552
553 /* Not found -- allocate a new table. */
554 table = XCNEW (struct page_table_chain);
555 table->next = G.lookup;
556 table->high_bits = high_bits;
557 G.lookup = table;
558 found:
559 base = &table->table[0];
560 #endif
561
562 /* Extract the level 1 and 2 indices. */
563 L1 = LOOKUP_L1 (p);
564 L2 = LOOKUP_L2 (p);
565
566 if (base[L1] == NULL)
567 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
568
569 base[L1][L2] = entry;
570 }
571
572 /* Find the page table entry associated with OBJECT. */
573
574 static inline struct page_entry *
575 zone_get_object_page (const void *object)
576 {
577 return lookup_page_table_entry (object);
578 }
579
580 /* Find which element of the alloc_bits array OBJECT should be
581 recorded in. */
582 static inline unsigned int
583 zone_get_object_alloc_word (const void *object)
584 {
585 return (((size_t) object & (GGC_PAGE_SIZE - 1))
586 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
587 }
588
589 /* Find which bit of the appropriate word in the alloc_bits array
590 OBJECT should be recorded in. */
591 static inline unsigned int
592 zone_get_object_alloc_bit (const void *object)
593 {
594 return (((size_t) object / BYTES_PER_ALLOC_BIT)
595 % (8 * sizeof (alloc_type)));
596 }
597
598 /* Find which element of the mark_bits array OBJECT should be recorded
599 in. */
600 static inline unsigned int
601 zone_get_object_mark_word (const void *object)
602 {
603 return (((size_t) object & (GGC_PAGE_SIZE - 1))
604 / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
605 }
606
607 /* Find which bit of the appropriate word in the mark_bits array
608 OBJECT should be recorded in. */
609 static inline unsigned int
610 zone_get_object_mark_bit (const void *object)
611 {
612 return (((size_t) object / BYTES_PER_MARK_BIT)
613 % (8 * sizeof (mark_type)));
614 }
615
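/* Illustrative sketch (editorial, not built): where the allocation bit
   for an object lives, assuming BYTES_PER_ALLOC_BIT == 8 and the
   32-bit alloc_type used above, so that one alloc word covers
   8 * 8 * 4 == 256 bytes of the page.  */
#if 0
static void
alloc_bit_layout_example (void)
{
  /* An object 0x150 bytes into its 4K page...  */
  const void *object = (const void *) 0x0804a150;

  /* ...has its start bit in alloc word 0x150 / 256 == 1...  */
  unsigned int word = zone_get_object_alloc_word (object);
  /* ...at bit position (0x150 / 8) % 32 == 10.  */
  unsigned int bit = zone_get_object_alloc_bit (object);

  (void) word;
  (void) bit;
}
#endif
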
616 /* Set the allocation bit corresponding to OBJECT in its page's
617 bitmap. Used to split this object from the preceding one. */
618 static inline void
619 zone_set_object_alloc_bit (const void *object)
620 {
621 struct small_page_entry *page
622 = (struct small_page_entry *) zone_get_object_page (object);
623 unsigned int start_word = zone_get_object_alloc_word (object);
624 unsigned int start_bit = zone_get_object_alloc_bit (object);
625
626 page->alloc_bits[start_word] |= 1L << start_bit;
627 }
628
629 /* Clear the allocation bit corresponding to OBJECT in PAGE's
630 bitmap. Used to coalesce this object with the preceding
631 one. */
632 static inline void
633 zone_clear_object_alloc_bit (struct small_page_entry *page,
634 const void *object)
635 {
636 unsigned int start_word = zone_get_object_alloc_word (object);
637 unsigned int start_bit = zone_get_object_alloc_bit (object);
638
639 /* Would xor be quicker? */
640 page->alloc_bits[start_word] &= ~(1L << start_bit);
641 }
642
643 /* Find the size of the object which starts at START_WORD and
644 START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
645 Helper function for ggc_get_size and zone_find_object_size. */
646
647 static inline size_t
648 zone_object_size_1 (alloc_type *alloc_bits,
649 size_t start_word, size_t start_bit,
650 size_t max_size)
651 {
652 size_t size;
653 alloc_type alloc_word;
654 int indx;
655
656 /* Load the first word. */
657 alloc_word = alloc_bits[start_word++];
658
659 /* If that was the last bit in this word, we'll want to continue
660 with the next word. Otherwise, handle the rest of this word. */
661 if (start_bit)
662 {
663 indx = alloc_ffs (alloc_word >> start_bit);
664 if (indx)
665 /* indx is 1-based. We started at the bit after the object's
666 start, but we also ended at the bit after the object's end.
667 It cancels out. */
668 return indx * BYTES_PER_ALLOC_BIT;
669
670 /* The extra 1 accounts for the starting unit, before start_bit. */
671 size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
672
673 if (size >= max_size)
674 return max_size;
675
676 alloc_word = alloc_bits[start_word++];
677 }
678 else
679 size = BYTES_PER_ALLOC_BIT;
680
681 while (alloc_word == 0)
682 {
683 size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
684 if (size >= max_size)
685 return max_size;
686 alloc_word = alloc_bits[start_word++];
687 }
688
689 indx = alloc_ffs (alloc_word);
690 return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
691 }
692
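/* Illustrative sketch (editorial, not built): how the allocation bitmap
   encodes object sizes, assuming BYTES_PER_ALLOC_BIT == 8.  If the low
   bits of an alloc word are ...1001, objects start at units 0 and 3,
   so the first object spans three 8-byte units.  */
#if 0
static void
object_size_bitmap_example (void)
{
  alloc_type bits[1] = { 0x9 };	/* Starts recorded at units 0 and 3.  */

  /* The size query for the object at unit 0 begins scanning at the bit
     after the object's start (start_bit == 1) and stops at the next
     set bit, unit 3.  The real callers cap MAX_SIZE at the page end;
     it is left effectively unbounded here for simplicity.  */
  size_t size = zone_object_size_1 (bits, 0, 1, (size_t) -1);

  (void) size;	/* 3 * BYTES_PER_ALLOC_BIT == 24.  */
}
#endif
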
693 /* Find the size of OBJECT on small page PAGE. */
694
695 static inline size_t
696 zone_find_object_size (struct small_page_entry *page,
697 const void *object)
698 {
699 const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
700 unsigned int start_word = zone_get_object_alloc_word (object_midptr);
701 unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
702 size_t max_size = (page->common.page + SMALL_PAGE_SIZE
703 - (const char *) object);
704
705 return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
706 max_size);
707 }
708
709 /* highest_bit assumes that alloc_type is 32 bits. */
710 extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
711
712 /* Find the highest set bit in VALUE. Returns the bit number of that
713 bit, using the same values as ffs. */
714 static inline alloc_type
715 highest_bit (alloc_type value)
716 {
717 /* This also assumes that alloc_type is unsigned. */
718 value |= value >> 1;
719 value |= value >> 2;
720 value |= value >> 4;
721 value |= value >> 8;
722 value |= value >> 16;
723 value = value ^ (value >> 1);
724 return alloc_ffs (value);
725 }
726
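/* Illustrative sketch (editorial, not built): the shift-and-or cascade
   in highest_bit smears the topmost set bit into all lower positions,
   the final xor keeps only that bit, and alloc_ffs reports its 1-based
   position.  */
#if 0
static void
highest_bit_example (void)
{
  /* 0x50 is 1010000 in binary; smearing gives 1111111, the xor leaves
     1000000, and ffs of that is 7.  */
  alloc_type pos = highest_bit (0x50);

  (void) pos;	/* 7 */
}
#endif
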
727 /* Find the offset from the start of an object to P, which may point
728 into the interior of the object. */
729
730 static unsigned long
731 zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
732 size_t start_bit)
733 {
734 unsigned int offset_in_bits;
735 alloc_type alloc_word = alloc_bits[start_word];
736
737 /* Mask off any bits after the initial bit, but make sure to include
738 the initial bit in the result. Note that START_BIT is
739 0-based. */
740 if (start_bit < 8 * sizeof (alloc_type) - 1)
741 alloc_word &= (1 << (start_bit + 1)) - 1;
742 offset_in_bits = start_bit;
743
744 /* Search for the start of the object. */
745 while (alloc_word == 0 && start_word > 0)
746 {
747 alloc_word = alloc_bits[--start_word];
748 offset_in_bits += 8 * sizeof (alloc_type);
749 }
750 /* We must always find a set bit. */
751 gcc_assert (alloc_word != 0);
752 /* Note that the result of highest_bit is 1-based. */
753 offset_in_bits -= highest_bit (alloc_word) - 1;
754
755 return BYTES_PER_ALLOC_BIT * offset_in_bits;
756 }
757
758 /* Allocate the mark bits for every zone, and set the pointers on each
759 page. */
760 static void
761 zone_allocate_marks (void)
762 {
763 struct alloc_zone *zone;
764
765 for (zone = G.zones; zone; zone = zone->next_zone)
766 {
767 struct small_page_entry *page;
768 mark_type *cur_marks;
769 size_t mark_words, mark_words_per_page;
770 #ifdef ENABLE_CHECKING
771 size_t n = 0;
772 #endif
773
774 mark_words_per_page
775 = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
776 mark_words = zone->n_small_pages * mark_words_per_page;
777 zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
778 mark_words);
779 cur_marks = zone->mark_bits;
780 for (page = zone->pages; page; page = page->next)
781 {
782 page->mark_bits = cur_marks;
783 cur_marks += mark_words_per_page;
784 #ifdef ENABLE_CHECKING
785 n++;
786 #endif
787 }
788 gcc_checking_assert (n == zone->n_small_pages);
789 }
790
791 /* We don't collect the PCH zone, but we do have to mark it
792 (for now). */
793 if (pch_zone.bytes)
794 pch_zone.mark_bits
795 = (mark_type *) xcalloc (sizeof (mark_type),
796 CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
797 }
798
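/* Illustrative sketch (editorial, not built): the mark-bitmap sizing
   done above, under the usual assumptions BYTES_PER_MARK_BIT == 8 and
   a 32-bit mark_type.  Each mark word then covers 8 * 8 * 4 == 256
   bytes, so a 4K page needs 16 mark words and a zone with N small
   pages gets 16 * N words from the single xcalloc call.  */
#if 0
static size_t
mark_words_for_zone (size_t n_small_pages)
{
  size_t mark_words_per_page
    = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;

  return n_small_pages * mark_words_per_page;
}
#endif
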
799 /* After marking and sweeping, release the memory used for mark bits. */
800 static void
801 zone_free_marks (void)
802 {
803 struct alloc_zone *zone;
804
805 for (zone = G.zones; zone; zone = zone->next_zone)
806 if (zone->mark_bits)
807 {
808 free (zone->mark_bits);
809 zone->mark_bits = NULL;
810 }
811
812 if (pch_zone.bytes)
813 {
814 free (pch_zone.mark_bits);
815 pch_zone.mark_bits = NULL;
816 }
817 }
818
819 #ifdef USING_MMAP
820 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
821 (if non-null). The ifdef structure here is intended to cause a
822 compile error unless exactly one of the HAVE_* is defined. */
823
824 static inline char *
825 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
826 {
827 #ifdef HAVE_MMAP_ANON
828 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
829 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
830 #endif
831 #ifdef HAVE_MMAP_DEV_ZERO
832 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
833 MAP_PRIVATE, G.dev_zero_fd, 0);
834 #endif
835
836 if (page == (char *) MAP_FAILED)
837 {
838 perror ("virtual memory exhausted");
839 exit (FATAL_EXIT_CODE);
840 }
841
842 /* Remember that we allocated this memory. */
843 zone->bytes_mapped += size;
844
845 /* Pretend we don't have access to the allocated pages. We'll enable
846 access to smaller pieces of the area in ggc_internal_alloc. Discard the
847 handle to avoid handle leak. */
848 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
849
850 return page;
851 }
852 #endif
853
854 /* Allocate a new page for allocating small objects in ZONE, and
855 return an entry for it. */
856
857 static struct small_page_entry *
858 alloc_small_page (struct alloc_zone *zone)
859 {
860 struct small_page_entry *entry;
861
862 /* Check the list of free pages for one we can use. */
863 entry = zone->free_pages;
864 if (entry != NULL)
865 {
866 /* Recycle the allocated memory from this page ... */
867 zone->free_pages = entry->next;
868 }
869 else
870 {
871 /* We want just one page. Allocate a bunch of them and put the
872 extras on the freelist. (Can only do this optimization with
873 mmap for backing store.) */
874 struct small_page_entry *e, *f = zone->free_pages;
875 int i;
876 char *page;
877
878 page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
879
880 /* This loop counts down so that the chain will be in ascending
881 memory order. */
882 for (i = G.quire_size - 1; i >= 1; i--)
883 {
884 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
885 e->common.page = page + (i << GGC_PAGE_SHIFT);
886 e->common.zone = zone;
887 e->next = f;
888 f = e;
889 set_page_table_entry (e->common.page, &e->common);
890 }
891
892 zone->free_pages = f;
893
894 entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
895 entry->common.page = page;
896 entry->common.zone = zone;
897 set_page_table_entry (page, &entry->common);
898 }
899
900 zone->n_small_pages++;
901
902 if (GGC_DEBUG_LEVEL >= 2)
903 fprintf (G.debug_file,
904 "Allocating %s page at %p, data %p-%p\n",
905 entry->common.zone->name, (PTR) entry, entry->common.page,
906 entry->common.page + SMALL_PAGE_SIZE - 1);
907
908 return entry;
909 }
910
911 /* Allocate a large page of size SIZE in ZONE. */
912
913 static struct large_page_entry *
914 alloc_large_page (size_t size, struct alloc_zone *zone)
915 {
916 struct large_page_entry *entry;
917 char *page;
918 size_t needed_size;
919
920 needed_size = size + sizeof (struct large_page_entry);
921 page = XNEWVAR (char, needed_size);
922
923 entry = (struct large_page_entry *) page;
924
925 entry->next = NULL;
926 entry->common.page = page + sizeof (struct large_page_entry);
927 entry->common.large_p = true;
928 entry->common.pch_p = false;
929 entry->common.zone = zone;
930 entry->common.survived = 0;
931 entry->mark_p = false;
932 entry->bytes = size;
933 entry->prev = NULL;
934
935 set_page_table_entry (entry->common.page, &entry->common);
936
937 if (GGC_DEBUG_LEVEL >= 2)
938 fprintf (G.debug_file,
939 "Allocating %s large page at %p, data %p-%p\n",
940 entry->common.zone->name, (PTR) entry, entry->common.page,
941 entry->common.page + SMALL_PAGE_SIZE - 1);
942
943 return entry;
944 }
945
946
947 /* For a page that is no longer needed, put it on the free page list. */
948
949 static inline void
950 free_small_page (struct small_page_entry *entry)
951 {
952 if (GGC_DEBUG_LEVEL >= 2)
953 fprintf (G.debug_file,
954 "Deallocating %s page at %p, data %p-%p\n",
955 entry->common.zone->name, (PTR) entry,
956 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
957
958 gcc_assert (!entry->common.large_p);
959
960 /* Mark the page as inaccessible. Discard the handle to
961 avoid handle leak. */
962 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
963 SMALL_PAGE_SIZE));
964
965 entry->next = entry->common.zone->free_pages;
966 entry->common.zone->free_pages = entry;
967 entry->common.zone->n_small_pages--;
968 }
969
970 /* Release a large page that is no longer needed. */
971
972 static inline void
973 free_large_page (struct large_page_entry *entry)
974 {
975 if (GGC_DEBUG_LEVEL >= 2)
976 fprintf (G.debug_file,
977 "Deallocating %s page at %p, data %p-%p\n",
978 entry->common.zone->name, (PTR) entry,
979 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
980
981 gcc_assert (entry->common.large_p);
982
983 set_page_table_entry (entry->common.page, NULL);
984 free (entry);
985 }
986
987 /* Release the free page cache to the system. */
988
989 static void
990 release_pages (struct alloc_zone *zone)
991 {
992 #ifdef USING_MMAP
993 struct small_page_entry *p, *next;
994 char *start;
995 size_t len;
996
997 /* Gather up adjacent pages so they are unmapped together. */
998 p = zone->free_pages;
999
1000 while (p)
1001 {
1002 start = p->common.page;
1003 next = p->next;
1004 len = SMALL_PAGE_SIZE;
1005 set_page_table_entry (p->common.page, NULL);
1006 p = next;
1007
1008 while (p && p->common.page == start + len)
1009 {
1010 next = p->next;
1011 len += SMALL_PAGE_SIZE;
1012 set_page_table_entry (p->common.page, NULL);
1013 p = next;
1014 }
1015
1016 munmap (start, len);
1017 zone->bytes_mapped -= len;
1018 }
1019
1020 zone->free_pages = NULL;
1021 #endif
1022 }
1023
1024 /* Place the block at PTR of size SIZE on the free list for ZONE. */
1025
1026 static inline void
1027 free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
1028 {
1029 struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
1030 size_t bin = 0;
1031
1032 bin = SIZE_BIN_DOWN (size);
1033 gcc_assert (bin != 0);
1034 if (bin > NUM_FREE_BINS)
1035 {
1036 bin = 0;
1037 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1038 sizeof (struct
1039 alloc_chunk)));
1040 chunk->size = size;
1041 chunk->next_free = zone->free_chunks[bin];
1042 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1043 + sizeof (struct
1044 alloc_chunk),
1045 size
1046 - sizeof (struct
1047 alloc_chunk)));
1048 }
1049 else
1050 {
1051 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1052 sizeof (struct
1053 alloc_chunk *)));
1054 chunk->next_free = zone->free_chunks[bin];
1055 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1056 + sizeof (struct
1057 alloc_chunk *),
1058 size
1059 - sizeof (struct
1060 alloc_chunk *)));
1061 }
1062
1063 zone->free_chunks[bin] = chunk;
1064 if (bin > zone->high_free_bin)
1065 zone->high_free_bin = bin;
1066 if (GGC_DEBUG_LEVEL >= 3)
1067 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
1068 }
1069
1070 /* For a given size of memory requested for allocation, return the
1071 actual size that is going to be allocated. */
1072
1073 size_t
1074 ggc_round_alloc_size (size_t requested_size)
1075 {
1076 size_t size;
1077
1078 /* Make sure that zero-sized allocations get a unique and freeable
1079 pointer. */
1080 if (requested_size == 0)
1081 size = MAX_ALIGNMENT;
1082 else
1083 size = (requested_size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
1084
1085 return size;
1086 }
1087
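/* Illustrative sketch (editorial, not built): what the rounding above
   produces for a few request sizes, and which free bin the rounded
   size maps to.  The concrete numbers assume MAX_ALIGNMENT == 8; the
   real value is host-dependent.  */
#if 0
static void
round_alloc_size_example (void)
{
  /* 0 -> 8 (so the pointer is unique and freeable), 1..8 -> 8,
     9..16 -> 16, and so on.  */
  size_t s1 = ggc_round_alloc_size (0);	  /* 8 */
  size_t s2 = ggc_round_alloc_size (20);  /* 24 */

  /* The rounded size indexes the fixed-size free bins directly:
     SIZE_BIN_DOWN (24) == 3 when FREE_BIN_DELTA == 8.  */
  size_t bin = SIZE_BIN_DOWN (s2);

  (void) s1;
  (void) s2;
  (void) bin;
}
#endif
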
1088 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1089
1090 void *
1091 ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1092 MEM_STAT_DECL)
1093 {
1094 size_t bin;
1095 size_t csize;
1096 struct small_page_entry *entry;
1097 struct alloc_chunk *chunk, **pp;
1098 void *result;
1099 size_t size = ggc_round_alloc_size (orig_size);
1100
1101 /* Try to allocate the object from several different sources. Each
1102 of these cases is responsible for setting RESULT and SIZE to
1103 describe the allocated block, before jumping to FOUND. If a
1104 chunk is split, the allocate bit for the new chunk should also be
1105 set.
1106
1107 Large objects are handled specially. However, they'll just fail
1108 the next couple of conditions, so we can wait to check for them
1109 below. The large object case is relatively rare (< 1%), so this
1110 is a win. */
1111
1112 /* First try to split the last chunk we allocated. For best
1113 fragmentation behavior it would be better to look for a
1114 free bin of the appropriate size for a small object. However,
1115 we're unlikely (1% - 7%) to find one, and this gives better
1116 locality behavior anyway. This case handles the lion's share
1117 of all calls to this function. */
1118 if (size <= zone->cached_free_size)
1119 {
1120 result = zone->cached_free;
1121
1122 zone->cached_free_size -= size;
1123 if (zone->cached_free_size)
1124 {
1125 zone->cached_free += size;
1126 zone_set_object_alloc_bit (zone->cached_free);
1127 }
1128
1129 goto found;
1130 }
1131
1132 /* Next, try to find a free bin of the exactly correct size. */
1133
1134 /* We want to round SIZE up, rather than down, but we know it's
1135 already aligned to at least FREE_BIN_DELTA, so we can just
1136 shift. */
1137 bin = SIZE_BIN_DOWN (size);
1138
1139 if (bin <= NUM_FREE_BINS
1140 && (chunk = zone->free_chunks[bin]) != NULL)
1141 {
1142 /* We have a chunk of the right size. Pull it off the free list
1143 and use it. */
1144
1145 zone->free_chunks[bin] = chunk->next_free;
1146
1147 /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1148 == FREE_BIN_DELTA. */
1149 result = chunk;
1150
1151 /* The allocation bits are already set correctly. HIGH_FREE_BIN
1152 may now be wrong, if this was the last chunk in the high bin.
1153 Rather than fixing it up now, wait until we need to search
1154 the free bins. */
1155
1156 goto found;
1157 }
1158
1159 /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1160 to split. We can find one in the too-big bin, or in the largest
1161 sized bin with a chunk in it. Try the largest normal-sized bin
1162 first. */
1163
1164 if (zone->high_free_bin > bin)
1165 {
1166 /* Find the highest numbered free bin. It will be at or below
1167 the watermark. */
1168 while (zone->high_free_bin > bin
1169 && zone->free_chunks[zone->high_free_bin] == NULL)
1170 zone->high_free_bin--;
1171
1172 if (zone->high_free_bin > bin)
1173 {
1174 size_t tbin = zone->high_free_bin;
1175 chunk = zone->free_chunks[tbin];
1176
1177 /* Remove the chunk from its previous bin. */
1178 zone->free_chunks[tbin] = chunk->next_free;
1179
1180 result = (char *) chunk;
1181
1182 /* Save the rest of the chunk for future allocation. */
1183 if (zone->cached_free_size)
1184 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1185
1186 chunk = (struct alloc_chunk *) ((char *) result + size);
1187 zone->cached_free = (char *) chunk;
1188 zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1189
1190 /* Mark the new free chunk as an object, so that we can
1191 find the size of the newly allocated object. */
1192 zone_set_object_alloc_bit (chunk);
1193
1194 /* HIGH_FREE_BIN may now be wrong, if this was the last
1195 chunk in the high bin. Rather than fixing it up now,
1196 wait until we need to search the free bins. */
1197
1198 goto found;
1199 }
1200 }
1201
1202 /* Failing that, look through the "other" bucket for a chunk
1203 that is large enough. */
1204 pp = &(zone->free_chunks[0]);
1205 chunk = *pp;
1206 while (chunk && chunk->size < size)
1207 {
1208 pp = &chunk->next_free;
1209 chunk = *pp;
1210 }
1211
1212 if (chunk)
1213 {
1214 /* Remove the chunk from its previous bin. */
1215 *pp = chunk->next_free;
1216
1217 result = (char *) chunk;
1218
1219 /* Save the rest of the chunk for future allocation, if there's any
1220 left over. */
1221 csize = chunk->size;
1222 if (csize > size)
1223 {
1224 if (zone->cached_free_size)
1225 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1226
1227 chunk = (struct alloc_chunk *) ((char *) result + size);
1228 zone->cached_free = (char *) chunk;
1229 zone->cached_free_size = csize - size;
1230
1231 /* Mark the new free chunk as an object. */
1232 zone_set_object_alloc_bit (chunk);
1233 }
1234
1235 goto found;
1236 }
1237
1238 /* Handle large allocations. We could choose any threshold between
1239 GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1240 GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
1241 be guaranteed to have a unique entry in the lookup table. Large
1242 allocations will always fall through to here. */
1243 if (size > GGC_PAGE_SIZE)
1244 {
1245 struct large_page_entry *entry = alloc_large_page (size, zone);
1246
1247 entry->common.survived = 0;
1248
1249 entry->next = zone->large_pages;
1250 if (zone->large_pages)
1251 zone->large_pages->prev = entry;
1252 zone->large_pages = entry;
1253
1254 result = entry->common.page;
1255
1256 goto found;
1257 }
1258
1259 /* Failing everything above, allocate a new small page. */
1260
1261 entry = alloc_small_page (zone);
1262 entry->next = zone->pages;
1263 zone->pages = entry;
1264
1265 /* Mark the first chunk in the new page. */
1266 entry->alloc_bits[0] = 1;
1267
1268 result = entry->common.page;
1269 if (size < SMALL_PAGE_SIZE)
1270 {
1271 if (zone->cached_free_size)
1272 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1273
1274 zone->cached_free = (char *) result + size;
1275 zone->cached_free_size = SMALL_PAGE_SIZE - size;
1276
1277 /* Mark the new free chunk as an object. */
1278 zone_set_object_alloc_bit (zone->cached_free);
1279 }
1280
1281 found:
1282
1283 /* We could save TYPE in the chunk, but we don't use that for
1284 anything yet. If we wanted to, we could do it by adding it
1285 either before the beginning of the chunk or after its end,
1286 and adjusting the size and pointer appropriately. */
1287
1288 /* We'll probably write to this after we return. */
1289 prefetchw (result);
1290
1291 #ifdef ENABLE_GC_CHECKING
1292 /* `Poison' the entire allocated object. */
1293 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1294 memset (result, 0xaf, size);
1295 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
1296 size - orig_size));
1297 #endif
1298
1299 /* Tell Valgrind that the memory is there, but its content isn't
1300 defined. The bytes at the end of the object are still marked
1301 inaccessible. */
1302 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));
1303
1304 /* Keep track of how many bytes are being allocated. This
1305 information is used in deciding when to collect. */
1306 zone->allocated += size;
1307
1308 timevar_ggc_mem_total += size;
1309
1310 if (GATHER_STATISTICS)
1311 ggc_record_overhead (orig_size, size - orig_size, result FINAL_PASS_MEM_STAT);
1312
1313 if (GATHER_STATISTICS)
  {
1314 size_t object_size = size;
1315 size_t overhead = object_size - orig_size;
1316
1317 zone->stats.total_overhead += overhead;
1318 zone->stats.total_allocated += object_size;
1319
1320 if (orig_size <= 32)
1321 {
1322 zone->stats.total_overhead_under32 += overhead;
1323 zone->stats.total_allocated_under32 += object_size;
1324 }
1325 if (orig_size <= 64)
1326 {
1327 zone->stats.total_overhead_under64 += overhead;
1328 zone->stats.total_allocated_under64 += object_size;
1329 }
1330 if (orig_size <= 128)
1331 {
1332 zone->stats.total_overhead_under128 += overhead;
1333 zone->stats.total_allocated_under128 += object_size;
1334 }
1335 }
1337
1338 if (GGC_DEBUG_LEVEL >= 3)
1339 fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
1340 (unsigned long) size, result);
1341
1342 return result;
1343 }
1344
1345 #define ggc_internal_alloc_zone_pass_stat(s,z) \
1346 ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)
1347
1348 void *
1349 ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
1350 struct alloc_zone *zone MEM_STAT_DECL)
1351 {
1352 void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
1353 memset (result, 0, orig_size);
1354 return result;
1355 }
1356
1357
1358 /* Allocate SIZE bytes of memory whose type is GTE, in the zone
1359 appropriate for that type. */
1360
1361 void *
1362 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1363 MEM_STAT_DECL)
1364 {
1365 switch (gte)
1366 {
1367 case gt_ggc_e_14lang_tree_node:
1368 return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);
1369
1370 case gt_ggc_e_7rtx_def:
1371 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1372
1373 case gt_ggc_e_9rtvec_def:
1374 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1375
1376 default:
1377 return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
1378 }
1379 }
1380
1381 /* Normal GC allocation simply allocates into the main zone. */
1382
1383 void *
1384 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1385 {
1386 return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
1387 }
1388
1389 /* Poison the chunk. */
1390 #ifdef ENABLE_GC_CHECKING
1391 #define poison_region(PTR, SIZE) \
1392 do { \
1393 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE))); \
1394 memset ((PTR), 0xa5, (SIZE)); \
1395 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE))); \
1396 } while (0)
1397 #else
1398 #define poison_region(PTR, SIZE)
1399 #endif
1400
1401 /* Free the object at P. */
1402
1403 void
1404 ggc_free (void *p)
1405 {
1406 struct page_entry *page;
1407
1408 if (GATHER_STATISTICS)
1409 ggc_free_overhead (p);
1410
1411 poison_region (p, ggc_get_size (p));
1412
1413 page = zone_get_object_page (p);
1414
1415 if (page->large_p)
1416 {
1417 struct large_page_entry *large_page
1418 = (struct large_page_entry *) page;
1419
1420 /* Remove the page from the linked list. */
1421 if (large_page->prev)
1422 large_page->prev->next = large_page->next;
1423 else
1424 {
1425 gcc_assert (large_page->common.zone->large_pages == large_page);
1426 large_page->common.zone->large_pages = large_page->next;
1427 }
1428 if (large_page->next)
1429 large_page->next->prev = large_page->prev;
1430
1431 large_page->common.zone->allocated -= large_page->bytes;
1432
1433 /* Release the memory associated with this object. */
1434 free_large_page (large_page);
1435 }
1436 else if (page->pch_p)
1437 /* Don't do anything. We won't allocate a new object from the
1438 PCH zone so there's no point in releasing anything. */
1439 ;
1440 else
1441 {
1442 size_t size = ggc_get_size (p);
1443
1444 page->zone->allocated -= size;
1445
1446 /* Add the chunk to the free list. We don't bother with coalescing,
1447 since we are likely to want a chunk of this size again. */
1448 free_chunk ((char *)p, size, page->zone);
1449 }
1450 }
1451
1452 /* Mark function for strings. */
1453
1454 void
1455 gt_ggc_m_S (const void *p)
1456 {
1457 page_entry *entry;
1458 unsigned long offset;
1459
1460 if (!p)
1461 return;
1462
1463 /* Look up the page on which the object is allocated. */
1464 entry = lookup_page_table_if_allocated (p);
1465 if (! entry)
1466 return;
1467
1468 if (entry->pch_p)
1469 {
1470 size_t alloc_word, alloc_bit, t;
1471 t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
1472 alloc_word = t / (8 * sizeof (alloc_type));
1473 alloc_bit = t % (8 * sizeof (alloc_type));
1474 offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
1475 alloc_bit);
1476 }
1477 else if (entry->large_p)
1478 {
1479 struct large_page_entry *le = (struct large_page_entry *) entry;
1480 offset = ((const char *) p) - entry->page;
1481 gcc_assert (offset < le->bytes);
1482 }
1483 else
1484 {
1485 struct small_page_entry *se = (struct small_page_entry *) entry;
1486 unsigned int start_word = zone_get_object_alloc_word (p);
1487 unsigned int start_bit = zone_get_object_alloc_bit (p);
1488 offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);
1489
1490 /* On some platforms a char* will not necessarily line up on an
1491 allocation boundary, so we have to update the offset to
1492 account for the leftover bytes. */
1493 offset += (size_t) p % BYTES_PER_ALLOC_BIT;
1494 }
1495
1496 if (offset)
1497 {
1498 /* Here we've seen a char* which does not point to the beginning
1499 of an allocated object. We assume it points to the middle of
1500 a STRING_CST. */
1501 gcc_assert (offset == offsetof (struct tree_string, str));
1502 p = ((const char *) p) - offset;
1503 gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
1504 return;
1505 }
1506
1507 /* Inefficient, but also unlikely to matter. */
1508 ggc_set_mark (p);
1509 }
1510
1511 /* If P is not marked, mark it and return false. Otherwise return true.
1512 P must have been allocated by the GC allocator; it mustn't point to
1513 static objects, stack variables, or memory allocated with malloc. */
1514
1515 int
1516 ggc_set_mark (const void *p)
1517 {
1518 struct page_entry *page;
1519 const char *ptr = (const char *) p;
1520
1521 page = zone_get_object_page (p);
1522
1523 if (page->pch_p)
1524 {
1525 size_t mark_word, mark_bit, offset;
1526 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1527 mark_word = offset / (8 * sizeof (mark_type));
1528 mark_bit = offset % (8 * sizeof (mark_type));
1529
1530 if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1531 return 1;
1532 pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1533 }
1534 else if (page->large_p)
1535 {
1536 struct large_page_entry *large_page
1537 = (struct large_page_entry *) page;
1538
1539 if (large_page->mark_p)
1540 return 1;
1541 large_page->mark_p = true;
1542 }
1543 else
1544 {
1545 struct small_page_entry *small_page
1546 = (struct small_page_entry *) page;
1547
1548 if (small_page->mark_bits[zone_get_object_mark_word (p)]
1549 & (1 << zone_get_object_mark_bit (p)))
1550 return 1;
1551 small_page->mark_bits[zone_get_object_mark_word (p)]
1552 |= (1 << zone_get_object_mark_bit (p));
1553 }
1554
1555 if (GGC_DEBUG_LEVEL >= 4)
1556 fprintf (G.debug_file, "Marking %p\n", p);
1557
1558 return 0;
1559 }
1560
1561 /* Return 1 if P has been marked, zero otherwise.
1562 P must have been allocated by the GC allocator; it mustn't point to
1563 static objects, stack variables, or memory allocated with malloc. */
1564
1565 int
1566 ggc_marked_p (const void *p)
1567 {
1568 struct page_entry *page;
1569 const char *ptr = (const char *) p;
1570
1571 page = zone_get_object_page (p);
1572
1573 if (page->pch_p)
1574 {
1575 size_t mark_word, mark_bit, offset;
1576 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1577 mark_word = offset / (8 * sizeof (mark_type));
1578 mark_bit = offset % (8 * sizeof (mark_type));
1579
1580 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1581 }
1582
1583 if (page->large_p)
1584 {
1585 struct large_page_entry *large_page
1586 = (struct large_page_entry *) page;
1587
1588 return large_page->mark_p;
1589 }
1590 else
1591 {
1592 struct small_page_entry *small_page
1593 = (struct small_page_entry *) page;
1594
1595 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1596 & (1 << zone_get_object_mark_bit (p)));
1597 }
1598 }
1599
1600 /* Return the size of the gc-able object P. */
1601
1602 size_t
1603 ggc_get_size (const void *p)
1604 {
1605 struct page_entry *page;
1606 const char *ptr = (const char *) p;
1607
1608 page = zone_get_object_page (p);
1609
1610 if (page->pch_p)
1611 {
1612 size_t alloc_word, alloc_bit, offset, max_size;
1613 offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1614 alloc_word = offset / (8 * sizeof (alloc_type));
1615 alloc_bit = offset % (8 * sizeof (alloc_type));
1616 max_size = pch_zone.bytes - (ptr - pch_zone.page);
1617 return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1618 max_size);
1619 }
1620
1621 if (page->large_p)
1622 return ((struct large_page_entry *)page)->bytes;
1623 else
1624 return zone_find_object_size ((struct small_page_entry *) page, p);
1625 }
1626
1627 /* Initialize the ggc-zone-mmap allocator. */
1628 void
1629 init_ggc (void)
1630 {
1631 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1632 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1633 the current assumptions to hold. */
1634
1635 gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1636
1637 /* Set up the main zone by hand. */
1638 main_zone.name = "Main zone";
1639 G.zones = &main_zone;
1640
1641 /* Allocate the default zones. */
1642 new_ggc_zone_1 (&rtl_zone, "RTL zone");
1643 new_ggc_zone_1 (&tree_zone, "Tree zone");
1644 new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1645
1646 G.pagesize = getpagesize();
1647 G.lg_pagesize = exact_log2 (G.pagesize);
1648 G.page_mask = ~(G.pagesize - 1);
1649
1650 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
1651 gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1652
1653 /* Allocate 16 system pages at a time. */
1654 G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
1655
1656 /* Calculate the size of the allocation bitmap and other overhead. */
1657 /* Right now we allocate bits for the page header and bitmap. These
1658 are wasted, but a little tricky to eliminate. */
1659 G.small_page_overhead
1660 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1661 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1662
1663 #ifdef HAVE_MMAP_DEV_ZERO
1664 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1665 gcc_assert (G.dev_zero_fd != -1);
1666 #endif
1667
1668 #if 0
1669 G.debug_file = fopen ("ggc-mmap.debug", "w");
1670 setlinebuf (G.debug_file);
1671 #else
1672 G.debug_file = stdout;
1673 #endif
1674
1675 #ifdef USING_MMAP
1676 /* StunOS has an amazing off-by-one error for the first mmap allocation
1677 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1678 believe, is an unaligned page allocation, which would cause us to
1679 hork badly if we tried to use it. */
1680 {
1681 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1682 struct small_page_entry *e;
1683 if ((size_t)p & (G.pagesize - 1))
1684 {
1685 /* How losing. Discard this one and try another. If we still
1686 can't get something useful, give up. */
1687
1688 p = alloc_anon (NULL, G.pagesize, &main_zone);
1689 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1690 }
1691
1692 if (GGC_PAGE_SIZE == G.pagesize)
1693 {
1694 /* We have a good page, might as well hold onto it... */
1695 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
1696 e->common.page = p;
1697 e->common.zone = &main_zone;
1698 e->next = main_zone.free_pages;
1699 set_page_table_entry (e->common.page, &e->common);
1700 main_zone.free_pages = e;
1701 }
1702 else
1703 {
1704 munmap (p, G.pagesize);
1705 }
1706 }
1707 #endif
1708 }
1709
1710 /* Start a new GGC zone. */
1711
1712 static void
1713 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1714 {
1715 new_zone->name = name;
1716 new_zone->next_zone = G.zones->next_zone;
1717 G.zones->next_zone = new_zone;
1718 }
1719
1720 /* Free all empty pages and objects within a page for a given zone */
1721
1722 static void
1723 sweep_pages (struct alloc_zone *zone)
1724 {
1725 struct large_page_entry **lpp, *lp, *lnext;
1726 struct small_page_entry **spp, *sp, *snext;
1727 char *last_free;
1728 size_t allocated = 0;
1729 bool nomarksinpage;
1730
1731 /* First, reset the free_chunks lists, since we are going to
1732 re-free free chunks in hopes of coalescing them into large chunks. */
1733 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1734 zone->high_free_bin = 0;
1735 zone->cached_free = NULL;
1736 zone->cached_free_size = 0;
1737
1738 /* Large pages are all or none affairs. Either they are completely
1739 empty, or they are completely full. */
1740 lpp = &zone->large_pages;
1741 for (lp = zone->large_pages; lp != NULL; lp = lnext)
1742 {
1743 gcc_assert (lp->common.large_p);
1744
1745 lnext = lp->next;
1746
1747 /* This page has now survived another collection. */
1748 lp->common.survived++;
1749
1750 if (lp->mark_p)
1751 {
1752 lp->mark_p = false;
1753 allocated += lp->bytes;
1754 lpp = &lp->next;
1755 }
1756 else
1757 {
1758 *lpp = lnext;
1759 #ifdef ENABLE_GC_CHECKING
1760 /* Poison the page. */
1761 memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1762 #endif
1763 if (lp->prev)
1764 lp->prev->next = lp->next;
1765 if (lp->next)
1766 lp->next->prev = lp->prev;
1767 free_large_page (lp);
1768 }
1769 }
1770
1771 spp = &zone->pages;
1772 for (sp = zone->pages; sp != NULL; sp = snext)
1773 {
1774 char *object, *last_object;
1775 char *end;
1776 alloc_type *alloc_word_p;
1777 mark_type *mark_word_p;
1778
1779 gcc_assert (!sp->common.large_p);
1780
1781 snext = sp->next;
1782
1783 /* This page has now survived another collection. */
1784 sp->common.survived++;
1785
1786 /* Step through all chunks, consolidate those that are free and
1787 insert them into the free lists. Note that consolidation
1788 slows down collection slightly. */
1789
1790 last_object = object = sp->common.page;
1791 end = sp->common.page + SMALL_PAGE_SIZE;
1792 last_free = NULL;
1793 nomarksinpage = true;
1794 mark_word_p = sp->mark_bits;
1795 alloc_word_p = sp->alloc_bits;
1796
1797 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1798
1799 object = sp->common.page;
1800 do
1801 {
1802 unsigned int i, n;
1803 alloc_type alloc_word;
1804 mark_type mark_word;
1805
1806 alloc_word = *alloc_word_p++;
1807 mark_word = *mark_word_p++;
1808
1809 if (mark_word)
1810 nomarksinpage = false;
1811
1812 /* There ought to be some way to do this without looping... */
1813 i = 0;
1814 while ((n = alloc_ffs (alloc_word)) != 0)
1815 {
1816 /* Extend the current state for n - 1 bits. We can't
1817 shift alloc_word by n, even though it isn't used in the
1818 loop, in case only the highest bit was set. */
1819 alloc_word >>= n - 1;
1820 mark_word >>= n - 1;
1821 object += BYTES_PER_MARK_BIT * (n - 1);
1822
1823 if (mark_word & 1)
1824 {
1825 if (last_free)
1826 {
1827 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1828 object
1829 - last_free));
1830 poison_region (last_free, object - last_free);
1831 free_chunk (last_free, object - last_free, zone);
1832 last_free = NULL;
1833 }
1834 else
1835 allocated += object - last_object;
1836 last_object = object;
1837 }
1838 else
1839 {
1840 if (last_free == NULL)
1841 {
1842 last_free = object;
1843 allocated += object - last_object;
1844 }
1845 else
1846 zone_clear_object_alloc_bit (sp, object);
1847 }
1848
1849 /* Shift to just after the alloc bit we handled. */
1850 alloc_word >>= 1;
1851 mark_word >>= 1;
1852 object += BYTES_PER_MARK_BIT;
1853
1854 i += n;
1855 }
1856
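/* I bits of this word were consumed above; step OBJECT past the
   remaining bits, none of which start an object.  */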
1857 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1858 }
1859 while (object < end);
1860
1861 if (nomarksinpage)
1862 {
1863 *spp = snext;
1864 #ifdef ENABLE_GC_CHECKING
1865 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
1866 SMALL_PAGE_SIZE));
1867 /* Poison the page. */
1868 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1869 #endif
1870 free_small_page (sp);
1871 continue;
1872 }
1873 else if (last_free)
1874 {
1875 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1876 object - last_free));
1877 poison_region (last_free, object - last_free);
1878 free_chunk (last_free, object - last_free, zone);
1879 }
1880 else
1881 allocated += object - last_object;
1882
1883 spp = &sp->next;
1884 }
1885
1886 zone->allocated = allocated;
1887 }
1888
1889 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1890 is true if we need to mark before sweeping, false if some other
1891 zone collection has already performed marking for us. Returns true
1892 if we collected, false otherwise. */
1893
1894 static bool
1895 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1896 {
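/* Disabled debugging code: dump the number of free chunks and the
   total free bytes in each bin of this zone's free lists.  */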
1897 #if 0
1898 /* */
1899 {
1900 int i;
1901 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1902 {
1903 struct alloc_chunk *chunk;
1904 int n, tot;
1905
1906 n = 0;
1907 tot = 0;
1908 chunk = zone->free_chunks[i];
1909 while (chunk)
1910 {
1911 n++;
1912 tot += chunk->size;
1913 chunk = chunk->next_free;
1914 }
1915 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1916 i, n, tot);
1917 }
1918 }
1919 /* */
1920 #endif
1921
1922 if (!quiet_flag)
1923 fprintf (stderr, " {%s GC %luk -> ",
1924 zone->name, (unsigned long) zone->allocated / 1024);
1925
1926 /* Zero the total allocated bytes. This will be recalculated in the
1927 sweep phase. */
1928 zone->allocated = 0;
1929
1930 /* Release the pages we freed the last time we collected, but didn't
1931 reuse in the interim. */
1932 release_pages (zone);
1933
1934 if (need_marking)
1935 {
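/* Mark bits are allocated and the roots are marked for all zones at
   once; the caller records this via our return value, so further
   zones collected in this cycle can skip straight to sweeping.  */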
1936 zone_allocate_marks ();
1937 ggc_mark_roots ();
1938 if (GATHER_STATISTICS)
1939 ggc_prune_overhead_list ();
1940 }
1941
1942 sweep_pages (zone);
1943 zone->was_collected = true;
1944 zone->allocated_last_gc = zone->allocated;
1945
1946 if (!quiet_flag)
1947 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1948 return true;
1949 }
1950
1951 /* Calculate the average page survival rate in terms of number of
1952 collections. */
1953
1954 static float
1955 calculate_average_page_survival (struct alloc_zone *zone)
1956 {
1957 float count = 0.0;
1958 float survival = 0.0;
1959 struct small_page_entry *p;
1960 struct large_page_entry *lp;
1961 for (p = zone->pages; p; p = p->next)
1962 {
1963 count += 1.0;
1964 survival += p->common.survived;
1965 }
1966 for (lp = zone->large_pages; lp; lp = lp->next)
1967 {
1968 count += 1.0;
1969 survival += lp->common.survived;
1970 }
1971 return survival/count;
1972 }
1973
1974 /* Top level collection routine. */
1975
1976 void
1977 ggc_collect (void)
1978 {
1979 struct alloc_zone *zone;
1980 bool marked = false;
1981
1982 timevar_push (TV_GC);
1983
1984 if (!ggc_force_collect)
1985 {
1986 float allocated_last_gc = 0, allocated = 0, min_expand;
1987
1988 for (zone = G.zones; zone; zone = zone->next_zone)
1989 {
1990 allocated_last_gc += zone->allocated_last_gc;
1991 allocated += zone->allocated;
1992 }
1993
1994 allocated_last_gc =
1995 MAX (allocated_last_gc,
1996 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1997 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1998
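/* Only collect if total allocation has grown by at least
   GGC_MIN_EXPAND percent of the data live after the previous
   collection (itself floored at GGC_MIN_HEAPSIZE kilobytes).  For
   instance, with 100 MB live and --param ggc-min-expand=30,
   collection would not trigger again until roughly 130 MB were in
   use; the figures are illustrative, since the default parameters
   depend on available memory.  */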
1999 if (allocated < allocated_last_gc + min_expand)
2000 {
2001 timevar_pop (TV_GC);
2002 return;
2003 }
2004 }
2005
2006 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2007
2008 /* Start by possibly collecting the main zone. */
2009 main_zone.was_collected = false;
2010 marked |= ggc_collect_1 (&main_zone, true);
2011
2012 /* In order to keep the number of collections down, we don't
2013 collect other zones unless we are collecting the main zone. This
2014 gives us roughly the same number of collections as we used to
2015 have with the old gc.  The number of collections is important
2016 because our main slowdown (according to profiling) is now in
2017 marking. So if we mark twice as often as we used to, we'll be
2018 twice as slow. Hopefully we'll avoid this cost when we mark
2019 zone-at-a-time. */
2020 /* NOTE drow/2004-07-28: We now always collect the main zone, but
2021 keep this code in case the heuristics are further refined. */
2022
2023 if (main_zone.was_collected)
2024 {
2025 struct alloc_zone *zone;
2026
2027 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
2028 {
2029 zone->was_collected = false;
2030 marked |= ggc_collect_1 (zone, !marked);
2031 }
2032 }
2033
2034 /* Print page survival stats, if someone wants them. */
2035 if (GATHER_STATISTICS && GGC_DEBUG_LEVEL >= 2)
2036 {
2037 for (zone = G.zones; zone; zone = zone->next_zone)
2038 {
2039 if (zone->was_collected)
2040 {
2041 float f = calculate_average_page_survival (zone);
2042 printf ("Average page survival in zone `%s' is %f\n",
2043 zone->name, f);
2044 }
2045 }
2046 }
2047
2048 if (marked)
2049 zone_free_marks ();
2050
2051 /* Free dead zones. */
2052 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
2053 {
2054 if (zone->next_zone->dead)
2055 {
2056 struct alloc_zone *dead_zone = zone->next_zone;
2057
2058 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
2059
2060 /* The zone must be empty. */
2061 gcc_assert (!dead_zone->allocated);
2062
2063 /* Unchain the dead zone, release all its pages and free it. */
2064 zone->next_zone = zone->next_zone->next_zone;
2065 release_pages (dead_zone);
2066 free (dead_zone);
2067 }
2068 }
2069
2070 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2071
2072 timevar_pop (TV_GC);
2073 }
2074
2075 /* Print allocation statistics. */
2076 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2077 ? (x) \
2078 : ((x) < 1024*1024*10 \
2079 ? (x) / 1024 \
2080 : (x) / (1024*1024))))
2081 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
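/* SCALE picks an appropriate unit for X (bytes below 10k, kilobytes
   below 10M, megabytes otherwise) and LABEL the matching suffix; for
   example, a 3 MB figure prints as "3072k" and a 20 MB figure as
   "20M".  */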
2082
2083 void
2084 ggc_print_statistics (void)
2085 {
2086 struct alloc_zone *zone;
2087 struct ggc_statistics stats;
2088 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
2089 size_t pte_overhead, i;
2090
2091 /* Clear the statistics. */
2092 memset (&stats, 0, sizeof (stats));
2093
2094 /* Make sure collection will really occur. */
2095 ggc_force_collect = true;
2096
2097 /* Collect and print the statistics common across collectors. */
2098 ggc_print_common_statistics (stderr, &stats);
2099
2100 ggc_force_collect = false;
2101
2102 /* Release free pages so that we will not count the bytes allocated
2103 there as part of the total allocated memory. */
2104 for (zone = G.zones; zone; zone = zone->next_zone)
2105 release_pages (zone);
2106
2107 /* Collect some information about the various sizes of
2108 allocation. */
2109 fprintf (stderr,
2110 "Memory still allocated at the end of the compilation process\n");
2111
2112 fprintf (stderr, "%20s %10s %10s %10s\n",
2113 "Zone", "Allocated", "Used", "Overhead");
2114 for (zone = G.zones; zone; zone = zone->next_zone)
2115 {
2116 struct large_page_entry *large_page;
2117 size_t overhead, allocated, in_use;
2118
2119 /* Skip empty zones. */
2120 if (!zone->pages && !zone->large_pages)
2121 continue;
2122
2123 allocated = in_use = 0;
2124
2125 overhead = sizeof (struct alloc_zone);
2126
2127 for (large_page = zone->large_pages; large_page != NULL;
2128 large_page = large_page->next)
2129 {
2130 allocated += large_page->bytes;
2131 in_use += large_page->bytes;
2132 overhead += sizeof (struct large_page_entry);
2133 }
2134
2135 /* There's no easy way to walk through the small pages finding
2136 used and unused objects. Instead, add all the pages, and
2137 subtract out the free list. */
2138
2139 allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2140 in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2141 overhead += G.small_page_overhead * zone->n_small_pages;
2142
2143 for (i = 0; i <= NUM_FREE_BINS; i++)
2144 {
2145 struct alloc_chunk *chunk = zone->free_chunks[i];
2146 while (chunk)
2147 {
2148 in_use -= ggc_get_size (chunk);
2149 chunk = chunk->next_free;
2150 }
2151 }
2152
2153 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2154 zone->name,
2155 SCALE (allocated), LABEL (allocated),
2156 SCALE (in_use), LABEL (in_use),
2157 SCALE (overhead), LABEL (overhead));
2158
2159 gcc_assert (in_use == zone->allocated);
2160
2161 total_overhead += overhead;
2162 total_allocated += zone->allocated;
2163 total_bytes_mapped += zone->bytes_mapped;
2164 }
2165
2166 /* Count the size of the page table as best we can. */
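/* On narrow hosts the lookup table is a single two-level array; on
   wider hosts it is a chain of such arrays, keyed by the high bits of
   the address.  */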
2167 #if HOST_BITS_PER_PTR <= 32
2168 pte_overhead = sizeof (G.lookup);
2169 for (i = 0; i < PAGE_L1_SIZE; i++)
2170 if (G.lookup[i])
2171 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2172 #else
2173 {
2174 page_table table = G.lookup;
2175 pte_overhead = 0;
2176 while (table)
2177 {
2178 pte_overhead += sizeof (*table);
2179 for (i = 0; i < PAGE_L1_SIZE; i++)
2180 if (table->table[i])
2181 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2182 table = table->next;
2183 }
2184 }
2185 #endif
2186 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2187 "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2188 total_overhead += pte_overhead;
2189
2190 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2191 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2192 SCALE (total_allocated), LABEL (total_allocated),
2193 SCALE (total_overhead), LABEL (total_overhead));
2194
2195 if (GATHER_STATISTICS)
2196 {
2197 unsigned long long all_overhead = 0, all_allocated = 0;
2198 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2199 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2200 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2201
2202 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2203
2204 for (zone = G.zones; zone; zone = zone->next_zone)
2205 {
2206 all_overhead += zone->stats.total_overhead;
2207 all_allocated += zone->stats.total_allocated;
2208
2209 all_allocated_under32 += zone->stats.total_allocated_under32;
2210 all_overhead_under32 += zone->stats.total_overhead_under32;
2211
2212 all_allocated_under64 += zone->stats.total_allocated_under64;
2213 all_overhead_under64 += zone->stats.total_overhead_under64;
2214
2215 all_allocated_under128 += zone->stats.total_allocated_under128;
2216 all_overhead_under128 += zone->stats.total_overhead_under128;
2217
2218 fprintf (stderr, "%20s: %10lld\n",
2219 zone->name, zone->stats.total_allocated);
2220 }
2221
2222 fprintf (stderr, "\n");
2223
2224 fprintf (stderr, "Total Overhead: %10lld\n",
2225 all_overhead);
2226 fprintf (stderr, "Total Allocated: %10lld\n",
2227 all_allocated);
2228
2229 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2230 all_overhead_under32);
2231 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2232 all_allocated_under32);
2233 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2234 all_overhead_under64);
2235 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2236 all_allocated_under64);
2237 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2238 all_overhead_under128);
2239 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2240 all_allocated_under128);
2241 }
2242 }
2243
2244 /* Precompiled header support. */
2245
2246 /* For precompiled headers, we sort objects based on their type. We
2247 also sort various objects into their own buckets; currently this
2248 covers strings and IDENTIFIER_NODE trees. The choices of how
2249 to sort buckets have not yet been tuned. */
2250
2251 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2252
2253 #define OTHER_BUCKET (gt_types_enum_last + 0)
2254 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2255 #define STRING_BUCKET (gt_types_enum_last + 2)
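/* Buckets 0 through gt_types_enum_last - 1 hold objects of the
   corresponding GTY type; the three buckets above them hold objects
   of unknown type, identifiers, and strings respectively.  */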
2256
2257 struct ggc_pch_ondisk
2258 {
2259 size_t total;
2260 size_t type_totals[NUM_PCH_BUCKETS];
2261 };
2262
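/* Writer-side state for PCH generation.  D is the footer that
   ggc_pch_finish appends to the file; ORIG_BASE is the address at
   which the PCH data will be mapped when read back in; TYPE_BASES
   holds the running allocation pointer for each bucket; ALLOC_BITS
   and ALLOC_SIZE describe the allocation bitmap being built;
   START_OFFSET is the file offset at which the object data begins.  */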
2263 struct ggc_pch_data
2264 {
2265 struct ggc_pch_ondisk d;
2266 size_t base;
2267 size_t orig_base;
2268 size_t alloc_size;
2269 alloc_type *alloc_bits;
2270 size_t type_bases[NUM_PCH_BUCKETS];
2271 size_t start_offset;
2272 };
2273
2274 /* Initialize the PCH data structure. */
2275
2276 struct ggc_pch_data *
2277 init_ggc_pch (void)
2278 {
2279 return XCNEW (struct ggc_pch_data);
2280 }
2281
2282 /* Return which of the page-aligned buckets the object at X, with type
2283 TYPE, should be sorted into in the PCH. Strings will have
2284 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2285 of unknown type will also have TYPE equal to gt_types_enum_last. */
2286
2287 static int
2288 pch_bucket (void *x, enum gt_types_enum type,
2289 bool is_string)
2290 {
2291 /* Sort identifiers into their own bucket, to improve locality
2292 when searching the identifier hash table. */
2293 if (type == gt_ggc_e_14lang_tree_node
2294 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2295 return IDENTIFIER_BUCKET;
2296 else if (type == gt_types_enum_last)
2297 {
2298 if (is_string)
2299 return STRING_BUCKET;
2300 return OTHER_BUCKET;
2301 }
2302 return type;
2303 }
2304
2305 /* Add the size of object X to the size of the PCH data. */
2306
2307 void
2308 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2309 size_t size, bool is_string, enum gt_types_enum type)
2310 {
2311 /* NOTE: Right now we don't need to align up the size of any objects.
2312 Strings can be unaligned, and everything else is allocated to a
2313 MAX_ALIGNMENT boundary already. */
2314
2315 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2316 }
2317
2318 /* Return the total size of the PCH data. */
2319
2320 size_t
2321 ggc_pch_total_size (struct ggc_pch_data *d)
2322 {
2323 int i;
2324 size_t alloc_size, total_size;
2325
2326 total_size = 0;
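/* Rounding each bucket up to a page boundary means that, provided the
   base is itself page-aligned, every bucket starts on a GGC_PAGE_SIZE
   boundary and objects from different buckets never share a page.  */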
2327 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2328 {
2329 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2330 total_size += d->d.type_totals[i];
2331 }
2332 d->d.total = total_size;
2333
2334 /* Include the size of the allocation bitmap. */
2335 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2336 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2337 d->alloc_size = alloc_size;
2338
2339 return d->d.total + alloc_size;
2340 }
2341
2342 /* Set the base address for the objects in the PCH file. */
2343
2344 void
2345 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2346 {
2347 int i;
2348 size_t base = (size_t) base_;
2349
2350 d->base = d->orig_base = base;
2351 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2352 {
2353 d->type_bases[i] = base;
2354 base += d->d.type_totals[i];
2355 }
2356
2357 if (d->alloc_bits == NULL)
2358 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
2359 }
2360
2361 /* Allocate a place for object X of size SIZE in the PCH file. */
2362
2363 char *
2364 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2365 size_t size, bool is_string,
2366 enum gt_types_enum type)
2367 {
2368 size_t alloc_word, alloc_bit;
2369 char *result;
2370 int bucket = pch_bucket (x, type, is_string);
2371
2372 /* Record the start of the object in the allocation bitmap. We
2373 can't assert that the allocation bit is previously clear, because
2374 strings may violate the invariant that they are at least
2375 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
2376 should not be called for strings. */
2377 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2378 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2379 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2380 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
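/* For illustration only: if BYTES_PER_ALLOC_BIT were 8 and alloc_type
   a 32-bit word (the real values are configuration-dependent), an
   object placed 200 bytes past ORIG_BASE would set bit
   (200 / 8) % 32 == 25 of word 200 / (8 * 32) == 0.  */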
2381 d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2382
2383 /* Place the object at the current pointer for this bucket. */
2384 result = (char *) d->type_bases[bucket];
2385 d->type_bases[bucket] += size;
2386 return result;
2387 }
2388
2389 /* Prepare to write out the PCH data to file F. */
2390
2391 void
2392 ggc_pch_prepare_write (struct ggc_pch_data *d,
2393 FILE *f)
2394 {
2395 /* We seek around a lot while writing. Record where the end
2396 of the padding in the PCH file is, so that we can
2397 locate each object's offset. */
2398 d->start_offset = ftell (f);
2399 }
2400
2401 /* Write out object X of SIZE to file F. */
2402
2403 void
2404 ggc_pch_write_object (struct ggc_pch_data *d,
2405 FILE *f, void *x, void *newx,
2406 size_t size, bool is_string ATTRIBUTE_UNUSED)
2407 {
2408 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2409 fatal_error ("can%'t seek PCH file: %m");
2410
2411 if (fwrite (x, size, 1, f) != 1)
2412 fatal_error ("can%'t write PCH file: %m");
2413 }
2414
2415 void
2416 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2417 {
2418 /* Write out the allocation bitmap. */
2419 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2420 fatal_error ("can%'t seek PCH file: %m");
2421
2422 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2423 fatal_error ("can%'t write PCH file: %m");
2424
2425 /* Done with the PCH, so write out our footer. */
2426 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2427 fatal_error ("can%'t write PCH file: %m");
2428
2429 free (d->alloc_bits);
2430 free (d);
2431 }
2432
2433 /* The PCH file from F has been mapped at ADDR. Read in any
2434 additional data from the file and set up the GC state. */
2435
2436 void
2437 ggc_pch_read (FILE *f, void *addr)
2438 {
2439 struct ggc_pch_ondisk d;
2440 size_t alloc_size;
2441 struct alloc_zone *zone;
2442 struct page_entry *pch_page;
2443 char *p;
2444
2445 if (fread (&d, sizeof (d), 1, f) != 1)
2446 fatal_error ("can%'t read PCH file: %m");
2447
2448 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2449 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2450
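/* The mapped image consists of the object data followed immediately
   by the allocation bitmap, in the order ggc_pch_finish wrote them.  */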
2451 pch_zone.bytes = d.total;
2452 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2453 pch_zone.page = (char *) addr;
2454 pch_zone.end = (char *) pch_zone.alloc_bits;
2455
2456 if (GATHER_STATISTICS)
2457 {
2458 /* We've just read in a PCH file.  Every object that was allocated
2459 before the read is now dead, so prune it from the overhead list.  */
2460 zone_allocate_marks ();
2461 ggc_prune_overhead_list ();
2462 zone_free_marks ();
2463 }
2464
2465 for (zone = G.zones; zone; zone = zone->next_zone)
2466 {
2467 struct small_page_entry *page, *next_page;
2468 struct large_page_entry *large_page, *next_large_page;
2469
2470 zone->allocated = 0;
2471
2472 /* Clear the zone's free chunk list. */
2473 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2474 zone->high_free_bin = 0;
2475 zone->cached_free = NULL;
2476 zone->cached_free_size = 0;
2477
2478 /* Move all the small pages onto the free list. */
2479 for (page = zone->pages; page != NULL; page = next_page)
2480 {
2481 next_page = page->next;
2482 memset (page->alloc_bits, 0,
2483 G.small_page_overhead - PAGE_OVERHEAD);
2484 free_small_page (page);
2485 }
2486
2487 /* Discard all the large pages. */
2488 for (large_page = zone->large_pages; large_page != NULL;
2489 large_page = next_large_page)
2490 {
2491 next_large_page = large_page->next;
2492 free_large_page (large_page);
2493 }
2494
2495 zone->pages = NULL;
2496 zone->large_pages = NULL;
2497 }
2498
2499 /* Allocate the dummy page entry for the PCH, and set all pages
2500 mapped into the PCH to reference it. */
2501 pch_page = XCNEW (struct page_entry);
2502 pch_page->page = pch_zone.page;
2503 pch_page->pch_p = true;
2504
2505 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2506 set_page_table_entry (p, pch_page);
2507 }