1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "diagnostic-core.h"
33 #include "flags.h"
34 #include "ggc.h"
35 #include "ggc-internal.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39 #include "plugin.h"
40
41 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
42 file open. Prefer either to valloc. */
43 #ifdef HAVE_MMAP_ANON
44 # undef HAVE_MMAP_DEV_ZERO
45 # define USING_MMAP
46 #endif
47
48 #ifdef HAVE_MMAP_DEV_ZERO
49 # define USING_MMAP
50 #endif
51
52 #ifndef USING_MMAP
53 #error Zone collector requires mmap
54 #endif
55
56 #if (GCC_VERSION < 3001)
57 #define prefetch(X) ((void) X)
58 #define prefetchw(X) ((void) X)
59 #else
60 #define prefetch(X) __builtin_prefetch (X)
61 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
62 #endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE   4096
#define GGC_PAGE_MASK   (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT  12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE   G.pagesize
#define GGC_PAGE_MASK   G.page_mask
#define GGC_PAGE_SHIFT  G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS           64
#define FREE_BIN_DELTA          MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)     ((SIZE) / FREE_BIN_DELTA)
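
/* For example, on a host where MAX_ALIGNMENT is 8, FREE_BIN_DELTA is
   8 bytes, so a 24-byte chunk lands in bin SIZE_BIN_DOWN (24) == 3,
   and any chunk larger than NUM_FREE_BINS * 8 == 512 bytes goes to
   the variable-sized "other" bin 0.  */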

/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT     MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT      BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD     (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
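
/* For example, with a 4-byte mark_type and 8-byte mark units, one
   mark word covers 8 * 8 * 4 == 256 bytes, so a 4096-byte page needs
   16 mark words.  */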

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
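
/* For example, on a typical LP64 host both HOST_WIDEST_INT and double
   require 8-byte alignment, so the union begins 8 bytes after the
   leading char and MAX_ALIGNMENT is 8.  */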

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
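
/* For example, ROUND_UP (10, 8) is 16 and ROUND_UP (16, 8) is 16.  */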

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS    (8)
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE    ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE    ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
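
/* For example, with GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8 as
   defined above (hence PAGE_L2_BITS == 12), the 32-bit address
   0xabcd1234 yields L1 == 0xab and L2 == 0xcd1; the low twelve bits
   0x234 are the offset within the page.  */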

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slot I, for 1 <= I <= NUM_FREE_BINS,
     holds chunks of size I * FREE_BIN_DELTA.  All other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes up to 32, 64 and 128
       bytes.  These sizes are interesting because they are typical
       cache line sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}
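
/* For example, with 4-byte bitmap words and 8-byte alloc/mark units,
   each word covers 8 * 4 * 8 == 256 bytes of the page, so an object
   at page offset 0x130 (304) uses word 304 / 256 == 1 and bit
   (304 / 8) % 32 == 6.  */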

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
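
/* The shifts above smear the highest set bit into every position
   below it, and the final xor then clears everything except that
   highest bit.  For example, 0x0500 smears to 0x07ff, the xor leaves
   0x0400, and alloc_ffs returns 11.  */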

/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}
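
/* For example, if P falls on bit 5 of a word and the nearest set
   allocation bit at or below it is bit 3 of the same word, the scan
   above computes 5 - (4 - 1) == 2 units, i.e. P points
   2 * BYTES_PER_ALLOC_BIT bytes past the object's start.  */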

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
#ifdef ENABLE_CHECKING
      gcc_checking_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + size - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                        (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS
                        (ptr + sizeof (struct alloc_chunk),
                         size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                        (chunk, sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS
                        (ptr + sizeof (struct alloc_chunk *),
                         size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (requested_size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (requested_size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  return size;
}
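
/* For example, with MAX_ALIGNMENT == 8, a request for 0 bytes rounds
   up to 8 and a request for 13 bytes rounds up to 16.  */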

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                              MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = ggc_round_alloc_size (orig_size);

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */
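
  /* In order, the sources tried below are: the cached free chunk, a
     free bin of exactly the right size, a larger free bin or the
     variable-sized "other" bin (splitting off the remainder), a
     dedicated large page, and finally a fresh small page.  */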

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}

#define ggc_internal_alloc_zone_pass_stat(s,z)          \
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
                                      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}


/* Allocate a chunk of SIZE bytes in an appropriate zone for objects
   of type GTE.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)                                      \
  do {                                                                \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));                                     \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
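  /* For example, with 4K system pages this is 16, so each alloc_anon
     call in alloc_small_page maps 16 * GGC_PAGE_SIZE == 64K.  */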

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      {
        munmap (p, G.pagesize);
      }
  }
#endif
}

/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, lp->bytes);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                                        (last_free, object - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                            (last_free, object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}
1901
/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise (at present we always collect).  */
1906
1907 static bool
1908 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1909 {
#if 0
  /* Disabled debugging dump: report the number of free chunks and the
     total free bytes sitting in each of this zone's free-list bins.  */
  {
1913 int i;
1914 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1915 {
1916 struct alloc_chunk *chunk;
1917 int n, tot;
1918
1919 n = 0;
1920 tot = 0;
1921 chunk = zone->free_chunks[i];
1922 while (chunk)
1923 {
1924 n++;
1925 tot += chunk->size;
1926 chunk = chunk->next_free;
1927 }
1928 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1929 i, n, tot);
1930 }
1931 }
1933 #endif
1934
1935 if (!quiet_flag)
1936 fprintf (stderr, " {%s GC %luk -> ",
1937 zone->name, (unsigned long) zone->allocated / 1024);
1938
1939 /* Zero the total allocated bytes. This will be recalculated in the
1940 sweep phase. */
1941 zone->allocated = 0;
1942
1943 /* Release the pages we freed the last time we collected, but didn't
1944 reuse in the interim. */
1945 release_pages (zone);
1946
1947 if (need_marking)
1948 {
1949 zone_allocate_marks ();
1950 ggc_mark_roots ();
1951 #ifdef GATHER_STATISTICS
1952 ggc_prune_overhead_list ();
1953 #endif
1954 }
1955
1956 sweep_pages (zone);
1957 zone->was_collected = true;
1958 zone->allocated_last_gc = zone->allocated;
1959
1960 if (!quiet_flag)
1961 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1962 return true;
1963 }
1964
1965 #ifdef GATHER_STATISTICS
1966 /* Calculate the average page survival rate in terms of number of
1967 collections. */
1968
1969 static float
1970 calculate_average_page_survival (struct alloc_zone *zone)
1971 {
1972 float count = 0.0;
1973 float survival = 0.0;
1974 struct small_page_entry *p;
1975 struct large_page_entry *lp;
1976 for (p = zone->pages; p; p = p->next)
1977 {
1978 count += 1.0;
1979 survival += p->common.survived;
1980 }
1981 for (lp = zone->large_pages; lp; lp = lp->next)
1982 {
1983 count += 1.0;
1984 survival += lp->common.survived;
1985 }
  /* Guard against a zone that currently has no pages at all.  */
  return count > 0 ? survival / count : 0.0f;
1987 }
1988 #endif
1989
1990 /* Top level collection routine. */
1991
1992 void
1993 ggc_collect (void)
1994 {
1995 struct alloc_zone *zone;
1996 bool marked = false;
1997
1998 timevar_push (TV_GC);
1999
2000 if (!ggc_force_collect)
2001 {
2002 float allocated_last_gc = 0, allocated = 0, min_expand;
2003
2004 for (zone = G.zones; zone; zone = zone->next_zone)
2005 {
2006 allocated_last_gc += zone->allocated_last_gc;
2007 allocated += zone->allocated;
2008 }
2009
2010 allocated_last_gc =
2011 MAX (allocated_last_gc,
2012 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2013 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2014
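      /* For example (figures illustrative, not the configured
         defaults): with GGC_MIN_HEAPSIZE = 4096 (kilobytes) and
         GGC_MIN_EXPAND = 30, collection is skipped until more than
         4MB + 30% = roughly 5.2MB is live.  */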
2015 if (allocated < allocated_last_gc + min_expand)
2016 {
2017 timevar_pop (TV_GC);
2018 return;
2019 }
2020 }
2021
2022 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2023
2024 /* Start by possibly collecting the main zone. */
2025 main_zone.was_collected = false;
2026 marked |= ggc_collect_1 (&main_zone, true);
2027
2028 /* In order to keep the number of collections down, we don't
2029 collect other zones unless we are collecting the main zone. This
2030 gives us roughly the same number of collections as we used to
2031 have with the old gc. The number of collection is important
2032 because our main slowdown (according to profiling) is now in
2033 marking. So if we mark twice as often as we used to, we'll be
2034 twice as slow. Hopefully we'll avoid this cost when we mark
2035 zone-at-a-time. */
2036 /* NOTE drow/2004-07-28: We now always collect the main zone, but
2037 keep this code in case the heuristics are further refined. */
2038
2039 if (main_zone.was_collected)
2040 {
2041 struct alloc_zone *zone;
2042
2043 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
2044 {
2045 zone->was_collected = false;
2046 marked |= ggc_collect_1 (zone, !marked);
2047 }
2048 }
2049
2050 #ifdef GATHER_STATISTICS
2051 /* Print page survival stats, if someone wants them. */
2052 if (GGC_DEBUG_LEVEL >= 2)
2053 {
2054 for (zone = G.zones; zone; zone = zone->next_zone)
2055 {
2056 if (zone->was_collected)
2057 {
2058 float f = calculate_average_page_survival (zone);
2059 printf ("Average page survival in zone `%s' is %f\n",
2060 zone->name, f);
2061 }
2062 }
2063 }
2064 #endif
2065
2066 if (marked)
2067 zone_free_marks ();
2068
2069 /* Free dead zones. */
2070 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
2071 {
2072 if (zone->next_zone->dead)
2073 {
2074 struct alloc_zone *dead_zone = zone->next_zone;
2075
          if (!quiet_flag)
            fprintf (stderr, "Zone `%s' is dead and will be freed.\n",
                     dead_zone->name);
2077
2078 /* The zone must be empty. */
2079 gcc_assert (!dead_zone->allocated);
2080
2081 /* Unchain the dead zone, release all its pages and free it. */
2082 zone->next_zone = zone->next_zone->next_zone;
2083 release_pages (dead_zone);
2084 free (dead_zone);
2085 }
2086 }
2087
2088 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2089
2090 timevar_pop (TV_GC);
2091 }
2092
2093 /* Print allocation statistics. */
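/* SCALE and LABEL cooperate to print a byte count in human-readable
   units: values under 10k are shown in bytes, under 10M in kilobytes,
   and anything larger in megabytes, with LABEL supplying the matching
   unit suffix (' ', 'k' or 'M').  */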
2094 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2095 ? (x) \
2096 : ((x) < 1024*1024*10 \
2097 ? (x) / 1024 \
2098 : (x) / (1024*1024))))
2099 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2100
2101 void
2102 ggc_print_statistics (void)
2103 {
2104 struct alloc_zone *zone;
2105 struct ggc_statistics stats;
2106 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
2107 size_t pte_overhead, i;
2108
2109 /* Clear the statistics. */
2110 memset (&stats, 0, sizeof (stats));
2111
2112 /* Make sure collection will really occur. */
2113 ggc_force_collect = true;
2114
2115 /* Collect and print the statistics common across collectors. */
2116 ggc_print_common_statistics (stderr, &stats);
2117
2118 ggc_force_collect = false;
2119
2120 /* Release free pages so that we will not count the bytes allocated
2121 there as part of the total allocated memory. */
2122 for (zone = G.zones; zone; zone = zone->next_zone)
2123 release_pages (zone);
2124
2125 /* Collect some information about the various sizes of
2126 allocation. */
2127 fprintf (stderr,
2128 "Memory still allocated at the end of the compilation process\n");
2129
2130 fprintf (stderr, "%20s %10s %10s %10s\n",
2131 "Zone", "Allocated", "Used", "Overhead");
2132 for (zone = G.zones; zone; zone = zone->next_zone)
2133 {
2134 struct large_page_entry *large_page;
2135 size_t overhead, allocated, in_use;
2136
2137 /* Skip empty zones. */
2138 if (!zone->pages && !zone->large_pages)
2139 continue;
2140
2141 allocated = in_use = 0;
2142
2143 overhead = sizeof (struct alloc_zone);
2144
2145 for (large_page = zone->large_pages; large_page != NULL;
2146 large_page = large_page->next)
2147 {
2148 allocated += large_page->bytes;
2149 in_use += large_page->bytes;
2150 overhead += sizeof (struct large_page_entry);
2151 }
2152
2153 /* There's no easy way to walk through the small pages finding
2154 used and unused objects. Instead, add all the pages, and
2155 subtract out the free list. */
2156
2157 allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2158 in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2159 overhead += G.small_page_overhead * zone->n_small_pages;
2160
2161 for (i = 0; i <= NUM_FREE_BINS; i++)
2162 {
2163 struct alloc_chunk *chunk = zone->free_chunks[i];
2164 while (chunk)
2165 {
2166 in_use -= ggc_get_size (chunk);
2167 chunk = chunk->next_free;
2168 }
2169 }
2170
2171 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2172 zone->name,
2173 SCALE (allocated), LABEL (allocated),
2174 SCALE (in_use), LABEL (in_use),
2175 SCALE (overhead), LABEL (overhead));
2176
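      /* The total computed by walking the pages must agree with the
         running figure maintained by the sweep phase.  */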
2177 gcc_assert (in_use == zone->allocated);
2178
2179 total_overhead += overhead;
2180 total_allocated += zone->allocated;
2181 total_bytes_mapped += zone->bytes_mapped;
2182 }
2183
2184 /* Count the size of the page table as best we can. */
2185 #if HOST_BITS_PER_PTR <= 32
2186 pte_overhead = sizeof (G.lookup);
2187 for (i = 0; i < PAGE_L1_SIZE; i++)
2188 if (G.lookup[i])
2189 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2190 #else
2191 {
2192 page_table table = G.lookup;
2193 pte_overhead = 0;
2194 while (table)
2195 {
2196 pte_overhead += sizeof (*table);
2197 for (i = 0; i < PAGE_L1_SIZE; i++)
2198 if (table->table[i])
2199 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2200 table = table->next;
2201 }
2202 }
2203 #endif
2204 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2205 "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2206 total_overhead += pte_overhead;
2207
2208 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2209 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
2211 SCALE (total_overhead), LABEL (total_overhead));
2212
2213 #ifdef GATHER_STATISTICS
2214 {
2215 unsigned long long all_overhead = 0, all_allocated = 0;
2216 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2217 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2218 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2219
2220 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2221
2222 for (zone = G.zones; zone; zone = zone->next_zone)
2223 {
2224 all_overhead += zone->stats.total_overhead;
2225 all_allocated += zone->stats.total_allocated;
2226
2227 all_allocated_under32 += zone->stats.total_allocated_under32;
2228 all_overhead_under32 += zone->stats.total_overhead_under32;
2229
2230 all_allocated_under64 += zone->stats.total_allocated_under64;
2231 all_overhead_under64 += zone->stats.total_overhead_under64;
2232
2233 all_allocated_under128 += zone->stats.total_allocated_under128;
2234 all_overhead_under128 += zone->stats.total_overhead_under128;
2235
      fprintf (stderr, "%20s: %10llu\n",
               zone->name, (unsigned long long) zone->stats.total_allocated);
2238 }
2239
2240 fprintf (stderr, "\n");
2241
  fprintf (stderr, "Total Overhead: %10llu\n",
           all_overhead);
  fprintf (stderr, "Total Allocated: %10llu\n",
           all_allocated);

  fprintf (stderr, "Total Overhead under 32B: %10llu\n",
           all_overhead_under32);
  fprintf (stderr, "Total Allocated under 32B: %10llu\n",
           all_allocated_under32);
  fprintf (stderr, "Total Overhead under 64B: %10llu\n",
           all_overhead_under64);
  fprintf (stderr, "Total Allocated under 64B: %10llu\n",
           all_allocated_under64);
  fprintf (stderr, "Total Overhead under 128B: %10llu\n",
           all_overhead_under128);
  fprintf (stderr, "Total Allocated under 128B: %10llu\n",
           all_allocated_under128);
2259 }
2260 #endif
2261 }
2262
2263 /* Precompiled header support. */
2264
2265 /* For precompiled headers, we sort objects based on their type. We
2266 also sort various objects into their own buckets; currently this
2267 covers strings and IDENTIFIER_NODE trees. The choices of how
2268 to sort buckets have not yet been tuned. */
2269
2270 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2271
2272 #define OTHER_BUCKET (gt_types_enum_last + 0)
2273 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2274 #define STRING_BUCKET (gt_types_enum_last + 2)
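/* The resulting PCH image is a sequence of page-aligned regions: one
   per known GTY type, followed by one each for untyped objects,
   identifiers, and strings (see ggc_pch_total_size below).  */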
2275
2276 struct ggc_pch_ondisk
2277 {
2278 size_t total;
2279 size_t type_totals[NUM_PCH_BUCKETS];
2280 };
2281
2282 struct ggc_pch_data
2283 {
2284 struct ggc_pch_ondisk d;
2285 size_t base;
2286 size_t orig_base;
2287 size_t alloc_size;
2288 alloc_type *alloc_bits;
2289 size_t type_bases[NUM_PCH_BUCKETS];
2290 size_t start_offset;
2291 };
2292
2293 /* Initialize the PCH data structure. */
2294
2295 struct ggc_pch_data *
2296 init_ggc_pch (void)
2297 {
2298 return XCNEW (struct ggc_pch_data);
2299 }
2300
2301 /* Return which of the page-aligned buckets the object at X, with type
2302 TYPE, should be sorted into in the PCH. Strings will have
2303 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2304 of unknown type will also have TYPE equal to gt_types_enum_last. */
2305
2306 static int
2307 pch_bucket (void *x, enum gt_types_enum type,
2308 bool is_string)
2309 {
2310 /* Sort identifiers into their own bucket, to improve locality
2311 when searching the identifier hash table. */
2312 if (type == gt_ggc_e_14lang_tree_node
2313 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2314 return IDENTIFIER_BUCKET;
2315 else if (type == gt_types_enum_last)
2316 {
2317 if (is_string)
2318 return STRING_BUCKET;
2319 return OTHER_BUCKET;
2320 }
2321 return type;
2322 }
2323
2324 /* Add the size of object X to the size of the PCH data. */
2325
2326 void
ggc_pch_count_object (struct ggc_pch_data *d, void *x,
2328 size_t size, bool is_string, enum gt_types_enum type)
2329 {
2330 /* NOTE: Right now we don't need to align up the size of any objects.
2331 Strings can be unaligned, and everything else is allocated to a
2332 MAX_ALIGNMENT boundary already. */
2333
2334 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2335 }
2336
2337 /* Return the total size of the PCH data. */
2338
2339 size_t
2340 ggc_pch_total_size (struct ggc_pch_data *d)
2341 {
2342 int i;
2343 size_t alloc_size, total_size;
2344
2345 total_size = 0;
2346 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2347 {
2348 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2349 total_size += d->d.type_totals[i];
2350 }
2351 d->d.total = total_size;
2352
  /* Include the size of the allocation bitmap: one bit covers
     BYTES_PER_ALLOC_BIT bytes of object data, so one bitmap byte
     covers 8 * BYTES_PER_ALLOC_BIT bytes.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2355 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2356 d->alloc_size = alloc_size;
2357
2358 return d->d.total + alloc_size;
2359 }
2360
2361 /* Set the base address for the objects in the PCH file. */
2362
2363 void
2364 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2365 {
2366 int i;
2367 size_t base = (size_t) base_;
2368
2369 d->base = d->orig_base = base;
2370 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2371 {
2372 d->type_bases[i] = base;
2373 base += d->d.type_totals[i];
2374 }
2375
2376 if (d->alloc_bits == NULL)
2377 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
2378 }
2379
2380 /* Allocate a place for object X of size SIZE in the PCH file. */
2381
2382 char *
2383 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2384 size_t size, bool is_string,
2385 enum gt_types_enum type)
2386 {
2387 size_t alloc_word, alloc_bit;
2388 char *result;
2389 int bucket = pch_bucket (x, type, is_string);
2390
2391 /* Record the start of the object in the allocation bitmap. We
2392 can't assert that the allocation bit is previously clear, because
2393 strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless, since ggc_get_size
     should not be called for strings.  */
2396 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2397 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2398 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2399 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= (alloc_type) 1 << alloc_bit;
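  /* Worked example (illustrative, assuming a 32-bit alloc_type and
     BYTES_PER_ALLOC_BIT == 8): an object at offset 0x1240 from
     ORIG_BASE corresponds to allocation bit 0x1240 / 8 == 584,
     i.e. bit 584 % 32 == 8 of word 584 / 32 == 18.  */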
2401
2402 /* Place the object at the current pointer for this bucket. */
2403 result = (char *) d->type_bases[bucket];
2404 d->type_bases[bucket] += size;
2405 return result;
2406 }
2407
2408 /* Prepare to write out the PCH data to file F. */
2409
2410 void
2411 ggc_pch_prepare_write (struct ggc_pch_data *d,
2412 FILE *f)
2413 {
  /* We seek around a lot while writing.  Record where the object
     data begins in the PCH file (i.e. the end of any leading
     padding), so that we can compute each object's offset.  */
2417 d->start_offset = ftell (f);
2418 }
2419
2420 /* Write out object X of SIZE to file F. */
2421
2422 void
2423 ggc_pch_write_object (struct ggc_pch_data *d,
2424 FILE *f, void *x, void *newx,
2425 size_t size, bool is_string ATTRIBUTE_UNUSED)
2426 {
2427 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2428 fatal_error ("can%'t seek PCH file: %m");
2429
2430 if (fwrite (x, size, 1, f) != 1)
2431 fatal_error ("can%'t write PCH file: %m");
2432 }
2433
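/* Finish writing the PCH data to file F: append the allocation bitmap
   and the on-disk footer after the object data, then release the PCH
   bookkeeping structure D.  */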
2434 void
2435 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2436 {
2437 /* Write out the allocation bitmap. */
2438 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2439 fatal_error ("can%'t seek PCH file: %m");
2440
2441 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2442 fatal_error ("can%'t write PCH file: %m");
2443
2444 /* Done with the PCH, so write out our footer. */
2445 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2446 fatal_error ("can%'t write PCH file: %m");
2447
2448 free (d->alloc_bits);
2449 free (d);
2450 }
2451
2452 /* The PCH file from F has been mapped at ADDR. Read in any
2453 additional data from the file and set up the GC state. */
2454
2455 void
2456 ggc_pch_read (FILE *f, void *addr)
2457 {
2458 struct ggc_pch_ondisk d;
2459 size_t alloc_size;
2460 struct alloc_zone *zone;
2461 struct page_entry *pch_page;
2462 char *p;
2463
2464 if (fread (&d, sizeof (d), 1, f) != 1)
2465 fatal_error ("can%'t read PCH file: %m");
2466
2467 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2468 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2469
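  /* The mapped image is the object data followed immediately by the
     allocation bitmap, so the bitmap's address also marks the end of
     the object data.  */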
2470 pch_zone.bytes = d.total;
2471 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2472 pch_zone.page = (char *) addr;
2473 pch_zone.end = (char *) pch_zone.alloc_bits;
2474
2475 /* We've just read in a PCH file. So, every object that used to be
2476 allocated is now free. */
2477 #ifdef GATHER_STATISTICS
2478 zone_allocate_marks ();
2479 ggc_prune_overhead_list ();
2480 zone_free_marks ();
2481 #endif
2482
2483 for (zone = G.zones; zone; zone = zone->next_zone)
2484 {
2485 struct small_page_entry *page, *next_page;
2486 struct large_page_entry *large_page, *next_large_page;
2487
2488 zone->allocated = 0;
2489
2490 /* Clear the zone's free chunk list. */
2491 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2492 zone->high_free_bin = 0;
2493 zone->cached_free = NULL;
2494 zone->cached_free_size = 0;
2495
2496 /* Move all the small pages onto the free list. */
2497 for (page = zone->pages; page != NULL; page = next_page)
2498 {
2499 next_page = page->next;
2500 memset (page->alloc_bits, 0,
2501 G.small_page_overhead - PAGE_OVERHEAD);
2502 free_small_page (page);
2503 }
2504
2505 /* Discard all the large pages. */
2506 for (large_page = zone->large_pages; large_page != NULL;
2507 large_page = next_large_page)
2508 {
2509 next_large_page = large_page->next;
2510 free_large_page (large_page);
2511 }
2512
2513 zone->pages = NULL;
2514 zone->large_pages = NULL;
2515 }
2516
2517 /* Allocate the dummy page entry for the PCH, and set all pages
2518 mapped into the PCH to reference it. */
2519 pch_page = XCNEW (struct page_entry);
2520 pch_page->page = pch_zone.page;
2521 pch_page->pch_p = true;
2522
2523 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2524 set_page_table_entry (p, pch_page);
2525 }