* jvspec.c (jvgenmain_spec): Don't handle -fnew-verifier.
[gcc.git] / gcc / ggc-zone.c
1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "diagnostic-core.h"
33 #include "toplev.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "ggc-internal.h"
37 #include "timevar.h"
38 #include "params.h"
39 #include "bitmap.h"
40 #include "plugin.h"
41
42 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
43 file open. Prefer either to valloc. */
44 #ifdef HAVE_MMAP_ANON
45 # undef HAVE_MMAP_DEV_ZERO
46
47 # include <sys/mman.h>
48 # ifndef MAP_FAILED
49 # define MAP_FAILED -1
50 # endif
51 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
52 # define MAP_ANONYMOUS MAP_ANON
53 # endif
54 # define USING_MMAP
55 #endif
56
57 #ifdef HAVE_MMAP_DEV_ZERO
58 # include <sys/mman.h>
59 # ifndef MAP_FAILED
60 # define MAP_FAILED -1
61 # endif
62 # define USING_MMAP
63 #endif
64
65 #ifndef USING_MMAP
66 #error Zone collector requires mmap
67 #endif
68
69 #if (GCC_VERSION < 3001)
70 #define prefetch(X) ((void) X)
71 #define prefetchw(X) ((void) X)
72 #else
73 #define prefetch(X) __builtin_prefetch (X)
74 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
75 #endif
76
77 /* FUTURE NOTES:
78
79 If we track inter-zone pointers, we can mark single zones at a
80 time.
81
82 If we have a zone where we guarantee no inter-zone pointers, we
83 could mark that zone separately.
84
85 The garbage zone should not be marked, and we should return 1 in
86 ggc_set_mark for any object in the garbage zone, which cuts off
87 marking quickly. */
88
89 /* Strategy:
90
91 This garbage-collecting allocator segregates objects into zones.
92 It also segregates objects into "large" and "small" bins. Large
93 objects are greater than page size.
94
95 Pages for small objects are broken up into chunks. The page has
96 a bitmap which marks the start position of each chunk (whether
97 allocated or free). Free chunks are on one of the zone's free
98 lists and contain a pointer to the next free chunk. Chunks in
99 most of the free lists have a fixed size determined by the
100 free list. Chunks in the "other" sized free list have their size
101 stored right after their chain pointer.
102
103 Empty pages (of all sizes) are kept on a single page cache list,
104 and are considered first when new pages are required; they are
105 deallocated at the start of the next collection if they haven't
106 been recycled by then. The free page list is currently per-zone. */
107
108 /* Define GGC_DEBUG_LEVEL to print debugging information.
109 0: No debugging output.
110 1: GC statistics only.
111 2: Page-entry allocations/deallocations as well.
112 3: Object allocations as well.
113 4: Object marks as well. */
114 #define GGC_DEBUG_LEVEL (0)
115
116 #ifndef HOST_BITS_PER_PTR
117 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
118 #endif
119
/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  /* Next chunk on the same free list, or NULL.  */
  struct alloc_chunk *next_free;
  /* Size in bytes; valid only for chunks on the "other" (bin 0) list.  */
  unsigned int size;
};
129
130 /* The size of the fixed-size portion of a small page descriptor. */
131 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
132
133 /* The collector's idea of the page size. This must be a power of two
134 no larger than the system page size, because pages must be aligned
135 to this amount and are tracked at this granularity in the page
136 table. We choose a size at compile time for efficiency.
137
138 We could make a better guess at compile time if PAGE_SIZE is a
139 constant in system headers, and PAGE_SHIFT is defined... */
140 #define GGC_PAGE_SIZE 4096
141 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
142 #define GGC_PAGE_SHIFT 12
143
144 #if 0
145 /* Alternative definitions which use the runtime page size. */
146 #define GGC_PAGE_SIZE G.pagesize
147 #define GGC_PAGE_MASK G.page_mask
148 #define GGC_PAGE_SHIFT G.lg_pagesize
149 #endif
150
151 /* The size of a small page managed by the garbage collector. This
152 must currently be GGC_PAGE_SIZE, but with a few changes could
153 be any multiple of it to reduce certain kinds of overhead. */
154 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
155
156 /* Free bin information. These numbers may be in need of re-tuning.
157 In general, decreasing the number of free bins would seem to
158 increase the time it takes to allocate... */
159
160 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
161 today. */
162
163 #define NUM_FREE_BINS 64
164 #define FREE_BIN_DELTA MAX_ALIGNMENT
165 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
166
167 /* Allocation and marking parameters. */
168
169 /* The smallest allocatable unit to keep track of. */
170 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
171
172 /* The smallest markable unit. If we require each allocated object
173 to contain at least two allocatable units, we can use half as many
174 bits for the mark bitmap. But this adds considerable complexity
175 to sweeping. */
176 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
177
178 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
179
180 /* We use this structure to determine the alignment required for
181 allocations.
182
183 There are several things wrong with this estimation of alignment.
184
185 The maximum alignment for a structure is often less than the
186 maximum alignment for a basic data type; for instance, on some
187 targets long long must be aligned to sizeof (int) in a structure
188 and sizeof (long long) in a variable. i386-linux is one example;
189 Darwin is another (sometimes, depending on the compiler in use).
190
191 Also, long double is not included. Nothing in GCC uses long
192 double, so we assume that this is OK. On powerpc-darwin, adding
193 long double would bring the maximum alignment up to 16 bytes,
194 and until we need long double (or to vectorize compiler operations)
195 that's painfully wasteful. This will need to change, some day. */
196
197 struct max_alignment {
198 char c;
199 union {
200 HOST_WIDEST_INT i;
201 double d;
202 } u;
203 };
204
205 /* The biggest alignment required. */
206
207 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
208
209 /* Compute the smallest multiple of F that is >= X. */
210
211 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
212
213 /* Types to use for the allocation and mark bitmaps. It might be
214 a good idea to add ffsl to libiberty and use unsigned long
215 instead; that could speed us up where long is wider than int. */
216
217 typedef unsigned int alloc_type;
218 typedef unsigned int mark_type;
219 #define alloc_ffs(x) ffs(x)
220
/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  Only tracked when gathering
     statistics.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;
243
/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  The
     [1] is the pre-C99 trailing-array idiom; the real length is
     determined by G.small_page_overhead.  */
  alloc_type alloc_bits[1];
};
261
/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one;
     large pages live on a doubly-linked list, unlike small pages.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  Large pages get a single
     flag instead of a mark bitmap, since they hold one object.  */
  bool mark_p;
};
279
280 /* A two-level tree is used to look up the page-entry for a given
281 pointer. Two chunks of the pointer's bits are extracted to index
282 the first and second levels of the tree, as follows:
283
284 HOST_PAGE_SIZE_BITS
285 32 | |
286 msb +----------------+----+------+------+ lsb
287 | | |
288 PAGE_L1_BITS |
289 | |
290 PAGE_L2_BITS
291
292 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
293 pages are aligned on system page boundaries. The next most
294 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
295 index values in the lookup table, respectively.
296
297 For 32-bit architectures and the settings below, there are no
298 leftover bits. For architectures with wider pointers, the lookup
299 tree points to a list of pages, which must be scanned to find the
300 correct one. */
301
302 #define PAGE_L1_BITS (8)
303 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
304 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
305 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
306
307 #define LOOKUP_L1(p) \
308 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
309
310 #define LOOKUP_L2(p) \
311 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
312
313 #if HOST_BITS_PER_PTR <= 32
314
315 /* On 32-bit hosts, we use a two level page table, as pictured above. */
316 typedef page_entry **page_table[PAGE_L1_SIZE];
317
318 #else
319
320 /* On 64-bit hosts, we use the same two level page tables plus a linked
321 list that disambiguates the top 32-bits. There will almost always be
322 exactly one entry in the list. */
323 typedef struct page_table_chain
324 {
325 struct page_table_chain *next;
326 size_t high_bits;
327 page_entry **table[PAGE_L1_SIZE];
328 } *page_table;
329
330 #endif
331
/* The global variables.  There is exactly one object of this type,
   G, shared by all zones.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
364
/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  The definition below also creates the
   statically-allocated zone MAIN_ZONE.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;
442
443 /* Some default zones. */
444 struct alloc_zone rtl_zone;
445 struct alloc_zone tree_zone;
446 struct alloc_zone tree_id_zone;
447
/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  Doubles as the
     "does a PCH zone exist" flag throughout this file.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;
471
472 #ifdef USING_MMAP
473 static char *alloc_anon (char *, size_t, struct alloc_zone *);
474 #endif
475 static struct small_page_entry * alloc_small_page (struct alloc_zone *);
476 static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
477 static void free_chunk (char *, size_t, struct alloc_zone *);
478 static void free_small_page (struct small_page_entry *);
479 static void free_large_page (struct large_page_entry *);
480 static void release_pages (struct alloc_zone *);
481 static void sweep_pages (struct alloc_zone *);
482 static bool ggc_collect_1 (struct alloc_zone *, bool);
483 static void new_ggc_zone_1 (struct alloc_zone *, const char *);
484
485 /* Traverse the page table and find the entry for a page.
486 Die (probably) if the object wasn't allocated via GC. */
487
488 static inline page_entry *
489 lookup_page_table_entry (const void *p)
490 {
491 page_entry ***base;
492 size_t L1, L2;
493
494 #if HOST_BITS_PER_PTR <= 32
495 base = &G.lookup[0];
496 #else
497 page_table table = G.lookup;
498 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
499 while (table->high_bits != high_bits)
500 table = table->next;
501 base = &table->table[0];
502 #endif
503
504 /* Extract the level 1 and 2 indices. */
505 L1 = LOOKUP_L1 (p);
506 L2 = LOOKUP_L2 (p);
507
508 return base[L1][L2];
509 }
510
511 /* Traverse the page table and find the entry for a page.
512 Return NULL if the object wasn't allocated via the GC. */
513
514 static inline page_entry *
515 lookup_page_table_if_allocated (const void *p)
516 {
517 page_entry ***base;
518 size_t L1, L2;
519
520 #if HOST_BITS_PER_PTR <= 32
521 base = &G.lookup[0];
522 #else
523 page_table table = G.lookup;
524 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
525 while (1)
526 {
527 if (table == NULL)
528 return NULL;
529 if (table->high_bits == high_bits)
530 break;
531 table = table->next;
532 }
533 base = &table->table[0];
534 #endif
535
536 /* Extract the level 1 and 2 indices. */
537 L1 = LOOKUP_L1 (p);
538 if (! base[L1])
539 return NULL;
540
541 L2 = LOOKUP_L2 (p);
542 if (L2 >= PAGE_L2_SIZE)
543 return NULL;
544 /* We might have a page entry which does not correspond exactly to a
545 system page. */
546 if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
547 return NULL;
548
549 return base[L1][L2];
550 }
551
/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  /* Search for an existing table covering the pointer's top 32 bits.  */
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table and push it on the chain.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  /* Second-level tables are allocated lazily, zero-initialized.  */
  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
588
/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  struct page_entry *entry = lookup_page_table_entry (object);
  return entry;
}
596
597 /* Find which element of the alloc_bits array OBJECT should be
598 recorded in. */
599 static inline unsigned int
600 zone_get_object_alloc_word (const void *object)
601 {
602 return (((size_t) object & (GGC_PAGE_SIZE - 1))
603 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
604 }
605
606 /* Find which bit of the appropriate word in the alloc_bits array
607 OBJECT should be recorded in. */
608 static inline unsigned int
609 zone_get_object_alloc_bit (const void *object)
610 {
611 return (((size_t) object / BYTES_PER_ALLOC_BIT)
612 % (8 * sizeof (alloc_type)));
613 }
614
615 /* Find which element of the mark_bits array OBJECT should be recorded
616 in. */
617 static inline unsigned int
618 zone_get_object_mark_word (const void *object)
619 {
620 return (((size_t) object & (GGC_PAGE_SIZE - 1))
621 / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
622 }
623
624 /* Find which bit of the appropriate word in the mark_bits array
625 OBJECT should be recorded in. */
626 static inline unsigned int
627 zone_get_object_mark_bit (const void *object)
628 {
629 return (((size_t) object / BYTES_PER_MARK_BIT)
630 % (8 * sizeof (mark_type)));
631 }
632
633 /* Set the allocation bit corresponding to OBJECT in its page's
634 bitmap. Used to split this object from the preceding one. */
635 static inline void
636 zone_set_object_alloc_bit (const void *object)
637 {
638 struct small_page_entry *page
639 = (struct small_page_entry *) zone_get_object_page (object);
640 unsigned int start_word = zone_get_object_alloc_word (object);
641 unsigned int start_bit = zone_get_object_alloc_bit (object);
642
643 page->alloc_bits[start_word] |= 1L << start_bit;
644 }
645
646 /* Clear the allocation bit corresponding to OBJECT in PAGE's
647 bitmap. Used to coalesce this object with the preceding
648 one. */
649 static inline void
650 zone_clear_object_alloc_bit (struct small_page_entry *page,
651 const void *object)
652 {
653 unsigned int start_word = zone_get_object_alloc_word (object);
654 unsigned int start_bit = zone_get_object_alloc_bit (object);
655
656 /* Would xor be quicker? */
657 page->alloc_bits[start_word] &= ~(1L << start_bit);
658 }
659
/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  The caller passes the word/bit position one
     allocation unit past the object's start; the next set bit marks
     the start of the following object, hence this object's end.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   It cancels out.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  /* Skip over whole words with no set bit; each such word accounts
     for a full word's worth of allocation units.  The max_size check
     keeps us from running past the bitmap's end.  */
  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
709
710 /* Find the size of OBJECT on small page PAGE. */
711
712 static inline size_t
713 zone_find_object_size (struct small_page_entry *page,
714 const void *object)
715 {
716 const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
717 unsigned int start_word = zone_get_object_alloc_word (object_midptr);
718 unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
719 size_t max_size = (page->common.page + SMALL_PAGE_SIZE
720 - (const char *) object);
721
722 return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
723 max_size);
724 }
725
726 /* highest_bit assumes that alloc_type is 32 bits. */
727 extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
728
729 /* Find the highest set bit in VALUE. Returns the bit number of that
730 bit, using the same values as ffs. */
731 static inline alloc_type
732 highest_bit (alloc_type value)
733 {
734 /* This also assumes that alloc_type is unsigned. */
735 value |= value >> 1;
736 value |= value >> 2;
737 value |= value >> 4;
738 value |= value >> 8;
739 value |= value >> 16;
740 value = value ^ (value >> 1);
741 return alloc_ffs (value);
742 }
743
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
			 size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search backwards for the start of the object: each all-zero word
     means the object began at least a full word of units earlier.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}
774
/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      /* One zone-wide bitmap is carved into per-page slices below.  */
      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      /* Hand each small page its slice of the zone's bitmap.  */
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      /* The page list length must agree with the cached page count.  */
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}
817
818 /* After marking and sweeping, release the memory used for mark bits. */
819 static void
820 zone_free_marks (void)
821 {
822 struct alloc_zone *zone;
823
824 for (zone = G.zones; zone; zone = zone->next_zone)
825 if (zone->mark_bits)
826 {
827 free (zone->mark_bits);
828 zone->mark_bits = NULL;
829 }
830
831 if (pch_zone.bytes)
832 {
833 free (pch_zone.mark_bits);
834 pch_zone.mark_bits = NULL;
835 }
836 }
837
838 #ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  /* Fall back to mapping /dev/zero when anonymous mmap is missing.  */
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  /* Out of address space is fatal; there is no caller-level recovery.  */
  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
871 #endif
872
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      /* Map G.quire_size pages in one system call.  */
      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      /* The first page of the run becomes the one we return.  */
      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
929
930 /* Allocate a large page of size SIZE in ZONE. */
931
932 static struct large_page_entry *
933 alloc_large_page (size_t size, struct alloc_zone *zone)
934 {
935 struct large_page_entry *entry;
936 char *page;
937 size_t needed_size;
938
939 needed_size = size + sizeof (struct large_page_entry);
940 page = XNEWVAR (char, needed_size);
941
942 entry = (struct large_page_entry *) page;
943
944 entry->next = NULL;
945 entry->common.page = page + sizeof (struct large_page_entry);
946 entry->common.large_p = true;
947 entry->common.pch_p = false;
948 entry->common.zone = zone;
949 #ifdef GATHER_STATISTICS
950 entry->common.survived = 0;
951 #endif
952 entry->mark_p = false;
953 entry->bytes = size;
954 entry->prev = NULL;
955
956 set_page_table_entry (entry->common.page, &entry->common);
957
958 if (GGC_DEBUG_LEVEL >= 2)
959 fprintf (G.debug_file,
960 "Allocating %s large page at %p, data %p-%p\n",
961 entry->common.zone->name, (PTR) entry, entry->common.page,
962 entry->common.page + SMALL_PAGE_SIZE - 1);
963
964 return entry;
965 }
966
967
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  /* Large pages go through free_large_page instead.  */
  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
						SMALL_PAGE_SIZE));

  /* The page is cached for reuse, not unmapped; release_pages will
     return it to the system if it isn't recycled.  */
  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}
990
991 /* Release a large page that is no longer needed. */
992
993 static inline void
994 free_large_page (struct large_page_entry *entry)
995 {
996 if (GGC_DEBUG_LEVEL >= 2)
997 fprintf (G.debug_file,
998 "Deallocating %s page at %p, data %p-%p\n",
999 entry->common.zone->name, (PTR) entry,
1000 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
1001
1002 gcc_assert (entry->common.large_p);
1003
1004 set_page_table_entry (entry->common.page, NULL);
1005 free (entry);
1006 }
1007
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  The free
     list is kept in ascending memory order (see alloc_small_page), so
     runs of contiguous pages appear consecutively.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      /* Extend the run while the next entry is physically adjacent.  */
      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
1044
1045 /* Place the block at PTR of size SIZE on the free list for ZONE. */
1046
1047 static inline void
1048 free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
1049 {
1050 struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
1051 size_t bin = 0;
1052
1053 bin = SIZE_BIN_DOWN (size);
1054 gcc_assert (bin != 0);
1055 if (bin > NUM_FREE_BINS)
1056 {
1057 bin = 0;
1058 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1059 sizeof (struct
1060 alloc_chunk)));
1061 chunk->size = size;
1062 chunk->next_free = zone->free_chunks[bin];
1063 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1064 + sizeof (struct
1065 alloc_chunk),
1066 size
1067 - sizeof (struct
1068 alloc_chunk)));
1069 }
1070 else
1071 {
1072 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1073 sizeof (struct
1074 alloc_chunk *)));
1075 chunk->next_free = zone->free_chunks[bin];
1076 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1077 + sizeof (struct
1078 alloc_chunk *),
1079 size
1080 - sizeof (struct
1081 alloc_chunk *)));
1082 }
1083
1084 zone->free_chunks[bin] = chunk;
1085 if (bin > zone->high_free_bin)
1086 zone->high_free_bin = bin;
1087 if (GGC_DEBUG_LEVEL >= 3)
1088 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
1089 }
1090
1091 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1092
void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
			      MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  /* SIZE is ORIG_SIZE rounded up to the allocation granularity;
     ORIG_SIZE is kept for the Valgrind annotations and the overhead
     statistics below.  */
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      /* Take SIZE bytes off the front of the cached chunk; if
	 anything remains, mark its start as a new object so its size
	 can be found later.  */
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      /* Link the new page at the head of the zone's large-page
	 list.  */
      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      /* Cache the remainder of the page for the next allocation.  */
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
						size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}
1356
/* Convenience wrapper that forwards the caller's memory-statistics
   location information (a no-op unless GATHER_STATISTICS).  */
#define ggc_internal_alloc_zone_pass_stat(s,z) \
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)
1359
1360 void *
1361 ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
1362 struct alloc_zone *zone MEM_STAT_DECL)
1363 {
1364 void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
1365 memset (result, 0, orig_size);
1366 return result;
1367 }
1368
1369
1370 /* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
1371 for that type. */
1372
1373 void *
1374 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1375 MEM_STAT_DECL)
1376 {
1377 switch (gte)
1378 {
1379 case gt_ggc_e_14lang_tree_node:
1380 return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);
1381
1382 case gt_ggc_e_7rtx_def:
1383 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1384
1385 case gt_ggc_e_9rtvec_def:
1386 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1387
1388 default:
1389 return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
1390 }
1391 }
1392
1393 /* Normal GC allocation simply allocates into the main zone. */
1394
void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  /* Untyped allocations all go to the main zone.  */
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}
1400
/* Poison the SIZE bytes at PTR: fill them with a recognizable byte
   pattern (0xa5) and tell Valgrind they are inaccessible, discarding
   the block handles to avoid leaks.  Expands to nothing unless
   ENABLE_GC_CHECKING.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  do { \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE))); \
    memset ((PTR), 0xa5, (SIZE)); \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE))); \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif
1412
1413 /* Free the object at P. */
1414
void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  /* Poison the freed object first so stale reads are caught (no-op
     unless ENABLE_GC_CHECKING).  */
  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  /* No predecessor means this page is the list head.  */
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
	 since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}
1464
1465 /* Mark function for strings. */
1466
void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      /* PCH objects: derive P's offset within its enclosing object
	 from the PCH zone's global alloc bitmap.  */
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
					alloc_bit);
    }
  else if (entry->large_p)
    {
      /* A large page holds one object starting at the page base, so
	 the offset is just the distance from that base.  */
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      /* Small pages: consult the per-page alloc bitmap.  */
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
	 allocation boundary, so we have to update the offset to
	 account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}
1523
1524 /* If P is not marked, mark it and return false. Otherwise return true.
1525 P must have been allocated by the GC allocator; it mustn't point to
1526 static objects, stack variables, or memory allocated with malloc. */
1527
int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      /* PCH objects share one global mark bitmap covering the whole
	 PCH zone.  */
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      /* A large page holds a single object, so one flag suffices.  */
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      /* Small pages track marks in a per-page bitmap.  */
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  /* Zero means the object was not previously marked.  */
  return 0;
}
1573
1574 /* Return 1 if P has been marked, zero otherwise.
1575 P must have been allocated by the GC allocator; it mustn't point to
1576 static objects, stack variables, or memory allocated with malloc. */
1577
1578 int
1579 ggc_marked_p (const void *p)
1580 {
1581 struct page_entry *page;
1582 const char *ptr = (const char *) p;
1583
1584 page = zone_get_object_page (p);
1585
1586 if (page->pch_p)
1587 {
1588 size_t mark_word, mark_bit, offset;
1589 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1590 mark_word = offset / (8 * sizeof (mark_type));
1591 mark_bit = offset % (8 * sizeof (mark_type));
1592
1593 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1594 }
1595
1596 if (page->large_p)
1597 {
1598 struct large_page_entry *large_page
1599 = (struct large_page_entry *) page;
1600
1601 return large_page->mark_p;
1602 }
1603 else
1604 {
1605 struct small_page_entry *small_page
1606 = (struct small_page_entry *) page;
1607
1608 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1609 & (1 << zone_get_object_mark_bit (p)));
1610 }
1611 }
1612
1613 /* Return the size of the gc-able object P. */
1614
size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      /* Skip P's own alloc bit (+ 1) and scan the PCH alloc bitmap
	 forward for the next set bit, which starts the following
	 object; MAX_SIZE bounds the scan at the end of the zone.  */
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
				 max_size);
    }

  if (page->large_p)
    /* Large pages record their object's size directly.  */
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}
1639
1640 /* Initialize the ggc-zone-mmap allocator. */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  /* Keep a descriptor for /dev/zero open for anonymous mappings.  */
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  /* Debug output goes to stdout unless the disabled branch above is
     re-enabled.  */
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
	/* We have a good page, might as well hold onto it...  */
	e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	e->common.page = p;
	e->common.zone = &main_zone;
	e->next = main_zone.free_pages;
	set_page_table_entry (e->common.page, &e->common);
	main_zone.free_pages = e;
      }
    else
      {
	/* Page sizes don't line up; give the probe page back.  */
	munmap (p, G.pagesize);
      }
  }
#endif
}
1722
1723 /* Start a new GGC zone. */
1724
static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  /* NAME is stored, not copied, so it must outlive the zone.  The
     zone is linked in immediately after the list head (the main
     zone).  */
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}
1732
1733 /* Free all empty pages and objects within a page for a given zone */
1734
static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
	{
	  /* Live: clear the mark for the next cycle and keep the
	     page.  */
	  lp->mark_p = false;
	  allocated += lp->bytes;
	  lpp = &lp->next;
	}
      else
	{
	  *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  NOTE(review): only SMALL_PAGE_SIZE
	     bytes of this large page are poisoned — confirm whether
	     lp->bytes was intended.  */
	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  if (lp->prev)
	    lp->prev->next = lp->next;
	  if (lp->next)
	    lp->next->prev = lp->prev;
	  free_large_page (lp);
	}
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
	 insert them into the free lists.  Note that consolidation
	 slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      /* The bit-walk below steps both bitmaps in lockstep, so their
	 granularities must agree.  */
      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
	{
	  unsigned int i, n;
	  alloc_type alloc_word;
	  mark_type mark_word;

	  alloc_word = *alloc_word_p++;
	  mark_word = *mark_word_p++;

	  if (mark_word)
	    nomarksinpage = false;

	  /* There ought to be some way to do this without looping...  */
	  i = 0;
	  while ((n = alloc_ffs (alloc_word)) != 0)
	    {
	      /* Extend the current state for n - 1 bits.  We can't
		 shift alloc_word by n, even though it isn't used in the
		 loop, in case only the highest bit was set.  */
	      alloc_word >>= n - 1;
	      mark_word >>= n - 1;
	      object += BYTES_PER_MARK_BIT * (n - 1);

	      if (mark_word & 1)
		{
		  /* Live object.  Anything accumulated in LAST_FREE
		     is a completed free run: return it to the free
		     lists.  */
		  if (last_free)
		    {
		      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
								     object
								     - last_free));
		      poison_region (last_free, object - last_free);
		      free_chunk (last_free, object - last_free, zone);
		      last_free = NULL;
		    }
		  else
		    allocated += object - last_object;
		  last_object = object;
		}
	      else
		{
		  /* Dead object: start a new free run, or grow the
		     current one by clearing this object's alloc
		     bit (coalescing).  */
		  if (last_free == NULL)
		    {
		      last_free = object;
		      allocated += object - last_object;
		    }
		  else
		    zone_clear_object_alloc_bit (sp, object);
		}

	      /* Shift to just after the alloc bit we handled.  */
	      alloc_word >>= 1;
	      mark_word >>= 1;
	      object += BYTES_PER_MARK_BIT;

	      i += n;
	    }

	  /* Skip the remainder of the word, which had no alloc
	     bits set.  */
	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
	}
      while (object < end);

      if (nomarksinpage)
	{
	  /* Nothing on this page survived; recycle the whole page.  */
	  *spp = snext;
#ifdef ENABLE_GC_CHECKING
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
							 SMALL_PAGE_SIZE));
	  /* Poison the page.  */
	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  free_small_page (sp);
	  continue;
	}
      else if (last_free)
	{
	  /* The final free run extended to the end of the page.  */
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
							 object - last_free));
	  poison_region (last_free, object - last_free);
	  free_chunk (last_free, object - last_free, zone);
	}
      else
	allocated += object - last_object;

      spp = &sp->next;
    }

  /* Record the surviving byte count for the collection heuristics.  */
  zone->allocated = allocated;
}
1905
1906 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1907 is true if we need to mark before sweeping, false if some other
1908 zone collection has already performed marking for us. Returns true
1909 if we collected, false otherwise. */
1910
static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* Disabled debugging dump of per-bin free-list occupancy.  */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
	struct alloc_chunk *chunk;
	int n, tot;

	n = 0;
	tot = 0;
	chunk = zone->free_chunks[i];
	while (chunk)
	  {
	    n++;
	    tot += chunk->size;
	    chunk = chunk->next_free;
	  }
	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
		 i, n, tot);
      }
  }
  /* */
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  /* The return value reports whether marking was performed in this
     call chain; see ggc_collect.  */
  return true;
}
1968
1969 #ifdef GATHER_STATISTICS
1970 /* Calculate the average page survival rate in terms of number of
1971 collections. */
1972
1973 static float
1974 calculate_average_page_survival (struct alloc_zone *zone)
1975 {
1976 float count = 0.0;
1977 float survival = 0.0;
1978 struct small_page_entry *p;
1979 struct large_page_entry *lp;
1980 for (p = zone->pages; p; p = p->next)
1981 {
1982 count += 1.0;
1983 survival += p->common.survived;
1984 }
1985 for (lp = zone->large_pages; lp; lp = lp->next)
1986 {
1987 count += 1.0;
1988 survival += lp->common.survived;
1989 }
1990 return survival/count;
1991 }
1992 #endif
1993
1994 /* Top level collection routine. */
1995
void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      /* Skip the collection unless total allocation across all zones
	 has grown enough since the last GC (per the GGC_MIN_HEAPSIZE
	 and GGC_MIN_EXPAND parameters) to be worth the pause.  */
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  allocated_last_gc += zone->allocated_last_gc;
	  allocated += zone->allocated;
	}

      allocated_last_gc =
	MAX (allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
	{
	  timevar_pop (TV_GC);
	  return;
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collection is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  zone->was_collected = false;
	  /* Only the first collection in the group marks; the rest
	     reuse those marks.  */
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      float f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  gcc_assert (!dead_zone->allocated);

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}
2096
2097 /* Print allocation statistics. */
/* Scale a byte count X down to a printable magnitude: plain bytes
   below 10K, kilobytes below 10M, megabytes otherwise.  LABEL yields
   the matching unit suffix for the same thresholds.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2104
/* Print a per-zone breakdown of memory still allocated (mapped bytes,
   bytes in use, and bookkeeping overhead) to stderr, followed by the
   page-table overhead and grand totals.  With GATHER_STATISTICS, also
   dump the cumulative allocation/overhead counters per zone.  */
void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
	   "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
	continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      /* Large pages are counted exactly: each holds one object, so
	 every byte is in use.  */
      for (large_page = zone->large_pages; large_page != NULL;
	   large_page = large_page->next)
	{
	  allocated += large_page->bytes;
	  in_use += large_page->bytes;
	  overhead += sizeof (struct large_page_entry);
	}

      /* There's no easy way to walk through the small pages finding
	 used and unused objects.  Instead, add all the pages, and
	 subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      /* NOTE(review): the inclusive bound suggests free_chunks has
	 NUM_FREE_BINS + 1 entries -- confirm against its declaration.  */
      for (i = 0; i <= NUM_FREE_BINS; i++)
	{
	  struct alloc_chunk *chunk = zone->free_chunks[i];
	  while (chunk)
	    {
	      in_use -= ggc_get_size (chunk);
	      chunk = chunk->next_free;
	    }
	}

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
	       zone->name,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));

      /* The free-list subtraction above must reproduce the zone's own
	 running count of live bytes.  */
      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  /* 32-bit hosts: a single two-level lookup table.  */
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  /* 64-bit hosts: a chained list of page tables.  */
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
	pte_overhead += sizeof (*table);
	for (i = 0; i < PAGE_L1_SIZE; i++)
	  if (table->table[i])
	    pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
	table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL(total_allocated),
	   SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    /* Sum the cumulative (whole-compilation) counters across zones,
       broken down by small object-size classes.  */
    for (zone = G.zones; zone; zone = zone->next_zone)
      {
	all_overhead += zone->stats.total_overhead;
	all_allocated += zone->stats.total_allocated;

	all_allocated_under32 += zone->stats.total_allocated_under32;
	all_overhead_under32 += zone->stats.total_overhead_under32;

	all_allocated_under64 += zone->stats.total_allocated_under64;
	all_overhead_under64 += zone->stats.total_overhead_under64;

	all_allocated_under128 += zone->stats.total_allocated_under128;
	all_overhead_under128 += zone->stats.total_overhead_under128;

	fprintf (stderr, "%20s: %10lld\n",
		 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
	     all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
	     all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
	     all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
	     all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
	     all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
	     all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
	     all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	     all_allocated_under128);
  }
#endif
}
2266
2267 /* Precompiled header support. */
2268
2269 /* For precompiled headers, we sort objects based on their type. We
2270 also sort various objects into their own buckets; currently this
2271 covers strings and IDENTIFIER_NODE trees. The choices of how
2272 to sort buckets have not yet been tuned. */
2273
/* One bucket per known GTY type, plus the three special buckets
   defined below.  */
#define NUM_PCH_BUCKETS (gt_types_enum_last + 3)

/* Bucket for objects whose type is unknown.  */
#define OTHER_BUCKET (gt_types_enum_last + 0)
/* Bucket for IDENTIFIER_NODE trees, kept together for locality.  */
#define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
/* Bucket for strings.  */
#define STRING_BUCKET (gt_types_enum_last + 2)
2279
/* The footer written at the end of the PCH file and read back by
   ggc_pch_read.  */
struct ggc_pch_ondisk
{
  /* Total size in bytes of all object data in the PCH image.  */
  size_t total;
  /* Size in bytes of each bucket, rounded up to a page boundary by
     ggc_pch_total_size.  */
  size_t type_totals[NUM_PCH_BUCKETS];
};
2285
/* Bookkeeping state used while laying out and writing a PCH file.  */
struct ggc_pch_data
{
  /* The footer that will be written to disk.  */
  struct ggc_pch_ondisk d;
  /* Target base address of the mapped PCH image.  */
  size_t base;
  /* Original base address, kept while type_bases advance.  */
  size_t orig_base;
  /* Size in bytes of the allocation bitmap.  */
  size_t alloc_size;
  /* Allocation bitmap: one bit per BYTES_PER_ALLOC_BIT bytes, set at
     each object's start.  */
  alloc_type *alloc_bits;
  /* Next free address within each bucket; advanced as objects are
     placed by ggc_pch_alloc_object.  */
  size_t type_bases[NUM_PCH_BUCKETS];
  /* File offset of the start of the object data (end of padding).  */
  size_t start_offset;
};
2296
2297 /* Initialize the PCH data structure. */
2298
2299 struct ggc_pch_data *
2300 init_ggc_pch (void)
2301 {
2302 return XCNEW (struct ggc_pch_data);
2303 }
2304
2305 /* Return which of the page-aligned buckets the object at X, with type
2306 TYPE, should be sorted into in the PCH. Strings will have
2307 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2308 of unknown type will also have TYPE equal to gt_types_enum_last. */
2309
2310 static int
2311 pch_bucket (void *x, enum gt_types_enum type,
2312 bool is_string)
2313 {
2314 /* Sort identifiers into their own bucket, to improve locality
2315 when searching the identifier hash table. */
2316 if (type == gt_ggc_e_14lang_tree_node
2317 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2318 return IDENTIFIER_BUCKET;
2319 else if (type == gt_types_enum_last)
2320 {
2321 if (is_string)
2322 return STRING_BUCKET;
2323 return OTHER_BUCKET;
2324 }
2325 return type;
2326 }
2327
2328 /* Add the size of object X to the size of the PCH data. */
2329
2330 void
2331 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2332 size_t size, bool is_string, enum gt_types_enum type)
2333 {
2334 /* NOTE: Right now we don't need to align up the size of any objects.
2335 Strings can be unaligned, and everything else is allocated to a
2336 MAX_ALIGNMENT boundary already. */
2337
2338 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2339 }
2340
2341 /* Return the total size of the PCH data. */
2342
2343 size_t
2344 ggc_pch_total_size (struct ggc_pch_data *d)
2345 {
2346 int i;
2347 size_t alloc_size, total_size;
2348
2349 total_size = 0;
2350 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2351 {
2352 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2353 total_size += d->d.type_totals[i];
2354 }
2355 d->d.total = total_size;
2356
2357 /* Include the size of the allocation bitmap. */
2358 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2359 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2360 d->alloc_size = alloc_size;
2361
2362 return d->d.total + alloc_size;
2363 }
2364
2365 /* Set the base address for the objects in the PCH file. */
2366
2367 void
2368 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2369 {
2370 int i;
2371 size_t base = (size_t) base_;
2372
2373 d->base = d->orig_base = base;
2374 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2375 {
2376 d->type_bases[i] = base;
2377 base += d->d.type_totals[i];
2378 }
2379
2380 if (d->alloc_bits == NULL)
2381 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
2382 }
2383
2384 /* Allocate a place for object X of size SIZE in the PCH file. */
2385
2386 char *
2387 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2388 size_t size, bool is_string,
2389 enum gt_types_enum type)
2390 {
2391 size_t alloc_word, alloc_bit;
2392 char *result;
2393 int bucket = pch_bucket (x, type, is_string);
2394
2395 /* Record the start of the object in the allocation bitmap. We
2396 can't assert that the allocation bit is previously clear, because
2397 strings may violate the invariant that they are at least
2398 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
2399 should not be called for strings. */
2400 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2401 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2402 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2403 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2404 d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2405
2406 /* Place the object at the current pointer for this bucket. */
2407 result = (char *) d->type_bases[bucket];
2408 d->type_bases[bucket] += size;
2409 return result;
2410 }
2411
2412 /* Prepare to write out the PCH data to file F. */
2413
2414 void
2415 ggc_pch_prepare_write (struct ggc_pch_data *d,
2416 FILE *f)
2417 {
2418 /* We seek around a lot while writing. Record where the end
2419 of the padding in the PCH file is, so that we can
2420 locate each object's offset. */
2421 d->start_offset = ftell (f);
2422 }
2423
2424 /* Write out object X of SIZE to file F. */
2425
2426 void
2427 ggc_pch_write_object (struct ggc_pch_data *d,
2428 FILE *f, void *x, void *newx,
2429 size_t size, bool is_string ATTRIBUTE_UNUSED)
2430 {
2431 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2432 fatal_error ("can't seek PCH file: %m");
2433
2434 if (fwrite (x, size, 1, f) != 1)
2435 fatal_error ("can't write PCH file: %m");
2436 }
2437
/* Finish writing the PCH file F: append the allocation bitmap and the
   on-disk footer after the object data, then free the PCH data D.  */
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}
2455
2456 /* The PCH file from F has been mapped at ADDR. Read in any
2457 additional data from the file and set up the GC state. */
2458
2459 void
2460 ggc_pch_read (FILE *f, void *addr)
2461 {
2462 struct ggc_pch_ondisk d;
2463 size_t alloc_size;
2464 struct alloc_zone *zone;
2465 struct page_entry *pch_page;
2466 char *p;
2467
2468 if (fread (&d, sizeof (d), 1, f) != 1)
2469 fatal_error ("can't read PCH file: %m");
2470
2471 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2472 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2473
2474 pch_zone.bytes = d.total;
2475 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2476 pch_zone.page = (char *) addr;
2477 pch_zone.end = (char *) pch_zone.alloc_bits;
2478
2479 /* We've just read in a PCH file. So, every object that used to be
2480 allocated is now free. */
2481 #ifdef 0 && GATHER_STATISTICS
2482 zone_allocate_marks ();
2483 ggc_prune_overhead_list ();
2484 zone_free_marks ();
2485 #endif
2486
2487 for (zone = G.zones; zone; zone = zone->next_zone)
2488 {
2489 struct small_page_entry *page, *next_page;
2490 struct large_page_entry *large_page, *next_large_page;
2491
2492 zone->allocated = 0;
2493
2494 /* Clear the zone's free chunk list. */
2495 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2496 zone->high_free_bin = 0;
2497 zone->cached_free = NULL;
2498 zone->cached_free_size = 0;
2499
2500 /* Move all the small pages onto the free list. */
2501 for (page = zone->pages; page != NULL; page = next_page)
2502 {
2503 next_page = page->next;
2504 memset (page->alloc_bits, 0,
2505 G.small_page_overhead - PAGE_OVERHEAD);
2506 free_small_page (page);
2507 }
2508
2509 /* Discard all the large pages. */
2510 for (large_page = zone->large_pages; large_page != NULL;
2511 large_page = next_large_page)
2512 {
2513 next_large_page = large_page->next;
2514 free_large_page (large_page);
2515 }
2516
2517 zone->pages = NULL;
2518 zone->large_pages = NULL;
2519 }
2520
2521 /* Allocate the dummy page entry for the PCH, and set all pages
2522 mapped into the PCH to reference it. */
2523 pch_page = XCNEW (struct page_entry);
2524 pch_page->page = pch_zone.page;
2525 pch_page->pch_p = true;
2526
2527 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2528 set_page_table_entry (p, pch_page);
2529 }