1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h" /* exact_log2 */
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "ggc.h"
32 #include "ggc-internal.h"
33 #include "timevar.h"
34 #include "params.h"
35 #include "tree-flow.h"
36 #include "cfgloop.h"
37 #include "plugin.h"
38
39 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
40 file open. Prefer either to valloc. */
41 #ifdef HAVE_MMAP_ANON
42 # undef HAVE_MMAP_DEV_ZERO
43 # define USING_MMAP
44 #endif
45
46 #ifdef HAVE_MMAP_DEV_ZERO
47 # define USING_MMAP
48 #endif
49
50 #ifndef USING_MMAP
51 #define USING_MALLOC_PAGE_GROUPS
52 #endif
53
54 /* Strategy:
55
56 This garbage-collecting allocator allocates objects on one of a set
57 of pages. Each page can allocate objects of a single size only;
58 available sizes are powers of two starting at four bytes. The size
59 of an allocation request is rounded up to the next power of two
60 (`order'), and satisfied from the appropriate page.
61
62 Each page is recorded in a page-entry, which also maintains an
63 in-use bitmap of object positions on the page. This allows the
64 allocation state of a particular object to be flipped without
65 touching the page itself.
66
67 Each page-entry also has a context depth, which is used to track
68 pushing and popping of allocation contexts. Only objects allocated
69 in the current (highest-numbered) context may be collected.
70
71 Page entries are arranged in an array of singly-linked lists. The
72 array is indexed by the allocation size, in bits, of the pages on
73 it; i.e. all pages on a list allocate objects of the same size.
74 Pages are ordered on the list such that all non-full pages precede
75 all full pages, with non-full pages arranged in order of decreasing
76 context depth.
77
78 Empty pages (of all orders) are kept on a single page cache list,
79 and are considered first when new pages are required; they are
80 deallocated at the start of the next collection if they haven't
81 been recycled by then. */
82
83 /* Define GGC_DEBUG_LEVEL to print debugging information.
84 0: No debugging output.
85 1: GC statistics only.
86 2: Page-entry allocations/deallocations as well.
87 3: Object allocations as well.
88 4: Object marks as well. */
89 #define GGC_DEBUG_LEVEL (0)
90 \f
91 #ifndef HOST_BITS_PER_PTR
92 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
93 #endif
94
95 \f
96 /* A two-level tree is used to look up the page-entry for a given
97 pointer. Two chunks of the pointer's bits are extracted to index
98 the first and second levels of the tree, as follows:
99
100                                HOST_PAGE_SIZE_BITS
101                        32           |      |
102    msb +----------------+----+------+------+ lsb
103                          |    |
104                       PAGE_L1_BITS
105                                  |
106                                  PAGE_L2_BITS
107
108 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
109 pages are aligned on system page boundaries. The next most
110 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
111 index values in the lookup table, respectively.
112
113 For 32-bit architectures and the settings below, there are no
114 leftover bits. For architectures with wider pointers, the lookup
115 tree points to a list of pages, which must be scanned to find the
116 correct one. */
117
118 #define PAGE_L1_BITS (8)
119 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
120 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
121 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
122
123 #define LOOKUP_L1(p) \
124 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
125
126 #define LOOKUP_L2(p) \
127 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
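/* Illustrative example (not part of the original source): on a 32-bit
   host with 4K pages (G.lg_pagesize == 12) and PAGE_L1_BITS == 8, a
   pointer such as 0x12345678 is split as
       LOOKUP_L1 (p) == (0x12345678 >> 24) & 0xff  == 0x12
       LOOKUP_L2 (p) == (0x12345678 >> 12) & 0xfff == 0x345
   so its page entry is found at lookup[0x12][0x345]; the low 12 bits
   are just the offset within the page.  */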
128
129 /* The number of objects per allocation page, for objects on a page of
130 the indicated ORDER. */
131 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
132
133 /* The number of objects in P. */
134 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
135
136 /* The size of an object on a page of the indicated ORDER. */
137 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
138
139 /* For speed, we avoid doing a general integer divide to locate the
140 offset in the allocation bitmap, by precalculating numbers M, S
141 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
142 within the page which is evenly divisible by the object size Z. */
143 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
144 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
145 #define OFFSET_TO_BIT(OFFSET, ORDER) \
146 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
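/* Worked example (added for exposition; takes size_t to be 32 bits):
   for an order with OBJECT_SIZE == 24 == 8 * 3, compute_inverse below
   records DIV_SHIFT == 3 and DIV_MULT == 0xaaaaaaab, the multiplicative
   inverse of 3 modulo 2^32 (3 * 0xaaaaaaab == 0x200000001 == 1 mod 2^32).
   For the offset 48 of the third object on the page,
   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24,
   so the bit index is obtained without a division instruction.  */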
147
148 /* We use this structure to determine the alignment required for
149 allocations. For power-of-two sized allocations, that's not a
150 problem, but it does matter for odd-sized allocations.
151 We do not care about alignment for floating-point types. */
152
153 struct max_alignment {
154 char c;
155 union {
156 HOST_WIDEST_INT i;
157 void *p;
158 } u;
159 };
160
161 /* The biggest alignment required. */
162
163 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
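/* Note added for clarity: this is the usual offsetof trick for measuring
   an alignment; the padding the compiler inserts between C and U equals
   the alignment the union requires.  On a typical LP64 host both union
   members are 8 bytes wide, so MAX_ALIGNMENT is commonly 8, but the
   value is host-dependent.  */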
164
165
166 /* The number of extra orders, not corresponding to power-of-two sized
167 objects. */
168
169 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
170
171 #define RTL_SIZE(NSLOTS) \
172 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
173
174 #define TREE_EXP_SIZE(OPS) \
175 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
176
177 /* The Ith entry is the maximum size of an object to be stored in the
178 Ith extra order. Adding a new entry to this array is the *only*
179 thing you need to do to add a new special allocation size. */
180
181 static const size_t extra_order_size_table[] = {
182 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
183 There are a lot of structures with these sizes and explicitly
184 listing them risks orders being dropped because they changed size. */
185 MAX_ALIGNMENT * 3,
186 MAX_ALIGNMENT * 5,
187 MAX_ALIGNMENT * 6,
188 MAX_ALIGNMENT * 7,
189 MAX_ALIGNMENT * 9,
190 MAX_ALIGNMENT * 10,
191 MAX_ALIGNMENT * 11,
192 MAX_ALIGNMENT * 12,
193 MAX_ALIGNMENT * 13,
194 MAX_ALIGNMENT * 14,
195 MAX_ALIGNMENT * 15,
196 sizeof (struct tree_decl_non_common),
197 sizeof (struct tree_field_decl),
198 sizeof (struct tree_parm_decl),
199 sizeof (struct tree_var_decl),
200 sizeof (struct tree_type),
201 sizeof (struct function),
202 sizeof (struct basic_block_def),
203 sizeof (struct cgraph_node),
204 sizeof (struct loop),
205 };
206
207 /* The total number of orders. */
208
209 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
210
211 /* Compute the smallest nonnegative number which when added to X gives
212 a multiple of F. */
213
214 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
215
216 /* Compute the smallest multiple of F that is >= X. */
217
218 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
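/* Examples (illustrative): ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8)
   == 6, and 10 + 6 == 16 is the next multiple of 8.  Likewise
   ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16.  */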
219
220 /* The Ith entry is the number of objects on a page of order I. */
221
222 static unsigned objects_per_page_table[NUM_ORDERS];
223
224 /* The Ith entry is the size of an object on a page of order I. */
225
226 static size_t object_size_table[NUM_ORDERS];
227
228 /* The Ith entry is a pair of numbers (mult, shift) such that
229 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
230 for all k evenly divisible by OBJECT_SIZE(I). */
231
232 static struct
233 {
234 size_t mult;
235 unsigned int shift;
236 }
237 inverse_table[NUM_ORDERS];
238
239 /* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241 typedef struct page_entry
242 {
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
246
247 /* The previous page-entry with objects of the same size, or NULL if
248 this is the first page-entry. The PREV pointer exists solely to
249 keep the cost of ggc_free manageable. */
250 struct page_entry *prev;
251
252 /* The number of bytes allocated. (This will always be a multiple
253 of the host system page size.) */
254 size_t bytes;
255
256 /* The address at which the memory is allocated. */
257 char *page;
258
259 #ifdef USING_MALLOC_PAGE_GROUPS
260 /* Back pointer to the page group this page came from. */
261 struct page_group *group;
262 #endif
263
264 /* This is the index in the by_depth varray where this page table
265 can be found. */
266 unsigned long index_by_depth;
267
268 /* Context depth of this page. */
269 unsigned short context_depth;
270
271 /* The number of free objects remaining on this page. */
272 unsigned short num_free_objects;
273
274 /* A likely candidate for the bit position of a free object for the
275 next allocation from this page. */
276 unsigned short next_bit_hint;
277
278 /* The lg of size of objects allocated from this page. */
279 unsigned char order;
280
281 /* A bit vector indicating whether or not objects are in use. The
282 Nth bit is one if the Nth object on this page is allocated. This
283 array is dynamically sized. */
284 unsigned long in_use_p[1];
285 } page_entry;
286
287 #ifdef USING_MALLOC_PAGE_GROUPS
288 /* A page_group describes a large allocation from malloc, from which
289 we parcel out aligned pages. */
290 typedef struct page_group
291 {
292 /* A linked list of all extant page groups. */
293 struct page_group *next;
294
295 /* The address we received from malloc. */
296 char *allocation;
297
298 /* The size of the block. */
299 size_t alloc_size;
300
301 /* A bitmask of pages in use. */
302 unsigned int in_use;
303 } page_group;
304 #endif
305
306 #if HOST_BITS_PER_PTR <= 32
307
308 /* On 32-bit hosts, we use a two level page table, as pictured above. */
309 typedef page_entry **page_table[PAGE_L1_SIZE];
310
311 #else
312
313 /* On 64-bit hosts, we use the same two level page tables plus a linked
314 list that disambiguates the top 32-bits. There will almost always be
315 exactly one entry in the list. */
316 typedef struct page_table_chain
317 {
318 struct page_table_chain *next;
319 size_t high_bits;
320 page_entry **table[PAGE_L1_SIZE];
321 } *page_table;
322
323 #endif
324
325 #ifdef ENABLE_GC_ALWAYS_COLLECT
326 /* List of free objects to be verified as actually free on the
327 next collection. */
328 struct free_object
329 {
330 void *object;
331 struct free_object *next;
332 };
333 #endif
334
335 /* The rest of the global variables. */
336 static struct globals
337 {
338 /* The Nth element in this array is a page with objects of size 2^N.
339 If there are any pages with free objects, they will be at the
340 head of the list. NULL if there are no page-entries for this
341 object size. */
342 page_entry *pages[NUM_ORDERS];
343
344 /* The Nth element in this array is the last page with objects of
345 size 2^N. NULL if there are no page-entries for this object
346 size. */
347 page_entry *page_tails[NUM_ORDERS];
348
349 /* Lookup table for associating allocation pages with object addresses. */
350 page_table lookup;
351
352 /* The system's page size. */
353 size_t pagesize;
354 size_t lg_pagesize;
355
356 /* Bytes currently allocated. */
357 size_t allocated;
358
359 /* Bytes currently allocated at the end of the last collection. */
360 size_t allocated_last_gc;
361
362 /* Total amount of memory mapped. */
363 size_t bytes_mapped;
364
365 /* Bit N set if any allocations have been done at context depth N. */
366 unsigned long context_depth_allocations;
367
368 /* Bit N set if any collections have been done at context depth N. */
369 unsigned long context_depth_collections;
370
371 /* The current depth in the context stack. */
372 unsigned short context_depth;
373
374 /* A file descriptor open to /dev/zero for reading. */
375 #if defined (HAVE_MMAP_DEV_ZERO)
376 int dev_zero_fd;
377 #endif
378
379 /* A cache of free system pages. */
380 page_entry *free_pages;
381
382 #ifdef USING_MALLOC_PAGE_GROUPS
383 page_group *page_groups;
384 #endif
385
386 /* The file descriptor for debugging output. */
387 FILE *debug_file;
388
389 /* Current number of elements in use in depth below. */
390 unsigned int depth_in_use;
391
392 /* Maximum number of elements that can be used before resizing. */
393 unsigned int depth_max;
394
395 /* Each element of this array is an index in by_depth where the given
396 depth starts. This structure is indexed by that given depth we
397 are interested in. */
398 unsigned int *depth;
399
400 /* Current number of elements in use in by_depth below. */
401 unsigned int by_depth_in_use;
402
403 /* Maximum number of elements that can be used before resizing. */
404 unsigned int by_depth_max;
405
406 /* Each element of this array is a pointer to a page_entry, all
407 page_entries can be found in here by increasing depth.
408 index_by_depth in the page_entry is the index into this data
409 structure where that page_entry can be found. This is used to
410 speed up finding all page_entries at a particular depth. */
411 page_entry **by_depth;
412
413 /* Each element is a pointer to the saved in_use_p bits, if any,
414 zero otherwise. We allocate them all together, to enable a
415 better runtime data access pattern. */
416 unsigned long **save_in_use;
417
418 #ifdef ENABLE_GC_ALWAYS_COLLECT
419 /* List of free objects to be verified as actually free on the
420 next collection. */
421 struct free_object *free_object_list;
422 #endif
423
424 #ifdef GATHER_STATISTICS
425 struct
426 {
427 /* Total GC-allocated memory. */
428 unsigned long long total_allocated;
429 /* Total overhead for GC-allocated memory. */
430 unsigned long long total_overhead;
431
432 /* Total allocations and overhead for sizes less than 32, 64 and 128.
433 These sizes are interesting because they are typical cache line
434 sizes. */
435
436 unsigned long long total_allocated_under32;
437 unsigned long long total_overhead_under32;
438
439 unsigned long long total_allocated_under64;
440 unsigned long long total_overhead_under64;
441
442 unsigned long long total_allocated_under128;
443 unsigned long long total_overhead_under128;
444
445 /* The allocations for each of the allocation orders. */
446 unsigned long long total_allocated_per_order[NUM_ORDERS];
447
448 /* The overhead for each of the allocation orders. */
449 unsigned long long total_overhead_per_order[NUM_ORDERS];
450 } stats;
451 #endif
452 } G;
453
454 /* The size in bytes required to maintain a bitmap for the objects
455 on a page-entry. */
456 #define BITMAP_SIZE(Num_objects) \
457 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
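/* For instance (illustrative, assuming 64-bit longs): a page holding 128
   objects needs BITMAP_SIZE (128 + 1) == CEIL (129, 64) * sizeof (long)
   == 24 bytes; the extra bit is the one-past-the-end sentinel that
   alloc_page sets.  */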
458
459 /* Allocate pages in chunks of this size, to throttle calls to memory
460 allocation routines. The first page is used, the rest go onto the
461 free list. This cannot be larger than HOST_BITS_PER_INT for the
462 in_use bitmask for page_group. Hosts that need a different value
463 can override this by defining GGC_QUIRE_SIZE explicitly. */
464 #ifndef GGC_QUIRE_SIZE
465 # ifdef USING_MMAP
466 # define GGC_QUIRE_SIZE 256
467 # else
468 # define GGC_QUIRE_SIZE 16
469 # endif
470 #endif
471
472 /* Initial guess as to how many page table entries we might need. */
473 #define INITIAL_PTE_COUNT 128
474 \f
475 static int ggc_allocated_p (const void *);
476 static page_entry *lookup_page_table_entry (const void *);
477 static void set_page_table_entry (void *, page_entry *);
478 #ifdef USING_MMAP
479 static char *alloc_anon (char *, size_t);
480 #endif
481 #ifdef USING_MALLOC_PAGE_GROUPS
482 static size_t page_group_index (char *, char *);
483 static void set_page_group_in_use (page_group *, char *);
484 static void clear_page_group_in_use (page_group *, char *);
485 #endif
486 static struct page_entry * alloc_page (unsigned);
487 static void free_page (struct page_entry *);
488 static void release_pages (void);
489 static void clear_marks (void);
490 static void sweep_pages (void);
491 static void ggc_recalculate_in_use_p (page_entry *);
492 static void compute_inverse (unsigned);
493 static inline void adjust_depth (void);
494 static void move_ptes_to_front (int, int);
495
496 void debug_print_page_list (int);
497 static void push_depth (unsigned int);
498 static void push_by_depth (page_entry *, unsigned long *);
499
500 /* Push an entry onto G.depth. */
501
502 inline static void
503 push_depth (unsigned int i)
504 {
505 if (G.depth_in_use >= G.depth_max)
506 {
507 G.depth_max *= 2;
508 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
509 }
510 G.depth[G.depth_in_use++] = i;
511 }
512
513 /* Push an entry onto G.by_depth and G.save_in_use. */
514
515 inline static void
516 push_by_depth (page_entry *p, unsigned long *s)
517 {
518 if (G.by_depth_in_use >= G.by_depth_max)
519 {
520 G.by_depth_max *= 2;
521 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
522 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
523 G.by_depth_max);
524 }
525 G.by_depth[G.by_depth_in_use] = p;
526 G.save_in_use[G.by_depth_in_use++] = s;
527 }
528
529 #if (GCC_VERSION < 3001)
530 #define prefetch(X) ((void) X)
531 #else
532 #define prefetch(X) __builtin_prefetch (X)
533 #endif
534
535 #define save_in_use_p_i(__i) \
536 (G.save_in_use[__i])
537 #define save_in_use_p(__p) \
538 (save_in_use_p_i (__p->index_by_depth))
539
540 /* Returns nonzero if P was allocated in GC'able memory. */
541
542 static inline int
543 ggc_allocated_p (const void *p)
544 {
545 page_entry ***base;
546 size_t L1, L2;
547
548 #if HOST_BITS_PER_PTR <= 32
549 base = &G.lookup[0];
550 #else
551 page_table table = G.lookup;
552 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
553 while (1)
554 {
555 if (table == NULL)
556 return 0;
557 if (table->high_bits == high_bits)
558 break;
559 table = table->next;
560 }
561 base = &table->table[0];
562 #endif
563
564 /* Extract the level 1 and 2 indices. */
565 L1 = LOOKUP_L1 (p);
566 L2 = LOOKUP_L2 (p);
567
568 return base[L1] && base[L1][L2];
569 }
570
571 /* Traverse the page table and find the entry for a page.
572 Die (probably) if the object wasn't allocated via GC. */
573
574 static inline page_entry *
575 lookup_page_table_entry (const void *p)
576 {
577 page_entry ***base;
578 size_t L1, L2;
579
580 #if HOST_BITS_PER_PTR <= 32
581 base = &G.lookup[0];
582 #else
583 page_table table = G.lookup;
584 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
585 while (table->high_bits != high_bits)
586 table = table->next;
587 base = &table->table[0];
588 #endif
589
590 /* Extract the level 1 and 2 indices. */
591 L1 = LOOKUP_L1 (p);
592 L2 = LOOKUP_L2 (p);
593
594 return base[L1][L2];
595 }
596
597 /* Set the page table entry for a page. */
598
599 static void
600 set_page_table_entry (void *p, page_entry *entry)
601 {
602 page_entry ***base;
603 size_t L1, L2;
604
605 #if HOST_BITS_PER_PTR <= 32
606 base = &G.lookup[0];
607 #else
608 page_table table;
609 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
610 for (table = G.lookup; table; table = table->next)
611 if (table->high_bits == high_bits)
612 goto found;
613
614 /* Not found -- allocate a new table. */
615 table = XCNEW (struct page_table_chain);
616 table->next = G.lookup;
617 table->high_bits = high_bits;
618 G.lookup = table;
619 found:
620 base = &table->table[0];
621 #endif
622
623 /* Extract the level 1 and 2 indices. */
624 L1 = LOOKUP_L1 (p);
625 L2 = LOOKUP_L2 (p);
626
627 if (base[L1] == NULL)
628 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
629
630 base[L1][L2] = entry;
631 }
632
633 /* Prints the page-entry for object size ORDER, for debugging. */
634
635 DEBUG_FUNCTION void
636 debug_print_page_list (int order)
637 {
638 page_entry *p;
639 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
640 (void *) G.page_tails[order]);
641 p = G.pages[order];
642 while (p != NULL)
643 {
644 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
645 p->num_free_objects);
646 p = p->next;
647 }
648 printf ("NULL\n");
649 fflush (stdout);
650 }
651
652 #ifdef USING_MMAP
653 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
654 (if non-null). The ifdef structure here is intended to cause a
655 compile error unless exactly one of the HAVE_* is defined. */
656
657 static inline char *
658 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
659 {
660 #ifdef HAVE_MMAP_ANON
661 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
662 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
663 #endif
664 #ifdef HAVE_MMAP_DEV_ZERO
665 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
666 MAP_PRIVATE, G.dev_zero_fd, 0);
667 #endif
668
669 if (page == (char *) MAP_FAILED)
670 {
671 perror ("virtual memory exhausted");
672 exit (FATAL_EXIT_CODE);
673 }
674
675 /* Remember that we allocated this memory. */
676 G.bytes_mapped += size;
677
678 /* Pretend we don't have access to the allocated pages. We'll enable
679 access to smaller pieces of the area in ggc_internal_alloc. Discard the
680 handle to avoid handle leak. */
681 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
682
683 return page;
684 }
685 #endif
686 #ifdef USING_MALLOC_PAGE_GROUPS
687 /* Compute the index for this page into the page group. */
688
689 static inline size_t
690 page_group_index (char *allocation, char *page)
691 {
692 return (size_t) (page - allocation) >> G.lg_pagesize;
693 }
694
695 /* Set and clear the in_use bit for this page in the page group. */
696
697 static inline void
698 set_page_group_in_use (page_group *group, char *page)
699 {
700 group->in_use |= 1 << page_group_index (group->allocation, page);
701 }
702
703 static inline void
704 clear_page_group_in_use (page_group *group, char *page)
705 {
706 group->in_use &= ~(1 << page_group_index (group->allocation, page));
707 }
708 #endif
709
710 /* Allocate a new page for allocating objects of size 2^ORDER,
711 and return an entry for it. The entry is not added to the
712 appropriate page_table list. */
713
714 static inline struct page_entry *
715 alloc_page (unsigned order)
716 {
717 struct page_entry *entry, *p, **pp;
718 char *page;
719 size_t num_objects;
720 size_t bitmap_size;
721 size_t page_entry_size;
722 size_t entry_size;
723 #ifdef USING_MALLOC_PAGE_GROUPS
724 page_group *group;
725 #endif
726
727 num_objects = OBJECTS_PER_PAGE (order);
728 bitmap_size = BITMAP_SIZE (num_objects + 1);
729 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
730 entry_size = num_objects * OBJECT_SIZE (order);
731 if (entry_size < G.pagesize)
732 entry_size = G.pagesize;
733
734 entry = NULL;
735 page = NULL;
736
737 /* Check the list of free pages for one we can use. */
738 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
739 if (p->bytes == entry_size)
740 break;
741
742 if (p != NULL)
743 {
744 /* Recycle the allocated memory from this page ... */
745 *pp = p->next;
746 page = p->page;
747
748 #ifdef USING_MALLOC_PAGE_GROUPS
749 group = p->group;
750 #endif
751
752 /* ... and, if possible, the page entry itself. */
753 if (p->order == order)
754 {
755 entry = p;
756 memset (entry, 0, page_entry_size);
757 }
758 else
759 free (p);
760 }
761 #ifdef USING_MMAP
762 else if (entry_size == G.pagesize)
763 {
764 /* We want just one page. Allocate a bunch of them and put the
765 extras on the freelist. (Can only do this optimization with
766 mmap for backing store.) */
767 struct page_entry *e, *f = G.free_pages;
768 int i;
769
770 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
771
772 /* This loop counts down so that the chain will be in ascending
773 memory order. */
774 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
775 {
776 e = XCNEWVAR (struct page_entry, page_entry_size);
777 e->order = order;
778 e->bytes = G.pagesize;
779 e->page = page + (i << G.lg_pagesize);
780 e->next = f;
781 f = e;
782 }
783
784 G.free_pages = f;
785 }
786 else
787 page = alloc_anon (NULL, entry_size);
788 #endif
789 #ifdef USING_MALLOC_PAGE_GROUPS
790 else
791 {
792 /* Allocate a large block of memory and serve out the aligned
793 pages therein. This results in much less memory wastage
794 than the traditional implementation of valloc. */
795
796 char *allocation, *a, *enda;
797 size_t alloc_size, head_slop, tail_slop;
798 int multiple_pages = (entry_size == G.pagesize);
799
800 if (multiple_pages)
801 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
802 else
803 alloc_size = entry_size + G.pagesize - 1;
804 allocation = XNEWVEC (char, alloc_size);
805
806 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
807 head_slop = page - allocation;
808 if (multiple_pages)
809 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
810 else
811 tail_slop = alloc_size - entry_size - head_slop;
812 enda = allocation + alloc_size - tail_slop;
813
814 /* We allocated N pages, which are likely not aligned, leaving
815 us with N-1 usable pages. We plan to place the page_group
816 structure somewhere in the slop. */
817 if (head_slop >= sizeof (page_group))
818 group = (page_group *)page - 1;
819 else
820 {
821 /* We magically got an aligned allocation. Too bad, we have
822 to waste a page anyway. */
823 if (tail_slop == 0)
824 {
825 enda -= G.pagesize;
826 tail_slop += G.pagesize;
827 }
828 gcc_assert (tail_slop >= sizeof (page_group));
829 group = (page_group *)enda;
830 tail_slop -= sizeof (page_group);
831 }
832
833 /* Remember that we allocated this memory. */
834 group->next = G.page_groups;
835 group->allocation = allocation;
836 group->alloc_size = alloc_size;
837 group->in_use = 0;
838 G.page_groups = group;
839 G.bytes_mapped += alloc_size;
840
841 /* If we allocated multiple pages, put the rest on the free list. */
842 if (multiple_pages)
843 {
844 struct page_entry *e, *f = G.free_pages;
845 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
846 {
847 e = XCNEWVAR (struct page_entry, page_entry_size);
848 e->order = order;
849 e->bytes = G.pagesize;
850 e->page = a;
851 e->group = group;
852 e->next = f;
853 f = e;
854 }
855 G.free_pages = f;
856 }
857 }
858 #endif
859
860 if (entry == NULL)
861 entry = XCNEWVAR (struct page_entry, page_entry_size);
862
863 entry->bytes = entry_size;
864 entry->page = page;
865 entry->context_depth = G.context_depth;
866 entry->order = order;
867 entry->num_free_objects = num_objects;
868 entry->next_bit_hint = 1;
869
870 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
871
872 #ifdef USING_MALLOC_PAGE_GROUPS
873 entry->group = group;
874 set_page_group_in_use (group, page);
875 #endif
876
877 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
878 increment the hint. */
879 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
880 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
881
882 set_page_table_entry (page, entry);
883
884 if (GGC_DEBUG_LEVEL >= 2)
885 fprintf (G.debug_file,
886 "Allocating page at %p, object size=%lu, data %p-%p\n",
887 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
888 page + entry_size - 1);
889
890 return entry;
891 }
892
893 /* Adjust the size of G.depth so that no index greater than the one
894 used by the top of the G.by_depth is used. */
895
896 static inline void
897 adjust_depth (void)
898 {
899 page_entry *top;
900
901 if (G.by_depth_in_use)
902 {
903 top = G.by_depth[G.by_depth_in_use-1];
904
905 /* Peel back indices in depth that index into by_depth, so that
906 as new elements are added to by_depth, we note the indices
907 of those elements, if they are for new context depths. */
908 while (G.depth_in_use > (size_t)top->context_depth+1)
909 --G.depth_in_use;
910 }
911 }
912
913 /* For a page that is no longer needed, put it on the free page list. */
914
915 static void
916 free_page (page_entry *entry)
917 {
918 if (GGC_DEBUG_LEVEL >= 2)
919 fprintf (G.debug_file,
920 "Deallocating page at %p, data %p-%p\n", (void *) entry,
921 entry->page, entry->page + entry->bytes - 1);
922
923 /* Mark the page as inaccessible. Discard the handle to avoid handle
924 leak. */
925 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
926
927 set_page_table_entry (entry->page, NULL);
928
929 #ifdef USING_MALLOC_PAGE_GROUPS
930 clear_page_group_in_use (entry->group, entry->page);
931 #endif
932
933 if (G.by_depth_in_use > 1)
934 {
935 page_entry *top = G.by_depth[G.by_depth_in_use-1];
936 int i = entry->index_by_depth;
937
938 /* We cannot free a page from a context deeper than the current
939 one. */
940 gcc_assert (entry->context_depth == top->context_depth);
941
942 /* Put top element into freed slot. */
943 G.by_depth[i] = top;
944 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
945 top->index_by_depth = i;
946 }
947 --G.by_depth_in_use;
948
949 adjust_depth ();
950
951 entry->next = G.free_pages;
952 G.free_pages = entry;
953 }
954
955 /* Release the free page cache to the system. */
956
957 static void
958 release_pages (void)
959 {
960 #ifdef USING_MMAP
961 page_entry *p, *next;
962 char *start;
963 size_t len;
964
965 /* Gather up adjacent pages so they are unmapped together. */
966 p = G.free_pages;
967
968 while (p)
969 {
970 start = p->page;
971 next = p->next;
972 len = p->bytes;
973 free (p);
974 p = next;
975
976 while (p && p->page == start + len)
977 {
978 next = p->next;
979 len += p->bytes;
980 free (p);
981 p = next;
982 }
983
984 munmap (start, len);
985 G.bytes_mapped -= len;
986 }
987
988 G.free_pages = NULL;
989 #endif
990 #ifdef USING_MALLOC_PAGE_GROUPS
991 page_entry **pp, *p;
992 page_group **gp, *g;
993
994 /* Remove all pages from free page groups from the list. */
995 pp = &G.free_pages;
996 while ((p = *pp) != NULL)
997 if (p->group->in_use == 0)
998 {
999 *pp = p->next;
1000 free (p);
1001 }
1002 else
1003 pp = &p->next;
1004
1005 /* Remove all free page groups, and release the storage. */
1006 gp = &G.page_groups;
1007 while ((g = *gp) != NULL)
1008 if (g->in_use == 0)
1009 {
1010 *gp = g->next;
1011 G.bytes_mapped -= g->alloc_size;
1012 free (g->allocation);
1013 }
1014 else
1015 gp = &g->next;
1016 #endif
1017 }
1018
1019 /* This table provides a fast way to determine ceil(log_2(size)) for
1020 allocation requests. The minimum allocation size is eight bytes. */
1021 #define NUM_SIZE_LOOKUP 512
1022 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1023 {
1024 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1025 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1026 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1027 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1028 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1029 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1030 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1031 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1032 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1039 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1040 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1041 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1042 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1043 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1044 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1045 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1046 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1047 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1048 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1049 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1050 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1055 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1056 };
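/* Example of how the table is used (added for exposition): before
   init_ggc rewrites entries for the extra orders, a 100-byte request
   reads size_lookup[100] == 7 and is served from a 128-byte (2^7)
   object; afterwards it may map to one of the extra orders instead.
   Requests of NUM_SIZE_LOOKUP bytes or more fall back to the upward
   scan in ggc_internal_alloc_stat.  */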
1057
1058 /* Typed allocation function. Does nothing special in this collector. */
1059
1060 void *
1061 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1062 MEM_STAT_DECL)
1063 {
1064 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1065 }
1066
1067 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1068
1069 void *
1070 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1071 {
1072 size_t order, word, bit, object_offset, object_size;
1073 struct page_entry *entry;
1074 void *result;
1075
1076 if (size < NUM_SIZE_LOOKUP)
1077 {
1078 order = size_lookup[size];
1079 object_size = OBJECT_SIZE (order);
1080 }
1081 else
1082 {
1083 order = 10;
1084 while (size > (object_size = OBJECT_SIZE (order)))
1085 order++;
1086 }
1087
1088 /* If there are non-full pages for this size allocation, they are at
1089 the head of the list. */
1090 entry = G.pages[order];
1091
1092 /* If there is no page for this object size, or all pages in this
1093 context are full, allocate a new page. */
1094 if (entry == NULL || entry->num_free_objects == 0)
1095 {
1096 struct page_entry *new_entry;
1097 new_entry = alloc_page (order);
1098
1099 new_entry->index_by_depth = G.by_depth_in_use;
1100 push_by_depth (new_entry, 0);
1101
1102 /* We can skip context depths; if we do, make sure we go all the
1103 way to the new depth. */
1104 while (new_entry->context_depth >= G.depth_in_use)
1105 push_depth (G.by_depth_in_use-1);
1106
1107 /* If this is the only entry, it's also the tail. If it is not
1108 the only entry, then we must update the PREV pointer of the
1109 ENTRY (G.pages[order]) to point to our new page entry. */
1110 if (entry == NULL)
1111 G.page_tails[order] = new_entry;
1112 else
1113 entry->prev = new_entry;
1114
1115 /* Put new pages at the head of the page list. By definition the
1116 entry at the head of the list always has a NULL PREV pointer. */
1117 new_entry->next = entry;
1118 new_entry->prev = NULL;
1119 entry = new_entry;
1120 G.pages[order] = new_entry;
1121
1122 /* For a new page, we know the word and bit positions (in the
1123 in_use bitmap) of the first available object -- they're zero. */
1124 new_entry->next_bit_hint = 1;
1125 word = 0;
1126 bit = 0;
1127 object_offset = 0;
1128 }
1129 else
1130 {
1131 /* First try to use the hint left from the previous allocation
1132 to locate a clear bit in the in-use bitmap. We've made sure
1133 that the one-past-the-end bit is always set, so if the hint
1134 has run over, this test will fail. */
1135 unsigned hint = entry->next_bit_hint;
1136 word = hint / HOST_BITS_PER_LONG;
1137 bit = hint % HOST_BITS_PER_LONG;
1138
1139 /* If the hint didn't work, scan the bitmap from the beginning. */
1140 if ((entry->in_use_p[word] >> bit) & 1)
1141 {
1142 word = bit = 0;
1143 while (~entry->in_use_p[word] == 0)
1144 ++word;
1145
1146 #if GCC_VERSION >= 3004
1147 bit = __builtin_ctzl (~entry->in_use_p[word]);
1148 #else
1149 while ((entry->in_use_p[word] >> bit) & 1)
1150 ++bit;
1151 #endif
1152
1153 hint = word * HOST_BITS_PER_LONG + bit;
1154 }
1155
1156 /* Next time, try the next bit. */
1157 entry->next_bit_hint = hint + 1;
1158
1159 object_offset = hint * object_size;
1160 }
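/* Example of the arithmetic above (illustrative, assuming 64-bit longs):
   a hint of 70 means word 1, bit 6 of in_use_p, and the object starts
   at byte offset 70 * object_size within the page.  */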
1161
1162 /* Set the in-use bit. */
1163 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1164
1165 /* Keep a running total of the number of free objects. If this page
1166 fills up, we may have to move it to the end of the list if the
1167 next page isn't full. If the next page is full, all subsequent
1168 pages are full, so there's no need to move it. */
1169 if (--entry->num_free_objects == 0
1170 && entry->next != NULL
1171 && entry->next->num_free_objects > 0)
1172 {
1173 /* We have a new head for the list. */
1174 G.pages[order] = entry->next;
1175
1176 /* We are moving ENTRY to the end of the page table list.
1177 The new page at the head of the list will have NULL in
1178 its PREV field and ENTRY will have NULL in its NEXT field. */
1179 entry->next->prev = NULL;
1180 entry->next = NULL;
1181
1182 /* Append ENTRY to the tail of the list. */
1183 entry->prev = G.page_tails[order];
1184 G.page_tails[order]->next = entry;
1185 G.page_tails[order] = entry;
1186 }
1187
1188 /* Calculate the object's address. */
1189 result = entry->page + object_offset;
1190 #ifdef GATHER_STATISTICS
1191 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1192 result PASS_MEM_STAT);
1193 #endif
1194
1195 #ifdef ENABLE_GC_CHECKING
1196 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1197 exact same semantics in the presence of memory bugs, regardless of
1198 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1199 handle to avoid handle leak. */
1200 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1201
1202 /* `Poison' the entire allocated object, including any padding at
1203 the end. */
1204 memset (result, 0xaf, object_size);
1205
1206 /* Make the bytes after the end of the object inaccessible. Discard the
1207 handle to avoid handle leak. */
1208 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1209 object_size - size));
1210 #endif
1211
1212 /* Tell Valgrind that the memory is there, but its content isn't
1213 defined. The bytes at the end of the object are still marked
1214 inaccessible. */
1215 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1216
1217 /* Keep track of how many bytes are being allocated. This
1218 information is used in deciding when to collect. */
1219 G.allocated += object_size;
1220
1221 /* For timevar statistics. */
1222 timevar_ggc_mem_total += object_size;
1223
1224 #ifdef GATHER_STATISTICS
1225 {
1226 size_t overhead = object_size - size;
1227
1228 G.stats.total_overhead += overhead;
1229 G.stats.total_allocated += object_size;
1230 G.stats.total_overhead_per_order[order] += overhead;
1231 G.stats.total_allocated_per_order[order] += object_size;
1232
1233 if (size <= 32)
1234 {
1235 G.stats.total_overhead_under32 += overhead;
1236 G.stats.total_allocated_under32 += object_size;
1237 }
1238 if (size <= 64)
1239 {
1240 G.stats.total_overhead_under64 += overhead;
1241 G.stats.total_allocated_under64 += object_size;
1242 }
1243 if (size <= 128)
1244 {
1245 G.stats.total_overhead_under128 += overhead;
1246 G.stats.total_allocated_under128 += object_size;
1247 }
1248 }
1249 #endif
1250
1251 if (GGC_DEBUG_LEVEL >= 3)
1252 fprintf (G.debug_file,
1253 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1254 (unsigned long) size, (unsigned long) object_size, result,
1255 (void *) entry);
1256
1257 return result;
1258 }
1259
1260 /* Mark function for strings. */
1261
1262 void
1263 gt_ggc_m_S (const void *p)
1264 {
1265 page_entry *entry;
1266 unsigned bit, word;
1267 unsigned long mask;
1268 unsigned long offset;
1269
1270 if (!p || !ggc_allocated_p (p))
1271 return;
1272
1273 /* Look up the page on which the object is alloced. */
1274 entry = lookup_page_table_entry (p);
1275 gcc_assert (entry);
1276
1277 /* Calculate the index of the object on the page; this is its bit
1278 position in the in_use_p bitmap. Note that because a char* might
1279 point to the middle of an object, we need special code here to
1280 make sure P points to the start of an object. */
1281 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1282 if (offset)
1283 {
1284 /* Here we've seen a char* which does not point to the beginning
1285 of an allocated object. We assume it points to the middle of
1286 a STRING_CST. */
1287 gcc_assert (offset == offsetof (struct tree_string, str));
1288 p = ((const char *) p) - offset;
1289 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1290 return;
1291 }
1292
1293 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1294 word = bit / HOST_BITS_PER_LONG;
1295 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1296
1297 /* If the bit was previously set, skip it. */
1298 if (entry->in_use_p[word] & mask)
1299 return;
1300
1301 /* Otherwise set it, and decrement the free object count. */
1302 entry->in_use_p[word] |= mask;
1303 entry->num_free_objects -= 1;
1304
1305 if (GGC_DEBUG_LEVEL >= 4)
1306 fprintf (G.debug_file, "Marking %p\n", p);
1307
1308 return;
1309 }
1310
1311 /* If P is not marked, mark it and return false. Otherwise return true.
1312 P must have been allocated by the GC allocator; it mustn't point to
1313 static objects, stack variables, or memory allocated with malloc. */
1314
1315 int
1316 ggc_set_mark (const void *p)
1317 {
1318 page_entry *entry;
1319 unsigned bit, word;
1320 unsigned long mask;
1321
1322 /* Look up the page on which the object is alloced. If the object
1323 wasn't allocated by the collector, we'll probably die. */
1324 entry = lookup_page_table_entry (p);
1325 gcc_assert (entry);
1326
1327 /* Calculate the index of the object on the page; this is its bit
1328 position in the in_use_p bitmap. */
1329 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1330 word = bit / HOST_BITS_PER_LONG;
1331 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1332
1333 /* If the bit was previously set, skip it. */
1334 if (entry->in_use_p[word] & mask)
1335 return 1;
1336
1337 /* Otherwise set it, and decrement the free object count. */
1338 entry->in_use_p[word] |= mask;
1339 entry->num_free_objects -= 1;
1340
1341 if (GGC_DEBUG_LEVEL >= 4)
1342 fprintf (G.debug_file, "Marking %p\n", p);
1343
1344 return 0;
1345 }
1346
1347 /* Return 1 if P has been marked, zero otherwise.
1348 P must have been allocated by the GC allocator; it mustn't point to
1349 static objects, stack variables, or memory allocated with malloc. */
1350
1351 int
1352 ggc_marked_p (const void *p)
1353 {
1354 page_entry *entry;
1355 unsigned bit, word;
1356 unsigned long mask;
1357
1358 /* Look up the page on which the object is alloced. If the object
1359 wasn't allocated by the collector, we'll probably die. */
1360 entry = lookup_page_table_entry (p);
1361 gcc_assert (entry);
1362
1363 /* Calculate the index of the object on the page; this is its bit
1364 position in the in_use_p bitmap. */
1365 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1366 word = bit / HOST_BITS_PER_LONG;
1367 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1368
1369 return (entry->in_use_p[word] & mask) != 0;
1370 }
1371
1372 /* Return the size of the gc-able object P. */
1373
1374 size_t
1375 ggc_get_size (const void *p)
1376 {
1377 page_entry *pe = lookup_page_table_entry (p);
1378 return OBJECT_SIZE (pe->order);
1379 }
1380
1381 /* Release the memory for object P. */
1382
1383 void
1384 ggc_free (void *p)
1385 {
1386 page_entry *pe = lookup_page_table_entry (p);
1387 size_t order = pe->order;
1388 size_t size = OBJECT_SIZE (order);
1389
1390 #ifdef GATHER_STATISTICS
1391 ggc_free_overhead (p);
1392 #endif
1393
1394 if (GGC_DEBUG_LEVEL >= 3)
1395 fprintf (G.debug_file,
1396 "Freeing object, actual size=%lu, at %p on %p\n",
1397 (unsigned long) size, p, (void *) pe);
1398
1399 #ifdef ENABLE_GC_CHECKING
1400 /* Poison the data, to indicate the data is garbage. */
1401 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1402 memset (p, 0xa5, size);
1403 #endif
1404 /* Let valgrind know the object is free. */
1405 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1406
1407 #ifdef ENABLE_GC_ALWAYS_COLLECT
1408 /* In the completely-anal-checking mode, we do *not* immediately free
1409 the data, but instead verify that the data is *actually* not
1410 reachable the next time we collect. */
1411 {
1412 struct free_object *fo = XNEW (struct free_object);
1413 fo->object = p;
1414 fo->next = G.free_object_list;
1415 G.free_object_list = fo;
1416 }
1417 #else
1418 {
1419 unsigned int bit_offset, word, bit;
1420
1421 G.allocated -= size;
1422
1423 /* Mark the object not-in-use. */
1424 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1425 word = bit_offset / HOST_BITS_PER_LONG;
1426 bit = bit_offset % HOST_BITS_PER_LONG;
1427 pe->in_use_p[word] &= ~(1UL << bit);
1428
1429 if (pe->num_free_objects++ == 0)
1430 {
1431 page_entry *p, *q;
1432
1433 /* If the page is completely full, then it's supposed to
1434 be after all pages that aren't. Since we've freed one
1435 object from a page that was full, we need to move the
1436 page to the head of the list.
1437
1438 PE is the node we want to move. Q is the previous node
1439 and P is the next node in the list. */
1440 q = pe->prev;
1441 if (q && q->num_free_objects == 0)
1442 {
1443 p = pe->next;
1444
1445 q->next = p;
1446
1447 /* If PE was at the end of the list, then Q becomes the
1448 new end of the list. If PE was not the end of the
1449 list, then we need to update the PREV field for P. */
1450 if (!p)
1451 G.page_tails[order] = q;
1452 else
1453 p->prev = q;
1454
1455 /* Move PE to the head of the list. */
1456 pe->next = G.pages[order];
1457 pe->prev = NULL;
1458 G.pages[order]->prev = pe;
1459 G.pages[order] = pe;
1460 }
1461
1462 /* Reset the hint bit to point to the only free object. */
1463 pe->next_bit_hint = bit_offset;
1464 }
1465 }
1466 #endif
1467 }
1468 \f
1469 /* Subroutine of init_ggc which computes the pair of numbers used to
1470 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1471
1472 This algorithm is taken from Granlund and Montgomery's paper
1473 "Division by Invariant Integers using Multiplication"
1474 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1475 constants). */
1476
1477 static void
1478 compute_inverse (unsigned order)
1479 {
1480 size_t size, inv;
1481 unsigned int e;
1482
1483 size = OBJECT_SIZE (order);
1484 e = 0;
1485 while (size % 2 == 0)
1486 {
1487 e++;
1488 size >>= 1;
1489 }
1490
1491 inv = size;
1492 while (inv * size != 1)
1493 inv = inv * (2 - inv*size);
1494
1495 DIV_MULT (order) = inv;
1496 DIV_SHIFT (order) = e;
1497 }
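/* Worked example (not in the original source): for OBJECT_SIZE == 24,
   the first loop strips the factor 2^3, leaving size == 3 and e == 3.
   The Newton iteration then doubles the number of correct low bits each
   round; starting from inv == 3 it converges to 0xaaaaaaab on a 32-bit
   size_t, since 3 * 0xaaaaaaab == 1 (mod 2^32).  These become the
   DIV_MULT and DIV_SHIFT values consumed by OFFSET_TO_BIT.  */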
1498
1499 /* Initialize the ggc-mmap allocator. */
1500 void
1501 init_ggc (void)
1502 {
1503 unsigned order;
1504
1505 G.pagesize = getpagesize();
1506 G.lg_pagesize = exact_log2 (G.pagesize);
1507
1508 #ifdef HAVE_MMAP_DEV_ZERO
1509 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1510 if (G.dev_zero_fd == -1)
1511 internal_error ("open /dev/zero: %m");
1512 #endif
1513
1514 #if 0
1515 G.debug_file = fopen ("ggc-mmap.debug", "w");
1516 #else
1517 G.debug_file = stdout;
1518 #endif
1519
1520 #ifdef USING_MMAP
1521 /* StunOS has an amazing off-by-one error for the first mmap allocation
1522 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1523 believe, is an unaligned page allocation, which would cause us to
1524 hork badly if we tried to use it. */
1525 {
1526 char *p = alloc_anon (NULL, G.pagesize);
1527 struct page_entry *e;
1528 if ((size_t)p & (G.pagesize - 1))
1529 {
1530 /* How losing. Discard this one and try another. If we still
1531 can't get something useful, give up. */
1532
1533 p = alloc_anon (NULL, G.pagesize);
1534 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1535 }
1536
1537 /* We have a good page, might as well hold onto it... */
1538 e = XCNEW (struct page_entry);
1539 e->bytes = G.pagesize;
1540 e->page = p;
1541 e->next = G.free_pages;
1542 G.free_pages = e;
1543 }
1544 #endif
1545
1546 /* Initialize the object size table. */
1547 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1548 object_size_table[order] = (size_t) 1 << order;
1549 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1550 {
1551 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1552
1553 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1554 so that we're sure of getting aligned memory. */
1555 s = ROUND_UP (s, MAX_ALIGNMENT);
1556 object_size_table[order] = s;
1557 }
1558
1559 /* Initialize the objects-per-page and inverse tables. */
1560 for (order = 0; order < NUM_ORDERS; ++order)
1561 {
1562 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1563 if (objects_per_page_table[order] == 0)
1564 objects_per_page_table[order] = 1;
1565 compute_inverse (order);
1566 }
1567
1568 /* Reset the size_lookup array to put appropriately sized objects in
1569 the special orders. All objects bigger than the previous power
1570 of two, but no greater than the special size, should go in the
1571 new order. */
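/* Illustrative example (the exact values depend on the host): if
   MAX_ALIGNMENT is 8, the first extra order has size 24, so the loop
   below rewrites size_lookup[17] through size_lookup[24] from order 5
   (32 bytes) to that extra order; a 20-byte request then wastes 4 bytes
   instead of 12.  */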
1572 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1573 {
1574 int o;
1575 int i;
1576
1577 i = OBJECT_SIZE (order);
1578 if (i >= NUM_SIZE_LOOKUP)
1579 continue;
1580
1581 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1582 size_lookup[i] = order;
1583 }
1584
1585 G.depth_in_use = 0;
1586 G.depth_max = 10;
1587 G.depth = XNEWVEC (unsigned int, G.depth_max);
1588
1589 G.by_depth_in_use = 0;
1590 G.by_depth_max = INITIAL_PTE_COUNT;
1591 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1592 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1593 }
1594
1595 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1596 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1597
1598 static void
1599 ggc_recalculate_in_use_p (page_entry *p)
1600 {
1601 unsigned int i;
1602 size_t num_objects;
1603
1604 /* Because the past-the-end bit in in_use_p is always set, we
1605 pretend there is one additional object. */
1606 num_objects = OBJECTS_IN_PAGE (p) + 1;
1607
1608 /* Reset the free object count. */
1609 p->num_free_objects = num_objects;
1610
1611 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1612 for (i = 0;
1613 i < CEIL (BITMAP_SIZE (num_objects),
1614 sizeof (*p->in_use_p));
1615 ++i)
1616 {
1617 unsigned long j;
1618
1619 /* Something is in use if it is marked, or if it was in use in a
1620 context further down the context stack. */
1621 p->in_use_p[i] |= save_in_use_p (p)[i];
1622
1623 /* Decrement the free object count for every object allocated. */
1624 for (j = p->in_use_p[i]; j; j >>= 1)
1625 p->num_free_objects -= (j & 1);
1626 }
1627
1628 gcc_assert (p->num_free_objects < num_objects);
1629 }
1630 \f
1631 /* Unmark all objects. */
1632
1633 static void
1634 clear_marks (void)
1635 {
1636 unsigned order;
1637
1638 for (order = 2; order < NUM_ORDERS; order++)
1639 {
1640 page_entry *p;
1641
1642 for (p = G.pages[order]; p != NULL; p = p->next)
1643 {
1644 size_t num_objects = OBJECTS_IN_PAGE (p);
1645 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1646
1647 /* The data should be page-aligned. */
1648 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1649
1650 /* Pages that aren't in the topmost context are not collected;
1651 nevertheless, we need their in-use bit vectors to store GC
1652 marks. So, back them up first. */
1653 if (p->context_depth < G.context_depth)
1654 {
1655 if (! save_in_use_p (p))
1656 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1657 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1658 }
1659
1660 /* Reset the number of free objects and clear the
1661 in-use bits. These will be adjusted by mark_obj. */
1662 p->num_free_objects = num_objects;
1663 memset (p->in_use_p, 0, bitmap_size);
1664
1665 /* Make sure the one-past-the-end bit is always set. */
1666 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1667 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1668 }
1669 }
1670 }
1671
1672 /* Free all empty pages. Partially empty pages need no attention
1673 because the `mark' bit doubles as an `unused' bit. */
1674
1675 static void
1676 sweep_pages (void)
1677 {
1678 unsigned order;
1679
1680 for (order = 2; order < NUM_ORDERS; order++)
1681 {
1682 /* The last page-entry to consider, regardless of entries
1683 placed at the end of the list. */
1684 page_entry * const last = G.page_tails[order];
1685
1686 size_t num_objects;
1687 size_t live_objects;
1688 page_entry *p, *previous;
1689 int done;
1690
1691 p = G.pages[order];
1692 if (p == NULL)
1693 continue;
1694
1695 previous = NULL;
1696 do
1697 {
1698 page_entry *next = p->next;
1699
1700 /* Loop until all entries have been examined. */
1701 done = (p == last);
1702
1703 num_objects = OBJECTS_IN_PAGE (p);
1704
1705 /* Add all live objects on this page to the count of
1706 allocated memory. */
1707 live_objects = num_objects - p->num_free_objects;
1708
1709 G.allocated += OBJECT_SIZE (order) * live_objects;
1710
1711 /* Only objects on pages in the topmost context should get
1712 collected. */
1713 if (p->context_depth < G.context_depth)
1714 ;
1715
1716 /* Remove the page if it's empty. */
1717 else if (live_objects == 0)
1718 {
1719 /* If P was the first page in the list, then NEXT
1720 becomes the new first page in the list, otherwise
1721 splice P out of the forward pointers. */
1722 if (! previous)
1723 G.pages[order] = next;
1724 else
1725 previous->next = next;
1726
1727 /* Splice P out of the back pointers too. */
1728 if (next)
1729 next->prev = previous;
1730
1731 /* Are we removing the last element? */
1732 if (p == G.page_tails[order])
1733 G.page_tails[order] = previous;
1734 free_page (p);
1735 p = previous;
1736 }
1737
1738 /* If the page is full, move it to the end. */
1739 else if (p->num_free_objects == 0)
1740 {
1741 /* Don't move it if it's already at the end. */
1742 if (p != G.page_tails[order])
1743 {
1744 /* Move p to the end of the list. */
1745 p->next = NULL;
1746 p->prev = G.page_tails[order];
1747 G.page_tails[order]->next = p;
1748
1749 /* Update the tail pointer... */
1750 G.page_tails[order] = p;
1751
1752 /* ... and the head pointer, if necessary. */
1753 if (! previous)
1754 G.pages[order] = next;
1755 else
1756 previous->next = next;
1757
1758 /* And update the backpointer in NEXT if necessary. */
1759 if (next)
1760 next->prev = previous;
1761
1762 p = previous;
1763 }
1764 }
1765
1766 /* If we've fallen through to here, it's a page in the
1767 topmost context that is neither full nor empty. Such a
1768 page must precede pages at lesser context depth in the
1769 list, so move it to the head. */
1770 else if (p != G.pages[order])
1771 {
1772 previous->next = p->next;
1773
1774 /* Update the backchain in the next node if it exists. */
1775 if (p->next)
1776 p->next->prev = previous;
1777
1778 /* Move P to the head of the list. */
1779 p->next = G.pages[order];
1780 p->prev = NULL;
1781 G.pages[order]->prev = p;
1782
1783 /* Update the head pointer. */
1784 G.pages[order] = p;
1785
1786 /* Are we moving the last element? */
1787 if (G.page_tails[order] == p)
1788 G.page_tails[order] = previous;
1789 p = previous;
1790 }
1791
1792 previous = p;
1793 p = next;
1794 }
1795 while (! done);
1796
1797 /* Now, restore the in_use_p vectors for any pages from contexts
1798 other than the current one. */
1799 for (p = G.pages[order]; p; p = p->next)
1800 if (p->context_depth != G.context_depth)
1801 ggc_recalculate_in_use_p (p);
1802 }
1803 }
1804
1805 #ifdef ENABLE_GC_CHECKING
1806 /* Clobber all free objects. */
1807
1808 static void
1809 poison_pages (void)
1810 {
1811 unsigned order;
1812
1813 for (order = 2; order < NUM_ORDERS; order++)
1814 {
1815 size_t size = OBJECT_SIZE (order);
1816 page_entry *p;
1817
1818 for (p = G.pages[order]; p != NULL; p = p->next)
1819 {
1820 size_t num_objects;
1821 size_t i;
1822
1823 if (p->context_depth != G.context_depth)
1824 /* Since we don't do any collection for pages in pushed
1825 contexts, there's no need to do any poisoning. And
1826 besides, the IN_USE_P array isn't valid until we pop
1827 contexts. */
1828 continue;
1829
1830 num_objects = OBJECTS_IN_PAGE (p);
1831 for (i = 0; i < num_objects; i++)
1832 {
1833 size_t word, bit;
1834 word = i / HOST_BITS_PER_LONG;
1835 bit = i % HOST_BITS_PER_LONG;
1836 if (((p->in_use_p[word] >> bit) & 1) == 0)
1837 {
1838 char *object = p->page + i * size;
1839
1840 /* Keep poison-by-write when we expect to use Valgrind,
1841 so the exact same memory semantics is kept, in case
1842 there are memory errors. We override this request
1843 below. */
1844 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1845 size));
1846 memset (object, 0xa5, size);
1847
1848 /* Drop the handle to avoid handle leak. */
1849 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1850 }
1851 }
1852 }
1853 }
1854 }
1855 #else
1856 #define poison_pages()
1857 #endif
1858
1859 #ifdef ENABLE_GC_ALWAYS_COLLECT
1860 /* Validate that the reportedly free objects actually are. */
1861
1862 static void
1863 validate_free_objects (void)
1864 {
1865 struct free_object *f, *next, *still_free = NULL;
1866
1867 for (f = G.free_object_list; f ; f = next)
1868 {
1869 page_entry *pe = lookup_page_table_entry (f->object);
1870 size_t bit, word;
1871
1872 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1873 word = bit / HOST_BITS_PER_LONG;
1874 bit = bit % HOST_BITS_PER_LONG;
1875 next = f->next;
1876
1877 /* Make certain it isn't visible from any root. Notice that we
1878 do this check before sweep_pages merges save_in_use_p. */
1879 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1880
1881 /* If the object comes from an outer context, then retain the
1882 free_object entry, so that we can verify that the address
1883 isn't live on the stack in some outer context. */
1884 if (pe->context_depth != G.context_depth)
1885 {
1886 f->next = still_free;
1887 still_free = f;
1888 }
1889 else
1890 free (f);
1891 }
1892
1893 G.free_object_list = still_free;
1894 }
1895 #else
1896 #define validate_free_objects()
1897 #endif
1898
1899 /* Top level mark-and-sweep routine. */
1900
1901 void
1902 ggc_collect (void)
1903 {
1904 /* Avoid frequent unnecessary work by skipping collection if the
1905 total allocations haven't expanded much since the last
1906 collection. */
1907 float allocated_last_gc =
1908 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1909
1910 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1911
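/* Worked example with hypothetical values: if --param ggc-min-heapsize
   is 65536 (kilobytes) and --param ggc-min-expand is 30, a compilation
   whose last collection left 64MB live will not collect again until
   G.allocated exceeds 64MB + 19.2MB.  The defaults for both parameters
   are normally tuned at startup from the host's physical memory.  */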
1912 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1913 return;
1914
1915 timevar_push (TV_GC);
1916 if (!quiet_flag)
1917 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1918 if (GGC_DEBUG_LEVEL >= 2)
1919 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1920
1921 /* Zero the total allocated bytes. This will be recalculated in the
1922 sweep phase. */
1923 G.allocated = 0;
1924
1925 /* Release the pages we freed the last time we collected, but didn't
1926 reuse in the interim. */
1927 release_pages ();
1928
1929 /* Indicate that we've seen collections at this context depth. */
1930 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1931
1932 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
1933
1934 clear_marks ();
1935 ggc_mark_roots ();
1936 #ifdef GATHER_STATISTICS
1937 ggc_prune_overhead_list ();
1938 #endif
1939 poison_pages ();
1940 validate_free_objects ();
1941 sweep_pages ();
1942
1943 G.allocated_last_gc = G.allocated;
1944
1945 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
1946
1947 timevar_pop (TV_GC);
1948
1949 if (!quiet_flag)
1950 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1951 if (GGC_DEBUG_LEVEL >= 2)
1952 fprintf (G.debug_file, "END COLLECTING\n");
1953 }
1954
1955 /* Print allocation statistics. */
1956 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1957 ? (x) \
1958 : ((x) < 1024*1024*10 \
1959 ? (x) / 1024 \
1960 : (x) / (1024*1024))))
1961 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
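/* For instance, SCALE (2048) is 2048 with STAT_LABEL ' ' (plain bytes),
   SCALE (2048*1024) is 2048 with label 'k', and SCALE (200*1024*1024)
   is 200 with label 'M'.  */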
1962
1963 void
1964 ggc_print_statistics (void)
1965 {
1966 struct ggc_statistics stats;
1967 unsigned int i;
1968 size_t total_overhead = 0;
1969
1970 /* Clear the statistics. */
1971 memset (&stats, 0, sizeof (stats));
1972
1973 /* Make sure collection will really occur. */
1974 G.allocated_last_gc = 0;
1975
1976 /* Collect and print the statistics common across collectors. */
1977 ggc_print_common_statistics (stderr, &stats);
1978
1979 /* Release free pages so that we will not count the bytes allocated
1980 there as part of the total allocated memory. */
1981 release_pages ();
1982
1983 /* Collect some information about the various sizes of
1984 allocation. */
1985 fprintf (stderr,
1986 "Memory still allocated at the end of the compilation process\n");
1987 fprintf (stderr, "%-5s %10s %10s %10s\n",
1988 "Size", "Allocated", "Used", "Overhead");
1989 for (i = 0; i < NUM_ORDERS; ++i)
1990 {
1991 page_entry *p;
1992 size_t allocated;
1993 size_t in_use;
1994 size_t overhead;
1995
1996 /* Skip empty entries. */
1997 if (!G.pages[i])
1998 continue;
1999
2000 overhead = allocated = in_use = 0;
2001
2002 /* Figure out the total number of bytes allocated for objects of
2003 this size, and how many of them are actually in use. Also figure
2004 out how much memory the page table is using. */
2005 for (p = G.pages[i]; p; p = p->next)
2006 {
2007 allocated += p->bytes;
2008 in_use +=
2009 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2010
2011 overhead += (sizeof (page_entry) - sizeof (long)
2012 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
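/* The sizeof (long) subtraction compensates for the one-element
   in_use_p[] array already counted in sizeof (page_entry); BITMAP_SIZE
   then supplies the full bitmap, including that first word.  */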
2013 }
2014 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2015 (unsigned long) OBJECT_SIZE (i),
2016 SCALE (allocated), STAT_LABEL (allocated),
2017 SCALE (in_use), STAT_LABEL (in_use),
2018 SCALE (overhead), STAT_LABEL (overhead));
2019 total_overhead += overhead;
2020 }
2021 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2022 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2023 SCALE (G.allocated), STAT_LABEL(G.allocated),
2024 SCALE (total_overhead), STAT_LABEL (total_overhead));
2025
2026 #ifdef GATHER_STATISTICS
2027 {
2028 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2029
2030 fprintf (stderr, "Total Overhead: %10lld\n",
2031 G.stats.total_overhead);
2032 fprintf (stderr, "Total Allocated: %10lld\n",
2033 G.stats.total_allocated);
2034
2035 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2036 G.stats.total_overhead_under32);
2037 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2038 G.stats.total_allocated_under32);
2039 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2040 G.stats.total_overhead_under64);
2041 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2042 G.stats.total_allocated_under64);
2043 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2044 G.stats.total_overhead_under128);
2045 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2046 G.stats.total_allocated_under128);
2047
2048 for (i = 0; i < NUM_ORDERS; i++)
2049 if (G.stats.total_allocated_per_order[i])
2050 {
2051 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2052 (unsigned long) OBJECT_SIZE (i),
2053 G.stats.total_overhead_per_order[i]);
2054 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2055 (unsigned long) OBJECT_SIZE (i),
2056 G.stats.total_allocated_per_order[i]);
2057 }
2058 }
2059 #endif
2060 }
2061 \f
2062 struct ggc_pch_ondisk
2063 {
2064 unsigned totals[NUM_ORDERS];
2065 };
2066
2067 struct ggc_pch_data
2068 {
2069 struct ggc_pch_ondisk d;
2070 size_t base[NUM_ORDERS];
2071 size_t written[NUM_ORDERS];
2072 };
2073
2074 struct ggc_pch_data *
2075 init_ggc_pch (void)
2076 {
2077 return XCNEW (struct ggc_pch_data);
2078 }
2079
2080 void
2081 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2082 size_t size, bool is_string ATTRIBUTE_UNUSED,
2083 enum gt_types_enum type ATTRIBUTE_UNUSED)
2084 {
2085 unsigned order;
2086
2087 if (size < NUM_SIZE_LOOKUP)
2088 order = size_lookup[size];
2089 else
2090 {
2091 order = 10;
2092 while (size > OBJECT_SIZE (order))
2093 order++;
2094 }
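/* E.g. a small request (say 72 bytes) is mapped directly through
   size_lookup[], while a request beyond NUM_SIZE_LOOKUP (say a
   hypothetical 20000 bytes) falls through to the loop above, which
   scans upward from order 10 until OBJECT_SIZE (order) can hold it.  */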
2095
2096 d->d.totals[order]++;
2097 }
2098
2099 size_t
2100 ggc_pch_total_size (struct ggc_pch_data *d)
2101 {
2102 size_t a = 0;
2103 unsigned i;
2104
2105 for (i = 0; i < NUM_ORDERS; i++)
2106 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2107 return a;
2108 }
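/* Illustration with made-up numbers: 33 objects of a 64-byte order
   occupy 2112 bytes, which ROUND_UP pads to a single 4096-byte page;
   the padded per-order totals are then summed.  */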
2109
2110 void
2111 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2112 {
2113 size_t a = (size_t) base;
2114 unsigned i;
2115
2116 for (i = 0; i < NUM_ORDERS; i++)
2117 {
2118 d->base[i] = a;
2119 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2120 }
2121 }
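/* Continuing the made-up numbers: if BASE is 0x1000000 and the first
   order needs 4096 bytes while the second needs 8192, then base[0] is
   0x1000000, base[1] is 0x1001000 and base[2] is 0x1003000; each order
   receives a page-rounded slice of the PCH address range.  */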
2122
2123
2124 char *
2125 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2126 size_t size, bool is_string ATTRIBUTE_UNUSED,
2127 enum gt_types_enum type ATTRIBUTE_UNUSED)
2128 {
2129 unsigned order;
2130 char *result;
2131
2132 if (size < NUM_SIZE_LOOKUP)
2133 order = size_lookup[size];
2134 else
2135 {
2136 order = 10;
2137 while (size > OBJECT_SIZE (order))
2138 order++;
2139 }
2140
2141 result = (char *) d->base[order];
2142 d->base[order] += OBJECT_SIZE (order);
2143 return result;
2144 }
2145
2146 void
2147 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2148 FILE *f ATTRIBUTE_UNUSED)
2149 {
2150 /* Nothing to do. */
2151 }
2152
2153 void
2154 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2155 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2156 size_t size, bool is_string ATTRIBUTE_UNUSED)
2157 {
2158 unsigned order;
2159 static const char emptyBytes[256] = { 0 };
2160
2161 if (size < NUM_SIZE_LOOKUP)
2162 order = size_lookup[size];
2163 else
2164 {
2165 order = 10;
2166 while (size > OBJECT_SIZE (order))
2167 order++;
2168 }
2169
2170 if (fwrite (x, size, 1, f) != 1)
2171 fatal_error ("can%'t write PCH file: %m");
2172
2173 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2174 object out to OBJECT_SIZE(order). This happens for strings. */
2175
2176 if (size != OBJECT_SIZE (order))
2177 {
2178 unsigned padding = OBJECT_SIZE(order) - size;
2179
2180 /* To speed small writes, we use a nulled-out array that's larger
2181 than most padding requests as the source for our null bytes. This
2182 permits us to do the padding with fwrite() rather than fseek(), and
2183 limits the chance the OS may try to flush any outstanding writes. */
2184 if (padding <= sizeof(emptyBytes))
2185 {
2186 if (fwrite (emptyBytes, 1, padding, f) != padding)
2187 fatal_error ("can%'t write PCH file");
2188 }
2189 else
2190 {
2191 /* Larger than our buffer? Just default to fseek. */
2192 if (fseek (f, padding, SEEK_CUR) != 0)
2193 fatal_error ("can%'t write PCH file");
2194 }
2195 }
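/* Example: a 13-byte string stored in a 16-byte order needs 3 bytes of
   padding, taken from emptyBytes via fwrite; a hypothetical padding
   request larger than 256 bytes would be skipped with fseek instead.  */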
2196
2197 d->written[order]++;
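/* Once the last object of this order has been written, seek past the
   ROUND_UP slack so the next order starts on a page boundary, matching
   the layout computed by ggc_pch_total_size and ggc_pch_this_base.  */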
2198 if (d->written[order] == d->d.totals[order]
2199 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2200 G.pagesize),
2201 SEEK_CUR) != 0)
2202 fatal_error ("can%'t write PCH file: %m");
2203 }
2204
2205 void
2206 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2207 {
2208 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2209 fatal_error ("can%'t write PCH file: %m");
2210 free (d);
2211 }
2212
2213 /* Move the PCH PTE entries that were just added to the end of by_depth
2214 to the front. */
2215
2216 static void
2217 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2218 {
2219 unsigned i;
2220
2221 /* First, we swap the new entries to the front of the varrays. */
2222 page_entry **new_by_depth;
2223 unsigned long **new_save_in_use;
2224
2225 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2226 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2227
2228 memcpy (&new_by_depth[0],
2229 &G.by_depth[count_old_page_tables],
2230 count_new_page_tables * sizeof (void *));
2231 memcpy (&new_by_depth[count_new_page_tables],
2232 &G.by_depth[0],
2233 count_old_page_tables * sizeof (void *));
2234 memcpy (&new_save_in_use[0],
2235 &G.save_in_use[count_old_page_tables],
2236 count_new_page_tables * sizeof (void *));
2237 memcpy (&new_save_in_use[count_new_page_tables],
2238 &G.save_in_use[0],
2239 count_old_page_tables * sizeof (void *));
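/* Schematically, with O == count_old_page_tables and
   N == count_new_page_tables:

     by_depth before:  [ old_0 ... old_{O-1} | new_0 ... new_{N-1} ]
     by_depth after:   [ new_0 ... new_{N-1} | old_0 ... old_{O-1} ]

   and save_in_use is permuted in exactly the same way.  */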
2240
2241 free (G.by_depth);
2242 free (G.save_in_use);
2243
2244 G.by_depth = new_by_depth;
2245 G.save_in_use = new_save_in_use;
2246
2247 /* Now update all the index_by_depth fields. */
2248 for (i = G.by_depth_in_use; i > 0; --i)
2249 {
2250 page_entry *p = G.by_depth[i-1];
2251 p->index_by_depth = i-1;
2252 }
2253
2254 /* And last, we update the depth pointers in G.depth. The first
2255 entry is already 0, and context 0 entries always start at index
2256 0, so there is nothing to update in the first slot. We need a
2257 second slot only if we have old PTEs; if we do, they start at
2258 index count_new_page_tables. */
2259 if (count_old_page_tables)
2260 push_depth (count_new_page_tables);
2261 }
2262
2263 void
2264 ggc_pch_read (FILE *f, void *addr)
2265 {
2266 struct ggc_pch_ondisk d;
2267 unsigned i;
2268 char *offs = (char *) addr;
2269 unsigned long count_old_page_tables;
2270 unsigned long count_new_page_tables;
2271
2272 count_old_page_tables = G.by_depth_in_use;
2273
2274 /* We've just read in a PCH file. So, every object that used to be
2275 allocated is now free. */
2276 clear_marks ();
2277 #ifdef ENABLE_GC_CHECKING
2278 poison_pages ();
2279 #endif
2280 /* Since we free all the allocated objects, the free list becomes
2281 useless. Validate it now, which will also clear it. */
2282 validate_free_objects ();
2283
2284 /* No object read from a PCH file should ever be freed. So, set the
2285 context depth to 1, and set the depth of all the currently-allocated
2286 pages to be 1 too. PCH pages will have depth 0. */
2287 gcc_assert (!G.context_depth);
2288 G.context_depth = 1;
2289 for (i = 0; i < NUM_ORDERS; i++)
2290 {
2291 page_entry *p;
2292 for (p = G.pages[i]; p != NULL; p = p->next)
2293 p->context_depth = G.context_depth;
2294 }
2295
2296 /* Allocate the appropriate page-table entries for the pages read from
2297 the PCH file. */
2298 if (fread (&d, sizeof (d), 1, f) != 1)
2299 fatal_error ("can%'t read PCH file: %m");
2300
2301 for (i = 0; i < NUM_ORDERS; i++)
2302 {
2303 struct page_entry *entry;
2304 char *pte;
2305 size_t bytes;
2306 size_t num_objs;
2307 size_t j;
2308
2309 if (d.totals[i] == 0)
2310 continue;
2311
2312 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2313 num_objs = bytes / OBJECT_SIZE (i);
2314 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2315 - sizeof (long)
2316 + BITMAP_SIZE (num_objs + 1)));
2317 entry->bytes = bytes;
2318 entry->page = offs;
2319 entry->context_depth = 0;
2320 offs += bytes;
2321 entry->num_free_objects = 0;
2322 entry->order = i;
2323
2324 for (j = 0;
2325 j + HOST_BITS_PER_LONG <= num_objs + 1;
2326 j += HOST_BITS_PER_LONG)
2327 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2328 for (; j < num_objs + 1; j++)
2329 entry->in_use_p[j / HOST_BITS_PER_LONG]
2330 |= 1L << (j % HOST_BITS_PER_LONG);
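/* For instance (hypothetically), with 64-bit longs and 100 objects the
   first loop fills word 0 of in_use_p wholesale and the second sets
   bits 64..100 one at a time, so every PCH object (plus the extra
   trailing bit) is marked in use.  */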
2331
2332 for (pte = entry->page;
2333 pte < entry->page + entry->bytes;
2334 pte += G.pagesize)
2335 set_page_table_entry (pte, entry);
2336
2337 if (G.page_tails[i] != NULL)
2338 G.page_tails[i]->next = entry;
2339 else
2340 G.pages[i] = entry;
2341 G.page_tails[i] = entry;
2342
2343 /* We start off by just adding all the new information to the
2344 end of the varrays; later, we will move the new information
2345 to the front of the varrays, as the PCH page tables are at
2346 context 0. */
2347 push_by_depth (entry, 0);
2348 }
2349
2350 /* Now, we update the various data structures that speed page table
2351 handling. */
2352 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2353
2354 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2355
2356 /* Update the statistics. */
2357 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2358 }
2359
2360 struct alloc_zone
2361 {
2362 int dummy;
2363 };
2364
2365 struct alloc_zone rtl_zone;
2366 struct alloc_zone tree_zone;
2367 struct alloc_zone tree_id_zone;
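/* These zone objects are empty placeholders; the page collector has no
   notion of allocation zones, presumably keeping them here only so that
   code written against the zone collector's interface still links.  */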