1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "timevar.h"
32 #include "params.h"
33 #ifdef ENABLE_VALGRIND_CHECKING
34 # ifdef HAVE_VALGRIND_MEMCHECK_H
35 # include <valgrind/memcheck.h>
36 # elif defined HAVE_MEMCHECK_H
37 # include <memcheck.h>
38 # else
39 # include <valgrind.h>
40 # endif
41 #else
42 /* Avoid #ifdef:s when we can help it. */
43 #define VALGRIND_DISCARD(x)
44 #endif
45
46 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
47 file open. Prefer either to valloc. */
48 #ifdef HAVE_MMAP_ANON
49 # undef HAVE_MMAP_DEV_ZERO
50
51 # include <sys/mman.h>
52 # ifndef MAP_FAILED
53 # define MAP_FAILED -1
54 # endif
55 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
56 # define MAP_ANONYMOUS MAP_ANON
57 # endif
58 # define USING_MMAP
59
60 #endif
61
62 #ifdef HAVE_MMAP_DEV_ZERO
63
64 # include <sys/mman.h>
65 # ifndef MAP_FAILED
66 # define MAP_FAILED -1
67 # endif
68 # define USING_MMAP
69
70 #endif
71
72 #ifndef USING_MMAP
73 #define USING_MALLOC_PAGE_GROUPS
74 #endif
75
76 /* Strategy:
77
78 This garbage-collecting allocator allocates objects on one of a set
79 of pages. Each page can allocate objects of a single size only;
80 available sizes are powers of two starting at eight bytes. The size
81 of an allocation request is rounded up to the next power of two
82 (`order'), and satisfied from the appropriate page.
83
84 Each page is recorded in a page-entry, which also maintains an
85 in-use bitmap of object positions on the page. This allows the
86 allocation state of a particular object to be flipped without
87 touching the page itself.
88
89 Each page-entry also has a context depth, which is used to track
90 pushing and popping of allocation contexts. Only objects allocated
91 in the current (highest-numbered) context may be collected.
92
93 Page entries are arranged in an array of singly-linked lists. The
94 array is indexed by the allocation size, in bits, of the pages on
95 it; i.e. all pages on a list allocate objects of the same size.
96 Pages are ordered on the list such that all non-full pages precede
97 all full pages, with non-full pages arranged in order of decreasing
98 context depth.
99
100 Empty pages (of all orders) are kept on a single page cache list,
101 and are considered first when new pages are required; they are
102 deallocated at the start of the next collection if they haven't
103 been recycled by then. */
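
/* Illustrative usage sketch (not part of the collector, never compiled):
   a request is rounded up to an order and carved out of a page dedicated
   to that order, and a later collection merely clears in-use bits for
   objects that were not marked from a root.  The 20-byte request and the
   32-byte order below are only an example; the order actually chosen
   depends on the extra-order table set up in init_ggc.  */
#if 0
static void
ggc_usage_sketch (void)
{
  void *p = ggc_alloc (20);   /* Served from a page of (e.g.) 32-byte objects. */
  ggc_collect ();             /* Unmarked objects become free slots again. */
}
#endif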
104
105 /* Define GGC_DEBUG_LEVEL to print debugging information.
106 0: No debugging output.
107 1: GC statistics only.
108 2: Page-entry allocations/deallocations as well.
109 3: Object allocations as well.
110 4: Object marks as well. */
111 #define GGC_DEBUG_LEVEL (0)
112 \f
113 #ifndef HOST_BITS_PER_PTR
114 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
115 #endif
116
117 \f
118 /* A two-level tree is used to look up the page-entry for a given
119 pointer. Two chunks of the pointer's bits are extracted to index
120 the first and second levels of the tree, as follows:
121
122                                    HOST_PAGE_SIZE_BITS
123                            32           |      |
124        msb +----------------+----+------+------+ lsb
125                             |    |      |
126                          PAGE_L1_BITS   |
127                                  |      |
128                                PAGE_L2_BITS
129
130 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
131 pages are aligned on system page boundaries. The next most
132 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
133 index values in the lookup table, respectively.
134
135 For 32-bit architectures and the settings below, there are no
136 leftover bits. For architectures with wider pointers, the lookup
137 tree points to a list of pages, which must be scanned to find the
138 correct one. */
139
140 #define PAGE_L1_BITS (8)
141 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
142 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
143 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
144
145 #define LOOKUP_L1(p) \
146 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
147
148 #define LOOKUP_L2(p) \
149 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
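
/* Worked example, assuming a 32-bit host and 4096-byte pages (so
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12): for the address
   0x12345678,

     LOOKUP_L1 (p) == (0x12345678 >> 24) & 0xff  == 0x12
     LOOKUP_L2 (p) == (0x12345678 >> 12) & 0xfff == 0x345

   and the low twelve bits (0x678) are the offset within the page,
   which the lookup ignores.  Other page sizes move these boundaries.  */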
150
151 /* The number of objects per allocation page, for objects on a page of
152 the indicated ORDER. */
153 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
154
155 /* The number of objects in P. */
156 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
157
158 /* The size of an object on a page of the indicated ORDER. */
159 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160
161 /* For speed, we avoid doing a general integer divide to locate the
162 offset in the allocation bitmap, by precalculating numbers M, S
163 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
164 within the page which is evenly divisible by the object size Z. */
165 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
166 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
167 #define OFFSET_TO_BIT(OFFSET, ORDER) \
168 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
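
/* Worked example for a hypothetical 24-byte order (such an order need
   not exist in this configuration): 24 == 8 * 3, so DIV_SHIFT would be
   3 and DIV_MULT the inverse of 3 modulo 2^32, 0xaaaaaaab.  For the
   third object on such a page, OFFSET == 48 and

     (48 * 0xaaaaaaab) mod 2^32 == 16,  and 16 >> 3 == 2 == 48 / 24,

   so OFFSET_TO_BIT recovers the object index without a division.  */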
169
170 /* The number of extra orders, not corresponding to power-of-two sized
171 objects. */
172
173 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
174
175 #define RTL_SIZE(NSLOTS) \
176 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
177
178 #define TREE_EXP_SIZE(OPS) \
179 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
180
181 /* The Ith entry is the maximum size of an object to be stored in the
182 Ith extra order. Adding a new entry to this array is the *only*
183 thing you need to do to add a new special allocation size. */
184
185 static const size_t extra_order_size_table[] = {
186 sizeof (struct tree_decl),
187 sizeof (struct tree_list),
188 TREE_EXP_SIZE (2),
189 RTL_SIZE (2), /* MEM, PLUS, etc. */
190 RTL_SIZE (9), /* INSN */
191 };
192
193 /* The total number of orders. */
194
195 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
196
197 /* We use this structure to determine the alignment required for
198 allocations. For power-of-two sized allocations, that's not a
199 problem, but it does matter for odd-sized allocations. */
200
201 struct max_alignment {
202 char c;
203 union {
204 HOST_WIDEST_INT i;
205 long double d;
206 } u;
207 };
208
209 /* The biggest alignment required. */
210
211 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
212
213 /* Compute the smallest nonnegative number which when added to X gives
214 a multiple of F. */
215
216 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
217
218 /* Compute the smallest multiple of F that is >= X. */
219
220 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
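
/* For example, ROUND_UP_VALUE (10, 8) is 7 - ((7 + 10) % 8) == 6, and
   ROUND_UP (10, 8) is CEIL (10, 8) * 8 == 16 == 10 + 6.  */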
221
222 /* The Ith entry is the number of objects on a page of order I. */
223
224 static unsigned objects_per_page_table[NUM_ORDERS];
225
226 /* The Ith entry is the size of an object on a page of order I. */
227
228 static size_t object_size_table[NUM_ORDERS];
229
230 /* The Ith entry is a pair of numbers (mult, shift) such that
231 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
232 for all k evenly divisible by OBJECT_SIZE(I). */
233
234 static struct
235 {
236 size_t mult;
237 unsigned int shift;
238 }
239 inverse_table[NUM_ORDERS];
240
241 /* A page_entry records the status of an allocation page. This
242 structure is dynamically sized to fit the bitmap in_use_p. */
243 typedef struct page_entry
244 {
245 /* The next page-entry with objects of the same size, or NULL if
246 this is the last page-entry. */
247 struct page_entry *next;
248
249 /* The number of bytes allocated. (This will always be a multiple
250 of the host system page size.) */
251 size_t bytes;
252
253 /* The address at which the memory is allocated. */
254 char *page;
255
256 #ifdef USING_MALLOC_PAGE_GROUPS
257 /* Back pointer to the page group this page came from. */
258 struct page_group *group;
259 #endif
260
261 /* This is the index in the by_depth varray where this page table
262 can be found. */
263 unsigned long index_by_depth;
264
265 /* Context depth of this page. */
266 unsigned short context_depth;
267
268 /* The number of free objects remaining on this page. */
269 unsigned short num_free_objects;
270
271 /* A likely candidate for the bit position of a free object for the
272 next allocation from this page. */
273 unsigned short next_bit_hint;
274
275 /* The lg of size of objects allocated from this page. */
276 unsigned char order;
277
278 /* A bit vector indicating whether or not objects are in use. The
279 Nth bit is one if the Nth object on this page is allocated. This
280 array is dynamically sized. */
281 unsigned long in_use_p[1];
282 } page_entry;
283
284 #ifdef USING_MALLOC_PAGE_GROUPS
285 /* A page_group describes a large allocation from malloc, from which
286 we parcel out aligned pages. */
287 typedef struct page_group
288 {
289 /* A linked list of all extant page groups. */
290 struct page_group *next;
291
292 /* The address we received from malloc. */
293 char *allocation;
294
295 /* The size of the block. */
296 size_t alloc_size;
297
298 /* A bitmask of pages in use. */
299 unsigned int in_use;
300 } page_group;
301 #endif
302
303 #if HOST_BITS_PER_PTR <= 32
304
305 /* On 32-bit hosts, we use a two level page table, as pictured above. */
306 typedef page_entry **page_table[PAGE_L1_SIZE];
307
308 #else
309
310 /* On 64-bit hosts, we use the same two level page tables plus a linked
311 list that disambiguates the top 32-bits. There will almost always be
312 exactly one entry in the list. */
313 typedef struct page_table_chain
314 {
315 struct page_table_chain *next;
316 size_t high_bits;
317 page_entry **table[PAGE_L1_SIZE];
318 } *page_table;
319
320 #endif
321
322 /* The rest of the global variables. */
323 static struct globals
324 {
325 /* The Nth element in this array is a page with objects of size 2^N.
326 If there are any pages with free objects, they will be at the
327 head of the list. NULL if there are no page-entries for this
328 object size. */
329 page_entry *pages[NUM_ORDERS];
330
331 /* The Nth element in this array is the last page with objects of
332 size 2^N. NULL if there are no page-entries for this object
333 size. */
334 page_entry *page_tails[NUM_ORDERS];
335
336 /* Lookup table for associating allocation pages with object addresses. */
337 page_table lookup;
338
339 /* The system's page size. */
340 size_t pagesize;
341 size_t lg_pagesize;
342
343 /* Bytes currently allocated. */
344 size_t allocated;
345
346 /* Bytes currently allocated at the end of the last collection. */
347 size_t allocated_last_gc;
348
349 /* Total amount of memory mapped. */
350 size_t bytes_mapped;
351
352 /* Bit N set if any allocations have been done at context depth N. */
353 unsigned long context_depth_allocations;
354
355 /* Bit N set if any collections have been done at context depth N. */
356 unsigned long context_depth_collections;
357
358 /* The current depth in the context stack. */
359 unsigned short context_depth;
360
361 /* A file descriptor open to /dev/zero for reading. */
362 #if defined (HAVE_MMAP_DEV_ZERO)
363 int dev_zero_fd;
364 #endif
365
366 /* A cache of free system pages. */
367 page_entry *free_pages;
368
369 #ifdef USING_MALLOC_PAGE_GROUPS
370 page_group *page_groups;
371 #endif
372
373 /* The stdio stream used for debugging output. */
374 FILE *debug_file;
375
376 /* Current number of elements in use in depth below. */
377 unsigned int depth_in_use;
378
379 /* Maximum number of elements that can be used before resizing. */
380 unsigned int depth_max;
381
382 Each element of this array is an index into by_depth where the given
383 depth starts. This array is indexed by the context depth we
384 are interested in. */
385 unsigned int *depth;
386
387 /* Current number of elements in use in by_depth below. */
388 unsigned int by_depth_in_use;
389
390 /* Maximum number of elements that can be used before resizing. */
391 unsigned int by_depth_max;
392
393 /* Each element of this array is a pointer to a page_entry, all
394 page_entries can be found in here by increasing depth.
395 index_by_depth in the page_entry is the index into this data
396 structure where that page_entry can be found. This is used to
397 speed up finding all page_entries at a particular depth. */
398 page_entry **by_depth;
399
400 /* Each element is a pointer to the saved in_use_p bits, if any,
401 zero otherwise. We allocate them all together, to enable a
402 better runtime data access pattern. */
403 unsigned long **save_in_use;
404
405 #ifdef ENABLE_GC_ALWAYS_COLLECT
406 /* List of free objects to be verified as actually free on the
407 next collection. */
408 struct free_object
409 {
410 void *object;
411 struct free_object *next;
412 } *free_object_list;
413 #endif
414
415 #ifdef GATHER_STATISTICS
416 struct
417 {
418 /* Total memory allocated with ggc_alloc. */
419 unsigned long long total_allocated;
420 /* Total overhead for memory to be allocated with ggc_alloc. */
421 unsigned long long total_overhead;
422
423 /* Total allocations and overhead for sizes less than 32, 64 and 128.
424 These sizes are interesting because they are typical cache line
425 sizes. */
426
427 unsigned long long total_allocated_under32;
428 unsigned long long total_overhead_under32;
429
430 unsigned long long total_allocated_under64;
431 unsigned long long total_overhead_under64;
432
433 unsigned long long total_allocated_under128;
434 unsigned long long total_overhead_under128;
435
436 /* The allocations for each of the allocation orders. */
437 unsigned long long total_allocated_per_order[NUM_ORDERS];
438
439 /* The overhead for each of the allocation orders. */
440 unsigned long long total_overhead_per_order[NUM_ORDERS];
441 } stats;
442 #endif
443 } G;
444
445 /* The size in bytes required to maintain a bitmap for the objects
446 on a page-entry. */
447 #define BITMAP_SIZE(Num_objects) \
448 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
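
/* For instance, a 4096-byte page of 8-byte objects holds 512 objects;
   alloc_page asks for BITMAP_SIZE (512 + 1) so the one-past-the-end
   sentinel bit is covered too, i.e. CEIL (513, 64) * sizeof (long)
   == 72 bytes on a host with 64-bit longs (68 bytes with 32-bit
   longs).  */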
449
450 /* Allocate pages in chunks of this size, to throttle calls to memory
451 allocation routines. The first page is used, the rest go onto the
452 free list. This cannot be larger than HOST_BITS_PER_INT for the
453 in_use bitmask for page_group. */
454 #define GGC_QUIRE_SIZE 16
455
456 /* Initial guess as to how many page table entries we might need. */
457 #define INITIAL_PTE_COUNT 128
458 \f
459 static int ggc_allocated_p (const void *);
460 static page_entry *lookup_page_table_entry (const void *);
461 static void set_page_table_entry (void *, page_entry *);
462 #ifdef USING_MMAP
463 static char *alloc_anon (char *, size_t);
464 #endif
465 #ifdef USING_MALLOC_PAGE_GROUPS
466 static size_t page_group_index (char *, char *);
467 static void set_page_group_in_use (page_group *, char *);
468 static void clear_page_group_in_use (page_group *, char *);
469 #endif
470 static struct page_entry * alloc_page (unsigned);
471 static void free_page (struct page_entry *);
472 static void release_pages (void);
473 static void clear_marks (void);
474 static void sweep_pages (void);
475 static void ggc_recalculate_in_use_p (page_entry *);
476 static void compute_inverse (unsigned);
477 static inline void adjust_depth (void);
478 static void move_ptes_to_front (int, int);
479
480 #ifdef ENABLE_GC_CHECKING
481 static void poison_pages (void);
482 #endif
483
484 void debug_print_page_list (int);
485 static void push_depth (unsigned int);
486 static void push_by_depth (page_entry *, unsigned long *);
487 struct alloc_zone *rtl_zone = NULL;
488 struct alloc_zone *tree_zone = NULL;
489 struct alloc_zone *garbage_zone = NULL;
490
491 /* Push an entry onto G.depth. */
492
493 inline static void
494 push_depth (unsigned int i)
495 {
496 if (G.depth_in_use >= G.depth_max)
497 {
498 G.depth_max *= 2;
499 G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
500 }
501 G.depth[G.depth_in_use++] = i;
502 }
503
504 /* Push an entry onto G.by_depth and G.save_in_use. */
505
506 inline static void
507 push_by_depth (page_entry *p, unsigned long *s)
508 {
509 if (G.by_depth_in_use >= G.by_depth_max)
510 {
511 G.by_depth_max *= 2;
512 G.by_depth = xrealloc (G.by_depth,
513 G.by_depth_max * sizeof (page_entry *));
514 G.save_in_use = xrealloc (G.save_in_use,
515 G.by_depth_max * sizeof (unsigned long *));
516 }
517 G.by_depth[G.by_depth_in_use] = p;
518 G.save_in_use[G.by_depth_in_use++] = s;
519 }
520
521 #if (GCC_VERSION < 3001)
522 #define prefetch(X) ((void) X)
523 #else
524 #define prefetch(X) __builtin_prefetch (X)
525 #endif
526
527 #define save_in_use_p_i(__i) \
528 (G.save_in_use[__i])
529 #define save_in_use_p(__p) \
530 (save_in_use_p_i (__p->index_by_depth))
531
532 /* Returns nonzero if P was allocated in GC'able memory. */
533
534 static inline int
535 ggc_allocated_p (const void *p)
536 {
537 page_entry ***base;
538 size_t L1, L2;
539
540 #if HOST_BITS_PER_PTR <= 32
541 base = &G.lookup[0];
542 #else
543 page_table table = G.lookup;
544 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
545 while (1)
546 {
547 if (table == NULL)
548 return 0;
549 if (table->high_bits == high_bits)
550 break;
551 table = table->next;
552 }
553 base = &table->table[0];
554 #endif
555
556 /* Extract the level 1 and 2 indices. */
557 L1 = LOOKUP_L1 (p);
558 L2 = LOOKUP_L2 (p);
559
560 return base[L1] && base[L1][L2];
561 }
562
563 /* Traverse the page table and find the entry for a page.
564 Die (probably) if the object wasn't allocated via GC. */
565
566 static inline page_entry *
567 lookup_page_table_entry (const void *p)
568 {
569 page_entry ***base;
570 size_t L1, L2;
571
572 #if HOST_BITS_PER_PTR <= 32
573 base = &G.lookup[0];
574 #else
575 page_table table = G.lookup;
576 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
577 while (table->high_bits != high_bits)
578 table = table->next;
579 base = &table->table[0];
580 #endif
581
582 /* Extract the level 1 and 2 indices. */
583 L1 = LOOKUP_L1 (p);
584 L2 = LOOKUP_L2 (p);
585
586 return base[L1][L2];
587 }
588
589 /* Set the page table entry for a page. */
590
591 static void
592 set_page_table_entry (void *p, page_entry *entry)
593 {
594 page_entry ***base;
595 size_t L1, L2;
596
597 #if HOST_BITS_PER_PTR <= 32
598 base = &G.lookup[0];
599 #else
600 page_table table;
601 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
602 for (table = G.lookup; table; table = table->next)
603 if (table->high_bits == high_bits)
604 goto found;
605
606 /* Not found -- allocate a new table. */
607 table = xcalloc (1, sizeof(*table));
608 table->next = G.lookup;
609 table->high_bits = high_bits;
610 G.lookup = table;
611 found:
612 base = &table->table[0];
613 #endif
614
615 /* Extract the level 1 and 2 indices. */
616 L1 = LOOKUP_L1 (p);
617 L2 = LOOKUP_L2 (p);
618
619 if (base[L1] == NULL)
620 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
621
622 base[L1][L2] = entry;
623 }
624
625 /* Prints the page-entry for object size ORDER, for debugging. */
626
627 void
628 debug_print_page_list (int order)
629 {
630 page_entry *p;
631 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
632 (void *) G.page_tails[order]);
633 p = G.pages[order];
634 while (p != NULL)
635 {
636 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
637 p->num_free_objects);
638 p = p->next;
639 }
640 printf ("NULL\n");
641 fflush (stdout);
642 }
643
644 #ifdef USING_MMAP
645 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
646 (if non-null). The ifdef structure here is intended to cause a
647 compile error unless exactly one of the HAVE_* is defined. */
648
649 static inline char *
650 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
651 {
652 #ifdef HAVE_MMAP_ANON
653 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
654 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
655 #endif
656 #ifdef HAVE_MMAP_DEV_ZERO
657 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
658 MAP_PRIVATE, G.dev_zero_fd, 0);
659 #endif
660
661 if (page == (char *) MAP_FAILED)
662 {
663 perror ("virtual memory exhausted");
664 exit (FATAL_EXIT_CODE);
665 }
666
667 /* Remember that we allocated this memory. */
668 G.bytes_mapped += size;
669
670 /* Pretend we don't have access to the allocated pages. We'll enable
671 access to smaller pieces of the area in ggc_alloc. Discard the
672 handle to avoid handle leak. */
673 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
674
675 return page;
676 }
677 #endif
678 #ifdef USING_MALLOC_PAGE_GROUPS
679 /* Compute the index for this page into the page group. */
680
681 static inline size_t
682 page_group_index (char *allocation, char *page)
683 {
684 return (size_t) (page - allocation) >> G.lg_pagesize;
685 }
686
687 /* Set and clear the in_use bit for this page in the page group. */
688
689 static inline void
690 set_page_group_in_use (page_group *group, char *page)
691 {
692 group->in_use |= 1 << page_group_index (group->allocation, page);
693 }
694
695 static inline void
696 clear_page_group_in_use (page_group *group, char *page)
697 {
698 group->in_use &= ~(1 << page_group_index (group->allocation, page));
699 }
700 #endif
701
702 /* Allocate a new page for allocating objects of size 2^ORDER,
703 and return an entry for it. The entry is not added to the
704 appropriate page_table list. */
705
706 static inline struct page_entry *
707 alloc_page (unsigned order)
708 {
709 struct page_entry *entry, *p, **pp;
710 char *page;
711 size_t num_objects;
712 size_t bitmap_size;
713 size_t page_entry_size;
714 size_t entry_size;
715 #ifdef USING_MALLOC_PAGE_GROUPS
716 page_group *group;
717 #endif
718
719 num_objects = OBJECTS_PER_PAGE (order);
720 bitmap_size = BITMAP_SIZE (num_objects + 1);
721 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
722 entry_size = num_objects * OBJECT_SIZE (order);
723 if (entry_size < G.pagesize)
724 entry_size = G.pagesize;
725
726 entry = NULL;
727 page = NULL;
728
729 /* Check the list of free pages for one we can use. */
730 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
731 if (p->bytes == entry_size)
732 break;
733
734 if (p != NULL)
735 {
736 /* Recycle the allocated memory from this page ... */
737 *pp = p->next;
738 page = p->page;
739
740 #ifdef USING_MALLOC_PAGE_GROUPS
741 group = p->group;
742 #endif
743
744 /* ... and, if possible, the page entry itself. */
745 if (p->order == order)
746 {
747 entry = p;
748 memset (entry, 0, page_entry_size);
749 }
750 else
751 free (p);
752 }
753 #ifdef USING_MMAP
754 else if (entry_size == G.pagesize)
755 {
756 /* We want just one page. Allocate a bunch of them and put the
757 extras on the freelist. (Can only do this optimization with
758 mmap for backing store.) */
759 struct page_entry *e, *f = G.free_pages;
760 int i;
761
762 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
763
764 /* This loop counts down so that the chain will be in ascending
765 memory order. */
766 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
767 {
768 e = xcalloc (1, page_entry_size);
769 e->order = order;
770 e->bytes = G.pagesize;
771 e->page = page + (i << G.lg_pagesize);
772 e->next = f;
773 f = e;
774 }
775
776 G.free_pages = f;
777 }
778 else
779 page = alloc_anon (NULL, entry_size);
780 #endif
781 #ifdef USING_MALLOC_PAGE_GROUPS
782 else
783 {
784 /* Allocate a large block of memory and serve out the aligned
785 pages therein. This results in much less memory wastage
786 than the traditional implementation of valloc. */
787
788 char *allocation, *a, *enda;
789 size_t alloc_size, head_slop, tail_slop;
790 int multiple_pages = (entry_size == G.pagesize);
791
792 if (multiple_pages)
793 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
794 else
795 alloc_size = entry_size + G.pagesize - 1;
796 allocation = xmalloc (alloc_size);
797
798 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
799 head_slop = page - allocation;
800 if (multiple_pages)
801 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
802 else
803 tail_slop = alloc_size - entry_size - head_slop;
804 enda = allocation + alloc_size - tail_slop;
805
806 /* We allocated N pages, which are likely not aligned, leaving
807 us with N-1 usable pages. We plan to place the page_group
808 structure somewhere in the slop. */
809 if (head_slop >= sizeof (page_group))
810 group = (page_group *)page - 1;
811 else
812 {
813 /* We magically got an aligned allocation. Too bad, we have
814 to waste a page anyway. */
815 if (tail_slop == 0)
816 {
817 enda -= G.pagesize;
818 tail_slop += G.pagesize;
819 }
820 if (tail_slop < sizeof (page_group))
821 abort ();
822 group = (page_group *)enda;
823 tail_slop -= sizeof (page_group);
824 }
825
826 /* Remember that we allocated this memory. */
827 group->next = G.page_groups;
828 group->allocation = allocation;
829 group->alloc_size = alloc_size;
830 group->in_use = 0;
831 G.page_groups = group;
832 G.bytes_mapped += alloc_size;
833
834 /* If we allocated multiple pages, put the rest on the free list. */
835 if (multiple_pages)
836 {
837 struct page_entry *e, *f = G.free_pages;
838 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
839 {
840 e = xcalloc (1, page_entry_size);
841 e->order = order;
842 e->bytes = G.pagesize;
843 e->page = a;
844 e->group = group;
845 e->next = f;
846 f = e;
847 }
848 G.free_pages = f;
849 }
850 }
851 #endif
852
853 if (entry == NULL)
854 entry = xcalloc (1, page_entry_size);
855
856 entry->bytes = entry_size;
857 entry->page = page;
858 entry->context_depth = G.context_depth;
859 entry->order = order;
860 entry->num_free_objects = num_objects;
861 entry->next_bit_hint = 1;
862
863 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
864
865 #ifdef USING_MALLOC_PAGE_GROUPS
866 entry->group = group;
867 set_page_group_in_use (group, page);
868 #endif
869
870 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
871 increment the hint. */
872 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
873 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
874
875 set_page_table_entry (page, entry);
876
877 if (GGC_DEBUG_LEVEL >= 2)
878 fprintf (G.debug_file,
879 "Allocating page at %p, object size=%lu, data %p-%p\n",
880 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
881 page + entry_size - 1);
882
883 return entry;
884 }
885
886 /* Adjust the size of G.depth so that no index greater than the one
887 used by the top of the G.by_depth is used. */
888
889 static inline void
890 adjust_depth (void)
891 {
892 page_entry *top;
893
894 if (G.by_depth_in_use)
895 {
896 top = G.by_depth[G.by_depth_in_use-1];
897
898 /* Peel back indices in depth that index into by_depth, so that
899 as new elements are added to by_depth, we note the indices
900 of those elements, if they are for new context depths. */
901 while (G.depth_in_use > (size_t)top->context_depth+1)
902 --G.depth_in_use;
903 }
904 }
905
906 /* For a page that is no longer needed, put it on the free page list. */
907
908 static void
909 free_page (page_entry *entry)
910 {
911 if (GGC_DEBUG_LEVEL >= 2)
912 fprintf (G.debug_file,
913 "Deallocating page at %p, data %p-%p\n", (void *) entry,
914 entry->page, entry->page + entry->bytes - 1);
915
916 /* Mark the page as inaccessible. Discard the handle to avoid handle
917 leak. */
918 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
919
920 set_page_table_entry (entry->page, NULL);
921
922 #ifdef USING_MALLOC_PAGE_GROUPS
923 clear_page_group_in_use (entry->group, entry->page);
924 #endif
925
926 if (G.by_depth_in_use > 1)
927 {
928 page_entry *top = G.by_depth[G.by_depth_in_use-1];
929
930 /* If they are at the same depth, put top element into freed
931 slot. */
932 if (entry->context_depth == top->context_depth)
933 {
934 int i = entry->index_by_depth;
935 G.by_depth[i] = top;
936 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
937 top->index_by_depth = i;
938 }
939 else
940 {
941 /* We cannot free a page from a context deeper than the
942 current one. */
943 abort ();
944 }
945 }
946 --G.by_depth_in_use;
947
948 adjust_depth ();
949
950 entry->next = G.free_pages;
951 G.free_pages = entry;
952 }
953
954 /* Release the free page cache to the system. */
955
956 static void
957 release_pages (void)
958 {
959 #ifdef USING_MMAP
960 page_entry *p, *next;
961 char *start;
962 size_t len;
963
964 /* Gather up adjacent pages so they are unmapped together. */
965 p = G.free_pages;
966
967 while (p)
968 {
969 start = p->page;
970 next = p->next;
971 len = p->bytes;
972 free (p);
973 p = next;
974
975 while (p && p->page == start + len)
976 {
977 next = p->next;
978 len += p->bytes;
979 free (p);
980 p = next;
981 }
982
983 munmap (start, len);
984 G.bytes_mapped -= len;
985 }
986
987 G.free_pages = NULL;
988 #endif
989 #ifdef USING_MALLOC_PAGE_GROUPS
990 page_entry **pp, *p;
991 page_group **gp, *g;
992
993 /* Remove all pages from free page groups from the list. */
994 pp = &G.free_pages;
995 while ((p = *pp) != NULL)
996 if (p->group->in_use == 0)
997 {
998 *pp = p->next;
999 free (p);
1000 }
1001 else
1002 pp = &p->next;
1003
1004 /* Remove all free page groups, and release the storage. */
1005 gp = &G.page_groups;
1006 while ((g = *gp) != NULL)
1007 if (g->in_use == 0)
1008 {
1009 *gp = g->next;
1010 G.bytes_mapped -= g->alloc_size;
1011 free (g->allocation);
1012 }
1013 else
1014 gp = &g->next;
1015 #endif
1016 }
1017
1018 /* This table provides a fast way to determine ceil(log_2(size)) for
1019 allocation requests. The minimum allocation size is eight bytes. */
1020
1021 static unsigned char size_lookup[257] =
1022 {
1023 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1024 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1025 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1026 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1027 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1028 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1029 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1030 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1031 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1032 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1039 8
1040 };
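
/* For example, size_lookup[20] starts out as 5, directing a 20-byte
   request at the 32-byte order; init_ggc below rewrites entries like
   this one so that requests fall into the extra (non-power-of-two)
   orders whenever one of those fits more tightly.  */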
1041
1042 /* Typed allocation function. Does nothing special in this collector. */
1043
1044 void *
1045 ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
1046 {
1047 return ggc_alloc (size);
1048 }
1049
1050 /* Zone allocation function. Does nothing special in this collector. */
1051
1052 void *
1053 ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
1054 {
1055 return ggc_alloc (size);
1056 }
1057
1058 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1059
1060 void *
1061 ggc_alloc (size_t size)
1062 {
1063 size_t order, word, bit, object_offset, object_size;
1064 struct page_entry *entry;
1065 void *result;
1066
1067 if (size <= 256)
1068 {
1069 order = size_lookup[size];
1070 object_size = OBJECT_SIZE (order);
1071 }
1072 else
1073 {
1074 order = 9;
1075 while (size > (object_size = OBJECT_SIZE (order)))
1076 order++;
1077 }
1078
1079 /* If there are non-full pages for this size allocation, they are at
1080 the head of the list. */
1081 entry = G.pages[order];
1082
1083 /* If there is no page for this object size, or all pages in this
1084 context are full, allocate a new page. */
1085 if (entry == NULL || entry->num_free_objects == 0)
1086 {
1087 struct page_entry *new_entry;
1088 new_entry = alloc_page (order);
1089
1090 new_entry->index_by_depth = G.by_depth_in_use;
1091 push_by_depth (new_entry, 0);
1092
1093 /* We can skip context depths; if we do, make sure we go all the
1094 way to the new depth. */
1095 while (new_entry->context_depth >= G.depth_in_use)
1096 push_depth (G.by_depth_in_use-1);
1097
1098 /* If this is the only entry, it's also the tail. */
1099 if (entry == NULL)
1100 G.page_tails[order] = new_entry;
1101
1102 /* Put new pages at the head of the page list. */
1103 new_entry->next = entry;
1104 entry = new_entry;
1105 G.pages[order] = new_entry;
1106
1107 /* For a new page, we know the word and bit positions (in the
1108 in_use bitmap) of the first available object -- they're zero. */
1109 new_entry->next_bit_hint = 1;
1110 word = 0;
1111 bit = 0;
1112 object_offset = 0;
1113 }
1114 else
1115 {
1116 /* First try to use the hint left from the previous allocation
1117 to locate a clear bit in the in-use bitmap. We've made sure
1118 that the one-past-the-end bit is always set, so if the hint
1119 has run over, this test will fail. */
1120 unsigned hint = entry->next_bit_hint;
1121 word = hint / HOST_BITS_PER_LONG;
1122 bit = hint % HOST_BITS_PER_LONG;
1123
1124 /* If the hint didn't work, scan the bitmap from the beginning. */
1125 if ((entry->in_use_p[word] >> bit) & 1)
1126 {
1127 word = bit = 0;
1128 while (~entry->in_use_p[word] == 0)
1129 ++word;
1130 while ((entry->in_use_p[word] >> bit) & 1)
1131 ++bit;
1132 hint = word * HOST_BITS_PER_LONG + bit;
1133 }
1134
1135 /* Next time, try the next bit. */
1136 entry->next_bit_hint = hint + 1;
1137
1138 object_offset = hint * object_size;
1139 }
1140
1141 /* Set the in-use bit. */
1142 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1143
1144 /* Keep a running total of the number of free objects. If this page
1145 fills up, we may have to move it to the end of the list if the
1146 next page isn't full. If the next page is full, all subsequent
1147 pages are full, so there's no need to move it. */
1148 if (--entry->num_free_objects == 0
1149 && entry->next != NULL
1150 && entry->next->num_free_objects > 0)
1151 {
1152 G.pages[order] = entry->next;
1153 entry->next = NULL;
1154 G.page_tails[order]->next = entry;
1155 G.page_tails[order] = entry;
1156 }
1157
1158 /* Calculate the object's address. */
1159 result = entry->page + object_offset;
1160
1161 #ifdef ENABLE_GC_CHECKING
1162 /* Keep poisoning the object by writing 0xaf, in an attempt to keep the
1163 exact same semantics in the presence of memory bugs, regardless of
1164 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1165 handle to avoid handle leak. */
1166 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1167
1168 /* `Poison' the entire allocated object, including any padding at
1169 the end. */
1170 memset (result, 0xaf, object_size);
1171
1172 /* Make the bytes after the end of the object inaccessible. Discard the
1173 handle to avoid handle leak. */
1174 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1175 object_size - size));
1176 #endif
1177
1178 /* Tell Valgrind that the memory is there, but its content isn't
1179 defined. The bytes at the end of the object are still marked
1180 inaccessible. */
1181 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1182
1183 /* Keep track of how many bytes are being allocated. This
1184 information is used in deciding when to collect. */
1185 G.allocated += object_size;
1186
1187 #ifdef GATHER_STATISTICS
1188 {
1189 size_t overhead = object_size - size;
1190
1191 G.stats.total_overhead += overhead;
1192 G.stats.total_allocated += object_size;
1193 G.stats.total_overhead_per_order[order] += overhead;
1194 G.stats.total_allocated_per_order[order] += object_size;
1195
1196 if (size <= 32)
1197 {
1198 G.stats.total_overhead_under32 += overhead;
1199 G.stats.total_allocated_under32 += object_size;
1200 }
1201 if (size <= 64)
1202 {
1203 G.stats.total_overhead_under64 += overhead;
1204 G.stats.total_allocated_under64 += object_size;
1205 }
1206 if (size <= 128)
1207 {
1208 G.stats.total_overhead_under128 += overhead;
1209 G.stats.total_allocated_under128 += object_size;
1210 }
1211 }
1212 #endif
1213
1214 if (GGC_DEBUG_LEVEL >= 3)
1215 fprintf (G.debug_file,
1216 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1217 (unsigned long) size, (unsigned long) object_size, result,
1218 (void *) entry);
1219
1220 return result;
1221 }
1222
1223 /* If P is not marked, mark it and return false. Otherwise return true.
1224 P must have been allocated by the GC allocator; it mustn't point to
1225 static objects, stack variables, or memory allocated with malloc. */
1226
1227 int
1228 ggc_set_mark (const void *p)
1229 {
1230 page_entry *entry;
1231 unsigned bit, word;
1232 unsigned long mask;
1233
1234 /* Look up the page on which the object is alloced. If the object
1235 wasn't allocated by the collector, we'll probably die. */
1236 entry = lookup_page_table_entry (p);
1237 #ifdef ENABLE_CHECKING
1238 if (entry == NULL)
1239 abort ();
1240 #endif
1241
1242 /* Calculate the index of the object on the page; this is its bit
1243 position in the in_use_p bitmap. */
1244 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1245 word = bit / HOST_BITS_PER_LONG;
1246 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1247
1248 /* If the bit was previously set, skip it. */
1249 if (entry->in_use_p[word] & mask)
1250 return 1;
1251
1252 /* Otherwise set it, and decrement the free object count. */
1253 entry->in_use_p[word] |= mask;
1254 entry->num_free_objects -= 1;
1255
1256 if (GGC_DEBUG_LEVEL >= 4)
1257 fprintf (G.debug_file, "Marking %p\n", p);
1258
1259 return 0;
1260 }
1261
1262 /* Return 1 if P has been marked, zero otherwise.
1263 P must have been allocated by the GC allocator; it mustn't point to
1264 static objects, stack variables, or memory allocated with malloc. */
1265
1266 int
1267 ggc_marked_p (const void *p)
1268 {
1269 page_entry *entry;
1270 unsigned bit, word;
1271 unsigned long mask;
1272
1273 /* Look up the page on which the object is alloced. If the object
1274 wasn't allocated by the collector, we'll probably die. */
1275 entry = lookup_page_table_entry (p);
1276 #ifdef ENABLE_CHECKING
1277 if (entry == NULL)
1278 abort ();
1279 #endif
1280
1281 /* Calculate the index of the object on the page; this is its bit
1282 position in the in_use_p bitmap. */
1283 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1284 word = bit / HOST_BITS_PER_LONG;
1285 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1286
1287 return (entry->in_use_p[word] & mask) != 0;
1288 }
1289
1290 /* Return the size of the gc-able object P. */
1291
1292 size_t
1293 ggc_get_size (const void *p)
1294 {
1295 page_entry *pe = lookup_page_table_entry (p);
1296 return OBJECT_SIZE (pe->order);
1297 }
1298
1299 /* Release the memory for object P. */
1300
1301 void
1302 ggc_free (void *p)
1303 {
1304 page_entry *pe = lookup_page_table_entry (p);
1305 size_t order = pe->order;
1306 size_t size = OBJECT_SIZE (order);
1307
1308 if (GGC_DEBUG_LEVEL >= 3)
1309 fprintf (G.debug_file,
1310 "Freeing object, actual size=%lu, at %p on %p\n",
1311 (unsigned long) size, p, (void *) pe);
1312
1313 #ifdef ENABLE_GC_CHECKING
1314 /* Poison the data, to indicate the data is garbage. */
1315 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1316 memset (p, 0xa5, size);
1317 #endif
1318 /* Let valgrind know the object is free. */
1319 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1320
1321 #ifdef ENABLE_GC_ALWAYS_COLLECT
1322 /* In the completely-anal-checking mode, we do *not* immediately free
1323 the data, but instead verify that the data is *actually* not
1324 reachable the next time we collect. */
1325 {
1326 struct free_object *fo = xmalloc (sizeof (struct free_object));
1327 fo->object = p;
1328 fo->next = G.free_object_list;
1329 G.free_object_list = fo;
1330 }
1331 #else
1332 {
1333 unsigned int bit_offset, word, bit;
1334
1335 G.allocated -= size;
1336
1337 /* Mark the object not-in-use. */
1338 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1339 word = bit_offset / HOST_BITS_PER_LONG;
1340 bit = bit_offset % HOST_BITS_PER_LONG;
1341 pe->in_use_p[word] &= ~(1UL << bit);
1342
1343 if (pe->num_free_objects++ == 0)
1344 {
1345 /* If the page is completely full, then it's supposed to
1346 be after all pages that aren't. Since we've freed one
1347 object from a page that was full, we need to move the
1348 page to the head of the list. */
1349
1350 page_entry *p, *q;
1351 for (q = NULL, p = G.pages[order]; ; q = p, p = p->next)
1352 if (p == pe)
1353 break;
1354 if (q && q->num_free_objects == 0)
1355 {
1356 p = pe->next;
1357 q->next = p;
1358 if (!p)
1359 G.page_tails[order] = q;
1360 pe->next = G.pages[order];
1361 G.pages[order] = pe;
1362 }
1363
1364 /* Reset the hint bit to point to the only free object. */
1365 pe->next_bit_hint = bit_offset;
1366 }
1367 }
1368 #endif
1369 }
1370 \f
1371 /* Subroutine of init_ggc which computes the pair of numbers used to
1372 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1373
1374 This algorithm is taken from Granlund and Montgomery's paper
1375 "Division by Invariant Integers using Multiplication"
1376 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1377 constants). */
1378
1379 static void
1380 compute_inverse (unsigned order)
1381 {
1382 size_t size, inv;
1383 unsigned int e;
1384
1385 size = OBJECT_SIZE (order);
1386 e = 0;
1387 while (size % 2 == 0)
1388 {
1389 e++;
1390 size >>= 1;
1391 }
1392
1393 inv = size;
1394 while (inv * size != 1)
1395 inv = inv * (2 - inv*size);
1396
1397 DIV_MULT (order) = inv;
1398 DIV_SHIFT (order) = e;
1399 }
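
/* For instance, for a hypothetical 24-byte object size the loop above
   strips three factors of two (e == 3, leaving the odd part 3), and the
   Newton-style iteration converges on inv == 0xaaaaaaab, since
   3 * 0xaaaaaaab == 0x200000001 == 1 (mod 2^32).  This is the
   DIV_MULT/DIV_SHIFT pair consumed by OFFSET_TO_BIT above.  */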
1400
1401 /* Initialize the ggc-mmap allocator. */
1402 void
1403 init_ggc (void)
1404 {
1405 unsigned order;
1406
1407 G.pagesize = getpagesize();
1408 G.lg_pagesize = exact_log2 (G.pagesize);
1409
1410 #ifdef HAVE_MMAP_DEV_ZERO
1411 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1412 if (G.dev_zero_fd == -1)
1413 internal_error ("open /dev/zero: %m");
1414 #endif
1415
1416 #if 0
1417 G.debug_file = fopen ("ggc-mmap.debug", "w");
1418 #else
1419 G.debug_file = stdout;
1420 #endif
1421
1422 #ifdef USING_MMAP
1423 /* StunOS has an amazing off-by-one error for the first mmap allocation
1424 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1425 believe, is an unaligned page allocation, which would cause us to
1426 hork badly if we tried to use it. */
1427 {
1428 char *p = alloc_anon (NULL, G.pagesize);
1429 struct page_entry *e;
1430 if ((size_t)p & (G.pagesize - 1))
1431 {
1432 /* How losing. Discard this one and try another. If we still
1433 can't get something useful, give up. */
1434
1435 p = alloc_anon (NULL, G.pagesize);
1436 if ((size_t)p & (G.pagesize - 1))
1437 abort ();
1438 }
1439
1440 /* We have a good page, might as well hold onto it... */
1441 e = xcalloc (1, sizeof (struct page_entry));
1442 e->bytes = G.pagesize;
1443 e->page = p;
1444 e->next = G.free_pages;
1445 G.free_pages = e;
1446 }
1447 #endif
1448
1449 /* Initialize the object size table. */
1450 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1451 object_size_table[order] = (size_t) 1 << order;
1452 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1453 {
1454 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1455
1456 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1457 so that we're sure of getting aligned memory. */
1458 s = ROUND_UP (s, MAX_ALIGNMENT);
1459 object_size_table[order] = s;
1460 }
1461
1462 /* Initialize the objects-per-page and inverse tables. */
1463 for (order = 0; order < NUM_ORDERS; ++order)
1464 {
1465 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1466 if (objects_per_page_table[order] == 0)
1467 objects_per_page_table[order] = 1;
1468 compute_inverse (order);
1469 }
1470
1471 /* Reset the size_lookup array to put appropriately sized objects in
1472 the special orders. All objects bigger than the previous power
1473 of two, but no greater than the special size, should go in the
1474 new order. */
1475 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1476 {
1477 int o;
1478 int i;
1479
1480 o = size_lookup[OBJECT_SIZE (order)];
1481 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1482 size_lookup[i] = order;
1483 }
1484
1485 G.depth_in_use = 0;
1486 G.depth_max = 10;
1487 G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
1488
1489 G.by_depth_in_use = 0;
1490 G.by_depth_max = INITIAL_PTE_COUNT;
1491 G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1492 G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
1493 }
1494
1495 /* Start a new GGC zone. */
1496
1497 struct alloc_zone *
1498 new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1499 {
1500 return NULL;
1501 }
1502
1503 /* Destroy a GGC zone. */
1504 void
1505 destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1506 {
1507 }
1508
1509 /* Increment the `GC context'. Objects allocated in an outer context
1510 are never freed, eliminating the need to register their roots. */
1511
1512 void
1513 ggc_push_context (void)
1514 {
1515 ++G.context_depth;
1516
1517 /* Die on wrap. */
1518 if (G.context_depth >= HOST_BITS_PER_LONG)
1519 abort ();
1520 }
1521
1522 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1523 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1524
1525 static void
1526 ggc_recalculate_in_use_p (page_entry *p)
1527 {
1528 unsigned int i;
1529 size_t num_objects;
1530
1531 /* Because the past-the-end bit in in_use_p is always set, we
1532 pretend there is one additional object. */
1533 num_objects = OBJECTS_IN_PAGE (p) + 1;
1534
1535 /* Reset the free object count. */
1536 p->num_free_objects = num_objects;
1537
1538 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1539 for (i = 0;
1540 i < CEIL (BITMAP_SIZE (num_objects),
1541 sizeof (*p->in_use_p));
1542 ++i)
1543 {
1544 unsigned long j;
1545
1546 /* Something is in use if it is marked, or if it was in use in a
1547 context further down the context stack. */
1548 p->in_use_p[i] |= save_in_use_p (p)[i];
1549
1550 /* Decrement the free object count for every object allocated. */
1551 for (j = p->in_use_p[i]; j; j >>= 1)
1552 p->num_free_objects -= (j & 1);
1553 }
1554
1555 if (p->num_free_objects >= num_objects)
1556 abort ();
1557 }
1558
1559 /* Decrement the `GC context'. All objects allocated since the
1560 previous ggc_push_context are migrated to the outer context. */
1561
1562 void
1563 ggc_pop_context (void)
1564 {
1565 unsigned long omask;
1566 unsigned int depth, i, e;
1567 #ifdef ENABLE_CHECKING
1568 unsigned int order;
1569 #endif
1570
1571 depth = --G.context_depth;
1572 omask = (unsigned long)1 << (depth + 1);
1573
1574 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1575 return;
1576
1577 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1578 G.context_depth_allocations &= omask - 1;
1579 G.context_depth_collections &= omask - 1;
1580
1581 /* The G.depth array is shortened so that the last index is the
1582 context_depth of the top element of by_depth. */
1583 if (depth+1 < G.depth_in_use)
1584 e = G.depth[depth+1];
1585 else
1586 e = G.by_depth_in_use;
1587
1588 /* We might not have any PTEs of depth depth. */
1589 if (depth < G.depth_in_use)
1590 {
1591
1592 /* First we go through all the pages at depth depth to
1593 recalculate the in use bits. */
1594 for (i = G.depth[depth]; i < e; ++i)
1595 {
1596 page_entry *p;
1597
1598 #ifdef ENABLE_CHECKING
1599 p = G.by_depth[i];
1600
1601 /* Check that all of the pages really are at the depth that
1602 we expect. */
1603 if (p->context_depth != depth)
1604 abort ();
1605 if (p->index_by_depth != i)
1606 abort ();
1607 #endif
1608
1609 prefetch (&save_in_use_p_i (i+8));
1610 prefetch (&save_in_use_p_i (i+16));
1611 if (save_in_use_p_i (i))
1612 {
1613 p = G.by_depth[i];
1614 ggc_recalculate_in_use_p (p);
1615 free (save_in_use_p_i (i));
1616 save_in_use_p_i (i) = 0;
1617 }
1618 }
1619 }
1620
1621 /* Then, we reset all page_entries with a depth greater than depth
1622 to be at depth. */
1623 for (i = e; i < G.by_depth_in_use; ++i)
1624 {
1625 page_entry *p = G.by_depth[i];
1626
1627 /* Check that all of the pages really are at the depth we
1628 expect. */
1629 #ifdef ENABLE_CHECKING
1630 if (p->context_depth <= depth)
1631 abort ();
1632 if (p->index_by_depth != i)
1633 abort ();
1634 #endif
1635 p->context_depth = depth;
1636 }
1637
1638 adjust_depth ();
1639
1640 #ifdef ENABLE_CHECKING
1641 for (order = 2; order < NUM_ORDERS; order++)
1642 {
1643 page_entry *p;
1644
1645 for (p = G.pages[order]; p != NULL; p = p->next)
1646 {
1647 if (p->context_depth > depth)
1648 abort ();
1649 else if (p->context_depth == depth && save_in_use_p (p))
1650 abort ();
1651 }
1652 }
1653 #endif
1654 }
1655 \f
1656 /* Unmark all objects. */
1657
1658 static inline void
1659 clear_marks (void)
1660 {
1661 unsigned order;
1662
1663 for (order = 2; order < NUM_ORDERS; order++)
1664 {
1665 page_entry *p;
1666
1667 for (p = G.pages[order]; p != NULL; p = p->next)
1668 {
1669 size_t num_objects = OBJECTS_IN_PAGE (p);
1670 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1671
1672 #ifdef ENABLE_CHECKING
1673 /* The data should be page-aligned. */
1674 if ((size_t) p->page & (G.pagesize - 1))
1675 abort ();
1676 #endif
1677
1678 /* Pages that aren't in the topmost context are not collected;
1679 nevertheless, we need their in-use bit vectors to store GC
1680 marks. So, back them up first. */
1681 if (p->context_depth < G.context_depth)
1682 {
1683 if (! save_in_use_p (p))
1684 save_in_use_p (p) = xmalloc (bitmap_size);
1685 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1686 }
1687
1688 /* Reset the number of free objects and clear the
1689 in-use bits. These will be adjusted by mark_obj. */
1690 p->num_free_objects = num_objects;
1691 memset (p->in_use_p, 0, bitmap_size);
1692
1693 /* Make sure the one-past-the-end bit is always set. */
1694 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1695 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1696 }
1697 }
1698 }
1699
1700 /* Free all empty pages. Partially empty pages need no attention
1701 because the `mark' bit doubles as an `unused' bit. */
1702
1703 static inline void
1704 sweep_pages (void)
1705 {
1706 unsigned order;
1707
1708 for (order = 2; order < NUM_ORDERS; order++)
1709 {
1710 /* The last page-entry to consider, regardless of entries
1711 placed at the end of the list. */
1712 page_entry * const last = G.page_tails[order];
1713
1714 size_t num_objects;
1715 size_t live_objects;
1716 page_entry *p, *previous;
1717 int done;
1718
1719 p = G.pages[order];
1720 if (p == NULL)
1721 continue;
1722
1723 previous = NULL;
1724 do
1725 {
1726 page_entry *next = p->next;
1727
1728 /* Loop until all entries have been examined. */
1729 done = (p == last);
1730
1731 num_objects = OBJECTS_IN_PAGE (p);
1732
1733 /* Add all live objects on this page to the count of
1734 allocated memory. */
1735 live_objects = num_objects - p->num_free_objects;
1736
1737 G.allocated += OBJECT_SIZE (order) * live_objects;
1738
1739 /* Only objects on pages in the topmost context should get
1740 collected. */
1741 if (p->context_depth < G.context_depth)
1742 ;
1743
1744 /* Remove the page if it's empty. */
1745 else if (live_objects == 0)
1746 {
1747 if (! previous)
1748 G.pages[order] = next;
1749 else
1750 previous->next = next;
1751
1752 /* Are we removing the last element? */
1753 if (p == G.page_tails[order])
1754 G.page_tails[order] = previous;
1755 free_page (p);
1756 p = previous;
1757 }
1758
1759 /* If the page is full, move it to the end. */
1760 else if (p->num_free_objects == 0)
1761 {
1762 /* Don't move it if it's already at the end. */
1763 if (p != G.page_tails[order])
1764 {
1765 /* Move p to the end of the list. */
1766 p->next = NULL;
1767 G.page_tails[order]->next = p;
1768
1769 /* Update the tail pointer... */
1770 G.page_tails[order] = p;
1771
1772 /* ... and the head pointer, if necessary. */
1773 if (! previous)
1774 G.pages[order] = next;
1775 else
1776 previous->next = next;
1777 p = previous;
1778 }
1779 }
1780
1781 /* If we've fallen through to here, it's a page in the
1782 topmost context that is neither full nor empty. Such a
1783 page must precede pages at lesser context depth in the
1784 list, so move it to the head. */
1785 else if (p != G.pages[order])
1786 {
1787 previous->next = p->next;
1788 p->next = G.pages[order];
1789 G.pages[order] = p;
1790 /* Are we moving the last element? */
1791 if (G.page_tails[order] == p)
1792 G.page_tails[order] = previous;
1793 p = previous;
1794 }
1795
1796 previous = p;
1797 p = next;
1798 }
1799 while (! done);
1800
1801 /* Now, restore the in_use_p vectors for any pages from contexts
1802 other than the current one. */
1803 for (p = G.pages[order]; p; p = p->next)
1804 if (p->context_depth != G.context_depth)
1805 ggc_recalculate_in_use_p (p);
1806 }
1807 }
1808
1809 #ifdef ENABLE_GC_CHECKING
1810 /* Clobber all free objects. */
1811
1812 static inline void
1813 poison_pages (void)
1814 {
1815 unsigned order;
1816
1817 for (order = 2; order < NUM_ORDERS; order++)
1818 {
1819 size_t size = OBJECT_SIZE (order);
1820 page_entry *p;
1821
1822 for (p = G.pages[order]; p != NULL; p = p->next)
1823 {
1824 size_t num_objects;
1825 size_t i;
1826
1827 if (p->context_depth != G.context_depth)
1828 /* Since we don't do any collection for pages in pushed
1829 contexts, there's no need to do any poisoning. And
1830 besides, the IN_USE_P array isn't valid until we pop
1831 contexts. */
1832 continue;
1833
1834 num_objects = OBJECTS_IN_PAGE (p);
1835 for (i = 0; i < num_objects; i++)
1836 {
1837 size_t word, bit;
1838 word = i / HOST_BITS_PER_LONG;
1839 bit = i % HOST_BITS_PER_LONG;
1840 if (((p->in_use_p[word] >> bit) & 1) == 0)
1841 {
1842 char *object = p->page + i * size;
1843
1844 /* Keep poison-by-write when we expect to use Valgrind,
1845 so the exact same memory semantics is kept, in case
1846 there are memory errors. We override this request
1847 below. */
1848 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1849 memset (object, 0xa5, size);
1850
1851 /* Drop the handle to avoid handle leak. */
1852 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1853 }
1854 }
1855 }
1856 }
1857 }
1858 #endif
1859
1860 /* Top level mark-and-sweep routine. */
1861
1862 void
1863 ggc_collect (void)
1864 {
1865 /* Avoid frequent unnecessary work by skipping collection if the
1866 total allocations haven't expanded much since the last
1867 collection. */
1868 float allocated_last_gc =
1869 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1870
1871 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1872
1873 if (G.allocated < allocated_last_gc + min_expand)
1874 return;
1875
1876 timevar_push (TV_GC);
1877 if (!quiet_flag)
1878 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1879 if (GGC_DEBUG_LEVEL >= 2)
1880 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1881
1882 /* Zero the total allocated bytes. This will be recalculated in the
1883 sweep phase. */
1884 G.allocated = 0;
1885
1886 /* Release the pages we freed the last time we collected, but didn't
1887 reuse in the interim. */
1888 release_pages ();
1889
1890 /* Indicate that we've seen collections at this context depth. */
1891 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1892
1893 clear_marks ();
1894 ggc_mark_roots ();
1895
1896 #ifdef ENABLE_GC_CHECKING
1897 poison_pages ();
1898 #endif
1899
1900 sweep_pages ();
1901
1902 #ifdef ENABLE_GC_ALWAYS_COLLECT
1903 /* Validate that the reportedly free objects actually are. */
1904 {
1905 struct free_object *f, *n;
1906 for (f = G.free_object_list; f ; f = n)
1907 {
1908 page_entry *pe = lookup_page_table_entry (f->object);
1909
1910 /* If the page entry is null, that means the entire page is free.
1911 Otherwise, we have to examine the in-use bit for the object. */
1912 if (pe != NULL)
1913 {
1914 size_t bit, word;
1915 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1916 word = bit / HOST_BITS_PER_LONG;
1917 bit = bit % HOST_BITS_PER_LONG;
1918
1919 if (pe->in_use_p[word] & (1UL << bit))
1920 abort ();
1921 }
1922
1923 n = f->next;
1924 free (f);
1925 }
1926 G.free_object_list = NULL;
1927 }
1928 #endif
1929
1930 G.allocated_last_gc = G.allocated;
1931
1932 timevar_pop (TV_GC);
1933
1934 if (!quiet_flag)
1935 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1936 if (GGC_DEBUG_LEVEL >= 2)
1937 fprintf (G.debug_file, "END COLLECTING\n");
1938 }
1939
1940 /* Print allocation statistics. */
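/* SCALE reduces a byte count to bytes, kilobytes or megabytes so the
   printed number stays small; LABEL supplies the matching unit suffix
   (' ', 'k' or 'M').  */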
1941 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1942 ? (x) \
1943 : ((x) < 1024*1024*10 \
1944 ? (x) / 1024 \
1945 : (x) / (1024*1024))))
1946 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1947
1948 void
1949 ggc_print_statistics (void)
1950 {
1951 struct ggc_statistics stats;
1952 unsigned int i;
1953 size_t total_overhead = 0;
1954
1955 /* Clear the statistics. */
1956 memset (&stats, 0, sizeof (stats));
1957
1958 /* Make sure collection will really occur. */
1959 G.allocated_last_gc = 0;
1960
1961 /* Collect and print the statistics common across collectors. */
1962 ggc_print_common_statistics (stderr, &stats);
1963
1964 /* Release free pages so that we will not count the bytes allocated
1965 there as part of the total allocated memory. */
1966 release_pages ();
1967
1968 /* Collect some information about the various sizes of
1969 allocation. */
1970 fprintf (stderr,
1971 "Memory still allocated at the end of the compilation process\n");
1972 fprintf (stderr, "%-5s %10s %10s %10s\n",
1973 "Size", "Allocated", "Used", "Overhead");
1974 for (i = 0; i < NUM_ORDERS; ++i)
1975 {
1976 page_entry *p;
1977 size_t allocated;
1978 size_t in_use;
1979 size_t overhead;
1980
1981 /* Skip empty entries. */
1982 if (!G.pages[i])
1983 continue;
1984
1985 overhead = allocated = in_use = 0;
1986
1987 /* Figure out the total number of bytes allocated for objects of
1988 this size, and how many of them are actually in use. Also figure
1989 out how much memory the page table is using. */
1990 for (p = G.pages[i]; p; p = p->next)
1991 {
1992 allocated += p->bytes;
1993 in_use +=
1994 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1995
1996 overhead += (sizeof (page_entry) - sizeof (long)
1997 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1998 }
1999 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2000 (unsigned long) OBJECT_SIZE (i),
2001 SCALE (allocated), LABEL (allocated),
2002 SCALE (in_use), LABEL (in_use),
2003 SCALE (overhead), LABEL (overhead));
2004 total_overhead += overhead;
2005 }
2006 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2007 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
2008 SCALE (G.allocated), LABEL(G.allocated),
2009 SCALE (total_overhead), LABEL (total_overhead));
2010
2011 #ifdef GATHER_STATISTICS
2012 {
2013 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2014
2015 fprintf (stderr, "Total Overhead: %10lld\n",
2016 G.stats.total_overhead);
2017 fprintf (stderr, "Total Allocated: %10lld\n",
2018 G.stats.total_allocated);
2019
2020 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2021 G.stats.total_overhead_under32);
2022 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2023 G.stats.total_allocated_under32);
2024 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2025 G.stats.total_overhead_under64);
2026 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2027 G.stats.total_allocated_under64);
2028 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2029 G.stats.total_overhead_under128);
2030 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2031 G.stats.total_allocated_under128);
2032
2033 for (i = 0; i < NUM_ORDERS; i++)
2034 if (G.stats.total_allocated_per_order[i])
2035 {
2036 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2037 (unsigned long) OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
2038 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2039 (unsigned long) OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
2040 }
2041 }
2042 #endif
2043 }
2044 \f
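/* Information used while writing a precompiled header, accumulated
   across the ggc_pch_* routines below: the per-order object counts
   that are written to the file, plus the per-order base addresses and
   written-object counts used while laying out and emitting objects.  */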
2045 struct ggc_pch_data
2046 {
2047 struct ggc_pch_ondisk
2048 {
2049 unsigned totals[NUM_ORDERS];
2050 } d;
2051 size_t base[NUM_ORDERS];
2052 size_t written[NUM_ORDERS];
2053 };
2054
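/* Allocate and return zero-initialized PCH allocation state.  */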
2055 struct ggc_pch_data *
2056 init_ggc_pch (void)
2057 {
2058 return xcalloc (sizeof (struct ggc_pch_data), 1);
2059 }
2060
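/* Register an object X of SIZE bytes for inclusion in the PCH file by
   bumping the object count for its allocation order.  */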
2061 void
2062 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2063 size_t size, bool is_string ATTRIBUTE_UNUSED)
2064 {
2065 unsigned order;
2066
2067 if (size <= 256)
2068 order = size_lookup[size];
2069 else
2070 {
2071 order = 9;
2072 while (size > OBJECT_SIZE (order))
2073 order++;
2074 }
2075
2076 d->d.totals[order]++;
2077 }
2078
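/* Return the total space needed in the PCH file for the registered
   objects, rounding each order's objects up to a page boundary.  */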
2079 size_t
2080 ggc_pch_total_size (struct ggc_pch_data *d)
2081 {
2082 size_t a = 0;
2083 unsigned i;
2084
2085 for (i = 0; i < NUM_ORDERS; i++)
2086 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2087 return a;
2088 }
2089
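/* Given BASE, the address at which the PCH objects will live, record
   the starting address for each order's objects.  */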
2090 void
2091 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2092 {
2093 size_t a = (size_t) base;
2094 unsigned i;
2095
2096 for (i = 0; i < NUM_ORDERS; i++)
2097 {
2098 d->base[i] = a;
2099 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2100 }
2101 }
2102
2103
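/* Return the address at which object X of SIZE bytes will live in the
   PCH address space, advancing the allocation pointer for its order.  */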
2104 char *
2105 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2106 size_t size, bool is_string ATTRIBUTE_UNUSED)
2107 {
2108 unsigned order;
2109 char *result;
2110
2111 if (size <= 256)
2112 order = size_lookup[size];
2113 else
2114 {
2115 order = 9;
2116 while (size > OBJECT_SIZE (order))
2117 order++;
2118 }
2119
2120 result = (char *) d->base[order];
2121 d->base[order] += OBJECT_SIZE (order);
2122 return result;
2123 }
2124
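/* Prepare to write the PCH objects to file F.  This allocator needs
   no preparation.  */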
2125 void
2126 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2127 FILE *f ATTRIBUTE_UNUSED)
2128 {
2129 /* Nothing to do. */
2130 }
2131
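/* Write object X of SIZE bytes to the PCH file F, padding it to the
   full object size of its order; once the last object of an order has
   been written, skip forward to the next page boundary.  */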
2132 void
2133 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2134 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2135 size_t size, bool is_string ATTRIBUTE_UNUSED)
2136 {
2137 unsigned order;
2138 static const char emptyBytes[256];
2139
2140 if (size <= 256)
2141 order = size_lookup[size];
2142 else
2143 {
2144 order = 9;
2145 while (size > OBJECT_SIZE (order))
2146 order++;
2147 }
2148
2149 if (fwrite (x, size, 1, f) != 1)
2150 fatal_error ("can't write PCH file: %m");
2151
2152 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2153 object out to OBJECT_SIZE(order). This happens for strings. */
2154
2155 if (size != OBJECT_SIZE (order))
2156 {
2157 unsigned padding = OBJECT_SIZE(order) - size;
2158
2159 /* To speed small writes, we use a nulled-out array that's larger
2160 than most padding requests as the source for our null bytes. This
2161 permits us to do the padding with fwrite() rather than fseek(), and
2162 limits the chance that the OS may try to flush any outstanding
2163 writes. */
2164 if (padding <= sizeof(emptyBytes))
2165 {
2166 if (fwrite (emptyBytes, 1, padding, f) != padding)
2167 fatal_error ("can't write PCH file");
2168 }
2169 else
2170 {
2171 /* Larger than our buffer? Just default to fseek. */
2172 if (fseek (f, padding, SEEK_CUR) != 0)
2173 fatal_error ("can't write PCH file");
2174 }
2175 }
2176
2177 d->written[order]++;
2178 if (d->written[order] == d->d.totals[order]
2179 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2180 G.pagesize),
2181 SEEK_CUR) != 0)
2182 fatal_error ("can't write PCH file: %m");
2183 }
2184
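/* Finish writing the PCH file F by recording the per-order object
   counts, then release the PCH state D.  */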
2185 void
2186 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2187 {
2188 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2189 fatal_error ("can't write PCH file: %m");
2190 free (d);
2191 }
2192
2193 /* Move the PCH PTE entries that were just added to the end of
2194 by_depth to the front. */
2195
2196 static void
2197 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2198 {
2199 unsigned i;
2200
2201 /* First, we swap the new entries to the front of the varrays. */
2202 page_entry **new_by_depth;
2203 unsigned long **new_save_in_use;
2204
2205 new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
2206 new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
2207
2208 memcpy (&new_by_depth[0],
2209 &G.by_depth[count_old_page_tables],
2210 count_new_page_tables * sizeof (void *));
2211 memcpy (&new_by_depth[count_new_page_tables],
2212 &G.by_depth[0],
2213 count_old_page_tables * sizeof (void *));
2214 memcpy (&new_save_in_use[0],
2215 &G.save_in_use[count_old_page_tables],
2216 count_new_page_tables * sizeof (void *));
2217 memcpy (&new_save_in_use[count_new_page_tables],
2218 &G.save_in_use[0],
2219 count_old_page_tables * sizeof (void *));
2220
2221 free (G.by_depth);
2222 free (G.save_in_use);
2223
2224 G.by_depth = new_by_depth;
2225 G.save_in_use = new_save_in_use;
2226
2227 /* Now update all the index_by_depth fields. */
2228 for (i = G.by_depth_in_use; i > 0; --i)
2229 {
2230 page_entry *p = G.by_depth[i-1];
2231 p->index_by_depth = i-1;
2232 }
2233
2234 /* And last, we update the depth pointers in G.depth. The first
2235 entry is already 0, and context 0 entries always start at index
2236 0, so there is nothing to update in the first slot. We need a
2237 second slot only if we have old PTEs, and if we do, they start
2238 at index count_new_page_tables. */
2239 if (count_old_page_tables)
2240 push_depth (count_new_page_tables);
2241 }
2242
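/* Read the PCH data from file F; ADDR is the address at which the PCH
   objects have been placed.  Rebuild the page-table entries describing
   them and mark all the PCH objects as in use.  */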
2243 void
2244 ggc_pch_read (FILE *f, void *addr)
2245 {
2246 struct ggc_pch_ondisk d;
2247 unsigned i;
2248 char *offs = addr;
2249 unsigned long count_old_page_tables;
2250 unsigned long count_new_page_tables;
2251
2252 count_old_page_tables = G.by_depth_in_use;
2253
2254 /* We've just read in a PCH file. So, every object that used to be
2255 allocated is now free. */
2256 clear_marks ();
2257 #ifdef ENABLE_GC_CHECKING
2258 poison_pages ();
2259 #endif
2260
2261 /* No object read from a PCH file should ever be freed. So, set the
2262 context depth to 1, and set the depth of all the currently-allocated
2263 pages to be 1 too. PCH pages will have depth 0. */
2264 if (G.context_depth != 0)
2265 abort ();
2266 G.context_depth = 1;
2267 for (i = 0; i < NUM_ORDERS; i++)
2268 {
2269 page_entry *p;
2270 for (p = G.pages[i]; p != NULL; p = p->next)
2271 p->context_depth = G.context_depth;
2272 }
2273
2274 /* Allocate the appropriate page-table entries for the pages read from
2275 the PCH file. */
2276 if (fread (&d, sizeof (d), 1, f) != 1)
2277 fatal_error ("can't read PCH file: %m");
2278
2279 for (i = 0; i < NUM_ORDERS; i++)
2280 {
2281 struct page_entry *entry;
2282 char *pte;
2283 size_t bytes;
2284 size_t num_objs;
2285 size_t j;
2286
2287 if (d.totals[i] == 0)
2288 continue;
2289
2290 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2291 num_objs = bytes / OBJECT_SIZE (i);
2292 entry = xcalloc (1, (sizeof (struct page_entry)
2293 - sizeof (long)
2294 + BITMAP_SIZE (num_objs + 1)));
2295 entry->bytes = bytes;
2296 entry->page = offs;
2297 entry->context_depth = 0;
2298 offs += bytes;
2299 entry->num_free_objects = 0;
2300 entry->order = i;
2301
2302 for (j = 0;
2303 j + HOST_BITS_PER_LONG <= num_objs + 1;
2304 j += HOST_BITS_PER_LONG)
2305 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2306 for (; j < num_objs + 1; j++)
2307 entry->in_use_p[j / HOST_BITS_PER_LONG]
2308 |= 1L << (j % HOST_BITS_PER_LONG);
2309
2310 for (pte = entry->page;
2311 pte < entry->page + entry->bytes;
2312 pte += G.pagesize)
2313 set_page_table_entry (pte, entry);
2314
2315 if (G.page_tails[i] != NULL)
2316 G.page_tails[i]->next = entry;
2317 else
2318 G.pages[i] = entry;
2319 G.page_tails[i] = entry;
2320
2321 /* We start off by just adding all the new information to the
2322 end of the varrays; later, we will move the new information
2323 to the front of the varrays, as the PCH page tables are at
2324 context 0. */
2325 push_by_depth (entry, 0);
2326 }
2327
2328 /* Now, we update the various data structures that speed page table
2329 handling. */
2330 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2331
2332 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2333
2334 /* Update the statistics. */
2335 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2336 }