gcc/ggc-page.c
/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
   2010, 2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
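
/* Illustrative note (not part of the original logic): assuming a 4K
   system page size, G.lg_pagesize is 12 and PAGE_L2_BITS is
   32 - 8 - 12 = 12.  For a pointer p == 0x12345678, LOOKUP_L1 (p)
   yields 0x12 (the top 8 bits) and LOOKUP_L2 (p) yields 0x345 (the
   next 12 bits); the low 12 bits (0x678) are the object's offset
   within its page.  */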

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
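
/* A worked example (illustrative, using 32-bit arithmetic): for the
   8-byte order, compute_inverse below yields mult == 1 and shift == 3,
   so OFFSET_TO_BIT reduces to OFFSET >> 3 == OFFSET / 8.  For a
   24-byte order, the odd part of the size is 3, whose inverse modulo
   2^32 is 0xAAAAAAAB, and shift == 3; e.g. for OFFSET == 48,
   (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */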

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
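
/* Illustrative note: because the union follows a single char, the
   compiler must pad the union out to its required alignment, so the
   offsetof expression equals the strictest alignment among
   HOST_WIDEST_INT and void *.  On typical hosts where HOST_WIDEST_INT
   is 64 bits, MAX_ALIGNMENT is 8.  */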


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X to next multiple of the page size */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
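
/* Illustrative examples (not in the original source): with f == 8,
   ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16; assuming a
   4K page size, PAGE_ALIGN (5000) == 8192.  */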

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
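
/* For example (illustrative): on a host with 64-bit longs, a page
   holding 1023 objects needs BITMAP_SIZE (1023 + 1) ==
   CEIL (1024, 64) * 8 == 128 bytes; callers pass num_objects + 1 so
   the one-past-the-end sentinel bit always fits.  */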

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
\f
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* macros is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
        {
          page = alloc_anon (NULL, G.pagesize, true);
          entries = 1;
        }

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = entries - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          if (!p->discarded)
            mapped_len += p->bytes;
          newprev = p;
          p = p->next;
        }
      if (len >= free_unit)
        {
          while (start_p != p)
            {
              next = start_p->next;
              free (start_p);
              start_p = next;
            }
          munmap (start, len);
          if (prev)
            prev->next = p;
          else
            G.free_pages = p;
          G.bytes_mapped -= mapped_len;
          continue;
        }
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
        {
          p = p->next;
          continue;
        }
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          p = p->next;
        }
      /* Give the page back to the kernel, but don't free the mapping.
         This avoids fragmentation in the virtual memory map of the
         process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so that the garbage
         collector's accounting is not disturbed unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
        {
          start_p->discarded = true;
          start_p = start_p->next;
        }
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
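
/* Illustrative note: as initialized, size_lookup[s] is simply
   ceil(log2(s)) clamped to a minimum of 3, e.g. size_lookup[24] == 5
   (a 32-byte object).  init_ggc below rewrites entries so that sizes
   fitting one of the extra orders map there instead; e.g. assuming
   MAX_ALIGNMENT is 8, requests of 17..24 bytes map to the 24-byte
   order.  */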

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
                        size_t *size_order,
                        size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
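
/* Usage sketch (illustrative, host-dependent): ggc_round_alloc_size (20)
   returns the object size of the smallest order that can hold 20
   bytes -- 24 if a 24-byte extra order exists (MAX_ALIGNMENT == 8),
   32 otherwise.  */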

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
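
/* Worked example (illustrative): for a 24-byte order, the loop above
   strips the factor 2^3, leaving e == 3 and size == 3.  The Newton
   iteration inv = inv * (2 - inv*size) doubles the number of correct
   low-order bits of the inverse on each pass, converging (for a
   32-bit size_t) to 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32).
   These values become DIV_MULT and DIV_SHIFT for OFFSET_TO_BIT
   above.  */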

/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, true);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
1799 p->num_free_objects = num_objects;
1800 memset (p->in_use_p, 0, bitmap_size);
1801
1802 /* Make sure the one-past-the-end bit is always set. */
1803 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1804 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1805 }
1806 }
1807 }
1808
1809 /* Free all empty pages. Partially empty pages need no attention
1810 because the `mark' bit doubles as an `unused' bit. */
1811
1812 static void
1813 sweep_pages (void)
1814 {
1815 unsigned order;
1816
1817 for (order = 2; order < NUM_ORDERS; order++)
1818 {
1819 /* The last page-entry to consider, regardless of entries
1820 placed at the end of the list. */
1821 page_entry * const last = G.page_tails[order];
1822
1823 size_t num_objects;
1824 size_t live_objects;
1825 page_entry *p, *previous;
1826 int done;
1827
1828 p = G.pages[order];
1829 if (p == NULL)
1830 continue;
1831
1832 previous = NULL;
1833 do
1834 {
1835 page_entry *next = p->next;
1836
1837 /* Loop until all entries have been examined. */
1838 done = (p == last);
1839
1840 num_objects = OBJECTS_IN_PAGE (p);
1841
1842 /* Add all live objects on this page to the count of
1843 allocated memory. */
1844 live_objects = num_objects - p->num_free_objects;
1845
1846 G.allocated += OBJECT_SIZE (order) * live_objects;
1847
1848 /* Only objects on pages in the topmost context should get
1849 collected. */
1850 if (p->context_depth < G.context_depth)
1851 ;
1852
1853 /* Remove the page if it's empty. */
1854 else if (live_objects == 0)
1855 {
1856 /* If P was the first page in the list, then NEXT
1857 becomes the new first page in the list, otherwise
1858 splice P out of the forward pointers. */
1859 if (! previous)
1860 G.pages[order] = next;
1861 else
1862 previous->next = next;
1863
1864 /* Splice P out of the back pointers too. */
1865 if (next)
1866 next->prev = previous;
1867
1868 /* Are we removing the last element? */
1869 if (p == G.page_tails[order])
1870 G.page_tails[order] = previous;
1871 free_page (p);
1872 p = previous;
1873 }
1874
1875 /* If the page is full, move it to the end. */
1876 else if (p->num_free_objects == 0)
1877 {
1878 /* Don't move it if it's already at the end. */
1879 if (p != G.page_tails[order])
1880 {
1881 /* Move p to the end of the list. */
1882 p->next = NULL;
1883 p->prev = G.page_tails[order];
1884 G.page_tails[order]->next = p;
1885
1886 /* Update the tail pointer... */
1887 G.page_tails[order] = p;
1888
1889 /* ... and the head pointer, if necessary. */
1890 if (! previous)
1891 G.pages[order] = next;
1892 else
1893 previous->next = next;
1894
1895 /* And update the backpointer in NEXT if necessary. */
1896 if (next)
1897 next->prev = previous;
1898
1899 p = previous;
1900 }
1901 }
1902
1903 /* If we've fallen through to here, it's a page in the
1904 topmost context that is neither full nor empty. Such a
1905 page must precede pages at lesser context depth in the
1906 list, so move it to the head. */
1907 else if (p != G.pages[order])
1908 {
1909 previous->next = p->next;
1910
1911 /* Update the backchain in the next node if it exists. */
1912 if (p->next)
1913 p->next->prev = previous;
1914
1915 /* Move P to the head of the list. */
1916 p->next = G.pages[order];
1917 p->prev = NULL;
1918 G.pages[order]->prev = p;
1919
1920 /* Update the head pointer. */
1921 G.pages[order] = p;
1922
1923 /* Are we moving the last element? */
1924 if (G.page_tails[order] == p)
1925 G.page_tails[order] = previous;
1926 p = previous;
1927 }
1928
1929 previous = p;
1930 p = next;
1931 }
1932 while (! done);
1933
1934 /* Now, restore the in_use_p vectors for any pages from contexts
1935 other than the current one. */
1936 for (p = G.pages[order]; p; p = p->next)
1937 if (p->context_depth != G.context_depth)
1938 ggc_recalculate_in_use_p (p);
1939 }
1940 }
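/* The resulting shape of each G.pages[order] list, sketched (not
   literal output): non-full pages in the current context at the head,
   then non-full pages from outer contexts, then full pages:

     G.pages[order] -> [partial, depth N] -> [partial, depth < N]
                    -> [full] -> [full] <- G.page_tails[order]

   Keeping partial pages first lets the allocator find a free slot
   without stepping over full pages.  */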
1941
1942 #ifdef ENABLE_GC_CHECKING
1943 /* Clobber all free objects. */
1944
1945 static void
1946 poison_pages (void)
1947 {
1948 unsigned order;
1949
1950 for (order = 2; order < NUM_ORDERS; order++)
1951 {
1952 size_t size = OBJECT_SIZE (order);
1953 page_entry *p;
1954
1955 for (p = G.pages[order]; p != NULL; p = p->next)
1956 {
1957 size_t num_objects;
1958 size_t i;
1959
1960 if (p->context_depth != G.context_depth)
1961 /* Since we don't do any collection for pages in pushed
1962 contexts, there's no need to do any poisoning. And
1963 besides, the IN_USE_P array isn't valid until we pop
1964 contexts. */
1965 continue;
1966
1967 num_objects = OBJECTS_IN_PAGE (p);
1968 for (i = 0; i < num_objects; i++)
1969 {
1970 size_t word, bit;
1971 word = i / HOST_BITS_PER_LONG;
1972 bit = i % HOST_BITS_PER_LONG;
1973 if (((p->in_use_p[word] >> bit) & 1) == 0)
1974 {
1975 char *object = p->page + i * size;
1976
1977 /* Keep poison-by-write when we expect to use Valgrind,
1978 so the exact same memory semantics are kept, in case
1979 there are memory errors. We override this request
1980 below. */
1981 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1982 size));
1983 memset (object, 0xa5, size);
1984
1985 /* Drop the handle to avoid handle leak. */
1986 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1987 }
1988 }
1989 }
1990 }
1991 }
1992 #else
1993 #define poison_pages()
1994 #endif
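/* The Valgrind dance above, in miniature (a sketch with an invented
   buffer; the client-request macros expand to no-ops when Valgrind
   support is compiled out):

     char buf[16];
     VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (buf, sizeof buf));
     memset (buf, 0xa5, sizeof buf);                - poison by writing
     VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (buf, sizeof buf));

   A stray read of the freed storage then shows up as 0xa5a5... garbage
   in plain runs, and as an invalid access under Valgrind.  */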
1995
1996 #ifdef ENABLE_GC_ALWAYS_COLLECT
1997 /* Validate that the reportedly free objects actually are. */
1998
1999 static void
2000 validate_free_objects (void)
2001 {
2002 struct free_object *f, *next, *still_free = NULL;
2003
2004 for (f = G.free_object_list; f ; f = next)
2005 {
2006 page_entry *pe = lookup_page_table_entry (f->object);
2007 size_t bit, word;
2008
2009 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2010 word = bit / HOST_BITS_PER_LONG;
2011 bit = bit % HOST_BITS_PER_LONG;
2012 next = f->next;
2013
2014 /* Make certain it isn't visible from any root. Notice that we
2015 do this check before sweep_pages merges save_in_use_p. */
2016 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2017
2018 /* If the object comes from an outer context, then retain the
2019 free_object entry, so that we can verify that the address
2020 isn't live on the stack in some outer context. */
2021 if (pe->context_depth != G.context_depth)
2022 {
2023 f->next = still_free;
2024 still_free = f;
2025 }
2026 else
2027 free (f);
2028 }
2029
2030 G.free_object_list = still_free;
2031 }
2032 #else
2033 #define validate_free_objects()
2034 #endif
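/* The bit arithmetic above, with invented numbers and assuming
   OFFSET_TO_BIT divides the byte offset by the object size: an object
   96 bytes into an order-3 page (8-byte objects) yields

     bit  = 96 / 8                   = 12
     word = 12 / HOST_BITS_PER_LONG  = 0    (LP64)
     bit  = 12 % HOST_BITS_PER_LONG  = 12

   so the gcc_assert checks that bit 12 of in_use_p[0] is clear.  */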
2035
2036 /* Top level mark-and-sweep routine. */
2037
2038 void
2039 ggc_collect (void)
2040 {
2041 /* Avoid frequent unnecessary work by skipping collection if the
2042 total allocations haven't expanded much since the last
2043 collection. */
2044 float allocated_last_gc =
2045 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2046
2047 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2048
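/* For instance (parameter values invented): with GGC_MIN_HEAPSIZE ==
   4096 (kB) and GGC_MIN_EXPAND == 30, allocated_last_gc is at least
   4 MB and min_expand is 1.2 MB, so collection is skipped until the
   heap has grown past roughly 5.2 MB.  */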
2049 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2050 return;
2051
2052 timevar_push (TV_GC);
2053 if (!quiet_flag)
2054 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2055 if (GGC_DEBUG_LEVEL >= 2)
2056 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2057
2058 /* Zero the total allocated bytes. This will be recalculated in the
2059 sweep phase. */
2060 G.allocated = 0;
2061
2062 /* Release the pages we freed the last time we collected, but didn't
2063 reuse in the interim. */
2064 release_pages ();
2065
2066 /* Indicate that we've seen collections at this context depth. */
2067 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2068
2069 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2070
2071 clear_marks ();
2072 ggc_mark_roots ();
2073 #ifdef GATHER_STATISTICS
2074 ggc_prune_overhead_list ();
2075 #endif
2076 poison_pages ();
2077 validate_free_objects ();
2078 sweep_pages ();
2079
2080 G.allocated_last_gc = G.allocated;
2081
2082 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2083
2084 timevar_pop (TV_GC);
2085
2086 if (!quiet_flag)
2087 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2088 if (GGC_DEBUG_LEVEL >= 2)
2089 fprintf (G.debug_file, "END COLLECTING\n");
2090 }
2091
2092 /* Print allocation statistics. */
2093 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2094 ? (x) \
2095 : ((x) < 1024*1024*10 \
2096 ? (x) / 1024 \
2097 : (x) / (1024*1024))))
2098 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
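/* For example: SCALE (8000) prints as 8000 with a blank label,
   SCALE (500000) as 488 with label 'k', and SCALE (50000000) as 47
   with label 'M'; the 10k and 10M cutoffs ensure scaled values keep
   at least two significant digits.  */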
2099
2100 void
2101 ggc_print_statistics (void)
2102 {
2103 struct ggc_statistics stats;
2104 unsigned int i;
2105 size_t total_overhead = 0;
2106
2107 /* Clear the statistics. */
2108 memset (&stats, 0, sizeof (stats));
2109
2110 /* Make sure collection will really occur. */
2111 G.allocated_last_gc = 0;
2112
2113 /* Collect and print the statistics common across collectors. */
2114 ggc_print_common_statistics (stderr, &stats);
2115
2116 /* Release free pages so that we will not count the bytes allocated
2117 there as part of the total allocated memory. */
2118 release_pages ();
2119
2120 /* Collect some information about the various sizes of
2121 allocation. */
2122 fprintf (stderr,
2123 "Memory still allocated at the end of the compilation process\n");
2124 fprintf (stderr, "%-5s %10s %10s %10s\n",
2125 "Size", "Allocated", "Used", "Overhead");
2126 for (i = 0; i < NUM_ORDERS; ++i)
2127 {
2128 page_entry *p;
2129 size_t allocated;
2130 size_t in_use;
2131 size_t overhead;
2132
2133 /* Skip empty entries. */
2134 if (!G.pages[i])
2135 continue;
2136
2137 overhead = allocated = in_use = 0;
2138
2139 /* Figure out the total number of bytes allocated for objects of
2140 this size, and how many of them are actually in use. Also figure
2141 out how much memory the page table is using. */
2142 for (p = G.pages[i]; p; p = p->next)
2143 {
2144 allocated += p->bytes;
2145 in_use +=
2146 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2147
2148 overhead += (sizeof (page_entry) - sizeof (long)
2149 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2150 }
2151 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2152 (unsigned long) OBJECT_SIZE (i),
2153 SCALE (allocated), STAT_LABEL (allocated),
2154 SCALE (in_use), STAT_LABEL (in_use),
2155 SCALE (overhead), STAT_LABEL (overhead));
2156 total_overhead += overhead;
2157 }
2158 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2159 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2160 SCALE (G.allocated), STAT_LABEL (G.allocated),
2161 SCALE (total_overhead), STAT_LABEL (total_overhead));
2162
2163 #ifdef GATHER_STATISTICS
2164 {
2165 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2166
2167 fprintf (stderr, "Total Overhead: %10lld\n",
2168 G.stats.total_overhead);
2169 fprintf (stderr, "Total Allocated: %10lld\n",
2170 G.stats.total_allocated);
2171
2172 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2173 G.stats.total_overhead_under32);
2174 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2175 G.stats.total_allocated_under32);
2176 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2177 G.stats.total_overhead_under64);
2178 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2179 G.stats.total_allocated_under64);
2180 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2181 G.stats.total_overhead_under128);
2182 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2183 G.stats.total_allocated_under128);
2184
2185 for (i = 0; i < NUM_ORDERS; i++)
2186 if (G.stats.total_allocated_per_order[i])
2187 {
2188 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2189 (unsigned long) OBJECT_SIZE (i),
2190 G.stats.total_overhead_per_order[i]);
2191 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2192 (unsigned long) OBJECT_SIZE (i),
2193 G.stats.total_allocated_per_order[i]);
2194 }
2195 }
2196 #endif
2197 }
2198 \f
2199 struct ggc_pch_ondisk
2200 {
2201 unsigned totals[NUM_ORDERS];
2202 };
2203
2204 struct ggc_pch_data
2205 {
2206 struct ggc_pch_ondisk d;
2207 size_t base[NUM_ORDERS];
2208 size_t written[NUM_ORDERS];
2209 };
2210
2211 struct ggc_pch_data *
2212 init_ggc_pch (void)
2213 {
2214 return XCNEW (struct ggc_pch_data);
2215 }
2216
2217 void
2218 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2219 size_t size, bool is_string ATTRIBUTE_UNUSED,
2220 enum gt_types_enum type ATTRIBUTE_UNUSED)
2221 {
2222 unsigned order;
2223
2224 if (size < NUM_SIZE_LOOKUP)
2225 order = size_lookup[size];
2226 else
2227 {
2228 order = 10;
2229 while (size > OBJECT_SIZE (order))
2230 order++;
2231 }
2232
2233 d->d.totals[order]++;
2234 }
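/* Size-to-order selection in miniature (sizes invented): a small
   request, say 24 bytes, resolves directly through size_lookup[24],
   while a 5000-byte request falls through to the loop:

     unsigned order = 10;
     while (5000 > OBJECT_SIZE (order))
       order++;

   which walks upward until an order's object size can hold it.  */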
2235
2236 size_t
2237 ggc_pch_total_size (struct ggc_pch_data *d)
2238 {
2239 size_t a = 0;
2240 unsigned i;
2241
2242 for (i = 0; i < NUM_ORDERS; i++)
2243 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2244 return a;
2245 }
2246
2247 void
2248 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2249 {
2250 size_t a = (size_t) base;
2251 unsigned i;
2252
2253 for (i = 0; i < NUM_ORDERS; i++)
2254 {
2255 d->base[i] = a;
2256 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2257 }
2258 }
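/* Base layout sketch (counts invented; 4096-byte pages and 4- and
   8-byte objects for orders 2 and 3, per the strategy comment at the
   top of the file): with totals[2] == 3 and totals[3] == 2,

     base[2] = addr            - 12 bytes used, aligned up to 4096
     base[3] = addr + 4096     - 16 bytes used, aligned up to 4096

   so each order owns a contiguous, page-aligned slice of the PCH
   image, and ggc_pch_alloc_object hands out addresses by bumping
   base[order].  */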
2259
2260
2261 char *
2262 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2263 size_t size, bool is_string ATTRIBUTE_UNUSED,
2264 enum gt_types_enum type ATTRIBUTE_UNUSED)
2265 {
2266 unsigned order;
2267 char *result;
2268
2269 if (size < NUM_SIZE_LOOKUP)
2270 order = size_lookup[size];
2271 else
2272 {
2273 order = 10;
2274 while (size > OBJECT_SIZE (order))
2275 order++;
2276 }
2277
2278 result = (char *) d->base[order];
2279 d->base[order] += OBJECT_SIZE (order);
2280 return result;
2281 }
2282
2283 void
2284 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2285 FILE *f ATTRIBUTE_UNUSED)
2286 {
2287 /* Nothing to do. */
2288 }
2289
2290 void
2291 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2292 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2293 size_t size, bool is_string ATTRIBUTE_UNUSED)
2294 {
2295 unsigned order;
2296 static const char emptyBytes[256] = { 0 };
2297
2298 if (size < NUM_SIZE_LOOKUP)
2299 order = size_lookup[size];
2300 else
2301 {
2302 order = 10;
2303 while (size > OBJECT_SIZE (order))
2304 order++;
2305 }
2306
2307 if (fwrite (x, size, 1, f) != 1)
2308 fatal_error ("can%'t write PCH file: %m");
2309
2310 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2311 object out to OBJECT_SIZE(order). This happens for strings. */
2312
2313 if (size != OBJECT_SIZE (order))
2314 {
2315 unsigned padding = OBJECT_SIZE (order) - size;
2316
2317 /* To speed small writes, we use a nulled-out array that's larger
2318 than most padding requests as the source for our null bytes. This
2319 permits us to do the padding with fwrite() rather than fseek(), and
2320 limits the chance the OS may try to flush any outstanding writes. */
2321 if (padding <= sizeof (emptyBytes))
2322 {
2323 if (fwrite (emptyBytes, 1, padding, f) != padding)
2324 fatal_error ("can%'t write PCH file");
2325 }
2326 else
2327 {
2328 /* Larger than our buffer? Just default to fseek. */
2329 if (fseek (f, padding, SEEK_CUR) != 0)
2330 fatal_error ("can%'t write PCH file");
2331 }
2332 }
2333
2334 d->written[order]++;
2335 if (d->written[order] == d->d.totals[order]
2336 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2337 G.pagesize),
2338 SEEK_CUR) != 0)
2339 fatal_error ("can%'t write PCH file: %m");
2340 }
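/* Padding in practice (sizes invented): a 20-byte string written into
   an order with OBJECT_SIZE == 32 emits the 20 payload bytes followed
   by 12 bytes copied from emptyBytes; a shortfall larger than 256
   bytes would be skipped with fseek instead.  The final fseek above
   then rounds each order's region up to a page boundary, matching the
   PAGE_ALIGN arithmetic in ggc_pch_total_size.  */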
2341
2342 void
2343 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2344 {
2345 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2346 fatal_error ("can%'t write PCH file: %m");
2347 free (d);
2348 }
2349
2350 /* Move the PCH PTE entries just added at the end of by_depth to
2351 the front. */
2352
2353 static void
2354 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2355 {
2356 unsigned i;
2357
2358 /* First, we swap the new entries to the front of the varrays. */
2359 page_entry **new_by_depth;
2360 unsigned long **new_save_in_use;
2361
2362 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2363 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2364
2365 memcpy (&new_by_depth[0],
2366 &G.by_depth[count_old_page_tables],
2367 count_new_page_tables * sizeof (void *));
2368 memcpy (&new_by_depth[count_new_page_tables],
2369 &G.by_depth[0],
2370 count_old_page_tables * sizeof (void *));
2371 memcpy (&new_save_in_use[0],
2372 &G.save_in_use[count_old_page_tables],
2373 count_new_page_tables * sizeof (void *));
2374 memcpy (&new_save_in_use[count_new_page_tables],
2375 &G.save_in_use[0],
2376 count_old_page_tables * sizeof (void *));
2377
2378 free (G.by_depth);
2379 free (G.save_in_use);
2380
2381 G.by_depth = new_by_depth;
2382 G.save_in_use = new_save_in_use;
2383
2384 /* Now update all the index_by_depth fields. */
2385 for (i = G.by_depth_in_use; i > 0; --i)
2386 {
2387 page_entry *p = G.by_depth[i-1];
2388 p->index_by_depth = i-1;
2389 }
2390
2391 /* And last, we update the depth pointers in G.depth. The first
2392 entry is already 0, and context 0 entries always start at index
2393 0, so there is nothing to update in the first slot. We need a
2394 second slot only if we have old PTEs; if we do, they start at
2395 index count_new_page_tables. */
2396 if (count_old_page_tables)
2397 push_depth (count_new_page_tables);
2398 }
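/* Effect of the copies above, with invented counts old == 2 and
   new == 3: by_depth goes from

     [o0 o1 | n0 n1 n2]   to   [n0 n1 n2 | o0 o1]

   so the PCH entries (context 0) come first, and the surviving old
   entries now begin at index count_new_page_tables == 3, which is the
   depth pushed for them at the end.  */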
2399
2400 void
2401 ggc_pch_read (FILE *f, void *addr)
2402 {
2403 struct ggc_pch_ondisk d;
2404 unsigned i;
2405 char *offs = (char *) addr;
2406 unsigned long count_old_page_tables;
2407 unsigned long count_new_page_tables;
2408
2409 count_old_page_tables = G.by_depth_in_use;
2410
2411 /* We've just read in a PCH file. So, every object that used to be
2412 allocated is now free. */
2413 clear_marks ();
2414 #ifdef ENABLE_GC_CHECKING
2415 poison_pages ();
2416 #endif
2417 /* Since we free all the allocated objects, the free list becomes
2418 useless. Validate it now, which will also clear it. */
2419 validate_free_objects ();
2420
2421 /* No object read from a PCH file should ever be freed. So, set the
2422 context depth to 1, and set the depth of all the currently-allocated
2423 pages to be 1 too. PCH pages will have depth 0. */
2424 gcc_assert (!G.context_depth);
2425 G.context_depth = 1;
2426 for (i = 0; i < NUM_ORDERS; i++)
2427 {
2428 page_entry *p;
2429 for (p = G.pages[i]; p != NULL; p = p->next)
2430 p->context_depth = G.context_depth;
2431 }
2432
2433 /* Allocate the appropriate page-table entries for the pages read from
2434 the PCH file. */
2435 if (fread (&d, sizeof (d), 1, f) != 1)
2436 fatal_error ("can%'t read PCH file: %m");
2437
2438 for (i = 0; i < NUM_ORDERS; i++)
2439 {
2440 struct page_entry *entry;
2441 char *pte;
2442 size_t bytes;
2443 size_t num_objs;
2444 size_t j;
2445
2446 if (d.totals[i] == 0)
2447 continue;
2448
2449 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2450 num_objs = bytes / OBJECT_SIZE (i);
2451 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2452 - sizeof (long)
2453 + BITMAP_SIZE (num_objs + 1)));
2454 entry->bytes = bytes;
2455 entry->page = offs;
2456 entry->context_depth = 0;
2457 offs += bytes;
2458 entry->num_free_objects = 0;
2459 entry->order = i;
2460
2461 for (j = 0;
2462 j + HOST_BITS_PER_LONG <= num_objs + 1;
2463 j += HOST_BITS_PER_LONG)
2464 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2465 for (; j < num_objs + 1; j++)
2466 entry->in_use_p[j / HOST_BITS_PER_LONG]
2467 |= 1L << (j % HOST_BITS_PER_LONG);
2468
2469 for (pte = entry->page;
2470 pte < entry->page + entry->bytes;
2471 pte += G.pagesize)
2472 set_page_table_entry (pte, entry);
2473
2474 if (G.page_tails[i] != NULL)
2475 G.page_tails[i]->next = entry;
2476 else
2477 G.pages[i] = entry;
2478 G.page_tails[i] = entry;
2479
2480 /* We start off by just adding all the new information to the
2481 end of the varrays; later, we will move the new information
2482 to the front of the varrays, as the PCH page tables are at
2483 context 0. */
2484 push_by_depth (entry, 0);
2485 }
2486
2487 /* Now, we update the various data structures that speed page table
2488 handling. */
2489 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2490
2491 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2492
2493 /* Update the statistics. */
2494 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2495 }
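/* The bitmap loops above, in miniature (num_objs invented): for
   num_objs == 70 on an LP64 host, the first loop stores all-ones into
   word 0 (positions 0-63), and the second sets bits 0-6 of word 1
   (positions 64-69 plus the past-the-end sentinel), leaving every PCH
   object permanently marked in use.  */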
2496
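/* Placeholder zone objects, presumably kept so that code written
   against the zone collector's interface still links when the page
   collector is in use.  */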
2497 struct alloc_zone
2498 {
2499 int dummy;
2500 };
2501
2502 struct alloc_zone rtl_zone;
2503 struct alloc_zone tree_zone;
2504 struct alloc_zone tree_id_zone;