/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alias.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

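/* Worked example (editorial, not in the original sources): under the
   pure power-of-two scheme described above, a 24-byte request would be
   rounded up to 32 bytes (order 5); the extra_order_size_table below
   refines this so that such common odd sizes get denser pages of their
   own.  */
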
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

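/* Worked example (editorial): with 4K pages (G.lg_pagesize == 12),
   PAGE_L2_BITS is 32 - 8 - 12 == 12.  For the pointer 0x12345678,
   LOOKUP_L1 yields (0x12345678 >> 24) & 0xff  == 0x12, and
   LOOKUP_L2 yields (0x12345678 >> 12) & 0xfff == 0x345, so the page
   entry lives at base[0x12][0x345].  */
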
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

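/* Worked example (editorial): for Z == 24 == 3 * 2^3, DIV_SHIFT is 3
   and DIV_MULT is the multiplicative inverse of 3 in size_t arithmetic
   (0x...AAAB).  For the offset O == 48, O * M wraps around to 16, and
   16 >> 3 == 2 == 48 / 24, with no division instruction needed.  */
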
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

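/* Editorial note: this is the classic offsetof trick -- the padding
   the compiler inserts between the char and the union equals the
   union's required alignment.  On a typical LP64 host MAX_ALIGNMENT
   evaluates to 8.  */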

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)

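/* Worked example (editorial): ROUND_UP_VALUE (10, 8) is
   7 - ((7 + 10) % 8) == 7 - 1 == 6, and 10 + 6 == 16 is the next
   multiple of 8.  PAGE_ALIGN rounds sizes up to G.pagesize in the
   same way.  */
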
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
        m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};

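/* Editorial note: a vec_finalizer registered for N objects of size S
   at ADDR calls F on ADDR, ADDR + S, ..., ADDR + (N - 1) * S once the
   block becomes unreachable; see add_finalizer and
   ggc_handle_finalizers below.  */
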
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The stream used for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index into by_depth where the
     given depth starts.  It is indexed by the depth we are interested
     in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

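/* Worked example (editorial): with 4K pages and 8-byte objects
   (order 3) there are 512 objects per page, and alloc_page requests
   BITMAP_SIZE (512 + 1) == CEIL (513, 64) * 8 == 72 bytes on an LP64
   host -- the "+ 1" leaves room for the one-past-the-end sentinel
   bit.  */
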
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
\f
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF (if
   non-null).  The ifdef structure here is intended to cause a compile
   error unless exactly one of the HAVE_* macros is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
        {
          page = alloc_anon (NULL, G.pagesize, true);
          entries = 1;
        }

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = entries - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          if (!p->discarded)
            mapped_len += p->bytes;
          newprev = p;
          p = p->next;
        }
      if (len >= free_unit)
        {
          while (start_p != p)
            {
              next = start_p->next;
              free (start_p);
              start_p = next;
            }
          munmap (start, len);
          if (prev)
            prev->next = p;
          else
            G.free_pages = p;
          G.bytes_mapped -= mapped_len;
          continue;
        }
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
        {
          p = p->next;
          continue;
        }
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          p = p->next;
        }
      /* Give the page back to the kernel, but don't free the mapping.
         This avoids fragmentation in the virtual memory map of the
         process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so that the garbage
         collector is not triggered unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
        {
          start_p->discarded = true;
          start_p = start_p->next;
        }
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

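/* Example (editorial): size_lookup[9] is 4, since a 9-byte request is
   satisfied from 16-byte (order 4) pages.  Note that init_ggc below
   rewrites entries of this table so that sizes covered by
   extra_order_size_table map to those denser special orders instead.  */
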
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
                        size_t *size_order,
                        size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

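/* Usage sketch (editorial): after init_ggc, and assuming MAX_ALIGNMENT
   is 8, ggc_round_alloc_size (20) returns 24 (the MAX_ALIGNMENT * 3
   extra order) and ggc_round_alloc_size (100) returns 104
   (MAX_ALIGNMENT * 13), rather than the powers of two 32 and 128.  */
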
/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths; if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                         result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
        {
          G.stats.total_overhead_under32 += overhead;
          G.stats.total_allocated_under32 += object_size;
        }
      if (size <= 64)
        {
          G.stats.total_overhead_under64 += overhead;
          G.stats.total_allocated_under64 += object_size;
        }
      if (size <= 128)
        {
          G.stats.total_overhead_under128 += overhead;
          G.stats.total_allocated_under128 += object_size;
        }
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is alloced.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

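/* Worked example (editorial): for OBJECT_SIZE == 24 the first loop
   strips the factor 2^3, leaving size == 3 and e == 3.  The second
   loop is a Newton/Hensel iteration that doubles the number of correct
   low-order bits of INV each round, converging to the inverse of 3 in
   size_t arithmetic (0x...AAAB); compare the worked example next to
   OFFSET_TO_BIT above.  */
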
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  static bool init_p = false;
  unsigned order;

  if (init_p)
    return;
  init_p = true;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, true);
        gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  /* Allocate space for the depth 0 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 1);
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

1879 /* Reset the number of free objects and clear the in-use
1880 bits. These will be adjusted by mark_obj. */
1881 p->num_free_objects = num_objects;
1882 memset (p->in_use_p, 0, bitmap_size);
1883
1884 /* Make sure the one-past-the-end bit is always set. */
1885 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1886 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1887 }
1888 }
1889 }
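
/* Concretely (hypothetical page): with 100 objects on a 64-bit host, the
   statement above sets bit 100 % 64 == 36 in word 100 / 64 == 1. Marking
   only ever ORs bits in, so this past-the-end bit survives the entire
   mark phase. */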
1890
1891 /* Check whether any blocks with a registered finalizer have become unmarked.
1892 If so, run the finalizer and unregister it, because the block is about to be
1893 freed. Note that no guarantee is made about the order in which finalizers
1894 run, so touching other objects in GC memory is extremely unwise. */
1895
1896 static void
1897 ggc_handle_finalizers ()
1898 {
1899 unsigned dlen = G.finalizers.length();
1900 for (unsigned d = G.context_depth; d < dlen; ++d)
1901 {
1902 vec<finalizer> &v = G.finalizers[d];
1903 unsigned length = v.length ();
1904 for (unsigned int i = 0; i < length;)
1905 {
1906 finalizer &f = v[i];
1907 if (!ggc_marked_p (f.addr ()))
1908 {
1909 f.call ();
1910 v.unordered_remove (i);
1911 length--;
1912 }
1913 else
1914 i++;
1915 }
1916 }
1917
1918 gcc_assert (dlen == G.vec_finalizers.length());
1919 for (unsigned d = G.context_depth; d < dlen; ++d)
1920 {
1921 vec<vec_finalizer> &vv = G.vec_finalizers[d];
1922 unsigned length = vv.length ();
1923 for (unsigned int i = 0; i < length;)
1924 {
1925 vec_finalizer &f = vv[i];
1926 if (!ggc_marked_p (f.addr ()))
1927 {
1928 f.call ();
1929 vv.unordered_remove (i);
1930 length--;
1931 }
1932 else
1933 i++;
1934 }
1935 }
1936 }
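
/* A note on the removal loops above: vec::unordered_remove fills slot I
   with the vector's last element, so after running a finalizer we must
   re-examine slot I rather than advance -- hence LENGTH is decremented
   while I is not. */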
1937
1938 /* Free all empty pages. Partially empty pages need no attention
1939 because the `mark' bit doubles as an `unused' bit. */
1940
1941 static void
1942 sweep_pages (void)
1943 {
1944 unsigned order;
1945
1946 for (order = 2; order < NUM_ORDERS; order++)
1947 {
1948 /* The last page-entry to consider, regardless of entries
1949 placed at the end of the list. */
1950 page_entry * const last = G.page_tails[order];
1951
1952 size_t num_objects;
1953 size_t live_objects;
1954 page_entry *p, *previous;
1955 int done;
1956
1957 p = G.pages[order];
1958 if (p == NULL)
1959 continue;
1960
1961 previous = NULL;
1962 do
1963 {
1964 page_entry *next = p->next;
1965
1966 /* Loop until all entries have been examined. */
1967 done = (p == last);
1968
1969 num_objects = OBJECTS_IN_PAGE (p);
1970
1971 /* Add all live objects on this page to the count of
1972 allocated memory. */
1973 live_objects = num_objects - p->num_free_objects;
1974
1975 G.allocated += OBJECT_SIZE (order) * live_objects;
1976
1977 /* Only objects on pages in the topmost context should get
1978 collected. */
1979 if (p->context_depth < G.context_depth)
1980 ;
1981
1982 /* Remove the page if it's empty. */
1983 else if (live_objects == 0)
1984 {
1985 /* If P was the first page in the list, then NEXT
1986 becomes the new first page in the list, otherwise
1987 splice P out of the forward pointers. */
1988 if (! previous)
1989 G.pages[order] = next;
1990 else
1991 previous->next = next;
1992
1993 /* Splice P out of the back pointers too. */
1994 if (next)
1995 next->prev = previous;
1996
1997 /* Are we removing the last element? */
1998 if (p == G.page_tails[order])
1999 G.page_tails[order] = previous;
2000 free_page (p);
2001 p = previous;
2002 }
2003
2004 /* If the page is full, move it to the end. */
2005 else if (p->num_free_objects == 0)
2006 {
2007 /* Don't move it if it's already at the end. */
2008 if (p != G.page_tails[order])
2009 {
2010 /* Move p to the end of the list. */
2011 p->next = NULL;
2012 p->prev = G.page_tails[order];
2013 G.page_tails[order]->next = p;
2014
2015 /* Update the tail pointer... */
2016 G.page_tails[order] = p;
2017
2018 /* ... and the head pointer, if necessary. */
2019 if (! previous)
2020 G.pages[order] = next;
2021 else
2022 previous->next = next;
2023
2024 /* And update the backpointer in NEXT if necessary. */
2025 if (next)
2026 next->prev = previous;
2027
2028 p = previous;
2029 }
2030 }
2031
2032 /* If we've fallen through to here, it's a page in the
2033 topmost context that is neither full nor empty. Such a
2034 page must precede pages at lesser context depth in the
2035 list, so move it to the head. */
2036 else if (p != G.pages[order])
2037 {
2038 previous->next = p->next;
2039
2040 /* Update the backchain in the next node if it exists. */
2041 if (p->next)
2042 p->next->prev = previous;
2043
2044 /* Move P to the head of the list. */
2045 p->next = G.pages[order];
2046 p->prev = NULL;
2047 G.pages[order]->prev = p;
2048
2049 /* Update the head pointer. */
2050 G.pages[order] = p;
2051
2052 /* Are we moving the last element? */
2053 if (G.page_tails[order] == p)
2054 G.page_tails[order] = previous;
2055 p = previous;
2056 }
2057
2058 previous = p;
2059 p = next;
2060 }
2061 while (! done);
2062
2063 /* Now, restore the in_use_p vectors for any pages from contexts
2064 other than the current one. */
2065 for (p = G.pages[order]; p; p = p->next)
2066 if (p->context_depth != G.context_depth)
2067 ggc_recalculate_in_use_p (p);
2068 }
2069 }
2070
2071 #ifdef ENABLE_GC_CHECKING
2072 /* Clobber all free objects. */
2073
2074 static void
2075 poison_pages (void)
2076 {
2077 unsigned order;
2078
2079 for (order = 2; order < NUM_ORDERS; order++)
2080 {
2081 size_t size = OBJECT_SIZE (order);
2082 page_entry *p;
2083
2084 for (p = G.pages[order]; p != NULL; p = p->next)
2085 {
2086 size_t num_objects;
2087 size_t i;
2088
2089 if (p->context_depth != G.context_depth)
2090 /* Since we don't do any collection for pages in pushed
2091 contexts, there's no need to do any poisoning. And
2092 besides, the IN_USE_P array isn't valid until we pop
2093 contexts. */
2094 continue;
2095
2096 num_objects = OBJECTS_IN_PAGE (p);
2097 for (i = 0; i < num_objects; i++)
2098 {
2099 size_t word, bit;
2100 word = i / HOST_BITS_PER_LONG;
2101 bit = i % HOST_BITS_PER_LONG;
2102 if (((p->in_use_p[word] >> bit) & 1) == 0)
2103 {
2104 char *object = p->page + i * size;
2105
2106 /* Keep poison-by-write when we expect to use Valgrind,
2107 so the exact same memory semantics are kept, in case
2108 there are memory errors. We override this request
2109 below. */
2110 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2111 size));
2112 memset (object, 0xa5, size);
2113
2114 /* Drop the handle to avoid handle leak. */
2115 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2116 }
2117 }
2118 }
2119 }
2120 }
2121 #else
2122 #define poison_pages()
2123 #endif
2124
2125 #ifdef ENABLE_GC_ALWAYS_COLLECT
2126 /* Validate that the reportedly free objects actually are. */
2127
2128 static void
2129 validate_free_objects (void)
2130 {
2131 struct free_object *f, *next, *still_free = NULL;
2132
2133 for (f = G.free_object_list; f ; f = next)
2134 {
2135 page_entry *pe = lookup_page_table_entry (f->object);
2136 size_t bit, word;
2137
2138 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2139 word = bit / HOST_BITS_PER_LONG;
2140 bit = bit % HOST_BITS_PER_LONG;
2141 next = f->next;
2142
2143 /* Make certain it isn't visible from any root. Notice that we
2144 do this check before sweep_pages merges save_in_use_p. */
2145 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2146
2147 /* If the object comes from an outer context, then retain the
2148 free_object entry, so that we can verify that the address
2149 isn't live on the stack in some outer context. */
2150 if (pe->context_depth != G.context_depth)
2151 {
2152 f->next = still_free;
2153 still_free = f;
2154 }
2155 else
2156 free (f);
2157 }
2158
2159 G.free_object_list = still_free;
2160 }
2161 #else
2162 #define validate_free_objects()
2163 #endif
2164
2165 /* Top level mark-and-sweep routine. */
2166
2167 void
2168 ggc_collect (void)
2169 {
2170 /* Avoid frequent unnecessary work by skipping collection if the
2171 total allocations haven't expanded much since the last
2172 collection. */
2173 float allocated_last_gc =
2174 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2175
2176 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2177 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2178 return;
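
/* With hypothetical numbers: if the last collection left 8 MB live,
   GGC_MIN_HEAPSIZE is 4096 kB and GGC_MIN_EXPAND is 30, the next
   collection is deferred until allocation passes 8 MB + 2.4 MB. */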
2179
2180 timevar_push (TV_GC);
2181 if (!quiet_flag)
2182 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2183 if (GGC_DEBUG_LEVEL >= 2)
2184 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2185
2186 /* Zero the total allocated bytes. This will be recalculated in the
2187 sweep phase. */
2188 G.allocated = 0;
2189
2190 /* Release the pages we freed the last time we collected, but didn't
2191 reuse in the interim. */
2192 release_pages ();
2193
2194 /* Indicate that we've seen collections at this context depth. */
2195 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2196
2197 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2198
2199 in_gc = true;
2200 clear_marks ();
2201 ggc_mark_roots ();
2202 ggc_handle_finalizers ();
2203
2204 if (GATHER_STATISTICS)
2205 ggc_prune_overhead_list ();
2206
2207 poison_pages ();
2208 validate_free_objects ();
2209 sweep_pages ();
2210
2211 in_gc = false;
2212 G.allocated_last_gc = G.allocated;
2213
2214 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2215
2216 timevar_pop (TV_GC);
2217
2218 if (!quiet_flag)
2219 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2220 if (GGC_DEBUG_LEVEL >= 2)
2221 fprintf (G.debug_file, "END COLLECTING\n");
2222 }
2223
2224 /* Assume that all GGC memory is reachable and grow the limits for the next
2225 collection. With checking enabled, trigger a collection so that -Q
2226 compilation output reports how much memory really is reachable. */
2227
2228 void
2229 ggc_grow (void)
2230 {
2231 if (!flag_checking)
2232 G.allocated_last_gc = MAX (G.allocated_last_gc,
2233 G.allocated);
2234 else
2235 ggc_collect ();
2236 if (!quiet_flag)
2237 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2238 }
2239
2240 /* Print allocation statistics. */
2241 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2242 ? (x) \
2243 : ((x) < 1024*1024*10 \
2244 ? (x) / 1024 \
2245 : (x) / (1024*1024))))
2246 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
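
/* For example, 4096 prints as "4096 ", 2097152 prints as "2048k", and
   268435456 prints as "256M". */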
2247
2248 void
2249 ggc_print_statistics (void)
2250 {
2251 struct ggc_statistics stats;
2252 unsigned int i;
2253 size_t total_overhead = 0;
2254
2255 /* Clear the statistics. */
2256 memset (&stats, 0, sizeof (stats));
2257
2258 /* Make sure collection will really occur. */
2259 G.allocated_last_gc = 0;
2260
2261 /* Collect and print the statistics common across collectors. */
2262 ggc_print_common_statistics (stderr, &stats);
2263
2264 /* Release free pages so that we will not count the bytes allocated
2265 there as part of the total allocated memory. */
2266 release_pages ();
2267
2268 /* Collect some information about the various sizes of
2269 allocation. */
2270 fprintf (stderr,
2271 "Memory still allocated at the end of the compilation process\n");
2272 fprintf (stderr, "%-8s %10s %10s %10s\n",
2273 "Size", "Allocated", "Used", "Overhead");
2274 for (i = 0; i < NUM_ORDERS; ++i)
2275 {
2276 page_entry *p;
2277 size_t allocated;
2278 size_t in_use;
2279 size_t overhead;
2280
2281 /* Skip empty entries. */
2282 if (!G.pages[i])
2283 continue;
2284
2285 overhead = allocated = in_use = 0;
2286
2287 /* Figure out the total number of bytes allocated for objects of
2288 this size, and how many of them are actually in use. Also figure
2289 out how much memory the page table is using. */
2290 for (p = G.pages[i]; p; p = p->next)
2291 {
2292 allocated += p->bytes;
2293 in_use +=
2294 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2295
2296 overhead += (sizeof (page_entry) - sizeof (long)
2297 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2298 }
2299 fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
2300 (unsigned long) OBJECT_SIZE (i),
2301 SCALE (allocated), STAT_LABEL (allocated),
2302 SCALE (in_use), STAT_LABEL (in_use),
2303 SCALE (overhead), STAT_LABEL (overhead));
2304 total_overhead += overhead;
2305 }
2306 fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
2307 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2308 SCALE (G.allocated), STAT_LABEL (G.allocated),
2309 SCALE (total_overhead), STAT_LABEL (total_overhead));
2310
2311 if (GATHER_STATISTICS)
2312 {
2313 fprintf (stderr, "\nTotal allocations and overheads during "
2314 "the compilation process\n");
2315
2316 fprintf (stderr, "Total Overhead: %10"
2317 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2318 fprintf (stderr, "Total Allocated: %10"
2319 HOST_LONG_LONG_FORMAT "d\n",
2320 G.stats.total_allocated);
2321
2322 fprintf (stderr, "Total Overhead under 32B: %10"
2323 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2324 fprintf (stderr, "Total Allocated under 32B: %10"
2325 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2326 fprintf (stderr, "Total Overhead under 64B: %10"
2327 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2328 fprintf (stderr, "Total Allocated under 64B: %10"
2329 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2330 fprintf (stderr, "Total Overhead under 128B: %10"
2331 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2332 fprintf (stderr, "Total Allocated under 128B: %10"
2333 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
2334
2335 for (i = 0; i < NUM_ORDERS; i++)
2336 if (G.stats.total_allocated_per_order[i])
2337 {
2338 fprintf (stderr, "Total Overhead page size %9lu: %10"
2339 HOST_LONG_LONG_FORMAT "d\n",
2340 (unsigned long) OBJECT_SIZE (i),
2341 G.stats.total_overhead_per_order[i]);
2342 fprintf (stderr, "Total Allocated page size %9lu: %10"
2343 HOST_LONG_LONG_FORMAT "d\n",
2344 (unsigned long) OBJECT_SIZE (i),
2345 G.stats.total_allocated_per_order[i]);
2346 }
2347 }
2348 }
2349 \f
2350 struct ggc_pch_ondisk
2351 {
2352 unsigned totals[NUM_ORDERS];
2353 };
2354
2355 struct ggc_pch_data
2356 {
2357 struct ggc_pch_ondisk d;
2358 uintptr_t base[NUM_ORDERS];
2359 size_t written[NUM_ORDERS];
2360 };
2361
2362 struct ggc_pch_data *
2363 init_ggc_pch (void)
2364 {
2365 return XCNEW (struct ggc_pch_data);
2366 }
2367
2368 void
2369 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2370 size_t size, bool is_string ATTRIBUTE_UNUSED)
2371 {
2372 unsigned order;
2373
2374 if (size < NUM_SIZE_LOOKUP)
2375 order = size_lookup[size];
2376 else
2377 {
2378 order = 10;
2379 while (size > OBJECT_SIZE (order))
2380 order++;
2381 }
2382
2383 d->d.totals[order]++;
2384 }
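
/* A hypothetical walk-through of the slow path above: a 3000-byte object
   lies beyond the size_lookup table, so the linear search starts at
   order 10 and stops at order 12, since OBJECT_SIZE (12) == 4096 is the
   first size large enough. */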
2385
2386 size_t
2387 ggc_pch_total_size (struct ggc_pch_data *d)
2388 {
2389 size_t a = 0;
2390 unsigned i;
2391
2392 for (i = 0; i < NUM_ORDERS; i++)
2393 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2394 return a;
2395 }
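
/* E.g., hypothetically: 100 objects of order 5 need 100 * 32 == 3200
   bytes, which PAGE_ALIGN rounds up to 4096 on a 4K-page host, so each
   order's region ends on a page boundary. */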
2396
2397 void
2398 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2399 {
2400 uintptr_t a = (uintptr_t) base;
2401 unsigned i;
2402
2403 for (i = 0; i < NUM_ORDERS; i++)
2404 {
2405 d->base[i] = a;
2406 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2407 }
2408 }
2409
2410
2411 char *
2412 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2413 size_t size, bool is_string ATTRIBUTE_UNUSED)
2414 {
2415 unsigned order;
2416 char *result;
2417
2418 if (size < NUM_SIZE_LOOKUP)
2419 order = size_lookup[size];
2420 else
2421 {
2422 order = 10;
2423 while (size > OBJECT_SIZE (order))
2424 order++;
2425 }
2426
2427 result = (char *) d->base[order];
2428 d->base[order] += OBJECT_SIZE (order);
2429 return result;
2430 }
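
/* The bases act as per-order bump pointers: ggc_pch_this_base carved one
   contiguous region per order out of the PCH address range, and each
   call above simply advances that order's cursor by OBJECT_SIZE (order). */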
2431
2432 void
2433 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2434 FILE *f ATTRIBUTE_UNUSED)
2435 {
2436 /* Nothing to do. */
2437 }
2438
2439 void
2440 ggc_pch_write_object (struct ggc_pch_data *d,
2441 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2442 size_t size, bool is_string ATTRIBUTE_UNUSED)
2443 {
2444 unsigned order;
2445 static const char emptyBytes[256] = { 0 };
2446
2447 if (size < NUM_SIZE_LOOKUP)
2448 order = size_lookup[size];
2449 else
2450 {
2451 order = 10;
2452 while (size > OBJECT_SIZE (order))
2453 order++;
2454 }
2455
2456 if (fwrite (x, size, 1, f) != 1)
2457 fatal_error (input_location, "can%'t write PCH file: %m");
2458
2459 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2460 object out to OBJECT_SIZE(order). This happens for strings. */
2461
2462 if (size != OBJECT_SIZE (order))
2463 {
2464 unsigned padding = OBJECT_SIZE (order) - size;
2465
2466 /* To speed small writes, we use a nulled-out array that's larger
2467 than most padding requests as the source for our null bytes. This
2468 permits us to do the padding with fwrite() rather than fseek(), and
2469 limits the chance the OS may try to flush any outstanding writes. */
2470 if (padding <= sizeof (emptyBytes))
2471 {
2472 if (fwrite (emptyBytes, 1, padding, f) != padding)
2473 fatal_error (input_location, "can%'t write PCH file");
2474 }
2475 else
2476 {
2477 /* Larger than our buffer? Just default to fseek. */
2478 if (fseek (f, padding, SEEK_CUR) != 0)
2479 fatal_error (input_location, "can%'t write PCH file");
2480 }
2481 }
2482
2483 d->written[order]++;
2484 if (d->written[order] == d->d.totals[order]
2485 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2486 G.pagesize),
2487 SEEK_CUR) != 0)
2488 fatal_error (input_location, "can%'t write PCH file: %m");
2489 }
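
/* A worked example with hypothetical sizes: a 300-byte string placed in
   a 512-byte order needs 212 bytes of padding; 212 <= sizeof (emptyBytes),
   so the padding goes out as a single fwrite from the zeroed buffer
   rather than an fseek. */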
2490
2491 void
2492 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2493 {
2494 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2495 fatal_error (input_location, "can%'t write PCH file: %m");
2496 free (d);
2497 }
2498
2499 /* Move the PCH PTE entries just appended to the end of by_depth to the
2500 front. */
2501
2502 static void
2503 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2504 {
2505 unsigned i;
2506
2507 /* First, we swap the new entries to the front of the varrays. */
2508 page_entry **new_by_depth;
2509 unsigned long **new_save_in_use;
2510
2511 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2512 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2513
2514 memcpy (&new_by_depth[0],
2515 &G.by_depth[count_old_page_tables],
2516 count_new_page_tables * sizeof (void *));
2517 memcpy (&new_by_depth[count_new_page_tables],
2518 &G.by_depth[0],
2519 count_old_page_tables * sizeof (void *));
2520 memcpy (&new_save_in_use[0],
2521 &G.save_in_use[count_old_page_tables],
2522 count_new_page_tables * sizeof (void *));
2523 memcpy (&new_save_in_use[count_new_page_tables],
2524 &G.save_in_use[0],
2525 count_old_page_tables * sizeof (void *));
2526
2527 free (G.by_depth);
2528 free (G.save_in_use);
2529
2530 G.by_depth = new_by_depth;
2531 G.save_in_use = new_save_in_use;
2532
2533 /* Now update all the index_by_depth fields. */
2534 for (i = G.by_depth_in_use; i > 0; --i)
2535 {
2536 page_entry *p = G.by_depth[i-1];
2537 p->index_by_depth = i-1;
2538 }
2539
2540 /* And last, we update the depth pointers in G.depth. The first
2541 entry is already 0, and context 0 entries always start at index
2542 0, so there is nothing to update in the first slot. We need a
2543 second slot only if we have old PTEs, and if we do, they start
2544 at index count_new_page_tables. */
2545 if (count_old_page_tables)
2546 push_depth (count_new_page_tables);
2547 }
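
/* An illustration with hypothetical counts: given two old PTEs [O0, O1]
   and three new ones appended, by_depth holds [O0, O1, N0, N1, N2] on
   entry; the copies above produce [N0, N1, N2, O0, O1], so the old
   entries now begin at index COUNT_NEW_PAGE_TABLES == 3, which is
   exactly what the push_depth call records. */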
2548
2549 void
2550 ggc_pch_read (FILE *f, void *addr)
2551 {
2552 struct ggc_pch_ondisk d;
2553 unsigned i;
2554 char *offs = (char *) addr;
2555 unsigned long count_old_page_tables;
2556 unsigned long count_new_page_tables;
2557
2558 count_old_page_tables = G.by_depth_in_use;
2559
2560 /* We've just read in a PCH file. So, every object that used to be
2561 allocated is now free. */
2562 clear_marks ();
2563 #ifdef ENABLE_GC_CHECKING
2564 poison_pages ();
2565 #endif
2566 /* Since we free all the allocated objects, the free list becomes
2567 useless. Validate it now, which will also clear it. */
2568 validate_free_objects ();
2569
2570 /* No object read from a PCH file should ever be freed. So, set the
2571 context depth to 1, and set the depth of all the currently-allocated
2572 pages to be 1 too. PCH pages will have depth 0. */
2573 gcc_assert (!G.context_depth);
2574 G.context_depth = 1;
2575 /* Allocate space for the depth 1 finalizers. */
2576 G.finalizers.safe_push (vNULL);
2577 G.vec_finalizers.safe_push (vNULL);
2578 gcc_assert (G.finalizers.length() == 2);
2579 for (i = 0; i < NUM_ORDERS; i++)
2580 {
2581 page_entry *p;
2582 for (p = G.pages[i]; p != NULL; p = p->next)
2583 p->context_depth = G.context_depth;
2584 }
2585
2586 /* Allocate the appropriate page-table entries for the pages read from
2587 the PCH file. */
2588 if (fread (&d, sizeof (d), 1, f) != 1)
2589 fatal_error (input_location, "can%'t read PCH file: %m");
2590
2591 for (i = 0; i < NUM_ORDERS; i++)
2592 {
2593 struct page_entry *entry;
2594 char *pte;
2595 size_t bytes;
2596 size_t num_objs;
2597 size_t j;
2598
2599 if (d.totals[i] == 0)
2600 continue;
2601
2602 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2603 num_objs = bytes / OBJECT_SIZE (i);
2604 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2605 - sizeof (long)
2606 + BITMAP_SIZE (num_objs + 1)));
2607 entry->bytes = bytes;
2608 entry->page = offs;
2609 entry->context_depth = 0;
2610 offs += bytes;
2611 entry->num_free_objects = 0;
2612 entry->order = i;
2613
2614 for (j = 0;
2615 j + HOST_BITS_PER_LONG <= num_objs + 1;
2616 j += HOST_BITS_PER_LONG)
2617 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2618 for (; j < num_objs + 1; j++)
2619 entry->in_use_p[j / HOST_BITS_PER_LONG]
2620 |= 1L << (j % HOST_BITS_PER_LONG);
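
/* E.g., hypothetically: with NUM_OBJS == 70 on a 64-bit host, the first
   loop fills word 0 with all ones and the second sets bits 0..6 of
   word 1, marking objects 0..69 plus the past-the-end bit as live. */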
2621
2622 for (pte = entry->page;
2623 pte < entry->page + entry->bytes;
2624 pte += G.pagesize)
2625 set_page_table_entry (pte, entry);
2626
2627 if (G.page_tails[i] != NULL)
2628 G.page_tails[i]->next = entry;
2629 else
2630 G.pages[i] = entry;
2631 G.page_tails[i] = entry;
2632
2633 /* We start off by just adding all the new information to the
2634 end of the varrays; later, we will move the new information
2635 to the front of the varrays, as the PCH page tables are at
2636 context 0. */
2637 push_by_depth (entry, 0);
2638 }
2639
2640 /* Now, we update the various data structures that speed page table
2641 handling. */
2642 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2643
2644 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2645
2646 /* Update the statistics. */
2647 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2648 }