gcc/ggc-page.c
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "hash-set.h"
25 #include "machmode.h"
26 #include "vec.h"
27 #include "double-int.h"
28 #include "input.h"
29 #include "alias.h"
30 #include "symtab.h"
31 #include "wide-int.h"
32 #include "inchash.h"
33 #include "tree.h"
34 #include "rtl.h"
35 #include "tm_p.h"
36 #include "diagnostic-core.h"
37 #include "flags.h"
38 #include "ggc.h"
39 #include "ggc-internal.h"
40 #include "timevar.h"
41 #include "params.h"
42 #include "hash-map.h"
43 #include "is-a.h"
44 #include "plugin-api.h"
45 #include "vec.h"
46 #include "hashtab.h"
47 #include "hash-set.h"
48 #include "machmode.h"
49 #include "hard-reg-set.h"
50 #include "input.h"
51 #include "function.h"
52 #include "ipa-ref.h"
53 #include "cgraph.h"
54 #include "cfgloop.h"
55 #include "plugin.h"
56 #include "basic-block.h"
57
58 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
59 file open. Prefer either to valloc. */
60 #ifdef HAVE_MMAP_ANON
61 # undef HAVE_MMAP_DEV_ZERO
62 # define USING_MMAP
63 #endif
64
65 #ifdef HAVE_MMAP_DEV_ZERO
66 # define USING_MMAP
67 #endif
68
69 #ifndef USING_MMAP
70 #define USING_MALLOC_PAGE_GROUPS
71 #endif
72
73 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
74 && defined(USING_MMAP)
75 # define USING_MADVISE
76 #endif
77
78 /* Strategy:
79
80 This garbage-collecting allocator allocates objects on one of a set
81 of pages. Each page can allocate objects of a single size only;
82 available sizes are powers of two starting at four bytes. The size
83 of an allocation request is rounded up to the next power of two
84 (`order'), and satisfied from the appropriate page.
85
86 Each page is recorded in a page-entry, which also maintains an
87 in-use bitmap of object positions on the page. This allows the
88 allocation state of a particular object to be flipped without
89 touching the page itself.
90
91 Each page-entry also has a context depth, which is used to track
92 pushing and popping of allocation contexts. Only objects allocated
93 in the current (highest-numbered) context may be collected.
94
95 Page entries are arranged in an array of singly-linked lists. The
96 array is indexed by the allocation size, in bits, of the pages on
97 it; i.e. all pages on a list allocate objects of the same size.
98 Pages are ordered on the list such that all non-full pages precede
99 all full pages, with non-full pages arranged in order of decreasing
100 context depth.
101
102 Empty pages (of all orders) are kept on a single page cache list,
103 and are considered first when new pages are required; they are
104 deallocated at the start of the next collection if they haven't
105 been recycled by then. */
106
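/* Illustrative sketch (not part of the original file): how a request is
   rounded up to a power-of-two `order' under the strategy above.  A
   request for 40 bytes would be served from order-6 pages (64-byte
   objects) unless one of the extra orders defined below fits better.  */

static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 0;
  size_t object_size = 1;

  /* Find the smallest power of two that can hold SIZE bytes.  */
  while (object_size < size)
    {
      object_size <<= 1;
      order++;
    }
  return order;             /* example_order_for_size (40) == 6 */
}
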
107 /* Define GGC_DEBUG_LEVEL to print debugging information.
108 0: No debugging output.
109 1: GC statistics only.
110 2: Page-entry allocations/deallocations as well.
111 3: Object allocations as well.
112 4: Object marks as well. */
113 #define GGC_DEBUG_LEVEL (0)
114 \f
115 #ifndef HOST_BITS_PER_PTR
116 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
117 #endif
118
119 \f
120 /* A two-level tree is used to look up the page-entry for a given
121 pointer. Two chunks of the pointer's bits are extracted to index
122 the first and second levels of the tree, as follows:
123
124 HOST_PAGE_SIZE_BITS
125 32 | |
126 msb +----------------+----+------+------+ lsb
127 | | |
128 PAGE_L1_BITS |
129 | |
130 PAGE_L2_BITS
131
132 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
133 pages are aligned on system page boundaries. The next most
134 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
135 index values in the lookup table, respectively.
136
137 For 32-bit architectures and the settings below, there are no
138 leftover bits. For architectures with wider pointers, the lookup
139 tree points to a list of pages, which must be scanned to find the
140 correct one. */
141
142 #define PAGE_L1_BITS (8)
143 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
144 #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
145 #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
146
147 #define LOOKUP_L1(p) \
148 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
149
150 #define LOOKUP_L2(p) \
151 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
152
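/* Illustrative sketch (not part of the original file): splitting an
   address according to the picture above, assuming 4K pages so that
   G.lg_pagesize == 12 and PAGE_L2_BITS == 32 - 8 - 12 == 12.  The
   LOOKUP_* macros themselves are not used here because G is declared
   further down in this file.  */

static void
example_split_address (const void *p)
{
  const unsigned lg_pagesize = 12;   /* assumed page size of 4096 */
  const unsigned l2_bits = 32 - PAGE_L1_BITS - lg_pagesize;

  size_t l1 = ((uintptr_t) p >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1);
  size_t l2 = ((uintptr_t) p >> lg_pagesize) & ((1 << l2_bits) - 1);

  /* For p == (void *) 0x12345678: l1 == 0x12, l2 == 0x345, and the low
     12 bits (0x678) are the offset within the page, which the lookup
     ignores because pages are page-aligned.  */
  (void) l1;
  (void) l2;
}
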
153 /* The number of objects per allocation page, for objects on a page of
154 the indicated ORDER. */
155 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
156
157 /* The number of objects in P. */
158 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
159
160 /* The size of an object on a page of the indicated ORDER. */
161 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
162
163 /* For speed, we avoid doing a general integer divide to locate the
164 offset in the allocation bitmap, by precalculating numbers M, S
165 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
166 within the page which is evenly divisible by the object size Z. */
167 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
168 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
169 #define OFFSET_TO_BIT(OFFSET, ORDER) \
170 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
171
172 /* We use this structure to determine the alignment required for
173 allocations. For power-of-two sized allocations, that's not a
174 problem, but it does matter for odd-sized allocations.
175 We do not care about alignment for floating-point types. */
176
177 struct max_alignment {
178 char c;
179 union {
180 int64_t i;
181 void *p;
182 } u;
183 };
184
185 /* The biggest alignment required. */
186
187 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
188
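/* Illustrative sketch (not part of the original file): why the offsetof
   trick above yields the required alignment.  The compiler must pad the
   lone char so that `u' starts on a boundary suitable for both int64_t
   and void *; the offset of `u' is therefore the strictest alignment of
   the two, typically 8 on LP64 hosts.  */

static size_t
example_max_alignment (void)
{
  /* Same value as MAX_ALIGNMENT; usually 8 on common 64-bit targets.  */
  return offsetof (struct max_alignment, u);
}
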
189
190 /* The number of extra orders, not corresponding to power-of-two sized
191 objects. */
192
193 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
194
195 #define RTL_SIZE(NSLOTS) \
196 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
197
198 #define TREE_EXP_SIZE(OPS) \
199 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
200
201 /* The Ith entry is the maximum size of an object to be stored in the
202 Ith extra order. Adding a new entry to this array is the *only*
203 thing you need to do to add a new special allocation size. */
204
205 static const size_t extra_order_size_table[] = {
206 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
207 There are a lot of structures with these sizes and explicitly
208 listing them risks orders being dropped because they changed size. */
209 MAX_ALIGNMENT * 3,
210 MAX_ALIGNMENT * 5,
211 MAX_ALIGNMENT * 6,
212 MAX_ALIGNMENT * 7,
213 MAX_ALIGNMENT * 9,
214 MAX_ALIGNMENT * 10,
215 MAX_ALIGNMENT * 11,
216 MAX_ALIGNMENT * 12,
217 MAX_ALIGNMENT * 13,
218 MAX_ALIGNMENT * 14,
219 MAX_ALIGNMENT * 15,
220 sizeof (struct tree_decl_non_common),
221 sizeof (struct tree_field_decl),
222 sizeof (struct tree_parm_decl),
223 sizeof (struct tree_var_decl),
224 sizeof (struct tree_type_non_common),
225 sizeof (struct function),
226 sizeof (struct basic_block_def),
227 sizeof (struct cgraph_node),
228 sizeof (struct loop),
229 };
230
231 /* The total number of orders. */
232
233 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
234
235 /* Compute the smallest nonnegative number which when added to X gives
236 a multiple of F. */
237
238 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
239
240 /* Compute the smallest multiple of F that is >= X. */
241
242 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
243
244 /* Round X to the next multiple of the page size. */
245
246 #define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
247
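/* Illustrative sketch (not part of the original file): worked examples of
   the rounding macros above, assuming a 4096-byte page size.  PAGE_ALIGN
   itself is not used here because G is declared further down in this
   file; the last line performs the same arithmetic by hand.  */

static void
example_rounding (void)
{
  const size_t pagesize = 4096;                              /* assumed */

  size_t pad     = ROUND_UP_VALUE (10, 8);                   /* 6, since 10 + 6 == 16 */
  size_t rounded = ROUND_UP (10, 8);                         /* 16 */
  size_t aligned = (5000 + pagesize - 1) & ~(pagesize - 1);  /* 8192 */

  gcc_assert (pad == 6 && rounded == 16 && aligned == 8192);
}
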
248 /* The Ith entry is the number of objects on a page of order I. */
249
250 static unsigned objects_per_page_table[NUM_ORDERS];
251
252 /* The Ith entry is the size of an object on a page of order I. */
253
254 static size_t object_size_table[NUM_ORDERS];
255
256 /* The Ith entry is a pair of numbers (mult, shift) such that
257 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
258 for all k evenly divisible by OBJECT_SIZE(I). */
259
260 static struct
261 {
262 size_t mult;
263 unsigned int shift;
264 }
265 inverse_table[NUM_ORDERS];
266
267 /* A page_entry records the status of an allocation page. This
268 structure is dynamically sized to fit the bitmap in_use_p. */
269 typedef struct page_entry
270 {
271 /* The next page-entry with objects of the same size, or NULL if
272 this is the last page-entry. */
273 struct page_entry *next;
274
275 /* The previous page-entry with objects of the same size, or NULL if
276 this is the first page-entry. The PREV pointer exists solely to
277 keep the cost of ggc_free manageable. */
278 struct page_entry *prev;
279
280 /* The number of bytes allocated. (This will always be a multiple
281 of the host system page size.) */
282 size_t bytes;
283
284 /* The address at which the memory is allocated. */
285 char *page;
286
287 #ifdef USING_MALLOC_PAGE_GROUPS
288 /* Back pointer to the page group this page came from. */
289 struct page_group *group;
290 #endif
291
292 /* This is the index in the by_depth varray where this page table
293 can be found. */
294 unsigned long index_by_depth;
295
296 /* Context depth of this page. */
297 unsigned short context_depth;
298
299 /* The number of free objects remaining on this page. */
300 unsigned short num_free_objects;
301
302 /* A likely candidate for the bit position of a free object for the
303 next allocation from this page. */
304 unsigned short next_bit_hint;
305
306 /* The lg of size of objects allocated from this page. */
307 unsigned char order;
308
309 /* Discarded page? */
310 bool discarded;
311
312 /* A bit vector indicating whether or not objects are in use. The
313 Nth bit is one if the Nth object on this page is allocated. This
314 array is dynamically sized. */
315 unsigned long in_use_p[1];
316 } page_entry;
317
318 #ifdef USING_MALLOC_PAGE_GROUPS
319 /* A page_group describes a large allocation from malloc, from which
320 we parcel out aligned pages. */
321 typedef struct page_group
322 {
323 /* A linked list of all extant page groups. */
324 struct page_group *next;
325
326 /* The address we received from malloc. */
327 char *allocation;
328
329 /* The size of the block. */
330 size_t alloc_size;
331
332 /* A bitmask of pages in use. */
333 unsigned int in_use;
334 } page_group;
335 #endif
336
337 #if HOST_BITS_PER_PTR <= 32
338
339 /* On 32-bit hosts, we use a two level page table, as pictured above. */
340 typedef page_entry **page_table[PAGE_L1_SIZE];
341
342 #else
343
344 /* On 64-bit hosts, we use the same two level page tables plus a linked
345 list that disambiguates the top 32-bits. There will almost always be
346 exactly one entry in the list. */
347 typedef struct page_table_chain
348 {
349 struct page_table_chain *next;
350 size_t high_bits;
351 page_entry **table[PAGE_L1_SIZE];
352 } *page_table;
353
354 #endif
355
356 class finalizer
357 {
358 public:
359 finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
360
361 void *addr () const { return m_addr; }
362
363 void call () const { m_function (m_addr); }
364
365 private:
366 void *m_addr;
367 void (*m_function)(void *);
368 };
369
370 class vec_finalizer
371 {
372 public:
373 vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
374 m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
375
376 void call () const
377 {
378 for (size_t i = 0; i < m_n_objects; i++)
379 m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
380 }
381
382 void *addr () const { return reinterpret_cast<void *> (m_addr); }
383
384 private:
385 uintptr_t m_addr;
386 void (*m_function)(void *);
387 size_t m_object_size;
388 size_t m_n_objects;
389 };
390
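/* Illustrative sketch (not part of the original file): how the two
   finalizer records above are used.  ggc_internal_alloc further down
   stores a plain finalizer when it is handed a function F with N == 1,
   and a vec_finalizer when F comes with an element size S and count N;
   ggc_handle_finalizers then invokes call () on every record whose
   object did not survive the mark phase.  The destructor name here is
   hypothetical.  */

static void
example_element_dtor (void *p)
{
  (void) p;   /* per-element cleanup would go here */
}

static void
example_finalizer_records (void *obj, uintptr_t vec, size_t elt_size, size_t n)
{
  finalizer single (obj, example_element_dtor);
  vec_finalizer many (vec, example_element_dtor, elt_size, n);

  /* During collection the allocator calls these only for unmarked
     objects; invoking them here is purely for illustration.  */
  single.call ();
  many.call ();
}
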
391 #ifdef ENABLE_GC_ALWAYS_COLLECT
392 /* List of free objects to be verified as actually free on the
393 next collection. */
394 struct free_object
395 {
396 void *object;
397 struct free_object *next;
398 };
399 #endif
400
401 /* The rest of the global variables. */
402 static struct ggc_globals
403 {
404 /* The Nth element in this array is a page with objects of size 2^N.
405 If there are any pages with free objects, they will be at the
406 head of the list. NULL if there are no page-entries for this
407 object size. */
408 page_entry *pages[NUM_ORDERS];
409
410 /* The Nth element in this array is the last page with objects of
411 size 2^N. NULL if there are no page-entries for this object
412 size. */
413 page_entry *page_tails[NUM_ORDERS];
414
415 /* Lookup table for associating allocation pages with object addresses. */
416 page_table lookup;
417
418 /* The system's page size. */
419 size_t pagesize;
420 size_t lg_pagesize;
421
422 /* Bytes currently allocated. */
423 size_t allocated;
424
425 /* Bytes currently allocated at the end of the last collection. */
426 size_t allocated_last_gc;
427
428 /* Total amount of memory mapped. */
429 size_t bytes_mapped;
430
431 /* Bit N set if any allocations have been done at context depth N. */
432 unsigned long context_depth_allocations;
433
434 /* Bit N set if any collections have been done at context depth N. */
435 unsigned long context_depth_collections;
436
437 /* The current depth in the context stack. */
438 unsigned short context_depth;
439
440 /* A file descriptor open to /dev/zero for reading. */
441 #if defined (HAVE_MMAP_DEV_ZERO)
442 int dev_zero_fd;
443 #endif
444
445 /* A cache of free system pages. */
446 page_entry *free_pages;
447
448 #ifdef USING_MALLOC_PAGE_GROUPS
449 page_group *page_groups;
450 #endif
451
452 /* The file descriptor for debugging output. */
453 FILE *debug_file;
454
455 /* Current number of elements in use in depth below. */
456 unsigned int depth_in_use;
457
458 /* Maximum number of elements that can be used before resizing. */
459 unsigned int depth_max;
460
461 /* Each element of this array is an index in by_depth where the given
462 depth starts. This array is indexed by the depth we are
463 interested in. */
464 unsigned int *depth;
465
466 /* Current number of elements in use in by_depth below. */
467 unsigned int by_depth_in_use;
468
469 /* Maximum number of elements that can be used before resizing. */
470 unsigned int by_depth_max;
471
472 /* Each element of this array is a pointer to a page_entry, all
473 page_entries can be found in here by increasing depth.
474 index_by_depth in the page_entry is the index into this data
475 structure where that page_entry can be found. This is used to
476 speed up finding all page_entries at a particular depth. */
477 page_entry **by_depth;
478
479 /* Each element is a pointer to the saved in_use_p bits, if any,
480 zero otherwise. We allocate them all together, to enable a
481 better runtime data access pattern. */
482 unsigned long **save_in_use;
483
484 /* Finalizers for single objects. */
485 vec<finalizer> finalizers;
486
487 /* Finalizers for vectors of objects. */
488 vec<vec_finalizer> vec_finalizers;
489
490 #ifdef ENABLE_GC_ALWAYS_COLLECT
491 /* List of free objects to be verified as actually free on the
492 next collection. */
493 struct free_object *free_object_list;
494 #endif
495
496 struct
497 {
498 /* Total GC-allocated memory. */
499 unsigned long long total_allocated;
500 /* Total overhead for GC-allocated memory. */
501 unsigned long long total_overhead;
502
503 /* Total allocations and overhead for sizes less than 32, 64 and 128.
504 These sizes are interesting because they are typical cache line
505 sizes. */
506
507 unsigned long long total_allocated_under32;
508 unsigned long long total_overhead_under32;
509
510 unsigned long long total_allocated_under64;
511 unsigned long long total_overhead_under64;
512
513 unsigned long long total_allocated_under128;
514 unsigned long long total_overhead_under128;
515
516 /* The allocations for each of the allocation orders. */
517 unsigned long long total_allocated_per_order[NUM_ORDERS];
518
519 /* The overhead for each of the allocation orders. */
520 unsigned long long total_overhead_per_order[NUM_ORDERS];
521 } stats;
522 } G;
523
524 /* True if a gc is currently taking place. */
525
526 static bool in_gc = false;
527
528 /* The size in bytes required to maintain a bitmap for the objects
529 on a page-entry. */
530 #define BITMAP_SIZE(Num_objects) \
531 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
532
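/* Illustrative sketch (not part of the original file): how BITMAP_SIZE
   combines with the trailing in_use_p[1] member to size a page_entry,
   mirroring the arithmetic in alloc_page below.  For a 4K page of
   64-byte objects there are 64 objects plus the one-past-the-end
   sentinel bit, i.e. two longs of bitmap on an LP64 host, so one long
   is added to the one already declared inside the struct.  */

static size_t
example_page_entry_size (size_t num_objects)
{
  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
  return sizeof (page_entry) - sizeof (long) + bitmap_size;
}
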
533 /* Allocate pages in chunks of this size, to throttle calls to memory
534 allocation routines. The first page is used, the rest go onto the
535 free list. This cannot be larger than HOST_BITS_PER_INT for the
536 in_use bitmask for page_group. Hosts that need a different value
537 can override this by defining GGC_QUIRE_SIZE explicitly. */
538 #ifndef GGC_QUIRE_SIZE
539 # ifdef USING_MMAP
540 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
541 # else
542 # define GGC_QUIRE_SIZE 16
543 # endif
544 #endif
545
546 /* Initial guess as to how many page table entries we might need. */
547 #define INITIAL_PTE_COUNT 128
548 \f
549 static int ggc_allocated_p (const void *);
550 static page_entry *lookup_page_table_entry (const void *);
551 static void set_page_table_entry (void *, page_entry *);
552 #ifdef USING_MMAP
553 static char *alloc_anon (char *, size_t, bool check);
554 #endif
555 #ifdef USING_MALLOC_PAGE_GROUPS
556 static size_t page_group_index (char *, char *);
557 static void set_page_group_in_use (page_group *, char *);
558 static void clear_page_group_in_use (page_group *, char *);
559 #endif
560 static struct page_entry * alloc_page (unsigned);
561 static void free_page (struct page_entry *);
562 static void release_pages (void);
563 static void clear_marks (void);
564 static void sweep_pages (void);
565 static void ggc_recalculate_in_use_p (page_entry *);
566 static void compute_inverse (unsigned);
567 static inline void adjust_depth (void);
568 static void move_ptes_to_front (int, int);
569
570 void debug_print_page_list (int);
571 static void push_depth (unsigned int);
572 static void push_by_depth (page_entry *, unsigned long *);
573
574 /* Push an entry onto G.depth. */
575
576 inline static void
577 push_depth (unsigned int i)
578 {
579 if (G.depth_in_use >= G.depth_max)
580 {
581 G.depth_max *= 2;
582 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
583 }
584 G.depth[G.depth_in_use++] = i;
585 }
586
587 /* Push an entry onto G.by_depth and G.save_in_use. */
588
589 inline static void
590 push_by_depth (page_entry *p, unsigned long *s)
591 {
592 if (G.by_depth_in_use >= G.by_depth_max)
593 {
594 G.by_depth_max *= 2;
595 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
596 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
597 G.by_depth_max);
598 }
599 G.by_depth[G.by_depth_in_use] = p;
600 G.save_in_use[G.by_depth_in_use++] = s;
601 }
602
603 #if (GCC_VERSION < 3001)
604 #define prefetch(X) ((void) X)
605 #else
606 #define prefetch(X) __builtin_prefetch (X)
607 #endif
608
609 #define save_in_use_p_i(__i) \
610 (G.save_in_use[__i])
611 #define save_in_use_p(__p) \
612 (save_in_use_p_i (__p->index_by_depth))
613
614 /* Returns nonzero if P was allocated in GC'able memory. */
615
616 static inline int
617 ggc_allocated_p (const void *p)
618 {
619 page_entry ***base;
620 size_t L1, L2;
621
622 #if HOST_BITS_PER_PTR <= 32
623 base = &G.lookup[0];
624 #else
625 page_table table = G.lookup;
626 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
627 while (1)
628 {
629 if (table == NULL)
630 return 0;
631 if (table->high_bits == high_bits)
632 break;
633 table = table->next;
634 }
635 base = &table->table[0];
636 #endif
637
638 /* Extract the level 1 and 2 indices. */
639 L1 = LOOKUP_L1 (p);
640 L2 = LOOKUP_L2 (p);
641
642 return base[L1] && base[L1][L2];
643 }
644
645 /* Traverse the page table and find the entry for a page.
646 Die (probably) if the object wasn't allocated via GC. */
647
648 static inline page_entry *
649 lookup_page_table_entry (const void *p)
650 {
651 page_entry ***base;
652 size_t L1, L2;
653
654 #if HOST_BITS_PER_PTR <= 32
655 base = &G.lookup[0];
656 #else
657 page_table table = G.lookup;
658 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
659 while (table->high_bits != high_bits)
660 table = table->next;
661 base = &table->table[0];
662 #endif
663
664 /* Extract the level 1 and 2 indices. */
665 L1 = LOOKUP_L1 (p);
666 L2 = LOOKUP_L2 (p);
667
668 return base[L1][L2];
669 }
670
671 /* Set the page table entry for a page. */
672
673 static void
674 set_page_table_entry (void *p, page_entry *entry)
675 {
676 page_entry ***base;
677 size_t L1, L2;
678
679 #if HOST_BITS_PER_PTR <= 32
680 base = &G.lookup[0];
681 #else
682 page_table table;
683 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
684 for (table = G.lookup; table; table = table->next)
685 if (table->high_bits == high_bits)
686 goto found;
687
688 /* Not found -- allocate a new table. */
689 table = XCNEW (struct page_table_chain);
690 table->next = G.lookup;
691 table->high_bits = high_bits;
692 G.lookup = table;
693 found:
694 base = &table->table[0];
695 #endif
696
697 /* Extract the level 1 and 2 indices. */
698 L1 = LOOKUP_L1 (p);
699 L2 = LOOKUP_L2 (p);
700
701 if (base[L1] == NULL)
702 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
703
704 base[L1][L2] = entry;
705 }
706
707 /* Prints the page-entry for object size ORDER, for debugging. */
708
709 DEBUG_FUNCTION void
710 debug_print_page_list (int order)
711 {
712 page_entry *p;
713 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
714 (void *) G.page_tails[order]);
715 p = G.pages[order];
716 while (p != NULL)
717 {
718 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
719 p->num_free_objects);
720 p = p->next;
721 }
722 printf ("NULL\n");
723 fflush (stdout);
724 }
725
726 #ifdef USING_MMAP
727 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
728 (if non-null). The ifdef structure here is intended to cause a
729 compile error unless exactly one of the HAVE_* is defined. */
730
731 static inline char *
732 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
733 {
734 #ifdef HAVE_MMAP_ANON
735 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
736 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
737 #endif
738 #ifdef HAVE_MMAP_DEV_ZERO
739 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
740 MAP_PRIVATE, G.dev_zero_fd, 0);
741 #endif
742
743 if (page == (char *) MAP_FAILED)
744 {
745 if (!check)
746 return NULL;
747 perror ("virtual memory exhausted");
748 exit (FATAL_EXIT_CODE);
749 }
750
751 /* Remember that we allocated this memory. */
752 G.bytes_mapped += size;
753
754 /* Pretend we don't have access to the allocated pages. We'll enable
755 access to smaller pieces of the area in ggc_internal_alloc. Discard the
756 handle to avoid handle leak. */
757 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
758
759 return page;
760 }
761 #endif
762 #ifdef USING_MALLOC_PAGE_GROUPS
763 /* Compute the index for this page into the page group. */
764
765 static inline size_t
766 page_group_index (char *allocation, char *page)
767 {
768 return (size_t) (page - allocation) >> G.lg_pagesize;
769 }
770
771 /* Set and clear the in_use bit for this page in the page group. */
772
773 static inline void
774 set_page_group_in_use (page_group *group, char *page)
775 {
776 group->in_use |= 1 << page_group_index (group->allocation, page);
777 }
778
779 static inline void
780 clear_page_group_in_use (page_group *group, char *page)
781 {
782 group->in_use &= ~(1 << page_group_index (group->allocation, page));
783 }
784 #endif
785
786 /* Allocate a new page for allocating objects of size 2^ORDER,
787 and return an entry for it. The entry is not added to the
788 appropriate page_table list. */
789
790 static inline struct page_entry *
791 alloc_page (unsigned order)
792 {
793 struct page_entry *entry, *p, **pp;
794 char *page;
795 size_t num_objects;
796 size_t bitmap_size;
797 size_t page_entry_size;
798 size_t entry_size;
799 #ifdef USING_MALLOC_PAGE_GROUPS
800 page_group *group;
801 #endif
802
803 num_objects = OBJECTS_PER_PAGE (order);
804 bitmap_size = BITMAP_SIZE (num_objects + 1);
805 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
806 entry_size = num_objects * OBJECT_SIZE (order);
807 if (entry_size < G.pagesize)
808 entry_size = G.pagesize;
809 entry_size = PAGE_ALIGN (entry_size);
810
811 entry = NULL;
812 page = NULL;
813
814 /* Check the list of free pages for one we can use. */
815 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
816 if (p->bytes == entry_size)
817 break;
818
819 if (p != NULL)
820 {
821 if (p->discarded)
822 G.bytes_mapped += p->bytes;
823 p->discarded = false;
824
825 /* Recycle the allocated memory from this page ... */
826 *pp = p->next;
827 page = p->page;
828
829 #ifdef USING_MALLOC_PAGE_GROUPS
830 group = p->group;
831 #endif
832
833 /* ... and, if possible, the page entry itself. */
834 if (p->order == order)
835 {
836 entry = p;
837 memset (entry, 0, page_entry_size);
838 }
839 else
840 free (p);
841 }
842 #ifdef USING_MMAP
843 else if (entry_size == G.pagesize)
844 {
845 /* We want just one page. Allocate a bunch of them and put the
846 extras on the freelist. (Can only do this optimization with
847 mmap for backing store.) */
848 struct page_entry *e, *f = G.free_pages;
849 int i, entries = GGC_QUIRE_SIZE;
850
851 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
852 if (page == NULL)
853 {
854 page = alloc_anon (NULL, G.pagesize, true);
855 entries = 1;
856 }
857
858 /* This loop counts down so that the chain will be in ascending
859 memory order. */
860 for (i = entries - 1; i >= 1; i--)
861 {
862 e = XCNEWVAR (struct page_entry, page_entry_size);
863 e->order = order;
864 e->bytes = G.pagesize;
865 e->page = page + (i << G.lg_pagesize);
866 e->next = f;
867 f = e;
868 }
869
870 G.free_pages = f;
871 }
872 else
873 page = alloc_anon (NULL, entry_size, true);
874 #endif
875 #ifdef USING_MALLOC_PAGE_GROUPS
876 else
877 {
878 /* Allocate a large block of memory and serve out the aligned
879 pages therein. This results in much less memory wastage
880 than the traditional implementation of valloc. */
881
882 char *allocation, *a, *enda;
883 size_t alloc_size, head_slop, tail_slop;
884 int multiple_pages = (entry_size == G.pagesize);
885
886 if (multiple_pages)
887 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
888 else
889 alloc_size = entry_size + G.pagesize - 1;
890 allocation = XNEWVEC (char, alloc_size);
891
892 page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
893 head_slop = page - allocation;
894 if (multiple_pages)
895 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
896 else
897 tail_slop = alloc_size - entry_size - head_slop;
898 enda = allocation + alloc_size - tail_slop;
899
900 /* We allocated N pages, which are likely not aligned, leaving
901 us with N-1 usable pages. We plan to place the page_group
902 structure somewhere in the slop. */
903 if (head_slop >= sizeof (page_group))
904 group = (page_group *)page - 1;
905 else
906 {
907 /* We magically got an aligned allocation. Too bad, we have
908 to waste a page anyway. */
909 if (tail_slop == 0)
910 {
911 enda -= G.pagesize;
912 tail_slop += G.pagesize;
913 }
914 gcc_assert (tail_slop >= sizeof (page_group));
915 group = (page_group *)enda;
916 tail_slop -= sizeof (page_group);
917 }
918
919 /* Remember that we allocated this memory. */
920 group->next = G.page_groups;
921 group->allocation = allocation;
922 group->alloc_size = alloc_size;
923 group->in_use = 0;
924 G.page_groups = group;
925 G.bytes_mapped += alloc_size;
926
927 /* If we allocated multiple pages, put the rest on the free list. */
928 if (multiple_pages)
929 {
930 struct page_entry *e, *f = G.free_pages;
931 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
932 {
933 e = XCNEWVAR (struct page_entry, page_entry_size);
934 e->order = order;
935 e->bytes = G.pagesize;
936 e->page = a;
937 e->group = group;
938 e->next = f;
939 f = e;
940 }
941 G.free_pages = f;
942 }
943 }
944 #endif
945
946 if (entry == NULL)
947 entry = XCNEWVAR (struct page_entry, page_entry_size);
948
949 entry->bytes = entry_size;
950 entry->page = page;
951 entry->context_depth = G.context_depth;
952 entry->order = order;
953 entry->num_free_objects = num_objects;
954 entry->next_bit_hint = 1;
955
956 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
957
958 #ifdef USING_MALLOC_PAGE_GROUPS
959 entry->group = group;
960 set_page_group_in_use (group, page);
961 #endif
962
963 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
964 increment the hint. */
965 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
966 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
967
968 set_page_table_entry (page, entry);
969
970 if (GGC_DEBUG_LEVEL >= 2)
971 fprintf (G.debug_file,
972 "Allocating page at %p, object size=%lu, data %p-%p\n",
973 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
974 page + entry_size - 1);
975
976 return entry;
977 }
978
979 /* Adjust the size of G.depth so that no index greater than the one
980 used by the top of the G.by_depth is used. */
981
982 static inline void
983 adjust_depth (void)
984 {
985 page_entry *top;
986
987 if (G.by_depth_in_use)
988 {
989 top = G.by_depth[G.by_depth_in_use-1];
990
991 /* Peel back indices in depth that index into by_depth, so that
992 as new elements are added to by_depth, we note the indices
993 of those elements, if they are for new context depths. */
994 while (G.depth_in_use > (size_t)top->context_depth+1)
995 --G.depth_in_use;
996 }
997 }
998
999 /* For a page that is no longer needed, put it on the free page list. */
1000
1001 static void
1002 free_page (page_entry *entry)
1003 {
1004 if (GGC_DEBUG_LEVEL >= 2)
1005 fprintf (G.debug_file,
1006 "Deallocating page at %p, data %p-%p\n", (void *) entry,
1007 entry->page, entry->page + entry->bytes - 1);
1008
1009 /* Mark the page as inaccessible. Discard the handle to avoid handle
1010 leak. */
1011 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
1012
1013 set_page_table_entry (entry->page, NULL);
1014
1015 #ifdef USING_MALLOC_PAGE_GROUPS
1016 clear_page_group_in_use (entry->group, entry->page);
1017 #endif
1018
1019 if (G.by_depth_in_use > 1)
1020 {
1021 page_entry *top = G.by_depth[G.by_depth_in_use-1];
1022 int i = entry->index_by_depth;
1023
1024 /* We cannot free a page from a context deeper than the current
1025 one. */
1026 gcc_assert (entry->context_depth == top->context_depth);
1027
1028 /* Put top element into freed slot. */
1029 G.by_depth[i] = top;
1030 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1031 top->index_by_depth = i;
1032 }
1033 --G.by_depth_in_use;
1034
1035 adjust_depth ();
1036
1037 entry->next = G.free_pages;
1038 G.free_pages = entry;
1039 }
1040
1041 /* Release the free page cache to the system. */
1042
1043 static void
1044 release_pages (void)
1045 {
1046 #ifdef USING_MADVISE
1047 page_entry *p, *start_p;
1048 char *start;
1049 size_t len;
1050 size_t mapped_len;
1051 page_entry *next, *prev, *newprev;
1052 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1053
1054 /* First free larger contiguous areas to the OS.
1055 This allows other allocators to grab these areas if needed.
1056 This is only done on larger chunks to avoid fragmentation.
1057 This does not always work because the free_pages list is only
1058 approximately sorted. */
1059
1060 p = G.free_pages;
1061 prev = NULL;
1062 while (p)
1063 {
1064 start = p->page;
1065 start_p = p;
1066 len = 0;
1067 mapped_len = 0;
1068 newprev = prev;
1069 while (p && p->page == start + len)
1070 {
1071 len += p->bytes;
1072 if (!p->discarded)
1073 mapped_len += p->bytes;
1074 newprev = p;
1075 p = p->next;
1076 }
1077 if (len >= free_unit)
1078 {
1079 while (start_p != p)
1080 {
1081 next = start_p->next;
1082 free (start_p);
1083 start_p = next;
1084 }
1085 munmap (start, len);
1086 if (prev)
1087 prev->next = p;
1088 else
1089 G.free_pages = p;
1090 G.bytes_mapped -= mapped_len;
1091 continue;
1092 }
1093 prev = newprev;
1094 }
1095
1096 /* Now give back the fragmented pages to the OS, but keep the address
1097 space to reuse it next time. */
1098
1099 for (p = G.free_pages; p; )
1100 {
1101 if (p->discarded)
1102 {
1103 p = p->next;
1104 continue;
1105 }
1106 start = p->page;
1107 len = p->bytes;
1108 start_p = p;
1109 p = p->next;
1110 while (p && p->page == start + len)
1111 {
1112 len += p->bytes;
1113 p = p->next;
1114 }
1115 /* Give the page back to the kernel, but don't free the mapping.
1116 This avoids fragmentation in the virtual memory map of the
1117 process. Next time we can reuse it by just touching it. */
1118 madvise (start, len, MADV_DONTNEED);
1119 /* Don't count those pages as mapped, so as not to trigger the garbage
1120 collector unnecessarily. */
1121 G.bytes_mapped -= len;
1122 while (start_p != p)
1123 {
1124 start_p->discarded = true;
1125 start_p = start_p->next;
1126 }
1127 }
1128 #endif
1129 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1130 page_entry *p, *next;
1131 char *start;
1132 size_t len;
1133
1134 /* Gather up adjacent pages so they are unmapped together. */
1135 p = G.free_pages;
1136
1137 while (p)
1138 {
1139 start = p->page;
1140 next = p->next;
1141 len = p->bytes;
1142 free (p);
1143 p = next;
1144
1145 while (p && p->page == start + len)
1146 {
1147 next = p->next;
1148 len += p->bytes;
1149 free (p);
1150 p = next;
1151 }
1152
1153 munmap (start, len);
1154 G.bytes_mapped -= len;
1155 }
1156
1157 G.free_pages = NULL;
1158 #endif
1159 #ifdef USING_MALLOC_PAGE_GROUPS
1160 page_entry **pp, *p;
1161 page_group **gp, *g;
1162
1163 /* Remove all pages from free page groups from the list. */
1164 pp = &G.free_pages;
1165 while ((p = *pp) != NULL)
1166 if (p->group->in_use == 0)
1167 {
1168 *pp = p->next;
1169 free (p);
1170 }
1171 else
1172 pp = &p->next;
1173
1174 /* Remove all free page groups, and release the storage. */
1175 gp = &G.page_groups;
1176 while ((g = *gp) != NULL)
1177 if (g->in_use == 0)
1178 {
1179 *gp = g->next;
1180 G.bytes_mapped -= g->alloc_size;
1181 free (g->allocation);
1182 }
1183 else
1184 gp = &g->next;
1185 #endif
1186 }
1187
1188 /* This table provides a fast way to determine ceil(log_2(size)) for
1189 allocation requests. The minimum allocation size is eight bytes. */
1190 #define NUM_SIZE_LOOKUP 512
1191 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1192 {
1193 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1194 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1195 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1196 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1197 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1198 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1199 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1200 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1201 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1202 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1203 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1204 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1205 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1206 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1207 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1208 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1209 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1211 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1212 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1213 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1214 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1215 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1216 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1217 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1218 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1219 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1220 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1221 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1222 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1223 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1224 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1225 };
1226
1227 /* For a given size of memory requested for allocation, return the
1228 actual size that is going to be allocated, as well as the size
1229 order. */
1230
1231 static void
1232 ggc_round_alloc_size_1 (size_t requested_size,
1233 size_t *size_order,
1234 size_t *alloced_size)
1235 {
1236 size_t order, object_size;
1237
1238 if (requested_size < NUM_SIZE_LOOKUP)
1239 {
1240 order = size_lookup[requested_size];
1241 object_size = OBJECT_SIZE (order);
1242 }
1243 else
1244 {
1245 order = 10;
1246 while (requested_size > (object_size = OBJECT_SIZE (order)))
1247 order++;
1248 }
1249
1250 if (size_order)
1251 *size_order = order;
1252 if (alloced_size)
1253 *alloced_size = object_size;
1254 }
1255
1256 /* For a given size of memory requested for allocation, return the
1257 actual size that is going to be allocated. */
1258
1259 size_t
1260 ggc_round_alloc_size (size_t requested_size)
1261 {
1262 size_t size = 0;
1263
1264 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1265 return size;
1266 }
1267
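/* Illustrative sketch (not part of the original file): what the rounding
   above produces on a typical LP64 host where MAX_ALIGNMENT is 8, once
   init_ggc below has redirected size_lookup to the extra orders (24, 40,
   48, ... bytes).  A 20-byte request then lands in the 24-byte extra
   order instead of being rounded all the way up to 32.  */

static void
example_round_sizes (void)
{
  size_t s24 = ggc_round_alloc_size (20);   /* 24 under the assumptions above */
  size_t s64 = ggc_round_alloc_size (64);   /* exactly 64: already a power of two */

  (void) s24;
  (void) s64;
}
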
1268 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1269
1270 void *
1271 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1272 MEM_STAT_DECL)
1273 {
1274 size_t order, word, bit, object_offset, object_size;
1275 struct page_entry *entry;
1276 void *result;
1277
1278 ggc_round_alloc_size_1 (size, &order, &object_size);
1279
1280 /* If there are non-full pages for this size allocation, they are at
1281 the head of the list. */
1282 entry = G.pages[order];
1283
1284 /* If there is no page for this object size, or all pages in this
1285 context are full, allocate a new page. */
1286 if (entry == NULL || entry->num_free_objects == 0)
1287 {
1288 struct page_entry *new_entry;
1289 new_entry = alloc_page (order);
1290
1291 new_entry->index_by_depth = G.by_depth_in_use;
1292 push_by_depth (new_entry, 0);
1293
1294 /* We can skip context depths; if we do, make sure we go all the
1295 way to the new depth. */
1296 while (new_entry->context_depth >= G.depth_in_use)
1297 push_depth (G.by_depth_in_use-1);
1298
1299 /* If this is the only entry, it's also the tail. If it is not
1300 the only entry, then we must update the PREV pointer of the
1301 ENTRY (G.pages[order]) to point to our new page entry. */
1302 if (entry == NULL)
1303 G.page_tails[order] = new_entry;
1304 else
1305 entry->prev = new_entry;
1306
1307 /* Put new pages at the head of the page list. By definition the
1308 entry at the head of the list always has a NULL PREV pointer. */
1309 new_entry->next = entry;
1310 new_entry->prev = NULL;
1311 entry = new_entry;
1312 G.pages[order] = new_entry;
1313
1314 /* For a new page, we know the word and bit positions (in the
1315 in_use bitmap) of the first available object -- they're zero. */
1316 new_entry->next_bit_hint = 1;
1317 word = 0;
1318 bit = 0;
1319 object_offset = 0;
1320 }
1321 else
1322 {
1323 /* First try to use the hint left from the previous allocation
1324 to locate a clear bit in the in-use bitmap. We've made sure
1325 that the one-past-the-end bit is always set, so if the hint
1326 has run over, this test will fail. */
1327 unsigned hint = entry->next_bit_hint;
1328 word = hint / HOST_BITS_PER_LONG;
1329 bit = hint % HOST_BITS_PER_LONG;
1330
1331 /* If the hint didn't work, scan the bitmap from the beginning. */
1332 if ((entry->in_use_p[word] >> bit) & 1)
1333 {
1334 word = bit = 0;
1335 while (~entry->in_use_p[word] == 0)
1336 ++word;
1337
1338 #if GCC_VERSION >= 3004
1339 bit = __builtin_ctzl (~entry->in_use_p[word]);
1340 #else
1341 while ((entry->in_use_p[word] >> bit) & 1)
1342 ++bit;
1343 #endif
1344
1345 hint = word * HOST_BITS_PER_LONG + bit;
1346 }
1347
1348 /* Next time, try the next bit. */
1349 entry->next_bit_hint = hint + 1;
1350
1351 object_offset = hint * object_size;
1352 }
1353
1354 /* Set the in-use bit. */
1355 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1356
1357 /* Keep a running total of the number of free objects. If this page
1358 fills up, we may have to move it to the end of the list if the
1359 next page isn't full. If the next page is full, all subsequent
1360 pages are full, so there's no need to move it. */
1361 if (--entry->num_free_objects == 0
1362 && entry->next != NULL
1363 && entry->next->num_free_objects > 0)
1364 {
1365 /* We have a new head for the list. */
1366 G.pages[order] = entry->next;
1367
1368 /* We are moving ENTRY to the end of the page table list.
1369 The new page at the head of the list will have NULL in
1370 its PREV field and ENTRY will have NULL in its NEXT field. */
1371 entry->next->prev = NULL;
1372 entry->next = NULL;
1373
1374 /* Append ENTRY to the tail of the list. */
1375 entry->prev = G.page_tails[order];
1376 G.page_tails[order]->next = entry;
1377 G.page_tails[order] = entry;
1378 }
1379
1380 /* Calculate the object's address. */
1381 result = entry->page + object_offset;
1382 if (GATHER_STATISTICS)
1383 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1384 result FINAL_PASS_MEM_STAT);
1385
1386 #ifdef ENABLE_GC_CHECKING
1387 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1388 exact same semantics in presence of memory bugs, regardless of
1389 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1390 handle to avoid handle leak. */
1391 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1392
1393 /* `Poison' the entire allocated object, including any padding at
1394 the end. */
1395 memset (result, 0xaf, object_size);
1396
1397 /* Make the bytes after the end of the object inaccessible. Discard the
1398 handle to avoid handle leak. */
1399 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1400 object_size - size));
1401 #endif
1402
1403 /* Tell Valgrind that the memory is there, but its content isn't
1404 defined. The bytes at the end of the object are still marked
1405 inaccessible. */
1406 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1407
1408 /* Keep track of how many bytes are being allocated. This
1409 information is used in deciding when to collect. */
1410 G.allocated += object_size;
1411
1412 /* For timevar statistics. */
1413 timevar_ggc_mem_total += object_size;
1414
1415 if (f && n == 1)
1416 G.finalizers.safe_push (finalizer (result, f));
1417 else if (f)
1418 G.vec_finalizers.safe_push
1419 (vec_finalizer (reinterpret_cast<uintptr_t> (result), f, s, n));
1420
1421 if (GATHER_STATISTICS)
1422 {
1423 size_t overhead = object_size - size;
1424
1425 G.stats.total_overhead += overhead;
1426 G.stats.total_allocated += object_size;
1427 G.stats.total_overhead_per_order[order] += overhead;
1428 G.stats.total_allocated_per_order[order] += object_size;
1429
1430 if (size <= 32)
1431 {
1432 G.stats.total_overhead_under32 += overhead;
1433 G.stats.total_allocated_under32 += object_size;
1434 }
1435 if (size <= 64)
1436 {
1437 G.stats.total_overhead_under64 += overhead;
1438 G.stats.total_allocated_under64 += object_size;
1439 }
1440 if (size <= 128)
1441 {
1442 G.stats.total_overhead_under128 += overhead;
1443 G.stats.total_allocated_under128 += object_size;
1444 }
1445 }
1446
1447 if (GGC_DEBUG_LEVEL >= 3)
1448 fprintf (G.debug_file,
1449 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1450 (unsigned long) size, (unsigned long) object_size, result,
1451 (void *) entry);
1452
1453 return result;
1454 }
1455
1456 /* Mark function for strings. */
1457
1458 void
1459 gt_ggc_m_S (const void *p)
1460 {
1461 page_entry *entry;
1462 unsigned bit, word;
1463 unsigned long mask;
1464 unsigned long offset;
1465
1466 if (!p || !ggc_allocated_p (p))
1467 return;
1468
1469 /* Look up the page on which the object is alloced. */
1470 entry = lookup_page_table_entry (p);
1471 gcc_assert (entry);
1472
1473 /* Calculate the index of the object on the page; this is its bit
1474 position in the in_use_p bitmap. Note that because a char* might
1475 point to the middle of an object, we need special code here to
1476 make sure P points to the start of an object. */
1477 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1478 if (offset)
1479 {
1480 /* Here we've seen a char* which does not point to the beginning
1481 of an allocated object. We assume it points to the middle of
1482 a STRING_CST. */
1483 gcc_assert (offset == offsetof (struct tree_string, str));
1484 p = ((const char *) p) - offset;
1485 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1486 return;
1487 }
1488
1489 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1490 word = bit / HOST_BITS_PER_LONG;
1491 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1492
1493 /* If the bit was previously set, skip it. */
1494 if (entry->in_use_p[word] & mask)
1495 return;
1496
1497 /* Otherwise set it, and decrement the free object count. */
1498 entry->in_use_p[word] |= mask;
1499 entry->num_free_objects -= 1;
1500
1501 if (GGC_DEBUG_LEVEL >= 4)
1502 fprintf (G.debug_file, "Marking %p\n", p);
1503
1504 return;
1505 }
1506
1507
1508 /* User-callable entry points for marking string X. */
1509
1510 void
1511 gt_ggc_mx (const char *& x)
1512 {
1513 gt_ggc_m_S (x);
1514 }
1515
1516 void
1517 gt_ggc_mx (unsigned char *& x)
1518 {
1519 gt_ggc_m_S (x);
1520 }
1521
1522 void
1523 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1524 {
1525 }
1526
1527 /* If P is not marked, mark it and return false. Otherwise return true.
1528 P must have been allocated by the GC allocator; it mustn't point to
1529 static objects, stack variables, or memory allocated with malloc. */
1530
1531 int
1532 ggc_set_mark (const void *p)
1533 {
1534 page_entry *entry;
1535 unsigned bit, word;
1536 unsigned long mask;
1537
1538 /* Look up the page on which the object is alloced. If the object
1539 wasn't allocated by the collector, we'll probably die. */
1540 entry = lookup_page_table_entry (p);
1541 gcc_assert (entry);
1542
1543 /* Calculate the index of the object on the page; this is its bit
1544 position in the in_use_p bitmap. */
1545 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1546 word = bit / HOST_BITS_PER_LONG;
1547 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1548
1549 /* If the bit was previously set, skip it. */
1550 if (entry->in_use_p[word] & mask)
1551 return 1;
1552
1553 /* Otherwise set it, and decrement the free object count. */
1554 entry->in_use_p[word] |= mask;
1555 entry->num_free_objects -= 1;
1556
1557 if (GGC_DEBUG_LEVEL >= 4)
1558 fprintf (G.debug_file, "Marking %p\n", p);
1559
1560 return 0;
1561 }
1562
1563 /* Return 1 if P has been marked, zero otherwise.
1564 P must have been allocated by the GC allocator; it mustn't point to
1565 static objects, stack variables, or memory allocated with malloc. */
1566
1567 int
1568 ggc_marked_p (const void *p)
1569 {
1570 page_entry *entry;
1571 unsigned bit, word;
1572 unsigned long mask;
1573
1574 /* Look up the page on which the object is alloced. If the object
1575 wasn't allocated by the collector, we'll probably die. */
1576 entry = lookup_page_table_entry (p);
1577 gcc_assert (entry);
1578
1579 /* Calculate the index of the object on the page; this is its bit
1580 position in the in_use_p bitmap. */
1581 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1582 word = bit / HOST_BITS_PER_LONG;
1583 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1584
1585 return (entry->in_use_p[word] & mask) != 0;
1586 }
1587
1588 /* Return the size of the gc-able object P. */
1589
1590 size_t
1591 ggc_get_size (const void *p)
1592 {
1593 page_entry *pe = lookup_page_table_entry (p);
1594 return OBJECT_SIZE (pe->order);
1595 }
1596
1597 /* Release the memory for object P. */
1598
1599 void
1600 ggc_free (void *p)
1601 {
1602 if (in_gc)
1603 return;
1604
1605 page_entry *pe = lookup_page_table_entry (p);
1606 size_t order = pe->order;
1607 size_t size = OBJECT_SIZE (order);
1608
1609 if (GATHER_STATISTICS)
1610 ggc_free_overhead (p);
1611
1612 if (GGC_DEBUG_LEVEL >= 3)
1613 fprintf (G.debug_file,
1614 "Freeing object, actual size=%lu, at %p on %p\n",
1615 (unsigned long) size, p, (void *) pe);
1616
1617 #ifdef ENABLE_GC_CHECKING
1618 /* Poison the data, to indicate the data is garbage. */
1619 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1620 memset (p, 0xa5, size);
1621 #endif
1622 /* Let valgrind know the object is free. */
1623 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1624
1625 #ifdef ENABLE_GC_ALWAYS_COLLECT
1626 /* In the completely-anal-checking mode, we do *not* immediately free
1627 the data, but instead verify that the data is *actually* not
1628 reachable the next time we collect. */
1629 {
1630 struct free_object *fo = XNEW (struct free_object);
1631 fo->object = p;
1632 fo->next = G.free_object_list;
1633 G.free_object_list = fo;
1634 }
1635 #else
1636 {
1637 unsigned int bit_offset, word, bit;
1638
1639 G.allocated -= size;
1640
1641 /* Mark the object not-in-use. */
1642 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1643 word = bit_offset / HOST_BITS_PER_LONG;
1644 bit = bit_offset % HOST_BITS_PER_LONG;
1645 pe->in_use_p[word] &= ~(1UL << bit);
1646
1647 if (pe->num_free_objects++ == 0)
1648 {
1649 page_entry *p, *q;
1650
1651 /* If the page is completely full, then it's supposed to
1652 be after all pages that aren't. Since we've freed one
1653 object from a page that was full, we need to move the
1654 page to the head of the list.
1655
1656 PE is the node we want to move. Q is the previous node
1657 and P is the next node in the list. */
1658 q = pe->prev;
1659 if (q && q->num_free_objects == 0)
1660 {
1661 p = pe->next;
1662
1663 q->next = p;
1664
1665 /* If PE was at the end of the list, then Q becomes the
1666 new end of the list. If PE was not the end of the
1667 list, then we need to update the PREV field for P. */
1668 if (!p)
1669 G.page_tails[order] = q;
1670 else
1671 p->prev = q;
1672
1673 /* Move PE to the head of the list. */
1674 pe->next = G.pages[order];
1675 pe->prev = NULL;
1676 G.pages[order]->prev = pe;
1677 G.pages[order] = pe;
1678 }
1679
1680 /* Reset the hint bit to point to the only free object. */
1681 pe->next_bit_hint = bit_offset;
1682 }
1683 }
1684 #endif
1685 }
1686 \f
1687 /* Subroutine of init_ggc which computes the pair of numbers used to
1688 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1689
1690 This algorithm is taken from Granlund and Montgomery's paper
1691 "Division by Invariant Integers using Multiplication"
1692 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1693 constants). */
1694
1695 static void
1696 compute_inverse (unsigned order)
1697 {
1698 size_t size, inv;
1699 unsigned int e;
1700
1701 size = OBJECT_SIZE (order);
1702 e = 0;
1703 while (size % 2 == 0)
1704 {
1705 e++;
1706 size >>= 1;
1707 }
1708
1709 inv = size;
1710 while (inv * size != 1)
1711 inv = inv * (2 - inv*size);
1712
1713 DIV_MULT (order) = inv;
1714 DIV_SHIFT (order) = e;
1715 }
1716
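/* Illustrative sketch (not part of the original file): the exact-division
   trick that compute_inverse above sets up, shown standalone for a
   24-byte object size on an LP64 host.  24 == 3 * 2^3, so SHIFT is 3 and
   MULT is the multiplicative inverse of 3 modulo 2^64; for any offset
   that is a multiple of 24, (offset * MULT) >> SHIFT equals offset / 24,
   which is exactly what OFFSET_TO_BIT relies on.  */

static void
example_exact_division (void)
{
  size_t size = 24, inv, offset = 24 * 7;
  unsigned int e = 0;

  /* Strip the factors of two, as compute_inverse does.  */
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  /* Newton iteration for the inverse of the odd part modulo 2^64.  */
  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  gcc_assert (((offset * inv) >> e) == offset / 24);
}
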
1717 /* Initialize the ggc-mmap allocator. */
1718 void
1719 init_ggc (void)
1720 {
1721 static bool init_p = false;
1722 unsigned order;
1723
1724 if (init_p)
1725 return;
1726 init_p = true;
1727
1728 G.pagesize = getpagesize ();
1729 G.lg_pagesize = exact_log2 (G.pagesize);
1730
1731 #ifdef HAVE_MMAP_DEV_ZERO
1732 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1733 if (G.dev_zero_fd == -1)
1734 internal_error ("open /dev/zero: %m");
1735 #endif
1736
1737 #if 0
1738 G.debug_file = fopen ("ggc-mmap.debug", "w");
1739 #else
1740 G.debug_file = stdout;
1741 #endif
1742
1743 #ifdef USING_MMAP
1744 /* StunOS has an amazing off-by-one error for the first mmap allocation
1745 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1746 believe, is an unaligned page allocation, which would cause us to
1747 hork badly if we tried to use it. */
1748 {
1749 char *p = alloc_anon (NULL, G.pagesize, true);
1750 struct page_entry *e;
1751 if ((uintptr_t)p & (G.pagesize - 1))
1752 {
1753 /* How losing. Discard this one and try another. If we still
1754 can't get something useful, give up. */
1755
1756 p = alloc_anon (NULL, G.pagesize, true);
1757 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1758 }
1759
1760 /* We have a good page, might as well hold onto it... */
1761 e = XCNEW (struct page_entry);
1762 e->bytes = G.pagesize;
1763 e->page = p;
1764 e->next = G.free_pages;
1765 G.free_pages = e;
1766 }
1767 #endif
1768
1769 /* Initialize the object size table. */
1770 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1771 object_size_table[order] = (size_t) 1 << order;
1772 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1773 {
1774 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1775
1776 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1777 so that we're sure of getting aligned memory. */
1778 s = ROUND_UP (s, MAX_ALIGNMENT);
1779 object_size_table[order] = s;
1780 }
1781
1782 /* Initialize the objects-per-page and inverse tables. */
1783 for (order = 0; order < NUM_ORDERS; ++order)
1784 {
1785 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1786 if (objects_per_page_table[order] == 0)
1787 objects_per_page_table[order] = 1;
1788 compute_inverse (order);
1789 }
1790
1791 /* Reset the size_lookup array to put appropriately sized objects in
1792 the special orders. All objects bigger than the previous power
1793 of two, but no greater than the special size, should go in the
1794 new order. */
1795 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1796 {
1797 int o;
1798 int i;
1799
1800 i = OBJECT_SIZE (order);
1801 if (i >= NUM_SIZE_LOOKUP)
1802 continue;
1803
1804 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1805 size_lookup[i] = order;
1806 }
1807
1808 G.depth_in_use = 0;
1809 G.depth_max = 10;
1810 G.depth = XNEWVEC (unsigned int, G.depth_max);
1811
1812 G.by_depth_in_use = 0;
1813 G.by_depth_max = INITIAL_PTE_COUNT;
1814 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1815 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1816 }
1817
1818 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1819 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1820
1821 static void
1822 ggc_recalculate_in_use_p (page_entry *p)
1823 {
1824 unsigned int i;
1825 size_t num_objects;
1826
1827 /* Because the past-the-end bit in in_use_p is always set, we
1828 pretend there is one additional object. */
1829 num_objects = OBJECTS_IN_PAGE (p) + 1;
1830
1831 /* Reset the free object count. */
1832 p->num_free_objects = num_objects;
1833
1834 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1835 for (i = 0;
1836 i < CEIL (BITMAP_SIZE (num_objects),
1837 sizeof (*p->in_use_p));
1838 ++i)
1839 {
1840 unsigned long j;
1841
1842 /* Something is in use if it is marked, or if it was in use in a
1843 context further down the context stack. */
1844 p->in_use_p[i] |= save_in_use_p (p)[i];
1845
1846 /* Decrement the free object count for every object allocated. */
1847 for (j = p->in_use_p[i]; j; j >>= 1)
1848 p->num_free_objects -= (j & 1);
1849 }
1850
1851 gcc_assert (p->num_free_objects < num_objects);
1852 }
1853 \f
1854 /* Unmark all objects. */
1855
1856 static void
1857 clear_marks (void)
1858 {
1859 unsigned order;
1860
1861 for (order = 2; order < NUM_ORDERS; order++)
1862 {
1863 page_entry *p;
1864
1865 for (p = G.pages[order]; p != NULL; p = p->next)
1866 {
1867 size_t num_objects = OBJECTS_IN_PAGE (p);
1868 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1869
1870 /* The data should be page-aligned. */
1871 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1872
1873 /* Pages that aren't in the topmost context are not collected;
1874 nevertheless, we need their in-use bit vectors to store GC
1875 marks. So, back them up first. */
1876 if (p->context_depth < G.context_depth)
1877 {
1878 if (! save_in_use_p (p))
1879 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1880 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1881 }
1882
1883 /* Reset the number of free objects and clear the
1884 in-use bits. These will be adjusted by mark_obj. */
1885 p->num_free_objects = num_objects;
1886 memset (p->in_use_p, 0, bitmap_size);
1887
1888 /* Make sure the one-past-the-end bit is always set. */
1889 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1890 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1891 }
1892 }
1893 }
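/* Editor's note, an illustrative example of the final assignment in
   clear_marks: with HOST_BITS_PER_LONG == 64 and a page holding 5 objects,

     p->in_use_p[5 / 64] = 1UL << (5 % 64);    => in_use_p[0] == 0x20

   so a freshly cleared page has exactly one bit set, the past-the-end bit
   that ggc_recalculate_in_use_p accounts for as "one additional
   object".  */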
1894
1895 /* Check if any blocks with a registered finalizer have become unmarked. If so,
1896 run the finalizer and unregister it, because the block is about to be freed.
1897 Note that no guarantee is made about the order in which finalizers run, so
1898 touching other objects in GC memory is extremely unwise. */
1899
1900 static void
1901 ggc_handle_finalizers ()
1902 {
1903 if (G.context_depth != 0)
1904 return;
1905
1906 unsigned length = G.finalizers.length ();
1907 for (unsigned int i = 0; i < length;)
1908 {
1909 finalizer &f = G.finalizers[i];
1910 if (!ggc_marked_p (f.addr ()))
1911 {
1912 f.call ();
1913 G.finalizers.unordered_remove (i);
1914 length--;
1915 }
1916 else
1917 i++;
1918 }
1919
1920
1921 length = G.vec_finalizers.length ();
1922 for (unsigned int i = 0; i < length;)
1923 {
1924 vec_finalizer &f = G.vec_finalizers[i];
1925 if (!ggc_marked_p (f.addr ()))
1926 {
1927 f.call ();
1928 G.vec_finalizers.unordered_remove (i);
1929 length--;
1930 }
1931 else
1932 i++;
1933 }
1934 }
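/* Editor's note, a sketch of the removal idiom used above (assuming a
   hypothetical vec<int> V and predicate DEAD_P): unordered_remove swaps
   the last element into slot I, so I advances only when the current
   element is kept:

     for (unsigned i = 0, n = v.length (); i < n;)
       if (dead_p (v[i]))
         {
           v.unordered_remove (i);
           n--;
         }
       else
         i++;

   The element swapped into slot I is then examined on the next
   iteration.  */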
1935
1936 /* Free all empty pages. Partially empty pages need no attention
1937 because the `mark' bit doubles as an `unused' bit. */
1938
1939 static void
1940 sweep_pages (void)
1941 {
1942 unsigned order;
1943
1944 for (order = 2; order < NUM_ORDERS; order++)
1945 {
1946 /* The last page-entry to consider, regardless of entries
1947 placed at the end of the list. */
1948 page_entry * const last = G.page_tails[order];
1949
1950 size_t num_objects;
1951 size_t live_objects;
1952 page_entry *p, *previous;
1953 int done;
1954
1955 p = G.pages[order];
1956 if (p == NULL)
1957 continue;
1958
1959 previous = NULL;
1960 do
1961 {
1962 page_entry *next = p->next;
1963
1964 /* Loop until all entries have been examined. */
1965 done = (p == last);
1966
1967 num_objects = OBJECTS_IN_PAGE (p);
1968
1969 /* Add all live objects on this page to the count of
1970 allocated memory. */
1971 live_objects = num_objects - p->num_free_objects;
1972
1973 G.allocated += OBJECT_SIZE (order) * live_objects;
1974
1975 /* Only objects on pages in the topmost context should get
1976 collected. */
1977 if (p->context_depth < G.context_depth)
1978 ;
1979
1980 /* Remove the page if it's empty. */
1981 else if (live_objects == 0)
1982 {
1983 /* If P was the first page in the list, then NEXT
1984 becomes the new first page in the list, otherwise
1985 splice P out of the forward pointers. */
1986 if (! previous)
1987 G.pages[order] = next;
1988 else
1989 previous->next = next;
1990
1991 /* Splice P out of the back pointers too. */
1992 if (next)
1993 next->prev = previous;
1994
1995 /* Are we removing the last element? */
1996 if (p == G.page_tails[order])
1997 G.page_tails[order] = previous;
1998 free_page (p);
1999 p = previous;
2000 }
2001
2002 /* If the page is full, move it to the end. */
2003 else if (p->num_free_objects == 0)
2004 {
2005 /* Don't move it if it's already at the end. */
2006 if (p != G.page_tails[order])
2007 {
2008 /* Move p to the end of the list. */
2009 p->next = NULL;
2010 p->prev = G.page_tails[order];
2011 G.page_tails[order]->next = p;
2012
2013 /* Update the tail pointer... */
2014 G.page_tails[order] = p;
2015
2016 /* ... and the head pointer, if necessary. */
2017 if (! previous)
2018 G.pages[order] = next;
2019 else
2020 previous->next = next;
2021
2022 /* And update the backpointer in NEXT if necessary. */
2023 if (next)
2024 next->prev = previous;
2025
2026 p = previous;
2027 }
2028 }
2029
2030 /* If we've fallen through to here, it's a page in the
2031 topmost context that is neither full nor empty. Such a
2032 page must precede pages at lesser context depth in the
2033 list, so move it to the head. */
2034 else if (p != G.pages[order])
2035 {
2036 previous->next = p->next;
2037
2038 /* Update the backchain in the next node if it exists. */
2039 if (p->next)
2040 p->next->prev = previous;
2041
2042 /* Move P to the head of the list. */
2043 p->next = G.pages[order];
2044 p->prev = NULL;
2045 G.pages[order]->prev = p;
2046
2047 /* Update the head pointer. */
2048 G.pages[order] = p;
2049
2050 /* Are we moving the last element? */
2051 if (G.page_tails[order] == p)
2052 G.page_tails[order] = previous;
2053 p = previous;
2054 }
2055
2056 previous = p;
2057 p = next;
2058 }
2059 while (! done);
2060
2061 /* Now, restore the in_use_p vectors for any pages from contexts
2062 other than the current one. */
2063 for (p = G.pages[order]; p; p = p->next)
2064 if (p->context_depth != G.context_depth)
2065 ggc_recalculate_in_use_p (p);
2066 }
2067 }
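/* Editor's note, a generic sketch of the pointer surgery sweep_pages
   performs when it moves a non-full page P (with predecessor PREV) to the
   head of a doubly linked list rooted at HEAD:

     prev->next = p->next;
     if (p->next)
       p->next->prev = prev;
     p->next = head;
     p->prev = NULL;
     head->prev = p;
     head = p;

   Moving a full page to the tail is the mirror image of the same
   splice.  */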
2068
2069 #ifdef ENABLE_GC_CHECKING
2070 /* Clobber all free objects. */
2071
2072 static void
2073 poison_pages (void)
2074 {
2075 unsigned order;
2076
2077 for (order = 2; order < NUM_ORDERS; order++)
2078 {
2079 size_t size = OBJECT_SIZE (order);
2080 page_entry *p;
2081
2082 for (p = G.pages[order]; p != NULL; p = p->next)
2083 {
2084 size_t num_objects;
2085 size_t i;
2086
2087 if (p->context_depth != G.context_depth)
2088 /* Since we don't do any collection for pages in pushed
2089 contexts, there's no need to do any poisoning. And
2090 besides, the IN_USE_P array isn't valid until we pop
2091 contexts. */
2092 continue;
2093
2094 num_objects = OBJECTS_IN_PAGE (p);
2095 for (i = 0; i < num_objects; i++)
2096 {
2097 size_t word, bit;
2098 word = i / HOST_BITS_PER_LONG;
2099 bit = i % HOST_BITS_PER_LONG;
2100 if (((p->in_use_p[word] >> bit) & 1) == 0)
2101 {
2102 char *object = p->page + i * size;
2103
2104 /* Keep poison-by-write when we expect to use Valgrind,
2105 so exactly the same memory semantics are kept, in case
2106 there are memory errors. We override this request
2107 below. */
2108 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2109 size));
2110 memset (object, 0xa5, size);
2111
2112 /* Drop the handle to avoid handle leak. */
2113 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2114 }
2115 }
2116 }
2117 }
2118 }
2119 #else
2120 #define poison_pages()
2121 #endif
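/* Editor's note, a worked example of the bitmap indexing used in
   poison_pages: with HOST_BITS_PER_LONG == 64, object index 70 is tested
   as

     word = 70 / 64;                            => 1
     bit  = 70 % 64;                            => 6
     free = ((p->in_use_p[1] >> 6) & 1) == 0;

   i.e. bit 6 of the second bitmap word records whether object 70 is
   live.  */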
2122
2123 #ifdef ENABLE_GC_ALWAYS_COLLECT
2124 /* Validate that the reportedly free objects actually are. */
2125
2126 static void
2127 validate_free_objects (void)
2128 {
2129 struct free_object *f, *next, *still_free = NULL;
2130
2131 for (f = G.free_object_list; f ; f = next)
2132 {
2133 page_entry *pe = lookup_page_table_entry (f->object);
2134 size_t bit, word;
2135
2136 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2137 word = bit / HOST_BITS_PER_LONG;
2138 bit = bit % HOST_BITS_PER_LONG;
2139 next = f->next;
2140
2141 /* Make certain it isn't visible from any root. Notice that we
2142 do this check before sweep_pages merges save_in_use_p. */
2143 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2144
2145 /* If the object comes from an outer context, then retain the
2146 free_object entry, so that we can verify that the address
2147 isn't live on the stack in some outer context. */
2148 if (pe->context_depth != G.context_depth)
2149 {
2150 f->next = still_free;
2151 still_free = f;
2152 }
2153 else
2154 free (f);
2155 }
2156
2157 G.free_object_list = still_free;
2158 }
2159 #else
2160 #define validate_free_objects()
2161 #endif
2162
2163 /* Top level mark-and-sweep routine. */
2164
2165 void
2166 ggc_collect (void)
2167 {
2168 /* Avoid frequent unnecessary work by skipping collection if the
2169 total allocations haven't expanded much since the last
2170 collection. */
2171 float allocated_last_gc =
2172 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2173
2174 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2175 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2176 return;
2177
2178 timevar_push (TV_GC);
2179 if (!quiet_flag)
2180 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2181 if (GGC_DEBUG_LEVEL >= 2)
2182 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2183
2184 /* Zero the total allocated bytes. This will be recalculated in the
2185 sweep phase. */
2186 G.allocated = 0;
2187
2188 /* Release the pages we freed the last time we collected, but didn't
2189 reuse in the interim. */
2190 release_pages ();
2191
2192 /* Indicate that we've seen collections at this context depth. */
2193 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2194
2195 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2196
2197 in_gc = true;
2198 clear_marks ();
2199 ggc_mark_roots ();
2200 ggc_handle_finalizers ();
2201
2202 if (GATHER_STATISTICS)
2203 ggc_prune_overhead_list ();
2204
2205 poison_pages ();
2206 validate_free_objects ();
2207 sweep_pages ();
2208
2209 in_gc = false;
2210 G.allocated_last_gc = G.allocated;
2211
2212 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2213
2214 timevar_pop (TV_GC);
2215
2216 if (!quiet_flag)
2217 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2218 if (GGC_DEBUG_LEVEL >= 2)
2219 fprintf (G.debug_file, "END COLLECTING\n");
2220 }
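/* Editor's note, a worked example of the collection threshold above,
   using hypothetical parameter values GGC_MIN_HEAPSIZE == 4096 (kB) and
   GGC_MIN_EXPAND == 30: if the previous collection left 8 MB live,

     allocated_last_gc = MAX (8 MB, 4096 * 1024)   => 8 MB
     min_expand        = 8 MB * 30 / 100           => 2.4 MB

   so ggc_collect returns immediately until G.allocated exceeds roughly
   10.4 MB, unless ggc_force_collect is set.  */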
2221
2222 /* Assume that all GGC memory is reachable and grow the limits for the next collection.
2223 With checking, trigger GGC so -Q compilation outputs how much memory really is
2224 reachable. */
2225
2226 void
2227 ggc_grow (void)
2228 {
2229 #ifndef ENABLE_CHECKING
2230 G.allocated_last_gc = MAX (G.allocated_last_gc,
2231 G.allocated);
2232 #else
2233 ggc_collect ();
2234 #endif
2235 if (!quiet_flag)
2236 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2237 }
2238
2239 /* Print allocation statistics. */
2240 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2241 ? (x) \
2242 : ((x) < 1024*1024*10 \
2243 ? (x) / 1024 \
2244 : (x) / (1024*1024))))
2245 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
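/* Editor's note, an illustrative use of the two macros above, which are
   always applied to the same value as a pair:

     fprintf (stderr, "%lu%c", SCALE (5242880), STAT_LABEL (5242880));

   prints "5120k"; values under 10 kB print in bytes, values under 10 MB
   in kilobytes, and anything larger in megabytes.  */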
2246
2247 void
2248 ggc_print_statistics (void)
2249 {
2250 struct ggc_statistics stats;
2251 unsigned int i;
2252 size_t total_overhead = 0;
2253
2254 /* Clear the statistics. */
2255 memset (&stats, 0, sizeof (stats));
2256
2257 /* Make sure collection will really occur. */
2258 G.allocated_last_gc = 0;
2259
2260 /* Collect and print the statistics common across collectors. */
2261 ggc_print_common_statistics (stderr, &stats);
2262
2263 /* Release free pages so that we will not count the bytes allocated
2264 there as part of the total allocated memory. */
2265 release_pages ();
2266
2267 /* Collect some information about the various sizes of
2268 allocation. */
2269 fprintf (stderr,
2270 "Memory still allocated at the end of the compilation process\n");
2271 fprintf (stderr, "%-5s %10s %10s %10s\n",
2272 "Size", "Allocated", "Used", "Overhead");
2273 for (i = 0; i < NUM_ORDERS; ++i)
2274 {
2275 page_entry *p;
2276 size_t allocated;
2277 size_t in_use;
2278 size_t overhead;
2279
2280 /* Skip empty entries. */
2281 if (!G.pages[i])
2282 continue;
2283
2284 overhead = allocated = in_use = 0;
2285
2286 /* Figure out the total number of bytes allocated for objects of
2287 this size, and how many of them are actually in use. Also figure
2288 out how much memory the page table is using. */
2289 for (p = G.pages[i]; p; p = p->next)
2290 {
2291 allocated += p->bytes;
2292 in_use +=
2293 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2294
2295 overhead += (sizeof (page_entry) - sizeof (long)
2296 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2297 }
2298 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2299 (unsigned long) OBJECT_SIZE (i),
2300 SCALE (allocated), STAT_LABEL (allocated),
2301 SCALE (in_use), STAT_LABEL (in_use),
2302 SCALE (overhead), STAT_LABEL (overhead));
2303 total_overhead += overhead;
2304 }
2305 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2306 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2307 SCALE (G.allocated), STAT_LABEL (G.allocated),
2308 SCALE (total_overhead), STAT_LABEL (total_overhead));
2309
2310 if (GATHER_STATISTICS)
2311 {
2312 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2313
2314 fprintf (stderr, "Total Overhead: %10" HOST_LONG_LONG_FORMAT "d\n",
2315 G.stats.total_overhead);
2316 fprintf (stderr, "Total Allocated: %10" HOST_LONG_LONG_FORMAT "d\n",
2317 G.stats.total_allocated);
2318
2319 fprintf (stderr, "Total Overhead under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
2320 G.stats.total_overhead_under32);
2321 fprintf (stderr, "Total Allocated under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
2322 G.stats.total_allocated_under32);
2323 fprintf (stderr, "Total Overhead under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
2324 G.stats.total_overhead_under64);
2325 fprintf (stderr, "Total Allocated under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
2326 G.stats.total_allocated_under64);
2327 fprintf (stderr, "Total Overhead under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
2328 G.stats.total_overhead_under128);
2329 fprintf (stderr, "Total Allocated under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
2330 G.stats.total_allocated_under128);
2331
2332 for (i = 0; i < NUM_ORDERS; i++)
2333 if (G.stats.total_allocated_per_order[i])
2334 {
2335 fprintf (stderr, "Total Overhead page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
2336 (unsigned long) OBJECT_SIZE (i),
2337 G.stats.total_overhead_per_order[i]);
2338 fprintf (stderr, "Total Allocated page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
2339 (unsigned long) OBJECT_SIZE (i),
2340 G.stats.total_allocated_per_order[i]);
2341 }
2342 }
2343 }
2344 \f
2345 struct ggc_pch_ondisk
2346 {
2347 unsigned totals[NUM_ORDERS];
2348 };
2349
2350 struct ggc_pch_data
2351 {
2352 struct ggc_pch_ondisk d;
2353 uintptr_t base[NUM_ORDERS];
2354 size_t written[NUM_ORDERS];
2355 };
2356
2357 struct ggc_pch_data *
2358 init_ggc_pch (void)
2359 {
2360 return XCNEW (struct ggc_pch_data);
2361 }
2362
2363 void
2364 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2365 size_t size, bool is_string ATTRIBUTE_UNUSED)
2366 {
2367 unsigned order;
2368
2369 if (size < NUM_SIZE_LOOKUP)
2370 order = size_lookup[size];
2371 else
2372 {
2373 order = 10;
2374 while (size > OBJECT_SIZE (order))
2375 order++;
2376 }
2377
2378 d->d.totals[order]++;
2379 }
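/* Editor's note, an illustrative sketch of the fallback above for a
   request too large for size_lookup: a hypothetical 3000-byte object is
   counted as

     order = 10;
     while (3000 > OBJECT_SIZE (order))
       order++;

   i.e. the first order at or beyond 10 whose OBJECT_SIZE is large enough;
   with the power-of-two sizes set up in init_ggc that is order 12
   (4096-byte objects).  */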
2380
2381 size_t
2382 ggc_pch_total_size (struct ggc_pch_data *d)
2383 {
2384 size_t a = 0;
2385 unsigned i;
2386
2387 for (i = 0; i < NUM_ORDERS; i++)
2388 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2389 return a;
2390 }
2391
2392 void
2393 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2394 {
2395 uintptr_t a = (uintptr_t) base;
2396 unsigned i;
2397
2398 for (i = 0; i < NUM_ORDERS; i++)
2399 {
2400 d->base[i] = a;
2401 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2402 }
2403 }
2404
2405
2406 char *
2407 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2408 size_t size, bool is_string ATTRIBUTE_UNUSED)
2409 {
2410 unsigned order;
2411 char *result;
2412
2413 if (size < NUM_SIZE_LOOKUP)
2414 order = size_lookup[size];
2415 else
2416 {
2417 order = 10;
2418 while (size > OBJECT_SIZE (order))
2419 order++;
2420 }
2421
2422 result = (char *) d->base[order];
2423 d->base[order] += OBJECT_SIZE (order);
2424 return result;
2425 }
2426
2427 void
2428 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2429 FILE *f ATTRIBUTE_UNUSED)
2430 {
2431 /* Nothing to do. */
2432 }
2433
2434 void
2435 ggc_pch_write_object (struct ggc_pch_data *d,
2436 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2437 size_t size, bool is_string ATTRIBUTE_UNUSED)
2438 {
2439 unsigned order;
2440 static const char emptyBytes[256] = { 0 };
2441
2442 if (size < NUM_SIZE_LOOKUP)
2443 order = size_lookup[size];
2444 else
2445 {
2446 order = 10;
2447 while (size > OBJECT_SIZE (order))
2448 order++;
2449 }
2450
2451 if (fwrite (x, size, 1, f) != 1)
2452 fatal_error (input_location, "can%'t write PCH file: %m");
2453
2454 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2455 object out to OBJECT_SIZE(order). This happens for strings. */
2456
2457 if (size != OBJECT_SIZE (order))
2458 {
2459 unsigned padding = OBJECT_SIZE (order) - size;
2460
2461 /* To speed small writes, we use a nulled-out array that's larger
2462 than most padding requests as the source for our null bytes. This
2463 permits us to do the padding with fwrite() rather than fseek(), and
2464 limits the chance the OS may try to flush any outstanding writes. */
2465 if (padding <= sizeof (emptyBytes))
2466 {
2467 if (fwrite (emptyBytes, 1, padding, f) != padding)
2468 fatal_error (input_location, "can%'t write PCH file");
2469 }
2470 else
2471 {
2472 /* Larger than our buffer? Just default to fseek. */
2473 if (fseek (f, padding, SEEK_CUR) != 0)
2474 fatal_error (input_location, "can%'t write PCH file");
2475 }
2476 }
2477
2478 d->written[order]++;
2479 if (d->written[order] == d->d.totals[order]
2480 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2481 G.pagesize),
2482 SEEK_CUR) != 0)
2483 fatal_error (input_location, "can%'t write PCH file: %m");
2484 }
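/* Editor's note, a worked example of the padding logic above, assuming a
   hypothetical 96-byte order: writing a 70-byte string emits the 70
   payload bytes and then

     unsigned padding = 96 - 70;            => 26
     fwrite (emptyBytes, 1, padding, f);    (26 <= sizeof (emptyBytes))

   whereas a padding larger than 256 bytes falls back to
   fseek (f, padding, SEEK_CUR).  */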
2485
2486 void
2487 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2488 {
2489 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2490 fatal_error (input_location, "can%'t write PCH file: %m");
2491 free (d);
2492 }
2493
2494 /* Move the PCH PTE entries, just added to the end of by_depth, to the
2495 front. */
2496
2497 static void
2498 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2499 {
2500 unsigned i;
2501
2502 /* First, we swap the new entries to the front of the varrays. */
2503 page_entry **new_by_depth;
2504 unsigned long **new_save_in_use;
2505
2506 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2507 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2508
2509 memcpy (&new_by_depth[0],
2510 &G.by_depth[count_old_page_tables],
2511 count_new_page_tables * sizeof (void *));
2512 memcpy (&new_by_depth[count_new_page_tables],
2513 &G.by_depth[0],
2514 count_old_page_tables * sizeof (void *));
2515 memcpy (&new_save_in_use[0],
2516 &G.save_in_use[count_old_page_tables],
2517 count_new_page_tables * sizeof (void *));
2518 memcpy (&new_save_in_use[count_new_page_tables],
2519 &G.save_in_use[0],
2520 count_old_page_tables * sizeof (void *));
2521
2522 free (G.by_depth);
2523 free (G.save_in_use);
2524
2525 G.by_depth = new_by_depth;
2526 G.save_in_use = new_save_in_use;
2527
2528 /* Now update all the index_by_depth fields. */
2529 for (i = G.by_depth_in_use; i > 0; --i)
2530 {
2531 page_entry *p = G.by_depth[i-1];
2532 p->index_by_depth = i-1;
2533 }
2534
2535 /* And last, we update the depth pointers in G.depth. The first
2536 entry is already 0, and context 0 entries always start at index
2537 0, so there is nothing to update in the first slot. We need a
2538 second slot only if we have old PTEs, and if we do, they start
2539 at index count_new_page_tables. */
2540 if (count_old_page_tables)
2541 push_depth (count_new_page_tables);
2542 }
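/* Editor's note, an illustrative example of the rotation above: with
   three old page-table entries [A B C] and two new PCH entries [D E]
   appended by ggc_pch_read, the two memcpy pairs build

     new_by_depth == [D E A B C]

   so the context-0 PCH entries come first, and the index_by_depth and
   G.depth fix-ups at the end of the function keep the lookup structures
   consistent with the new order.  */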
2543
2544 void
2545 ggc_pch_read (FILE *f, void *addr)
2546 {
2547 struct ggc_pch_ondisk d;
2548 unsigned i;
2549 char *offs = (char *) addr;
2550 unsigned long count_old_page_tables;
2551 unsigned long count_new_page_tables;
2552
2553 count_old_page_tables = G.by_depth_in_use;
2554
2555 /* We've just read in a PCH file. So, every object that used to be
2556 allocated is now free. */
2557 clear_marks ();
2558 #ifdef ENABLE_GC_CHECKING
2559 poison_pages ();
2560 #endif
2561 /* Since we free all the allocated objects, the free list becomes
2562 useless. Validate it now, which will also clear it. */
2563 validate_free_objects ();
2564
2565 /* No object read from a PCH file should ever be freed. So, set the
2566 context depth to 1, and set the depth of all the currently-allocated
2567 pages to be 1 too. PCH pages will have depth 0. */
2568 gcc_assert (!G.context_depth);
2569 G.context_depth = 1;
2570 for (i = 0; i < NUM_ORDERS; i++)
2571 {
2572 page_entry *p;
2573 for (p = G.pages[i]; p != NULL; p = p->next)
2574 p->context_depth = G.context_depth;
2575 }
2576
2577 /* Allocate the appropriate page-table entries for the pages read from
2578 the PCH file. */
2579 if (fread (&d, sizeof (d), 1, f) != 1)
2580 fatal_error (input_location, "can%'t read PCH file: %m");
2581
2582 for (i = 0; i < NUM_ORDERS; i++)
2583 {
2584 struct page_entry *entry;
2585 char *pte;
2586 size_t bytes;
2587 size_t num_objs;
2588 size_t j;
2589
2590 if (d.totals[i] == 0)
2591 continue;
2592
2593 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2594 num_objs = bytes / OBJECT_SIZE (i);
2595 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2596 - sizeof (long)
2597 + BITMAP_SIZE (num_objs + 1)));
2598 entry->bytes = bytes;
2599 entry->page = offs;
2600 entry->context_depth = 0;
2601 offs += bytes;
2602 entry->num_free_objects = 0;
2603 entry->order = i;
2604
2605 for (j = 0;
2606 j + HOST_BITS_PER_LONG <= num_objs + 1;
2607 j += HOST_BITS_PER_LONG)
2608 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2609 for (; j < num_objs + 1; j++)
2610 entry->in_use_p[j / HOST_BITS_PER_LONG]
2611 |= 1L << (j % HOST_BITS_PER_LONG);
2612
2613 for (pte = entry->page;
2614 pte < entry->page + entry->bytes;
2615 pte += G.pagesize)
2616 set_page_table_entry (pte, entry);
2617
2618 if (G.page_tails[i] != NULL)
2619 G.page_tails[i]->next = entry;
2620 else
2621 G.pages[i] = entry;
2622 G.page_tails[i] = entry;
2623
2624 /* We start off by just adding all the new information to the
2625 end of the varrays; later, we will move the new information
2626 to the front of the varrays, as the PCH page tables are at
2627 context 0. */
2628 push_by_depth (entry, 0);
2629 }
2630
2631 /* Now, we update the various data structures that speed page table
2632 handling. */
2633 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2634
2635 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2636
2637 /* Update the statistics. */
2638 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2639 }
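/* Editor's note, a worked example of the in_use_p fill in ggc_pch_read:
   with HOST_BITS_PER_LONG == 64 and num_objs == 70, the two loops set

     entry->in_use_p[0] = -1;       (bits 0..63: objects 0..63)
     entry->in_use_p[1] |= 0x7f;    (bits 0..6: objects 64..69 plus the
                                     past-the-end bit)

   which matches the invariant clear_marks maintains for ordinary
   pages.  */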