gcc/ggc-page.c
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "timevar.h"
32 #include "params.h"
33 #ifdef ENABLE_VALGRIND_CHECKING
34 # ifdef HAVE_MEMCHECK_H
35 # include <memcheck.h>
36 # else
37 # include <valgrind.h>
38 # endif
39 #else
40 /* Avoid #ifdef:s when we can help it. */
41 #define VALGRIND_DISCARD(x)
42 #endif
43
44 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
45 file open. Prefer either to valloc. */
46 #ifdef HAVE_MMAP_ANON
47 # undef HAVE_MMAP_DEV_ZERO
48
49 # include <sys/mman.h>
50 # ifndef MAP_FAILED
51 # define MAP_FAILED -1
52 # endif
53 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
54 # define MAP_ANONYMOUS MAP_ANON
55 # endif
56 # define USING_MMAP
57
58 #endif
59
60 #ifdef HAVE_MMAP_DEV_ZERO
61
62 # include <sys/mman.h>
63 # ifndef MAP_FAILED
64 # define MAP_FAILED -1
65 # endif
66 # define USING_MMAP
67
68 #endif
69
70 #ifndef USING_MMAP
71 #define USING_MALLOC_PAGE_GROUPS
72 #endif
73
74 /* Strategy:
75
76 This garbage-collecting allocator allocates objects on one of a set
77 of pages. Each page can allocate objects of a single size only;
78 available sizes are powers of two starting at four bytes. The size
79 of an allocation request is rounded up to the next power of two
80 (`order'), and satisfied from the appropriate page.
81
82 Each page is recorded in a page-entry, which also maintains an
83 in-use bitmap of object positions on the page. This allows the
84 allocation state of a particular object to be flipped without
85 touching the page itself.
86
87 Each page-entry also has a context depth, which is used to track
88 pushing and popping of allocation contexts. Only objects allocated
89 in the current (highest-numbered) context may be collected.
90
91 Page entries are arranged in an array of singly-linked lists. The
92 array is indexed by the allocation size, in bits, of the pages on
93 it; i.e. all pages on a list allocate objects of the same size.
94 Pages are ordered on the list such that all non-full pages precede
95 all full pages, with non-full pages arranged in order of decreasing
96 context depth.
97
98 Empty pages (of all orders) are kept on a single page cache list,
99 and are considered first when new pages are required; they are
100 deallocated at the start of the next collection if they haven't
101 been recycled by then. */
102
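/* Editor's sketch (not part of this file, guarded out of compilation):
   a minimal illustration of the strategy above.  A 20-byte request is
   rounded up to the next power of two, 32 bytes (order 5), and a 4K
   page of that order holds 4096 / 32 = 128 objects, one in-use bit
   each.  The helper name example_order and the 4K page size are
   assumptions made only for this sketch.  */
#if 0
#include <assert.h>

static unsigned int
example_order (unsigned int size)
{
  unsigned int order = 2;                  /* smallest bucket: 4 bytes */
  while ((1u << order) < size)
    order++;
  return order;
}

int
main (void)
{
  unsigned int pagesize = 4096;
  unsigned int order = example_order (20); /* 20 bytes -> 32-byte order */

  assert (order == 5);
  assert (pagesize / (1u << order) == 128);
  return 0;
}
#endif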
103 /* Define GGC_DEBUG_LEVEL to print debugging information.
104 0: No debugging output.
105 1: GC statistics only.
106 2: Page-entry allocations/deallocations as well.
107 3: Object allocations as well.
108 4: Object marks as well. */
109 #define GGC_DEBUG_LEVEL (0)
110 \f
111 #ifndef HOST_BITS_PER_PTR
112 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
113 #endif
114
115 \f
116 /* A two-level tree is used to look up the page-entry for a given
117 pointer. Two chunks of the pointer's bits are extracted to index
118 the first and second levels of the tree, as follows:
119
120                                    HOST_PAGE_SIZE_BITS
121                            32           |      |
122        msb +----------------+----+------+------+ lsb
123                             |      |
124                          PAGE_L1_BITS
125                                    |
126                                    PAGE_L2_BITS
127
128 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
129 pages are aligned on system page boundaries. The next most
130 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
131 index values in the lookup table, respectively.
132
133 For 32-bit architectures and the settings below, there are no
134 leftover bits. For architectures with wider pointers, the lookup
135 tree points to a list of pages, which must be scanned to find the
136 correct one. */
137
138 #define PAGE_L1_BITS (8)
139 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
140 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
141 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
142
143 #define LOOKUP_L1(p) \
144 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
145
146 #define LOOKUP_L2(p) \
147 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
148
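/* Editor's sketch (not part of this file, guarded out of compilation):
   how a 32-bit address splits under the macros above, assuming a 4K
   page so that G.lg_pagesize == 12 and PAGE_L2_BITS == 12.  The
   address 0xABCDE123 indexes the first level with 0xAB, the second
   level with 0xCDE, and its low 12 bits are the offset within the
   page, which the lookup ignores.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned int p = 0xABCDE123u;
  unsigned int lg_pagesize = 12;
  unsigned int l1_bits = 8;                       /* PAGE_L1_BITS */
  unsigned int l2_bits = 32 - l1_bits - lg_pagesize;

  unsigned int l1 = (p >> (32 - l1_bits)) & ((1u << l1_bits) - 1);
  unsigned int l2 = (p >> lg_pagesize) & ((1u << l2_bits) - 1);

  assert (l1 == 0xAB);
  assert (l2 == 0xCDE);
  return 0;
}
#endif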
149 /* The number of objects per allocation page, for objects on a page of
150 the indicated ORDER. */
151 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
152
153 /* The number of objects in P. */
154 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
155
156 /* The size of an object on a page of the indicated ORDER. */
157 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
158
159 /* For speed, we avoid doing a general integer divide to locate the
160 offset in the allocation bitmap, by precalculating numbers M, S
161 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
162 within the page which is evenly divisible by the object size Z. */
163 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
164 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
165 #define OFFSET_TO_BIT(OFFSET, ORDER) \
166 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
167
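/* Editor's sketch (not part of this file, guarded out of compilation):
   a worked example of the multiply-and-shift division used by
   OFFSET_TO_BIT, assuming 32-bit unsigned int.  For an object size of
   24 = 3 << 3 the precomputed pair is shift = 3 and mult = 0xAAAAAAAB,
   the inverse of 3 modulo 2^32 (see compute_inverse below); the
   identity only holds for offsets that are multiples of 24.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned int mult = 0xAAAAAAABu;      /* inverse of 3 mod 2^32 */
  unsigned int shift = 3;               /* 24 == 3 << 3 */
  unsigned int offset;

  for (offset = 0; offset <= 24 * 100; offset += 24)
    assert (((offset * mult) >> shift) == offset / 24);
  return 0;
}
#endif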
168 /* The number of extra orders, not corresponding to power-of-two sized
169 objects. */
170
171 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
172
173 #define RTL_SIZE(NSLOTS) \
174 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
175
176 #define TREE_EXP_SIZE(OPS) \
177 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
178
179 /* The Ith entry is the maximum size of an object to be stored in the
180 Ith extra order. Adding a new entry to this array is the *only*
181 thing you need to do to add a new special allocation size. */
182
183 static const size_t extra_order_size_table[] = {
184 sizeof (struct tree_decl),
185 sizeof (struct tree_list),
186 TREE_EXP_SIZE (2),
187 RTL_SIZE (2), /* MEM, PLUS, etc. */
188 RTL_SIZE (9), /* INSN, CALL_INSN, JUMP_INSN */
189 };
190
191 /* The total number of orders. */
192
193 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
194
195 /* We use this structure to determine the alignment required for
196 allocations. For power-of-two sized allocations, that's not a
197 problem, but it does matter for odd-sized allocations. */
198
199 struct max_alignment {
200 char c;
201 union {
202 HOST_WIDEST_INT i;
203 long double d;
204 } u;
205 };
206
207 /* The biggest alignment required. */
208
209 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
210
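/* Editor's sketch (not part of this file, guarded out of compilation):
   the offsetof trick above reports the strictest alignment among the
   union members, because the compiler must pad the leading char so
   that the union is properly aligned.  The local struct name and the
   value 16 below are illustrative assumptions for a typical LP64
   host.  */
#if 0
#include <stddef.h>
#include <stdio.h>

struct example_max_alignment
{
  char c;
  union
  {
    long long i;
    long double d;
  } u;
};

int
main (void)
{
  /* Prints 16 on hosts where long double is 16-byte aligned.  */
  printf ("MAX_ALIGNMENT would be %lu\n",
          (unsigned long) offsetof (struct example_max_alignment, u));
  return 0;
}
#endif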
211 /* Compute the smallest nonnegative number which when added to X gives
212 a multiple of F. */
213
214 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
215
216 /* Compute the smallest multiple of F that is >= X. */
217
218 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
219
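/* Editor's sketch (not part of this file, guarded out of compilation):
   worked examples for the two rounding macros above.  CEIL comes from
   system.h; it is restated here so the sketch stands alone, under the
   EX_ prefix to mark everything as illustrative.  */
#if 0
#include <assert.h>

#define EX_CEIL(x, y)           (((x) + (y) - 1) / (y))
#define EX_ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
#define EX_ROUND_UP(x, f)       (EX_CEIL (x, f) * (f))

int
main (void)
{
  assert (EX_ROUND_UP_VALUE (20, 16) == 12);   /* 20 + 12 == 32 */
  assert (EX_ROUND_UP (20, 16) == 32);
  assert (EX_ROUND_UP (32, 16) == 32);         /* already a multiple */
  return 0;
}
#endif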
220 /* The Ith entry is the number of objects on a page of order I. */
221
222 static unsigned objects_per_page_table[NUM_ORDERS];
223
224 /* The Ith entry is the size of an object on a page of order I. */
225
226 static size_t object_size_table[NUM_ORDERS];
227
228 /* The Ith entry is a pair of numbers (mult, shift) such that
229 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
230 for all k evenly divisible by OBJECT_SIZE(I). */
231
232 static struct
233 {
234 size_t mult;
235 unsigned int shift;
236 }
237 inverse_table[NUM_ORDERS];
238
239 /* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241 typedef struct page_entry
242 {
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
246
247 /* The number of bytes allocated. (This will always be a multiple
248 of the host system page size.) */
249 size_t bytes;
250
251 /* The address at which the memory is allocated. */
252 char *page;
253
254 #ifdef USING_MALLOC_PAGE_GROUPS
255 /* Back pointer to the page group this page came from. */
256 struct page_group *group;
257 #endif
258
259 /* This is the index in the by_depth varray where this page table
260 can be found. */
261 unsigned long index_by_depth;
262
263 /* Context depth of this page. */
264 unsigned short context_depth;
265
266 /* The number of free objects remaining on this page. */
267 unsigned short num_free_objects;
268
269 /* A likely candidate for the bit position of a free object for the
270 next allocation from this page. */
271 unsigned short next_bit_hint;
272
273 /* The lg of size of objects allocated from this page. */
274 unsigned char order;
275
276 /* A bit vector indicating whether or not objects are in use. The
277 Nth bit is one if the Nth object on this page is allocated. This
278 array is dynamically sized. */
279 unsigned long in_use_p[1];
280 } page_entry;
281
282 #ifdef USING_MALLOC_PAGE_GROUPS
283 /* A page_group describes a large allocation from malloc, from which
284 we parcel out aligned pages. */
285 typedef struct page_group
286 {
287 /* A linked list of all extant page groups. */
288 struct page_group *next;
289
290 /* The address we received from malloc. */
291 char *allocation;
292
293 /* The size of the block. */
294 size_t alloc_size;
295
296 /* A bitmask of pages in use. */
297 unsigned int in_use;
298 } page_group;
299 #endif
300
301 #if HOST_BITS_PER_PTR <= 32
302
303 /* On 32-bit hosts, we use a two level page table, as pictured above. */
304 typedef page_entry **page_table[PAGE_L1_SIZE];
305
306 #else
307
308 /* On 64-bit hosts, we use the same two level page tables plus a linked
309 list that disambiguates the top 32-bits. There will almost always be
310 exactly one entry in the list. */
311 typedef struct page_table_chain
312 {
313 struct page_table_chain *next;
314 size_t high_bits;
315 page_entry **table[PAGE_L1_SIZE];
316 } *page_table;
317
318 #endif
319
320 /* The rest of the global variables. */
321 static struct globals
322 {
323 /* The Nth element in this array is a page with objects of size 2^N.
324 If there are any pages with free objects, they will be at the
325 head of the list. NULL if there are no page-entries for this
326 object size. */
327 page_entry *pages[NUM_ORDERS];
328
329 /* The Nth element in this array is the last page with objects of
330 size 2^N. NULL if there are no page-entries for this object
331 size. */
332 page_entry *page_tails[NUM_ORDERS];
333
334 /* Lookup table for associating allocation pages with object addresses. */
335 page_table lookup;
336
337 /* The system's page size. */
338 size_t pagesize;
339 size_t lg_pagesize;
340
341 /* Bytes currently allocated. */
342 size_t allocated;
343
344 /* Bytes currently allocated at the end of the last collection. */
345 size_t allocated_last_gc;
346
347 /* Total amount of memory mapped. */
348 size_t bytes_mapped;
349
350 /* Bit N set if any allocations have been done at context depth N. */
351 unsigned long context_depth_allocations;
352
353 /* Bit N set if any collections have been done at context depth N. */
354 unsigned long context_depth_collections;
355
356 /* The current depth in the context stack. */
357 unsigned short context_depth;
358
359 /* A file descriptor open to /dev/zero for reading. */
360 #if defined (HAVE_MMAP_DEV_ZERO)
361 int dev_zero_fd;
362 #endif
363
364 /* A cache of free system pages. */
365 page_entry *free_pages;
366
367 #ifdef USING_MALLOC_PAGE_GROUPS
368 page_group *page_groups;
369 #endif
370
371 /* The file descriptor for debugging output. */
372 FILE *debug_file;
373
374 /* Current number of elements in use in depth below. */
375 unsigned int depth_in_use;
376
377 /* Maximum number of elements that can be used before resizing. */
378 unsigned int depth_max;
379
380 Each element of this array is the index into by_depth at which the
381 given depth starts. The array is indexed by the context depth we
382 are interested in. */
383 unsigned int *depth;
384
385 /* Current number of elements in use in by_depth below. */
386 unsigned int by_depth_in_use;
387
388 /* Maximum number of elements that can be used before resizing. */
389 unsigned int by_depth_max;
390
391 Each element of this array is a pointer to a page_entry; all
392 page_entries can be found here, ordered by increasing depth.
393 index_by_depth in the page_entry is the index into this data
394 structure where that page_entry can be found. This is used to
395 speed up finding all page_entries at a particular depth. */
396 page_entry **by_depth;
397
398 /* Each element is a pointer to the saved in_use_p bits, if any,
399 zero otherwise. We allocate them all together, to enable a
400 better runtime data access pattern. */
401 unsigned long **save_in_use;
402
403 #ifdef GATHER_STATISTICS
404 struct
405 {
406 /* Total memory allocated with ggc_alloc */
407 unsigned long long total_allocated;
408 /* Total overhead for memory to be allocated with ggc_alloc */
409 unsigned long long total_overhead;
410
411 /* Total allocations and overhead for sizes up to 32, 64 and 128
412 bytes. These sizes are interesting because they are typical cache
413 line sizes. */
414
415 unsigned long long total_allocated_under32;
416 unsigned long long total_overhead_under32;
417
418 unsigned long long total_allocated_under64;
419 unsigned long long total_overhead_under64;
420
421 unsigned long long total_allocated_under128;
422 unsigned long long total_overhead_under128;
423
424 /* The overhead for each of the allocation orders. */
425 unsigned long long total_overhead_per_order[NUM_ORDERS];
426 } stats;
427 #endif
428 } G;
429
430 /* The size in bytes required to maintain a bitmap for the objects
431 on a page-entry. */
432 #define BITMAP_SIZE(Num_objects) \
433 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
434
435 /* Allocate pages in chunks of this size, to throttle calls to memory
436 allocation routines. The first page is used, the rest go onto the
437 free list. This cannot be larger than HOST_BITS_PER_INT for the
438 in_use bitmask for page_group. */
439 #define GGC_QUIRE_SIZE 16
440
441 /* Initial guess as to how many page table entries we might need. */
442 #define INITIAL_PTE_COUNT 128
443 \f
444 static int ggc_allocated_p (const void *);
445 static page_entry *lookup_page_table_entry (const void *);
446 static void set_page_table_entry (void *, page_entry *);
447 #ifdef USING_MMAP
448 static char *alloc_anon (char *, size_t);
449 #endif
450 #ifdef USING_MALLOC_PAGE_GROUPS
451 static size_t page_group_index (char *, char *);
452 static void set_page_group_in_use (page_group *, char *);
453 static void clear_page_group_in_use (page_group *, char *);
454 #endif
455 static struct page_entry * alloc_page (unsigned);
456 static void free_page (struct page_entry *);
457 static void release_pages (void);
458 static void clear_marks (void);
459 static void sweep_pages (void);
460 static void ggc_recalculate_in_use_p (page_entry *);
461 static void compute_inverse (unsigned);
462 static inline void adjust_depth (void);
463 static void move_ptes_to_front (int, int);
464
465 #ifdef ENABLE_GC_CHECKING
466 static void poison_pages (void);
467 #endif
468
469 void debug_print_page_list (int);
470 static void push_depth (unsigned int);
471 static void push_by_depth (page_entry *, unsigned long *);
472 \f
473 /* Push an entry onto G.depth. */
474
475 inline static void
476 push_depth (unsigned int i)
477 {
478 if (G.depth_in_use >= G.depth_max)
479 {
480 G.depth_max *= 2;
481 G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
482 }
483 G.depth[G.depth_in_use++] = i;
484 }
485
486 /* Push an entry onto G.by_depth and G.save_in_use. */
487
488 inline static void
489 push_by_depth (page_entry *p, unsigned long *s)
490 {
491 if (G.by_depth_in_use >= G.by_depth_max)
492 {
493 G.by_depth_max *= 2;
494 G.by_depth = xrealloc (G.by_depth,
495 G.by_depth_max * sizeof (page_entry *));
496 G.save_in_use = xrealloc (G.save_in_use,
497 G.by_depth_max * sizeof (unsigned long *));
498 }
499 G.by_depth[G.by_depth_in_use] = p;
500 G.save_in_use[G.by_depth_in_use++] = s;
501 }
502
503 #if (GCC_VERSION < 3001)
504 #define prefetch(X) ((void) X)
505 #else
506 #define prefetch(X) __builtin_prefetch (X)
507 #endif
508
509 #define save_in_use_p_i(__i) \
510 (G.save_in_use[__i])
511 #define save_in_use_p(__p) \
512 (save_in_use_p_i (__p->index_by_depth))
513
514 /* Returns nonzero if P was allocated in GC'able memory. */
515
516 static inline int
517 ggc_allocated_p (const void *p)
518 {
519 page_entry ***base;
520 size_t L1, L2;
521
522 #if HOST_BITS_PER_PTR <= 32
523 base = &G.lookup[0];
524 #else
525 page_table table = G.lookup;
526 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
527 while (1)
528 {
529 if (table == NULL)
530 return 0;
531 if (table->high_bits == high_bits)
532 break;
533 table = table->next;
534 }
535 base = &table->table[0];
536 #endif
537
538 /* Extract the level 1 and 2 indices. */
539 L1 = LOOKUP_L1 (p);
540 L2 = LOOKUP_L2 (p);
541
542 return base[L1] && base[L1][L2];
543 }
544
545 /* Traverse the page table and find the entry for a page.
546 Die (probably) if the object wasn't allocated via GC. */
547
548 static inline page_entry *
549 lookup_page_table_entry (const void *p)
550 {
551 page_entry ***base;
552 size_t L1, L2;
553
554 #if HOST_BITS_PER_PTR <= 32
555 base = &G.lookup[0];
556 #else
557 page_table table = G.lookup;
558 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
559 while (table->high_bits != high_bits)
560 table = table->next;
561 base = &table->table[0];
562 #endif
563
564 /* Extract the level 1 and 2 indices. */
565 L1 = LOOKUP_L1 (p);
566 L2 = LOOKUP_L2 (p);
567
568 return base[L1][L2];
569 }
570
571 /* Set the page table entry for a page. */
572
573 static void
574 set_page_table_entry (void *p, page_entry *entry)
575 {
576 page_entry ***base;
577 size_t L1, L2;
578
579 #if HOST_BITS_PER_PTR <= 32
580 base = &G.lookup[0];
581 #else
582 page_table table;
583 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
584 for (table = G.lookup; table; table = table->next)
585 if (table->high_bits == high_bits)
586 goto found;
587
588 /* Not found -- allocate a new table. */
589 table = xcalloc (1, sizeof(*table));
590 table->next = G.lookup;
591 table->high_bits = high_bits;
592 G.lookup = table;
593 found:
594 base = &table->table[0];
595 #endif
596
597 /* Extract the level 1 and 2 indices. */
598 L1 = LOOKUP_L1 (p);
599 L2 = LOOKUP_L2 (p);
600
601 if (base[L1] == NULL)
602 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
603
604 base[L1][L2] = entry;
605 }
606
607 /* Prints the page-entry for object size ORDER, for debugging. */
608
609 void
610 debug_print_page_list (int order)
611 {
612 page_entry *p;
613 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
614 (void *) G.page_tails[order]);
615 p = G.pages[order];
616 while (p != NULL)
617 {
618 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
619 p->num_free_objects);
620 p = p->next;
621 }
622 printf ("NULL\n");
623 fflush (stdout);
624 }
625
626 #ifdef USING_MMAP
627 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
628 (if non-null). The ifdef structure here is intended to cause a
629 compile error unless exactly one of the HAVE_* macros is defined. */
630
631 static inline char *
632 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
633 {
634 #ifdef HAVE_MMAP_ANON
635 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
636 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
637 #endif
638 #ifdef HAVE_MMAP_DEV_ZERO
639 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
640 MAP_PRIVATE, G.dev_zero_fd, 0);
641 #endif
642
643 if (page == (char *) MAP_FAILED)
644 {
645 perror ("virtual memory exhausted");
646 exit (FATAL_EXIT_CODE);
647 }
648
649 /* Remember that we allocated this memory. */
650 G.bytes_mapped += size;
651
652 /* Pretend we don't have access to the allocated pages. We'll enable
653 access to smaller pieces of the area in ggc_alloc. Discard the
654 handle to avoid handle leak. */
655 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
656
657 return page;
658 }
659 #endif
660 #ifdef USING_MALLOC_PAGE_GROUPS
661 /* Compute the index for this page into the page group. */
662
663 static inline size_t
664 page_group_index (char *allocation, char *page)
665 {
666 return (size_t) (page - allocation) >> G.lg_pagesize;
667 }
668
669 /* Set and clear the in_use bit for this page in the page group. */
670
671 static inline void
672 set_page_group_in_use (page_group *group, char *page)
673 {
674 group->in_use |= 1 << page_group_index (group->allocation, page);
675 }
676
677 static inline void
678 clear_page_group_in_use (page_group *group, char *page)
679 {
680 group->in_use &= ~(1 << page_group_index (group->allocation, page));
681 }
682 #endif
683
684 /* Allocate a new page for allocating objects of size 2^ORDER,
685 and return an entry for it. The entry is not added to the
686 appropriate page_table list. */
687
688 static inline struct page_entry *
689 alloc_page (unsigned order)
690 {
691 struct page_entry *entry, *p, **pp;
692 char *page;
693 size_t num_objects;
694 size_t bitmap_size;
695 size_t page_entry_size;
696 size_t entry_size;
697 #ifdef USING_MALLOC_PAGE_GROUPS
698 page_group *group;
699 #endif
700
701 num_objects = OBJECTS_PER_PAGE (order);
702 bitmap_size = BITMAP_SIZE (num_objects + 1);
703 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
704 entry_size = num_objects * OBJECT_SIZE (order);
705 if (entry_size < G.pagesize)
706 entry_size = G.pagesize;
707
708 entry = NULL;
709 page = NULL;
710
711 /* Check the list of free pages for one we can use. */
712 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
713 if (p->bytes == entry_size)
714 break;
715
716 if (p != NULL)
717 {
718 /* Recycle the allocated memory from this page ... */
719 *pp = p->next;
720 page = p->page;
721
722 #ifdef USING_MALLOC_PAGE_GROUPS
723 group = p->group;
724 #endif
725
726 /* ... and, if possible, the page entry itself. */
727 if (p->order == order)
728 {
729 entry = p;
730 memset (entry, 0, page_entry_size);
731 }
732 else
733 free (p);
734 }
735 #ifdef USING_MMAP
736 else if (entry_size == G.pagesize)
737 {
738 /* We want just one page. Allocate a bunch of them and put the
739 extras on the freelist. (Can only do this optimization with
740 mmap for backing store.) */
741 struct page_entry *e, *f = G.free_pages;
742 int i;
743
744 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
745
746 /* This loop counts down so that the chain will be in ascending
747 memory order. */
748 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
749 {
750 e = xcalloc (1, page_entry_size);
751 e->order = order;
752 e->bytes = G.pagesize;
753 e->page = page + (i << G.lg_pagesize);
754 e->next = f;
755 f = e;
756 }
757
758 G.free_pages = f;
759 }
760 else
761 page = alloc_anon (NULL, entry_size);
762 #endif
763 #ifdef USING_MALLOC_PAGE_GROUPS
764 else
765 {
766 /* Allocate a large block of memory and serve out the aligned
767 pages therein. This results in much less memory wastage
768 than the traditional implementation of valloc. */
769
770 char *allocation, *a, *enda;
771 size_t alloc_size, head_slop, tail_slop;
772 int multiple_pages = (entry_size == G.pagesize);
773
774 if (multiple_pages)
775 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
776 else
777 alloc_size = entry_size + G.pagesize - 1;
778 allocation = xmalloc (alloc_size);
779
780 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
781 head_slop = page - allocation;
782 if (multiple_pages)
783 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
784 else
785 tail_slop = alloc_size - entry_size - head_slop;
786 enda = allocation + alloc_size - tail_slop;
787
788 /* We allocated N pages, which are likely not aligned, leaving
789 us with N-1 usable pages. We plan to place the page_group
790 structure somewhere in the slop. */
791 if (head_slop >= sizeof (page_group))
792 group = (page_group *)page - 1;
793 else
794 {
795 /* We magically got an aligned allocation. Too bad, we have
796 to waste a page anyway. */
797 if (tail_slop == 0)
798 {
799 enda -= G.pagesize;
800 tail_slop += G.pagesize;
801 }
802 if (tail_slop < sizeof (page_group))
803 abort ();
804 group = (page_group *)enda;
805 tail_slop -= sizeof (page_group);
806 }
807
808 /* Remember that we allocated this memory. */
809 group->next = G.page_groups;
810 group->allocation = allocation;
811 group->alloc_size = alloc_size;
812 group->in_use = 0;
813 G.page_groups = group;
814 G.bytes_mapped += alloc_size;
815
816 /* If we allocated multiple pages, put the rest on the free list. */
817 if (multiple_pages)
818 {
819 struct page_entry *e, *f = G.free_pages;
820 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
821 {
822 e = xcalloc (1, page_entry_size);
823 e->order = order;
824 e->bytes = G.pagesize;
825 e->page = a;
826 e->group = group;
827 e->next = f;
828 f = e;
829 }
830 G.free_pages = f;
831 }
832 }
833 #endif
834
835 if (entry == NULL)
836 entry = xcalloc (1, page_entry_size);
837
838 entry->bytes = entry_size;
839 entry->page = page;
840 entry->context_depth = G.context_depth;
841 entry->order = order;
842 entry->num_free_objects = num_objects;
843 entry->next_bit_hint = 1;
844
845 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
846
847 #ifdef USING_MALLOC_PAGE_GROUPS
848 entry->group = group;
849 set_page_group_in_use (group, page);
850 #endif
851
852 /* Set the one-past-the-end in-use bit. This acts as a sentinel as we
853 increment the hint. */
854 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
855 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
856
857 set_page_table_entry (page, entry);
858
859 if (GGC_DEBUG_LEVEL >= 2)
860 fprintf (G.debug_file,
861 "Allocating page at %p, object size=%lu, data %p-%p\n",
862 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
863 page + entry_size - 1);
864
865 return entry;
866 }
867
868 /* Adjust the size of G.depth so that no index greater than the one
869 used by the top of the G.by_depth is used. */
870
871 static inline void
872 adjust_depth (void)
873 {
874 page_entry *top;
875
876 if (G.by_depth_in_use)
877 {
878 top = G.by_depth[G.by_depth_in_use-1];
879
880 /* Peel back indices in depth that index into by_depth, so that
881 as new elements are added to by_depth, we note the indices
882 of those elements, if they are for new context depths. */
883 while (G.depth_in_use > (size_t)top->context_depth+1)
884 --G.depth_in_use;
885 }
886 }
887
888 /* For a page that is no longer needed, put it on the free page list. */
889
890 static inline void
891 free_page (page_entry *entry)
892 {
893 if (GGC_DEBUG_LEVEL >= 2)
894 fprintf (G.debug_file,
895 "Deallocating page at %p, data %p-%p\n", (void *) entry,
896 entry->page, entry->page + entry->bytes - 1);
897
898 /* Mark the page as inaccessible. Discard the handle to avoid handle
899 leak. */
900 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
901
902 set_page_table_entry (entry->page, NULL);
903
904 #ifdef USING_MALLOC_PAGE_GROUPS
905 clear_page_group_in_use (entry->group, entry->page);
906 #endif
907
908 if (G.by_depth_in_use > 1)
909 {
910 page_entry *top = G.by_depth[G.by_depth_in_use-1];
911
912 /* If they are at the same depth, put top element into freed
913 slot. */
914 if (entry->context_depth == top->context_depth)
915 {
916 int i = entry->index_by_depth;
917 G.by_depth[i] = top;
918 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
919 top->index_by_depth = i;
920 }
921 else
922 {
923 /* We cannot free a page from a context deeper than the
924 current one. */
925 abort ();
926 }
927 }
928 --G.by_depth_in_use;
929
930 adjust_depth ();
931
932 entry->next = G.free_pages;
933 G.free_pages = entry;
934 }
935
936 /* Release the free page cache to the system. */
937
938 static void
939 release_pages (void)
940 {
941 #ifdef USING_MMAP
942 page_entry *p, *next;
943 char *start;
944 size_t len;
945
946 /* Gather up adjacent pages so they are unmapped together. */
947 p = G.free_pages;
948
949 while (p)
950 {
951 start = p->page;
952 next = p->next;
953 len = p->bytes;
954 free (p);
955 p = next;
956
957 while (p && p->page == start + len)
958 {
959 next = p->next;
960 len += p->bytes;
961 free (p);
962 p = next;
963 }
964
965 munmap (start, len);
966 G.bytes_mapped -= len;
967 }
968
969 G.free_pages = NULL;
970 #endif
971 #ifdef USING_MALLOC_PAGE_GROUPS
972 page_entry **pp, *p;
973 page_group **gp, *g;
974
975 /* Remove all pages from free page groups from the list. */
976 pp = &G.free_pages;
977 while ((p = *pp) != NULL)
978 if (p->group->in_use == 0)
979 {
980 *pp = p->next;
981 free (p);
982 }
983 else
984 pp = &p->next;
985
986 /* Remove all free page groups, and release the storage. */
987 gp = &G.page_groups;
988 while ((g = *gp) != NULL)
989 if (g->in_use == 0)
990 {
991 *gp = g->next;
992 G.bytes_mapped -= g->alloc_size;
993 free (g->allocation);
994 }
995 else
996 gp = &g->next;
997 #endif
998 }
999
1000 /* This table provides a fast way to determine ceil(log_2(size)) for
1001 allocation requests. The minimum allocation size is eight bytes. */
1002
1003 static unsigned char size_lookup[257] =
1004 {
1005 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1006 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1007 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1008 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1009 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1010 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1011 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1012 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1013 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1014 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1015 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1016 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1017 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1018 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1019 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1020 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1021 8
1022 };
1023
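/* Editor's sketch (not part of this file, guarded out of compilation):
   before init_ggc retargets some entries at the extra orders, the
   table above encodes ceil(log2(n)) clamped to a minimum of 3, i.e.
   an 8-byte object; e.g. size_lookup[20] == 5, so a 20-byte request
   is served from the 32-byte order.  The check below leans on the
   size_lookup definition just above.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned int n;

  for (n = 1; n <= 256; n++)
    {
      unsigned int order = 3;            /* minimum allocation: 8 bytes */
      while ((1u << order) < n)
        order++;
      assert (size_lookup[n] == order);
    }
  return 0;
}
#endif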
1024 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1025
1026 void *
1027 ggc_alloc (size_t size)
1028 {
1029 unsigned order, word, bit, object_offset;
1030 struct page_entry *entry;
1031 void *result;
1032
1033 if (size <= 256)
1034 order = size_lookup[size];
1035 else
1036 {
1037 order = 9;
1038 while (size > OBJECT_SIZE (order))
1039 order++;
1040 }
1041
1042 /* If there are non-full pages for this size allocation, they are at
1043 the head of the list. */
1044 entry = G.pages[order];
1045
1046 /* If there is no page for this object size, or all pages in this
1047 context are full, allocate a new page. */
1048 if (entry == NULL || entry->num_free_objects == 0)
1049 {
1050 struct page_entry *new_entry;
1051 new_entry = alloc_page (order);
1052
1053 new_entry->index_by_depth = G.by_depth_in_use;
1054 push_by_depth (new_entry, 0);
1055
1056 /* We can skip context depths; if we do, make sure we go all the
1057 way to the new depth. */
1058 while (new_entry->context_depth >= G.depth_in_use)
1059 push_depth (G.by_depth_in_use-1);
1060
1061 /* If this is the only entry, it's also the tail. */
1062 if (entry == NULL)
1063 G.page_tails[order] = new_entry;
1064
1065 /* Put new pages at the head of the page list. */
1066 new_entry->next = entry;
1067 entry = new_entry;
1068 G.pages[order] = new_entry;
1069
1070 /* For a new page, we know the word and bit positions (in the
1071 in_use bitmap) of the first available object -- they're zero. */
1072 new_entry->next_bit_hint = 1;
1073 word = 0;
1074 bit = 0;
1075 object_offset = 0;
1076 }
1077 else
1078 {
1079 /* First try to use the hint left from the previous allocation
1080 to locate a clear bit in the in-use bitmap. We've made sure
1081 that the one-past-the-end bit is always set, so if the hint
1082 has run over, this test will fail. */
1083 unsigned hint = entry->next_bit_hint;
1084 word = hint / HOST_BITS_PER_LONG;
1085 bit = hint % HOST_BITS_PER_LONG;
1086
1087 /* If the hint didn't work, scan the bitmap from the beginning. */
1088 if ((entry->in_use_p[word] >> bit) & 1)
1089 {
1090 word = bit = 0;
1091 while (~entry->in_use_p[word] == 0)
1092 ++word;
1093 while ((entry->in_use_p[word] >> bit) & 1)
1094 ++bit;
1095 hint = word * HOST_BITS_PER_LONG + bit;
1096 }
1097
1098 /* Next time, try the next bit. */
1099 entry->next_bit_hint = hint + 1;
1100
1101 object_offset = hint * OBJECT_SIZE (order);
1102 }
1103
1104 /* Set the in-use bit. */
1105 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1106
1107 /* Keep a running total of the number of free objects. If this page
1108 fills up, we may have to move it to the end of the list if the
1109 next page isn't full. If the next page is full, all subsequent
1110 pages are full, so there's no need to move it. */
1111 if (--entry->num_free_objects == 0
1112 && entry->next != NULL
1113 && entry->next->num_free_objects > 0)
1114 {
1115 G.pages[order] = entry->next;
1116 entry->next = NULL;
1117 G.page_tails[order]->next = entry;
1118 G.page_tails[order] = entry;
1119 }
1120
1121 /* Calculate the object's address. */
1122 result = entry->page + object_offset;
1123
1124 #ifdef ENABLE_GC_CHECKING
1125 /* Keep poisoning the object by writing 0xaf, so that the exact same
1126 semantics are kept in the presence of memory bugs, regardless of
1127 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1128 handle to avoid a handle leak. */
1129 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
1130
1131 /* `Poison' the entire allocated object, including any padding at
1132 the end. */
1133 memset (result, 0xaf, OBJECT_SIZE (order));
1134
1135 /* Make the bytes after the end of the object inaccessible. Discard the
1136 handle to avoid a handle leak. */
1137 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1138 OBJECT_SIZE (order) - size));
1139 #endif
1140
1141 /* Tell Valgrind that the memory is there, but its content isn't
1142 defined. The bytes at the end of the object are still marked
1143 inaccessible. */
1144 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1145
1146 /* Keep track of how many bytes are being allocated. This
1147 information is used in deciding when to collect. */
1148 G.allocated += OBJECT_SIZE (order);
1149
1150 #ifdef GATHER_STATISTICS
1151 {
1152 G.stats.total_overhead += OBJECT_SIZE (order) - size;
1153 G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
1154 G.stats.total_allocated += OBJECT_SIZE(order);
1155
1156 if (size <= 32){
1157 G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
1158 G.stats.total_allocated_under32 += OBJECT_SIZE(order);
1159 }
1160
1161 if (size <= 64){
1162 G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
1163 G.stats.total_allocated_under64 += OBJECT_SIZE(order);
1164 }
1165
1166 if (size <= 128){
1167 G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
1168 G.stats.total_allocated_under128 += OBJECT_SIZE(order);
1169 }
1170
1171 }
1172 #endif
1173
1174 if (GGC_DEBUG_LEVEL >= 3)
1175 fprintf (G.debug_file,
1176 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1177 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1178 (void *) entry);
1179
1180 return result;
1181 }
1182
1183 /* If P is not marked, mark it and return false. Otherwise return true.
1184 P must have been allocated by the GC allocator; it mustn't point to
1185 static objects, stack variables, or memory allocated with malloc. */
1186
1187 int
1188 ggc_set_mark (const void *p)
1189 {
1190 page_entry *entry;
1191 unsigned bit, word;
1192 unsigned long mask;
1193
1194 /* Look up the page on which the object is alloced. If the object
1195 wasn't allocated by the collector, we'll probably die. */
1196 entry = lookup_page_table_entry (p);
1197 #ifdef ENABLE_CHECKING
1198 if (entry == NULL)
1199 abort ();
1200 #endif
1201
1202 /* Calculate the index of the object on the page; this is its bit
1203 position in the in_use_p bitmap. */
1204 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1205 word = bit / HOST_BITS_PER_LONG;
1206 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1207
1208 /* If the bit was previously set, skip it. */
1209 if (entry->in_use_p[word] & mask)
1210 return 1;
1211
1212 /* Otherwise set it, and decrement the free object count. */
1213 entry->in_use_p[word] |= mask;
1214 entry->num_free_objects -= 1;
1215
1216 if (GGC_DEBUG_LEVEL >= 4)
1217 fprintf (G.debug_file, "Marking %p\n", p);
1218
1219 return 0;
1220 }
1221
1222 /* Return 1 if P has been marked, zero otherwise.
1223 P must have been allocated by the GC allocator; it mustn't point to
1224 static objects, stack variables, or memory allocated with malloc. */
1225
1226 int
1227 ggc_marked_p (const void *p)
1228 {
1229 page_entry *entry;
1230 unsigned bit, word;
1231 unsigned long mask;
1232
1233 /* Look up the page on which the object is alloced. If the object
1234 wasn't allocated by the collector, we'll probably die. */
1235 entry = lookup_page_table_entry (p);
1236 #ifdef ENABLE_CHECKING
1237 if (entry == NULL)
1238 abort ();
1239 #endif
1240
1241 /* Calculate the index of the object on the page; this is its bit
1242 position in the in_use_p bitmap. */
1243 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1244 word = bit / HOST_BITS_PER_LONG;
1245 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1246
1247 return (entry->in_use_p[word] & mask) != 0;
1248 }
1249
1250 /* Return the size of the gc-able object P. */
1251
1252 size_t
1253 ggc_get_size (const void *p)
1254 {
1255 page_entry *pe = lookup_page_table_entry (p);
1256 return OBJECT_SIZE (pe->order);
1257 }
1258 \f
1259 /* Subroutine of init_ggc which computes the pair of numbers used to
1260 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1261
1262 This algorithm is taken from Granlund and Montgomery's paper
1263 "Division by Invariant Integers using Multiplication"
1264 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1265 constants). */
1266
1267 static void
1268 compute_inverse (unsigned order)
1269 {
1270 size_t size, inv;
1271 unsigned int e;
1272
1273 size = OBJECT_SIZE (order);
1274 e = 0;
1275 while (size % 2 == 0)
1276 {
1277 e++;
1278 size >>= 1;
1279 }
1280
1281 inv = size;
1282 while (inv * size != 1)
1283 inv = inv * (2 - inv*size);
1284
1285 DIV_MULT (order) = inv;
1286 DIV_SHIFT (order) = e;
1287 }
1288
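/* Editor's sketch (not part of this file, guarded out of compilation):
   a standalone restatement of compute_inverse, restricted to 32-bit
   unsigned arithmetic for illustration.  After stripping the power of
   two, each step of inv = inv * (2 - inv * size) doubles the number of
   correct low-order bits, so a handful of iterations suffices for a
   32-bit word; for size 24 it yields mult 0xAAAAAAAB and shift 3, the
   pair used in the OFFSET_TO_BIT sketch near the top of the file.  */
#if 0
#include <assert.h>

static unsigned int
example_inverse (unsigned int size, unsigned int *shift)
{
  unsigned int inv, e = 0;

  while (size % 2 == 0)               /* factor out the power of two */
    {
      e++;
      size >>= 1;
    }

  inv = size;                         /* correct to three bits already */
  while (inv * size != 1)             /* Newton iteration mod 2^32 */
    inv = inv * (2 - inv * size);

  *shift = e;
  return inv;
}

int
main (void)
{
  unsigned int shift;

  assert (example_inverse (24, &shift) == 0xAAAAAAABu);
  assert (shift == 3);
  return 0;
}
#endif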
1289 /* Initialize the ggc-mmap allocator. */
1290 void
1291 init_ggc (void)
1292 {
1293 unsigned order;
1294
1295 G.pagesize = getpagesize();
1296 G.lg_pagesize = exact_log2 (G.pagesize);
1297
1298 #ifdef HAVE_MMAP_DEV_ZERO
1299 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1300 if (G.dev_zero_fd == -1)
1301 internal_error ("open /dev/zero: %m");
1302 #endif
1303
1304 #if 0
1305 G.debug_file = fopen ("ggc-mmap.debug", "w");
1306 #else
1307 G.debug_file = stdout;
1308 #endif
1309
1310 #ifdef USING_MMAP
1311 /* StunOS has an amazing off-by-one error for the first mmap allocation
1312 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1313 believe, is an unaligned page allocation, which would cause us to
1314 hork badly if we tried to use it. */
1315 {
1316 char *p = alloc_anon (NULL, G.pagesize);
1317 struct page_entry *e;
1318 if ((size_t)p & (G.pagesize - 1))
1319 {
1320 /* How losing. Discard this one and try another. If we still
1321 can't get something useful, give up. */
1322
1323 p = alloc_anon (NULL, G.pagesize);
1324 if ((size_t)p & (G.pagesize - 1))
1325 abort ();
1326 }
1327
1328 /* We have a good page, might as well hold onto it... */
1329 e = xcalloc (1, sizeof (struct page_entry));
1330 e->bytes = G.pagesize;
1331 e->page = p;
1332 e->next = G.free_pages;
1333 G.free_pages = e;
1334 }
1335 #endif
1336
1337 /* Initialize the object size table. */
1338 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1339 object_size_table[order] = (size_t) 1 << order;
1340 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1341 {
1342 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1343
1344 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1345 so that we're sure of getting aligned memory. */
1346 s = ROUND_UP (s, MAX_ALIGNMENT);
1347 object_size_table[order] = s;
1348 }
1349
1350 /* Initialize the objects-per-page and inverse tables. */
1351 for (order = 0; order < NUM_ORDERS; ++order)
1352 {
1353 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1354 if (objects_per_page_table[order] == 0)
1355 objects_per_page_table[order] = 1;
1356 compute_inverse (order);
1357 }
1358
1359 /* Reset the size_lookup array to put appropriately sized objects in
1360 the special orders. All objects bigger than the previous power
1361 of two, but no greater than the special size, should go in the
1362 new order. */
1363 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1364 {
1365 int o;
1366 int i;
1367
1368 o = size_lookup[OBJECT_SIZE (order)];
1369 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1370 size_lookup[i] = order;
1371 }
1372
1373 G.depth_in_use = 0;
1374 G.depth_max = 10;
1375 G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
1376
1377 G.by_depth_in_use = 0;
1378 G.by_depth_max = INITIAL_PTE_COUNT;
1379 G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1380 G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
1381 }
1382
1383 /* Increment the `GC context'. Objects allocated in an outer context
1384 are never freed, eliminating the need to register their roots. */
1385
1386 void
1387 ggc_push_context (void)
1388 {
1389 ++G.context_depth;
1390
1391 /* Die on wrap. */
1392 if (G.context_depth >= HOST_BITS_PER_LONG)
1393 abort ();
1394 }
1395
1396 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1397 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1398
1399 static void
1400 ggc_recalculate_in_use_p (page_entry *p)
1401 {
1402 unsigned int i;
1403 size_t num_objects;
1404
1405 /* Because the past-the-end bit in in_use_p is always set, we
1406 pretend there is one additional object. */
1407 num_objects = OBJECTS_IN_PAGE (p) + 1;
1408
1409 /* Reset the free object count. */
1410 p->num_free_objects = num_objects;
1411
1412 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1413 for (i = 0;
1414 i < CEIL (BITMAP_SIZE (num_objects),
1415 sizeof (*p->in_use_p));
1416 ++i)
1417 {
1418 unsigned long j;
1419
1420 /* Something is in use if it is marked, or if it was in use in a
1421 context further down the context stack. */
1422 p->in_use_p[i] |= save_in_use_p (p)[i];
1423
1424 /* Decrement the free object count for every object allocated. */
1425 for (j = p->in_use_p[i]; j; j >>= 1)
1426 p->num_free_objects -= (j & 1);
1427 }
1428
1429 if (p->num_free_objects >= num_objects)
1430 abort ();
1431 }
1432
1433 /* Decrement the `GC context'. All objects allocated since the
1434 previous ggc_push_context are migrated to the outer context. */
1435
1436 void
1437 ggc_pop_context (void)
1438 {
1439 unsigned long omask;
1440 unsigned int depth, i, e;
1441 #ifdef ENABLE_CHECKING
1442 unsigned int order;
1443 #endif
1444
1445 depth = --G.context_depth;
1446 omask = (unsigned long)1 << (depth + 1);
1447
1448 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1449 return;
1450
1451 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1452 G.context_depth_allocations &= omask - 1;
1453 G.context_depth_collections &= omask - 1;
1454
1455 /* The G.depth array is shortened so that the last index is the
1456 context_depth of the top element of by_depth. */
1457 if (depth+1 < G.depth_in_use)
1458 e = G.depth[depth+1];
1459 else
1460 e = G.by_depth_in_use;
1461
1462 /* We might not have any PTEs of depth DEPTH. */
1463 if (depth < G.depth_in_use)
1464 {
1465
1466 /* First we go through all the pages at depth DEPTH to
1467 recalculate the in-use bits. */
1468 for (i = G.depth[depth]; i < e; ++i)
1469 {
1470 page_entry *p;
1471
1472 #ifdef ENABLE_CHECKING
1473 p = G.by_depth[i];
1474
1475 /* Check that all of the pages really are at the depth that
1476 we expect. */
1477 if (p->context_depth != depth)
1478 abort ();
1479 if (p->index_by_depth != i)
1480 abort ();
1481 #endif
1482
1483 prefetch (&save_in_use_p_i (i+8));
1484 prefetch (&save_in_use_p_i (i+16));
1485 if (save_in_use_p_i (i))
1486 {
1487 p = G.by_depth[i];
1488 ggc_recalculate_in_use_p (p);
1489 free (save_in_use_p_i (i));
1490 save_in_use_p_i (i) = 0;
1491 }
1492 }
1493 }
1494
1495 /* Then, we reset all page_entries with a depth greater than depth
1496 to be at depth. */
1497 for (i = e; i < G.by_depth_in_use; ++i)
1498 {
1499 page_entry *p = G.by_depth[i];
1500
1501 /* Check that all of the pages really are at the depth we
1502 expect. */
1503 #ifdef ENABLE_CHECKING
1504 if (p->context_depth <= depth)
1505 abort ();
1506 if (p->index_by_depth != i)
1507 abort ();
1508 #endif
1509 p->context_depth = depth;
1510 }
1511
1512 adjust_depth ();
1513
1514 #ifdef ENABLE_CHECKING
1515 for (order = 2; order < NUM_ORDERS; order++)
1516 {
1517 page_entry *p;
1518
1519 for (p = G.pages[order]; p != NULL; p = p->next)
1520 {
1521 if (p->context_depth > depth)
1522 abort ();
1523 else if (p->context_depth == depth && save_in_use_p (p))
1524 abort ();
1525 }
1526 }
1527 #endif
1528 }
1529 \f
1530 /* Unmark all objects. */
1531
1532 static inline void
1533 clear_marks (void)
1534 {
1535 unsigned order;
1536
1537 for (order = 2; order < NUM_ORDERS; order++)
1538 {
1539 page_entry *p;
1540
1541 for (p = G.pages[order]; p != NULL; p = p->next)
1542 {
1543 size_t num_objects = OBJECTS_IN_PAGE (p);
1544 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1545
1546 #ifdef ENABLE_CHECKING
1547 /* The data should be page-aligned. */
1548 if ((size_t) p->page & (G.pagesize - 1))
1549 abort ();
1550 #endif
1551
1552 /* Pages that aren't in the topmost context are not collected;
1553 nevertheless, we need their in-use bit vectors to store GC
1554 marks. So, back them up first. */
1555 if (p->context_depth < G.context_depth)
1556 {
1557 if (! save_in_use_p (p))
1558 save_in_use_p (p) = xmalloc (bitmap_size);
1559 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1560 }
1561
1562 /* Reset the number of free objects and clear the
1563 in-use bits. These will be adjusted by mark_obj. */
1564 p->num_free_objects = num_objects;
1565 memset (p->in_use_p, 0, bitmap_size);
1566
1567 /* Make sure the one-past-the-end bit is always set. */
1568 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1569 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1570 }
1571 }
1572 }
1573
1574 /* Free all empty pages. Partially empty pages need no attention
1575 because the `mark' bit doubles as an `unused' bit. */
1576
1577 static inline void
1578 sweep_pages (void)
1579 {
1580 unsigned order;
1581
1582 for (order = 2; order < NUM_ORDERS; order++)
1583 {
1584 /* The last page-entry to consider, regardless of entries
1585 placed at the end of the list. */
1586 page_entry * const last = G.page_tails[order];
1587
1588 size_t num_objects;
1589 size_t live_objects;
1590 page_entry *p, *previous;
1591 int done;
1592
1593 p = G.pages[order];
1594 if (p == NULL)
1595 continue;
1596
1597 previous = NULL;
1598 do
1599 {
1600 page_entry *next = p->next;
1601
1602 /* Loop until all entries have been examined. */
1603 done = (p == last);
1604
1605 num_objects = OBJECTS_IN_PAGE (p);
1606
1607 /* Add all live objects on this page to the count of
1608 allocated memory. */
1609 live_objects = num_objects - p->num_free_objects;
1610
1611 G.allocated += OBJECT_SIZE (order) * live_objects;
1612
1613 /* Only objects on pages in the topmost context should get
1614 collected. */
1615 if (p->context_depth < G.context_depth)
1616 ;
1617
1618 /* Remove the page if it's empty. */
1619 else if (live_objects == 0)
1620 {
1621 if (! previous)
1622 G.pages[order] = next;
1623 else
1624 previous->next = next;
1625
1626 /* Are we removing the last element? */
1627 if (p == G.page_tails[order])
1628 G.page_tails[order] = previous;
1629 free_page (p);
1630 p = previous;
1631 }
1632
1633 /* If the page is full, move it to the end. */
1634 else if (p->num_free_objects == 0)
1635 {
1636 /* Don't move it if it's already at the end. */
1637 if (p != G.page_tails[order])
1638 {
1639 /* Move p to the end of the list. */
1640 p->next = NULL;
1641 G.page_tails[order]->next = p;
1642
1643 /* Update the tail pointer... */
1644 G.page_tails[order] = p;
1645
1646 /* ... and the head pointer, if necessary. */
1647 if (! previous)
1648 G.pages[order] = next;
1649 else
1650 previous->next = next;
1651 p = previous;
1652 }
1653 }
1654
1655 /* If we've fallen through to here, it's a page in the
1656 topmost context that is neither full nor empty. Such a
1657 page must precede pages at lesser context depth in the
1658 list, so move it to the head. */
1659 else if (p != G.pages[order])
1660 {
1661 previous->next = p->next;
1662 p->next = G.pages[order];
1663 G.pages[order] = p;
1664 /* Are we moving the last element? */
1665 if (G.page_tails[order] == p)
1666 G.page_tails[order] = previous;
1667 p = previous;
1668 }
1669
1670 previous = p;
1671 p = next;
1672 }
1673 while (! done);
1674
1675 /* Now, restore the in_use_p vectors for any pages from contexts
1676 other than the current one. */
1677 for (p = G.pages[order]; p; p = p->next)
1678 if (p->context_depth != G.context_depth)
1679 ggc_recalculate_in_use_p (p);
1680 }
1681 }
1682
1683 #ifdef ENABLE_GC_CHECKING
1684 /* Clobber all free objects. */
1685
1686 static inline void
1687 poison_pages (void)
1688 {
1689 unsigned order;
1690
1691 for (order = 2; order < NUM_ORDERS; order++)
1692 {
1693 size_t size = OBJECT_SIZE (order);
1694 page_entry *p;
1695
1696 for (p = G.pages[order]; p != NULL; p = p->next)
1697 {
1698 size_t num_objects;
1699 size_t i;
1700
1701 if (p->context_depth != G.context_depth)
1702 /* Since we don't do any collection for pages in pushed
1703 contexts, there's no need to do any poisoning. And
1704 besides, the IN_USE_P array isn't valid until we pop
1705 contexts. */
1706 continue;
1707
1708 num_objects = OBJECTS_IN_PAGE (p);
1709 for (i = 0; i < num_objects; i++)
1710 {
1711 size_t word, bit;
1712 word = i / HOST_BITS_PER_LONG;
1713 bit = i % HOST_BITS_PER_LONG;
1714 if (((p->in_use_p[word] >> bit) & 1) == 0)
1715 {
1716 char *object = p->page + i * size;
1717
1718 /* Keep poison-by-write when we expect to use Valgrind,
1719 so the exact same memory semantics are kept, in case
1720 there are memory errors. We override this request
1721 below. */
1722 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1723 memset (object, 0xa5, size);
1724
1725 /* Drop the handle to avoid handle leak. */
1726 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1727 }
1728 }
1729 }
1730 }
1731 }
1732 #endif
1733
1734 /* Top level mark-and-sweep routine. */
1735
1736 void
1737 ggc_collect (void)
1738 {
1739 /* Avoid frequent unnecessary work by skipping collection if the
1740 total allocations haven't expanded much since the last
1741 collection. */
1742 float allocated_last_gc =
1743 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1744
1745 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1746
1747 if (G.allocated < allocated_last_gc + min_expand)
1748 return;
1749
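/* Editor's sketch (not part of this file, guarded out of compilation):
   a numeric illustration of the trigger above.  With the (assumed)
   baseline parameter values GGC_MIN_HEAPSIZE == 4096 kB and
   GGC_MIN_EXPAND == 30, a heap of 4 MB after the last collection must
   grow past roughly 5.2 MB before the next collection runs.  */
#if 0
#include <assert.h>

int
main (void)
{
  float allocated_last_gc = 4096.0f * 1024;            /* 4 MB floor */
  float min_expand = allocated_last_gc * 30 / 100;     /* 30% headroom */

  assert (allocated_last_gc + min_expand > 5.2e6f);
  return 0;
}
#endif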
1750 timevar_push (TV_GC);
1751 if (!quiet_flag)
1752 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1753
1754 /* Zero the total allocated bytes. This will be recalculated in the
1755 sweep phase. */
1756 G.allocated = 0;
1757
1758 /* Release the pages we freed the last time we collected, but didn't
1759 reuse in the interim. */
1760 release_pages ();
1761
1762 /* Indicate that we've seen collections at this context depth. */
1763 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1764
1765 clear_marks ();
1766 ggc_mark_roots ();
1767
1768 #ifdef ENABLE_GC_CHECKING
1769 poison_pages ();
1770 #endif
1771
1772 sweep_pages ();
1773
1774 G.allocated_last_gc = G.allocated;
1775
1776 timevar_pop (TV_GC);
1777
1778 if (!quiet_flag)
1779 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1780 }
1781
1782 /* Print allocation statistics. */
1783 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1784 ? (x) \
1785 : ((x) < 1024*1024*10 \
1786 ? (x) / 1024 \
1787 : (x) / (1024*1024))))
1788 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1789
1790 void
1791 ggc_print_statistics (void)
1792 {
1793 struct ggc_statistics stats;
1794 unsigned int i;
1795 size_t total_overhead = 0;
1796
1797 /* Clear the statistics. */
1798 memset (&stats, 0, sizeof (stats));
1799
1800 /* Make sure collection will really occur. */
1801 G.allocated_last_gc = 0;
1802
1803 /* Collect and print the statistics common across collectors. */
1804 ggc_print_common_statistics (stderr, &stats);
1805
1806 /* Release free pages so that we will not count the bytes allocated
1807 there as part of the total allocated memory. */
1808 release_pages ();
1809
1810 /* Collect some information about the various sizes of
1811 allocation. */
1812 fprintf (stderr, "%-5s %10s %10s %10s\n",
1813 "Size", "Allocated", "Used", "Overhead");
1814 for (i = 0; i < NUM_ORDERS; ++i)
1815 {
1816 page_entry *p;
1817 size_t allocated;
1818 size_t in_use;
1819 size_t overhead;
1820
1821 /* Skip empty entries. */
1822 if (!G.pages[i])
1823 continue;
1824
1825 overhead = allocated = in_use = 0;
1826
1827 /* Figure out the total number of bytes allocated for objects of
1828 this size, and how many of them are actually in use. Also figure
1829 out how much memory the page table is using. */
1830 for (p = G.pages[i]; p; p = p->next)
1831 {
1832 allocated += p->bytes;
1833 in_use +=
1834 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1835
1836 overhead += (sizeof (page_entry) - sizeof (long)
1837 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1838 }
1839 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1840 (unsigned long) OBJECT_SIZE (i),
1841 SCALE (allocated), LABEL (allocated),
1842 SCALE (in_use), LABEL (in_use),
1843 SCALE (overhead), LABEL (overhead));
1844 total_overhead += overhead;
1845 }
1846 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1847 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1848 SCALE (G.allocated), LABEL(G.allocated),
1849 SCALE (total_overhead), LABEL (total_overhead));
1850
1851 #ifdef GATHER_STATISTICS
1852 {
1853 fprintf (stderr, "Total Overhead: %10lld\n",
1854 G.stats.total_overhead);
1855 fprintf (stderr, "Total Allocated: %10lld\n",
1856 G.stats.total_allocated);
1857
1858 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
1859 G.stats.total_overhead_under32);
1860 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
1861 G.stats.total_allocated_under32);
1862 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
1863 G.stats.total_overhead_under64);
1864 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
1865 G.stats.total_allocated_under64);
1866 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
1867 G.stats.total_overhead_under128);
1868 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
1869 G.stats.total_allocated_under128);
1870
1871 for (i = 0; i < NUM_ORDERS; i++)
1872 if (G.stats.total_overhead_per_order[i])
1873 fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
1874 OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
1875 }
1876 #endif
1877 }
1878 \f
1879 struct ggc_pch_data
1880 {
1881 struct ggc_pch_ondisk
1882 {
1883 unsigned totals[NUM_ORDERS];
1884 } d;
1885 size_t base[NUM_ORDERS];
1886 size_t written[NUM_ORDERS];
1887 };
1888
1889 struct ggc_pch_data *
1890 init_ggc_pch (void)
1891 {
1892 return xcalloc (sizeof (struct ggc_pch_data), 1);
1893 }
1894
1895 void
1896 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1897 size_t size)
1898 {
1899 unsigned order;
1900
1901 if (size <= 256)
1902 order = size_lookup[size];
1903 else
1904 {
1905 order = 9;
1906 while (size > OBJECT_SIZE (order))
1907 order++;
1908 }
1909
1910 d->d.totals[order]++;
1911 }
1912
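/* Return the total number of bytes the PCH image will need, with each
   order's objects rounded up to a page boundary.  */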
1913 size_t
1914 ggc_pch_total_size (struct ggc_pch_data *d)
1915 {
1916 size_t a = 0;
1917 unsigned i;
1918
1919 for (i = 0; i < NUM_ORDERS; i++)
1920 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1921 return a;
1922 }
1923
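/* Record that the PCH image will be placed at BASE, and compute the
   starting address of each order's block within it.  */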
1924 void
1925 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1926 {
1927 size_t a = (size_t) base;
1928 unsigned i;
1929
1930 for (i = 0; i < NUM_ORDERS; i++)
1931 {
1932 d->base[i] = a;
1933 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1934 }
1935 }
1936
1937
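/* Return the address that object X of SIZE bytes will have in the PCH
   image, and advance the allocation pointer for its order.  */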
1938 char *
1939 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1940 size_t size)
1941 {
1942 unsigned order;
1943 char *result;
1944
1945 if (size <= 256)
1946 order = size_lookup[size];
1947 else
1948 {
1949 order = 9;
1950 while (size > OBJECT_SIZE (order))
1951 order++;
1952 }
1953
1954 result = (char *) d->base[order];
1955 d->base[order] += OBJECT_SIZE (order);
1956 return result;
1957 }
1958
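/* Prepare to write the PCH data to file F.  Nothing is needed for
   this collector.  */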
1959 void
1960 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1961 FILE *f ATTRIBUTE_UNUSED)
1962 {
1963 /* Nothing to do. */
1964 }
1965
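/* Write object X of SIZE bytes to file F, padding it out to the full
   object size for its order so that the on-disk layout matches the
   layout computed by ggc_pch_alloc_object.  */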
1966 void
1967 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1968 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1969 size_t size)
1970 {
1971 unsigned order;
1972 static const char emptyBytes[256];
1973
1974 if (size <= 256)
1975 order = size_lookup[size];
1976 else
1977 {
1978 order = 9;
1979 while (size > OBJECT_SIZE (order))
1980 order++;
1981 }
1982
1983 if (fwrite (x, size, 1, f) != 1)
1984 fatal_error ("can't write PCH file: %m");
1985
1986 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
1987 object out to OBJECT_SIZE(order). This happens for strings. */
1988
1989 if (size != OBJECT_SIZE (order))
1990 {
1991 unsigned padding = OBJECT_SIZE (order) - size;
1992
1993 /* To speed small writes, we use a nulled-out array that's larger
1994 than most padding requests as the source for our null bytes. This
1995 permits us to do the padding with fwrite() rather than fseek(), and
1996 limits the chance that the OS may try to flush any outstanding
1997 writes. */
1998 if (padding <= sizeof (emptyBytes))
1999 {
2000 if (fwrite (emptyBytes, 1, padding, f) != padding)
2001 fatal_error ("can't write PCH file: %m");
2002 }
2003 else
2004 {
2005 /* Larger than our buffer? Just default to fseek. */
2006 if (fseek (f, padding, SEEK_CUR) != 0)
2007 fatal_error ("can't write PCH file: %m");
2008 }
2009 }
2010
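/* Once the last object of this order has been written, skip over the
   padding that rounds the order's block up to a page boundary, so the
   next order starts where ggc_pch_this_base expects it.  */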
2011 d->written[order]++;
2012 if (d->written[order] == d->d.totals[order]
2013 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2014 G.pagesize),
2015 SEEK_CUR) != 0)
2016 fatal_error ("can't write PCH file: %m");
2017 }
2018
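/* Finish writing the PCH: emit the per-order object totals so that
   ggc_pch_read can rebuild the page tables, then free D.  */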
2019 void
2020 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2021 {
2022 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2023 fatal_error ("can't write PCH file: %m");
2024 free (d);
2025 }
2026
2027 /* Move the PCH PTE entries that were just added to the end of by_depth
2028 to the front. */
2029
2030 static void
2031 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2032 {
2033 unsigned i;
2034
2035 /* First, we swap the new entries to the front of the varrays. */
2036 page_entry **new_by_depth;
2037 unsigned long **new_save_in_use;
2038
2039 new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
2040 new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
2041
2042 memcpy (&new_by_depth[0],
2043 &G.by_depth[count_old_page_tables],
2044 count_new_page_tables * sizeof (void *));
2045 memcpy (&new_by_depth[count_new_page_tables],
2046 &G.by_depth[0],
2047 count_old_page_tables * sizeof (void *));
2048 memcpy (&new_save_in_use[0],
2049 &G.save_in_use[count_old_page_tables],
2050 count_new_page_tables * sizeof (void *));
2051 memcpy (&new_save_in_use[count_new_page_tables],
2052 &G.save_in_use[0],
2053 count_old_page_tables * sizeof (void *));
2054
2055 free (G.by_depth);
2056 free (G.save_in_use);
2057
2058 G.by_depth = new_by_depth;
2059 G.save_in_use = new_save_in_use;
2060
2061 /* Now update all the index_by_depth fields. */
2062 for (i = G.by_depth_in_use; i > 0; --i)
2063 {
2064 page_entry *p = G.by_depth[i-1];
2065 p->index_by_depth = i-1;
2066 }
2067
2068 /* And last, we update the depth pointers in G.depth. The first
2069 entry is already 0, and context 0 entries always start at index
2070 0, so there is nothing to update in the first slot. We need a
2071 second slot only if we have old PTEs, and if we do, they start
2072 at index count_new_page_tables. */
2073 if (count_old_page_tables)
2074 push_depth (count_new_page_tables);
2075 }
2076
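/* Rebuild the collector's state for a PCH image whose contents have
   been placed at ADDR; F supplies the per-order object counts written
   by ggc_pch_finish.  */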
2077 void
2078 ggc_pch_read (FILE *f, void *addr)
2079 {
2080 struct ggc_pch_ondisk d;
2081 unsigned i;
2082 char *offs = addr;
2083 unsigned long count_old_page_tables;
2084 unsigned long count_new_page_tables;
2085
2086 count_old_page_tables = G.by_depth_in_use;
2087
2088 /* We've just read in a PCH file. So, every object that used to be
2089 allocated is now free. */
2090 clear_marks ();
2091 #ifdef GGC_POISON
2092 poison_pages ();
2093 #endif
2094
2095 /* No object read from a PCH file should ever be freed. So, set the
2096 context depth to 1, and set the depth of all the currently-allocated
2097 pages to be 1 too. PCH pages will have depth 0. */
2098 if (G.context_depth != 0)
2099 abort ();
2100 G.context_depth = 1;
2101 for (i = 0; i < NUM_ORDERS; i++)
2102 {
2103 page_entry *p;
2104 for (p = G.pages[i]; p != NULL; p = p->next)
2105 p->context_depth = G.context_depth;
2106 }
2107
2108 /* Allocate the appropriate page-table entries for the pages read from
2109 the PCH file. */
2110 if (fread (&d, sizeof (d), 1, f) != 1)
2111 fatal_error ("can't read PCH file: %m");
2112
2113 for (i = 0; i < NUM_ORDERS; i++)
2114 {
2115 struct page_entry *entry;
2116 char *pte;
2117 size_t bytes;
2118 size_t num_objs;
2119 size_t j;
2120
2121 if (d.totals[i] == 0)
2122 continue;
2123
2124 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2125 num_objs = bytes / OBJECT_SIZE (i);
2126 entry = xcalloc (1, (sizeof (struct page_entry)
2127 - sizeof (long)
2128 + BITMAP_SIZE (num_objs + 1)));
2129 entry->bytes = bytes;
2130 entry->page = offs;
2131 entry->context_depth = 0;
2132 offs += bytes;
2133 entry->num_free_objects = 0;
2134 entry->order = i;
2135
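/* Every object in the PCH image is in use, so set all NUM_OBJS in-use
   bits, plus the extra trailing bit the bitmap is sized for.  */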
2136 for (j = 0;
2137 j + HOST_BITS_PER_LONG <= num_objs + 1;
2138 j += HOST_BITS_PER_LONG)
2139 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2140 for (; j < num_objs + 1; j++)
2141 entry->in_use_p[j / HOST_BITS_PER_LONG]
2142 |= 1L << (j % HOST_BITS_PER_LONG);
2143
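/* Enter each G.pagesize chunk of the PCH region into the page table.  */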
2144 for (pte = entry->page;
2145 pte < entry->page + entry->bytes;
2146 pte += G.pagesize)
2147 set_page_table_entry (pte, entry);
2148
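/* Append the new page entry to the tail of the list for this order.  */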
2149 if (G.page_tails[i] != NULL)
2150 G.page_tails[i]->next = entry;
2151 else
2152 G.pages[i] = entry;
2153 G.page_tails[i] = entry;
2154
2155 /* We start off by just adding all the new information to the
2156 end of the varrays; later, we will move the new information
2157 to the front of the varrays, as the PCH page tables are at
2158 context 0. */
2159 push_by_depth (entry, 0);
2160 }
2161
2162 /* Now, we update the various data structures that speed page table
2163 handling. */
2164 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2165
2166 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2167
2168 /* Update the statistics. */
2169 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2170 }