/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}


/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
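
/* Illustrative sketch only (not part of this file): a plugin holding a
   single GC-managed tree could register a root table along these lines,
   assuming the generated walkers gt_ggc_mx_tree_node and
   gt_pch_nx_tree_node and the LAST_GGC_ROOT_TAB terminator from ggc.h:

     static tree my_plugin_node;

     static const struct ggc_root_tab my_root_tab[] = {
       { &my_plugin_node, 1, sizeof (my_plugin_node),
         &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };

     ggc_register_root_tab (my_root_tab);  // e.g. from plugin_init  */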

/* This extra vector of dynamically registered cache_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC cache
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
static vec<const_ggc_cache_tab_t> extra_cache_vec;

/* Dynamically register a new GGC cache table CT.  This is useful for
   plugins.  */

void
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
  if (ct)
    extra_cache_vec.safe_push (ct);
}

/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
        ggc_set_mark (*cti->base);
        htab_traverse_noresize (*cti->base, ggc_htab_delete,
                                CONST_CAST (void *, (const void *)cti));
        ggc_set_mark ((*cti->base)->entries);
      }
}

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
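
/* To make the base/nelt/stride walk above concrete (illustrative
   comment only): the root entry gengtype emits for something like
   "static GTY(()) tree x[4];" is roughly { &x[0], 4, sizeof (tree),
   cb, pchw }, so iteration i passes the pointer stored at
   (char *) &x[0] + i * sizeof (tree) to the marker, i.e. each element
   of the array in turn.  */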

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                            MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, which
         is the size that was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
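
/* Illustrative note (usage sketch, not code used here): SCALE and LABEL
   are meant to be used as a pair to print byte counts in human-readable
   units, e.g.

     fprintf (stream, "%lu%c", SCALE (nbytes), LABEL (nbytes));

   which prints 20480 as "20k" and 20971520 as "20M", switching units at
   the 10k and 10M thresholds respectively.  */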

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
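
/* Note on the shift above (explanatory comment added for clarity): GC
   objects are typically at least 8-byte aligned, so the low three bits
   of their addresses are almost always zero and carry no information;
   shifting them away keeps the hash values dense.  For example, the
   consecutive 8-byte-aligned addresses 0x1000, 0x1008 and 0x1010 hash
   to 0x200, 0x201 and 0x202.  */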

/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
saving_hasher::hash (const value_type *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const value_type *p1, const compare_type *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register a reorder function for an object already noted in the hash
   table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
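
/* Clarifying note on the comparator above: the (a > b) - (a < b) idiom
   yields -1, 0 or 1 directly, avoiding the overflow or truncation that
   returning the plain difference of two addresses as an int could
   produce on 64-bit hosts, so it gives qsort a safe total order.  */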

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can%'t write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
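  /* Worked example of the rounding above (comment only): with a
     4096-byte granularity and o == 10000, 10000 % 4096 == 1808, so
     mmi.offset becomes (4096 - 1808) + 10000 == 12288 == 3 * 4096.
     That is, mmi.offset is o rounded up to the next multiple of
     mmap_offset_alignment, staying equal to o when o is already
     aligned.  */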
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that the first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, as relocation
   of the PCH file would then be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
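
/* Clarifying note on the return expression above: (addr == base) - 1
   evaluates to 0 when malloc happens to return exactly BASE (the memory
   is ready to be filled from the file) and to -1 otherwise (the caller
   must give up, since relocation is not supported here).  */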

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as the page size.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
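
/* Putting the two mmap hooks together (summary comment added for
   clarity): at save time mmap_gt_pch_get_address records the address
   the kernel would pick for the blob; at restore time
   mmap_gt_pch_use_address asks for that same address without MAP_FIXED,
   and the PCH is only usable if the kernel grants it, since every
   pointer in the blob was relocated to that base when the file was
   written.  */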
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
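
/* Worked example for the heuristic above (comment only): on a machine
   with 512MB of usable RAM, min_expand is 0.5 * 70 + 30 = 65, i.e. the
   heap may grow by 65% between collections; with 1GB or more it
   saturates at 70 + 30 = 100%.  */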

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
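
/* Worked example for the heuristic above (comment only): with 1GB of
   RAM, phys_kbytes starts at 1048576, becomes 131072 after the /8, and
   stays within the [4096, 131072] clamp, i.e. a 128MB minimum heap; a
   64MB machine ends up at 65536 / 8 = 8192, i.e. 8MB, subject to the
   rlimit-derived cap.  */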
#endif

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}

/* Data structure used to store per-call-site statistics.  */
struct ggc_loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hash table helper.  */

struct ggc_loc_desc_hasher : typed_noop_remove <ggc_loc_descriptor>
{
  typedef ggc_loc_descriptor value_type;
  typedef ggc_loc_descriptor compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
ggc_loc_desc_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->function) | d->line;
}

inline bool
ggc_loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
{
  return (d->file == d2->file && d->line == d2->line
          && d->function == d2->function);
}

/* Hashtable used for statistics.  */
static hash_table<ggc_loc_desc_hasher> *loc_hash;

struct ggc_ptr_hash_entry
{
  void *ptr;
  struct ggc_loc_descriptor *loc;
  size_t size;
};

/* Helper for ptr_hash table.  */

struct ptr_hash_hasher : typed_noop_remove <ggc_ptr_hash_entry>
{
  typedef ggc_ptr_hash_entry value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
ptr_hash_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->ptr);
}

inline bool
ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
{
  return (p->ptr == p2);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static hash_table<ptr_hash_hasher> *ptr_hash;

/* Return descriptor for given call site, create new one if needed.  */
static struct ggc_loc_descriptor *
make_loc_descriptor (const char *name, int line, const char *function)
{
  struct ggc_loc_descriptor loc;
  struct ggc_loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = new hash_table<ggc_loc_desc_hasher> (10);

  slot = loc_hash->find_slot (&loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct ggc_loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
                     const char *name, int line, const char *function)
{
  struct ggc_loc_descriptor *loc = make_loc_descriptor (name, line, function);
  struct ggc_ptr_hash_entry *p = XNEW (struct ggc_ptr_hash_entry);
  ggc_ptr_hash_entry **slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = new hash_table<ptr_hash_hasher> (10);
  slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}

/* Helper function for ggc_prune_overhead_list.  See if SLOT is still marked
   and remove it from the hashtable if it is not.  */
int
ggc_prune_ptr (ggc_ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ggc_ptr_hash_entry *p = *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      ptr_hash->clear_slot (slot);
      free (p);
    }
  return 1;
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_ptr_hash_entry **slot
    = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
  struct ggc_ptr_hash_entry *p;
  /* The pointer might not be found if a PCH read happened between the
     allocation and the ggc_free () call.  FIXME: account memory properly
     in the presence of PCH.  */
  if (!slot)
    return;
  p = (struct ggc_ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  ptr_hash->clear_slot (slot);
  free (p);
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct ggc_loc_descriptor *const l1 =
    *(const struct ggc_loc_descriptor *const *) loc1;
  const struct ggc_loc_descriptor *const l2 =
    *(const struct ggc_loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by amount of memory consumed.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct ggc_loc_descriptor *const l1 =
    *(const struct ggc_loc_descriptor *const *) loc1;
  const struct ggc_loc_descriptor *const l2 =
    *(const struct ggc_loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
          (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Collect array of the descriptors from hashtable.  */
static struct ggc_loc_descriptor **loc_array;
int
ggc_add_statistics (ggc_loc_descriptor **slot, int *n)
{
  loc_array[*n] = *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct ggc_loc_descriptor *,
                        loc_hash->elements_with_deleted ());
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  loc_hash->traverse <int *, ggc_add_statistics> (&nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
         final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct ggc_loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct ggc_loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
                   (long)d->collected,
                   (d->collected) * 100.0 / collected,
                   (long)d->freed,
                   (d->freed) * 100.0 / freed,
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
                   / (allocated + overhead - freed - collected),
                   (long)d->overhead,
                   d->overhead * 100.0 / overhead,
                   (long)d->times);
        }
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
}