[gcc.git] / gcc / ggc-common.c
1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Generic garbage collection (GC) functions and data, not specific to
22 any particular GC implementation. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "hashtab.h"
28 #include "ggc.h"
29 #include "ggc-internal.h"
30 #include "toplev.h"
31 #include "params.h"
32 #include "hosthooks.h"
33 #include "hosthooks-def.h"
34 #include "plugin.h"
35 #include "vec.h"
36 #include "timevar.h"
37
38 #ifdef HAVE_SYS_RESOURCE_H
39 # include <sys/resource.h>
40 #endif
41
42 #ifdef HAVE_MMAP_FILE
43 # include <sys/mman.h>
44 # ifdef HAVE_MINCORE
45 /* This is on Solaris. */
46 # include <sys/types.h>
47 # endif
48 #endif
49
50 #ifndef MAP_FAILED
51 # define MAP_FAILED ((void *)-1)
52 #endif
53
54 /* When set, ggc_collect will do collection. */
55 bool ggc_force_collect;
56
57 /* When true, protect the contents of the identifier hash table. */
58 bool ggc_protect_identifiers = true;
59
60 /* Statistics about the allocation. */
61 static ggc_statistics *ggc_stats;
62
63 struct traversal_state;
64
65 static int ggc_htab_delete (void **, void *);
66 static hashval_t saving_htab_hash (const void *);
67 static int saving_htab_eq (const void *, const void *);
68 static int call_count (void **, void *);
69 static int call_alloc (void **, void *);
70 static int compare_ptr_data (const void *, const void *);
71 static void relocate_ptrs (void *, void *);
72 static void write_pch_globals (const struct ggc_root_tab * const *tab,
73 struct traversal_state *state);
74
75 /* Maintain global roots that are preserved during GC. */
76
77 /* Process a slot of an htab by deleting it if it has not been marked. */
78
79 static int
80 ggc_htab_delete (void **slot, void *info)
81 {
82 const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
83
84 if (! (*r->marked_p) (*slot))
85 htab_clear_slot (*r->base, slot);
86 else
87 (*r->cb) (*slot);
88
89 return 1;
90 }
91
92
93 /* This extra vector of dynamically registered root_tab-s is used by
94 ggc_mark_roots and gives the ability to dynamically add new GGC root
95 tables, for instance from some plugins; this vector is on the heap
96 since it is used by GGC internally. */
97 typedef const struct ggc_root_tab *const_ggc_root_tab_t;
98 DEF_VEC_P(const_ggc_root_tab_t);
99 DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
100 static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;
101
102 /* Dynamically register a new GGC root table RT. This is useful for
103 plugins. */
104
105 void
106 ggc_register_root_tab (const struct ggc_root_tab* rt)
107 {
108 if (rt)
109 VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
110 }
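/* Illustrative sketch (not part of this file): how a plugin might build and
   register its own root table.  All names below are hypothetical; the field
   order follows struct ggc_root_tab (base, nelt, stride, cb, pchw).  */
#if 0
static GTY (()) tree my_plugin_cached_decl;

static const struct ggc_root_tab my_plugin_root_tab[] = {
  { &my_plugin_cached_decl, 1, sizeof (my_plugin_cached_decl),
    gt_ggc_mx_tree_node, gt_pch_nx_tree_node },
  LAST_GGC_ROOT_TAB
};

/* Typically done once from the plugin's initialization entry point.  */
static void
my_plugin_register_roots (void)
{
  ggc_register_root_tab (my_plugin_root_tab);
}
#endif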
111
112 /* This extra vector of dynamically registered cache_tab-s is used by
113 ggc_mark_roots and gives the ability to dynamically add new GGC cache
114 tables, for instance from some plugins; this vector is on the heap
115 since it is used by GGC internally. */
116 typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
117 DEF_VEC_P(const_ggc_cache_tab_t);
118 DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
119 static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;
120
121 /* Dynamically register a new GGC cache table CT. This is useful for
122 plugins. */
123
124 void
125 ggc_register_cache_tab (const struct ggc_cache_tab* ct)
126 {
127 if (ct)
128 VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
129 }
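/* Illustrative sketch (not part of this file): a cache table for an htab
   whose entries should be dropped once the objects they cache die.  In-tree
   code usually gets such tables generated by gengtype from a
   GTY ((if_marked (...))) annotation; a plugin can also supply one by hand.
   The names below are hypothetical; the field order follows struct
   ggc_cache_tab (base, nelt, stride, cb, pchw, marked_p).  */
#if 0
static GTY ((param_is (union tree_node))) htab_t my_plugin_cache;

static int
my_plugin_cache_entry_marked_p (const void *entry)
{
  /* Keep a slot only while the cached tree is itself still live.  */
  return ggc_marked_p (entry);
}

static const struct ggc_cache_tab my_plugin_cache_tab[] = {
  { &my_plugin_cache, 1, sizeof (my_plugin_cache),
    gt_ggc_mx_tree_node, gt_pch_nx_tree_node,
    my_plugin_cache_entry_marked_p },
  LAST_GGC_CACHE_TAB
};

/* Then, from the plugin's init hook:
   ggc_register_cache_tab (my_plugin_cache_tab);  */
#endif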
130
131 /* Scan the cache tables in CTP: mark each hash table itself and delete any
132 entries whose objects are not already marked. */
133
134 static void
135 ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
136 {
137 const struct ggc_cache_tab *cti;
138
139 for (cti = ctp; cti->base != NULL; cti++)
140 if (*cti->base)
141 {
142 ggc_set_mark (*cti->base);
143 htab_traverse_noresize (*cti->base, ggc_htab_delete,
144 CONST_CAST (void *, (const void *)cti));
145 ggc_set_mark ((*cti->base)->entries);
146 }
147 }
148
149 /* Iterate through all registered roots and mark each element. */
150
151 void
152 ggc_mark_roots (void)
153 {
154 const struct ggc_root_tab *const *rt;
155 const struct ggc_root_tab *rti;
156 const_ggc_root_tab_t rtp;
157 const struct ggc_cache_tab *const *ct;
158 const_ggc_cache_tab_t ctp;
159 size_t i, j;
160
161 for (rt = gt_ggc_deletable_rtab; *rt; rt++)
162 for (rti = *rt; rti->base != NULL; rti++)
163 memset (rti->base, 0, rti->stride);
164
165 for (rt = gt_ggc_rtab; *rt; rt++)
166 for (rti = *rt; rti->base != NULL; rti++)
167 for (i = 0; i < rti->nelt; i++)
168 (*rti->cb) (*(void **)((char *)rti->base + rti->stride * i));
169
170 for (i = 0; VEC_iterate (const_ggc_root_tab_t, extra_root_vec, i, rtp); i++)
171 {
172 for (rti = rtp; rti->base != NULL; rti++)
173 for (j = 0; j < rti->nelt; j++)
174 (*rti->cb) (*(void **) ((char *)rti->base + rti->stride * j));
175 }
176
177 if (ggc_protect_identifiers)
178 ggc_mark_stringpool ();
179
180 /* Now scan all hash tables that have objects which are to be deleted if
181 they are not already marked. */
182 for (ct = gt_ggc_cache_rtab; *ct; ct++)
183 ggc_scan_cache_tab (*ct);
184
185 for (i = 0; VEC_iterate (const_ggc_cache_tab_t, extra_cache_vec, i, ctp); i++)
186 ggc_scan_cache_tab (ctp);
187
188 if (! ggc_protect_identifiers)
189 ggc_purge_stringpool ();
190
191 /* Some plugins may call ggc_set_mark from here. */
192 invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
193 }
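/* Illustrative sketch (not part of this file): gengtype emits root-table
   entries of roughly the shape below for every GTY(()) global, and the
   nelt/stride loops above simply visit each element in turn.  The table and
   variable names here are hypothetical; only the form is real.  */
#if 0
/* For a global declared roughly as:  static GTY (()) tree sample_trees[8];  */
static const struct ggc_root_tab gt_ggc_r_sample[] = {
  { &sample_trees[0], 8, sizeof (sample_trees[0]),
    gt_ggc_mx_tree_node, gt_pch_nx_tree_node },
  LAST_GGC_ROOT_TAB
};
/* ggc_mark_roots then calls the cb walker on sample_trees[0] through
   sample_trees[7], stepping by sizeof (tree) each time.  */
#endif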
194
195 /* Allocate a block of memory, then clear it. */
196 void *
197 ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
198 {
199 void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
200 memset (buf, 0, size);
201 return buf;
202 }
203
204 /* Resize a block of memory, possibly re-allocating it. */
205 void *
206 ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
207 {
208 void *r;
209 size_t old_size;
210
211 if (x == NULL)
212 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
213
214 old_size = ggc_get_size (x);
215
216 if (size <= old_size)
217 {
218 /* Mark the unwanted memory as inaccessible. We also need to make
219 the "new" size accessible, since ggc_get_size returns the size of
220 the pool, not the size of the individually allocated object, the
221 size which was previously made accessible. Unfortunately, we
222 don't know that previously allocated size. Without that
223 knowledge we have to lose some initialization-tracking for the
224 old parts of the object. An alternative is to mark the whole
225 old_size as reachable, but that would lose tracking of writes
226 after the end of the object (by small offsets). Discard the
227 handle to avoid handle leak. */
228 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
229 old_size - size));
230 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
231 return x;
232 }
233
234 r = ggc_internal_alloc_stat (size PASS_MEM_STAT);
235
236 /* Since ggc_get_size returns the size of the pool, not the size of the
237 individually allocated object, we'd access parts of the old object
238 that were marked invalid with the memcpy below. We lose a bit of the
239 initialization-tracking since some of it may be uninitialized. */
240 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));
241
242 memcpy (r, x, old_size);
243
244 /* The old object is not supposed to be used anymore. */
245 ggc_free (x);
246
247 return r;
248 }
249
250 void *
251 ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
252 size_t n ATTRIBUTE_UNUSED)
253 {
254 gcc_assert (c * n == sizeof (struct htab));
255 return ggc_alloc_cleared_htab ();
256 }
257
258 /* TODO: once we actually use type information in GGC, create a new tag
259 gt_gcc_ptr_array and use it for pointer arrays. */
260 void *
261 ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
262 {
263 gcc_assert (sizeof (PTR *) == n);
264 return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
265 }
266
267 /* These are for splay_tree_new_ggc. */
268 void *
269 ggc_splay_alloc (enum gt_types_enum obj_type ATTRIBUTE_UNUSED, int sz,
270 void *nl)
271 {
272 gcc_assert (!nl);
273 return ggc_internal_alloc (sz);
274 }
275
276 void
277 ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
278 {
279 gcc_assert (!nl);
280 }
281
282 /* Print statistics that are independent of the collector in use. */
283 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
284 ? (x) \
285 : ((x) < 1024*1024*10 \
286 ? (x) / 1024 \
287 : (x) / (1024*1024))))
288 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
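/* Usage sketch: a collector typically prints a byte count X in human-readable
   form as, e.g.,  fprintf (stream, "%lu%c", SCALE (x), LABEL (x));
   so 5*1024*1024 bytes prints as "5120k" and 64*1024*1024 as "64M".  */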
289
290 void
291 ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
292 ggc_statistics *stats)
293 {
294 /* Set the pointer so that during collection we will actually gather
295 the statistics. */
296 ggc_stats = stats;
297
298 /* Then do one collection to fill in the statistics. */
299 ggc_collect ();
300
301 /* At present, we don't really gather any interesting statistics. */
302
303 /* Don't gather statistics any more. */
304 ggc_stats = NULL;
305 }
306 \f
307 /* Functions for saving and restoring GCable memory to disk. */
308
309 static htab_t saving_htab;
310
311 struct ptr_data
312 {
313 void *obj;
314 void *note_ptr_cookie;
315 gt_note_pointers note_ptr_fn;
316 gt_handle_reorder reorder_fn;
317 size_t size;
318 void *new_addr;
319 enum gt_types_enum type;
320 };
321
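/* Hash a pointer by dropping its low-order bits, which are alignment bits and
   carry little or no information for GC-allocated objects.  */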
322 #define POINTER_HASH(x) (hashval_t)((long)x >> 3)
323
324 /* Register an object in the hash table. */
325
326 int
327 gt_pch_note_object (void *obj, void *note_ptr_cookie,
328 gt_note_pointers note_ptr_fn,
329 enum gt_types_enum type)
330 {
331 struct ptr_data **slot;
332
333 if (obj == NULL || obj == (void *) 1)
334 return 0;
335
336 slot = (struct ptr_data **)
337 htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
338 INSERT);
339 if (*slot != NULL)
340 {
341 gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
342 && (*slot)->note_ptr_cookie == note_ptr_cookie);
343 return 0;
344 }
345
346 *slot = XCNEW (struct ptr_data);
347 (*slot)->obj = obj;
348 (*slot)->note_ptr_fn = note_ptr_fn;
349 (*slot)->note_ptr_cookie = note_ptr_cookie;
350 if (note_ptr_fn == gt_pch_p_S)
351 (*slot)->size = strlen ((const char *)obj) + 1;
352 else
353 (*slot)->size = ggc_get_size (obj);
354 (*slot)->type = type;
355 return 1;
356 }
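/* Illustrative sketch (not part of this file): the gengtype-generated
   gt_pch_n_* walkers are the usual callers of gt_pch_note_object.  The struct,
   walker, and enumerator names below are hypothetical, but the shape matches
   the generated code: register the object once, and only walk its fields the
   first time it is seen; the last argument is the generated type enumerator.  */
#if 0
struct my_struct { tree decl; const char *name; };   /* hypothetical */

void
gt_pch_n_9my_struct (void *x_p)
{
  struct my_struct *x = (struct my_struct *) x_p;
  if (gt_pch_note_object (x, x, gt_pch_p_9my_struct, gt_ggc_e_9my_struct))
    {
      gt_pch_nx_tree_node ((*x).decl);   /* Note each GC-pointer field.  */
      gt_pch_n_S ((*x).name);            /* Strings go through gt_pch_n_S.  */
    }
}
#endif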
357
358 /* Register the reorder function REORDER_FN for OBJ, which must already be in the hash table. */
359
360 void
361 gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
362 gt_handle_reorder reorder_fn)
363 {
364 struct ptr_data *data;
365
366 if (obj == NULL || obj == (void *) 1)
367 return;
368
369 data = (struct ptr_data *)
370 htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
371 gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
372
373 data->reorder_fn = reorder_fn;
374 }
375
376 /* Hash and equality functions for saving_htab, callbacks for htab_create. */
377
378 static hashval_t
379 saving_htab_hash (const void *p)
380 {
381 return POINTER_HASH (((const struct ptr_data *)p)->obj);
382 }
383
384 static int
385 saving_htab_eq (const void *p1, const void *p2)
386 {
387 return ((const struct ptr_data *)p1)->obj == p2;
388 }
389
390 /* Handy state for the traversal functions. */
391
392 struct traversal_state
393 {
394 FILE *f;
395 struct ggc_pch_data *d;
396 size_t count;
397 struct ptr_data **ptrs;
398 size_t ptrs_i;
399 };
400
401 /* Callbacks for htab_traverse. */
402
403 static int
404 call_count (void **slot, void *state_p)
405 {
406 struct ptr_data *d = (struct ptr_data *)*slot;
407 struct traversal_state *state = (struct traversal_state *)state_p;
408
409 ggc_pch_count_object (state->d, d->obj, d->size,
410 d->note_ptr_fn == gt_pch_p_S,
411 d->type);
412 state->count++;
413 return 1;
414 }
415
416 static int
417 call_alloc (void **slot, void *state_p)
418 {
419 struct ptr_data *d = (struct ptr_data *)*slot;
420 struct traversal_state *state = (struct traversal_state *)state_p;
421
422 d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
423 d->note_ptr_fn == gt_pch_p_S,
424 d->type);
425 state->ptrs[state->ptrs_i++] = d;
426 return 1;
427 }
428
429 /* Callback for qsort. */
430
431 static int
432 compare_ptr_data (const void *p1_p, const void *p2_p)
433 {
434 const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
435 const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
436 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
437 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
438 }
439
440 /* Callbacks for note_ptr_fn. */
441
442 static void
443 relocate_ptrs (void *ptr_p, void *state_p)
444 {
445 void **ptr = (void **)ptr_p;
446 struct traversal_state *state ATTRIBUTE_UNUSED
447 = (struct traversal_state *)state_p;
448 struct ptr_data *result;
449
450 if (*ptr == NULL || *ptr == (void *)1)
451 return;
452
453 result = (struct ptr_data *)
454 htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
455 gcc_assert (result);
456 *ptr = result->new_addr;
457 }
458
459 /* Write out, after relocation, the pointers in TAB. */
460 static void
461 write_pch_globals (const struct ggc_root_tab * const *tab,
462 struct traversal_state *state)
463 {
464 const struct ggc_root_tab *const *rt;
465 const struct ggc_root_tab *rti;
466 size_t i;
467
468 for (rt = tab; *rt; rt++)
469 for (rti = *rt; rti->base != NULL; rti++)
470 for (i = 0; i < rti->nelt; i++)
471 {
472 void *ptr = *(void **)((char *)rti->base + rti->stride * i);
473 struct ptr_data *new_ptr;
474 if (ptr == NULL || ptr == (void *)1)
475 {
476 if (fwrite (&ptr, sizeof (void *), 1, state->f)
477 != 1)
478 fatal_error ("can't write PCH file: %m");
479 }
480 else
481 {
482 new_ptr = (struct ptr_data *)
483 htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
484 if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
485 != 1)
486 fatal_error ("can't write PCH file: %m");
487 }
488 }
489 }
490
491 /* Hold the information we need to mmap the file back in. */
492
493 struct mmap_info
494 {
495 size_t offset;
496 size_t size;
497 void *preferred_base;
498 };
499
500 /* Write out the state of the compiler to F. */
501
502 void
503 gt_pch_save (FILE *f)
504 {
505 const struct ggc_root_tab *const *rt;
506 const struct ggc_root_tab *rti;
507 size_t i;
508 struct traversal_state state;
509 char *this_object = NULL;
510 size_t this_object_size = 0;
511 struct mmap_info mmi;
512 const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();
513
514 gt_pch_save_stringpool ();
515
516 timevar_push (TV_PCH_PTR_REALLOC);
517 saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);
518
519 for (rt = gt_ggc_rtab; *rt; rt++)
520 for (rti = *rt; rti->base != NULL; rti++)
521 for (i = 0; i < rti->nelt; i++)
522 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
523
524 for (rt = gt_pch_cache_rtab; *rt; rt++)
525 for (rti = *rt; rti->base != NULL; rti++)
526 for (i = 0; i < rti->nelt; i++)
527 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
528
529 /* Prepare the objects for writing, determine addresses and such. */
530 state.f = f;
531 state.d = init_ggc_pch ();
532 state.count = 0;
533 htab_traverse (saving_htab, call_count, &state);
534
535 mmi.size = ggc_pch_total_size (state.d);
536
537 /* Try to arrange things so that no relocation is necessary, but
538 don't try very hard. On most platforms, this will always work,
539 and on the rest it's a lot of work to do better.
540 (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
541 HOST_HOOKS_GT_PCH_USE_ADDRESS.) */
542 mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
543
544 ggc_pch_this_base (state.d, mmi.preferred_base);
545
546 state.ptrs = XNEWVEC (struct ptr_data *, state.count);
547 state.ptrs_i = 0;
548
549 htab_traverse (saving_htab, call_alloc, &state);
550 timevar_pop (TV_PCH_PTR_REALLOC);
551
552 timevar_push (TV_PCH_PTR_SORT);
553 qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
554 timevar_pop (TV_PCH_PTR_SORT);
555
556 /* Write out all the scalar variables. */
557 for (rt = gt_pch_scalar_rtab; *rt; rt++)
558 for (rti = *rt; rti->base != NULL; rti++)
559 if (fwrite (rti->base, rti->stride, 1, f) != 1)
560 fatal_error ("can't write PCH file: %m");
561
562 /* Write out all the global pointers, after translation. */
563 write_pch_globals (gt_ggc_rtab, &state);
564 write_pch_globals (gt_pch_cache_rtab, &state);
565
566 /* Pad the PCH file so that the mmapped area starts on an allocation
567 granularity (usually page) boundary. */
568 {
569 long o;
570 o = ftell (state.f) + sizeof (mmi);
571 if (o == -1)
572 fatal_error ("can't get position in PCH file: %m");
573 mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
574 if (mmi.offset == mmap_offset_alignment)
575 mmi.offset = 0;
576 mmi.offset += o;
577 }
578 if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
579 fatal_error ("can't write PCH file: %m");
580 if (mmi.offset != 0
581 && fseek (state.f, mmi.offset, SEEK_SET) != 0)
582 fatal_error ("can't write padding to PCH file: %m");
583
584 ggc_pch_prepare_write (state.d, state.f);
585
586 /* Actually write out the objects. */
587 for (i = 0; i < state.count; i++)
588 {
589 if (this_object_size < state.ptrs[i]->size)
590 {
591 this_object_size = state.ptrs[i]->size;
592 this_object = XRESIZEVAR (char, this_object, this_object_size);
593 }
594 memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
595 if (state.ptrs[i]->reorder_fn != NULL)
596 state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
597 state.ptrs[i]->note_ptr_cookie,
598 relocate_ptrs, &state);
599 state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
600 state.ptrs[i]->note_ptr_cookie,
601 relocate_ptrs, &state);
602 ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
603 state.ptrs[i]->new_addr, state.ptrs[i]->size,
604 state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
605 if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
606 memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
607 }
608 ggc_pch_finish (state.d, state.f);
609 gt_pch_fixup_stringpool ();
610
611 free (state.ptrs);
612 htab_delete (saving_htab);
613 }
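/* For reference, the layout gt_pch_save produces (and gt_pch_restore reads
   back) is, in order: the raw bytes of every gt_pch_scalar_rtab variable, the
   translated global pointers from gt_ggc_rtab and gt_pch_cache_rtab, the
   struct mmap_info, padding up to the host's allocation granularity, and then
   the relocated object images, plus whatever collector-specific trailer
   ggc_pch_finish chooses to append.  */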
614
615 /* Read the state of the compiler back in from F. */
616
617 void
618 gt_pch_restore (FILE *f)
619 {
620 const struct ggc_root_tab *const *rt;
621 const struct ggc_root_tab *rti;
622 size_t i;
623 struct mmap_info mmi;
624 int result;
625
626 /* Delete any deletable objects. This makes ggc_pch_read much
627 faster, as it can be sure that no GCable objects remain other
628 than the ones just read in. */
629 for (rt = gt_ggc_deletable_rtab; *rt; rt++)
630 for (rti = *rt; rti->base != NULL; rti++)
631 memset (rti->base, 0, rti->stride);
632
633 /* Read in all the scalar variables. */
634 for (rt = gt_pch_scalar_rtab; *rt; rt++)
635 for (rti = *rt; rti->base != NULL; rti++)
636 if (fread (rti->base, rti->stride, 1, f) != 1)
637 fatal_error ("can't read PCH file: %m");
638
639 /* Read in all the global pointers, in 6 easy loops. */
640 for (rt = gt_ggc_rtab; *rt; rt++)
641 for (rti = *rt; rti->base != NULL; rti++)
642 for (i = 0; i < rti->nelt; i++)
643 if (fread ((char *)rti->base + rti->stride * i,
644 sizeof (void *), 1, f) != 1)
645 fatal_error ("can't read PCH file: %m");
646
647 for (rt = gt_pch_cache_rtab; *rt; rt++)
648 for (rti = *rt; rti->base != NULL; rti++)
649 for (i = 0; i < rti->nelt; i++)
650 if (fread ((char *)rti->base + rti->stride * i,
651 sizeof (void *), 1, f) != 1)
652 fatal_error ("can't read PCH file: %m");
653
654 if (fread (&mmi, sizeof (mmi), 1, f) != 1)
655 fatal_error ("can't read PCH file: %m");
656
657 result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
658 fileno (f), mmi.offset);
659 if (result < 0)
660 fatal_error ("had to relocate PCH");
661 if (result == 0)
662 {
663 if (fseek (f, mmi.offset, SEEK_SET) != 0
664 || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
665 fatal_error ("can't read PCH file: %m");
666 }
667 else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
668 fatal_error ("can't read PCH file: %m");
669
670 ggc_pch_read (f, mmi.preferred_base);
671
672 gt_pch_restore_stringpool ();
673 }
674
675 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
676 Select no address whatsoever, and let gt_pch_save choose what it will with
677 malloc, presumably. */
678
679 void *
680 default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
681 int fd ATTRIBUTE_UNUSED)
682 {
683 return NULL;
684 }
685
686 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
687 Allocate SIZE bytes with malloc. Return 0 if the address we got is the
688 same as base, indicating that the memory has been allocated but needs to
689 be read in from the file. Return -1 if the address differs, to relocation
690 of the PCH file would be required. */
691
692 int
693 default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
694 size_t offset ATTRIBUTE_UNUSED)
695 {
696 void *addr = xmalloc (size);
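/* (addr == base) - 1 yields 0 when malloc happened to return BASE and -1
   otherwise, matching the contract described above. */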
697 return (addr == base) - 1;
698 }
699
700 /* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY. Return the
701 alignment required for allocating virtual memory. Usually this is the
702 same as pagesize. */
703
704 size_t
705 default_gt_pch_alloc_granularity (void)
706 {
707 return getpagesize();
708 }
709
710 #if HAVE_MMAP_FILE
711 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
712 We temporarily allocate SIZE bytes, and let the kernel place the data
713 wherever it will. If it worked, that's our spot; if not, we're likely
714 to be in trouble. */
715
716 void *
717 mmap_gt_pch_get_address (size_t size, int fd)
718 {
719 void *ret;
720
721 ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
722 if (ret == (void *) MAP_FAILED)
723 ret = NULL;
724 else
725 munmap ((caddr_t) ret, size);
726
727 return ret;
728 }
729
730 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
731 Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
732 mapping the data at BASE, -1 if we couldn't.
733
734 This version assumes that the kernel honors the START operand of mmap
735 even without MAP_FIXED if START through START+SIZE are not currently
736 mapped with something. */
737
738 int
739 mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
740 {
741 void *addr;
742
743 /* We're called with size == 0 if we're not planning to load a PCH
744 file at all. This allows the hook to free any static space that
745 we might have allocated at link time. */
746 if (size == 0)
747 return -1;
748
749 addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
750 fd, offset);
751
752 return addr == base ? 1 : -1;
753 }
754 #endif /* HAVE_MMAP_FILE */
755
756 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
757
758 /* Modify the bound based on rlimits. */
759 static double
760 ggc_rlimit_bound (double limit)
761 {
762 #if defined(HAVE_GETRLIMIT)
763 struct rlimit rlim;
764 # if defined (RLIMIT_AS)
765 /* RLIMIT_AS is what POSIX says is the limit on mmap. Presumably
766 any OS which has RLIMIT_AS also has a working mmap that GCC will use. */
767 if (getrlimit (RLIMIT_AS, &rlim) == 0
768 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
769 && rlim.rlim_cur < limit)
770 limit = rlim.rlim_cur;
771 # elif defined (RLIMIT_DATA)
772 /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
773 might be on an OS that has a broken mmap. (Others don't bound
774 mmap at all, apparently.) */
775 if (getrlimit (RLIMIT_DATA, &rlim) == 0
776 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
777 && rlim.rlim_cur < limit
778 /* Darwin has this horribly bogus default setting of
779 RLIMIT_DATA, to 6144Kb. No-one notices because RLIMIT_DATA
780 appears to be ignored. Ignore such silliness. If a limit
781 this small was actually effective for mmap, GCC wouldn't even
782 start up. */
783 && rlim.rlim_cur >= 8 * 1024 * 1024)
784 limit = rlim.rlim_cur;
785 # endif /* RLIMIT_AS or RLIMIT_DATA */
786 #endif /* HAVE_GETRLIMIT */
787
788 return limit;
789 }
790
791 /* Heuristic to set a default for GGC_MIN_EXPAND. */
792 static int
793 ggc_min_expand_heuristic (void)
794 {
795 double min_expand = physmem_total();
796
797 /* Adjust for rlimits. */
798 min_expand = ggc_rlimit_bound (min_expand);
799
800 /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
801 a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */
802 min_expand /= 1024*1024*1024;
803 min_expand *= 70;
804 min_expand = MIN (min_expand, 70);
805 min_expand += 30;
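/* Worked example: with 512MB of physical memory (and no tighter rlimit),
   this gives 30 + 70 * 0.5 = 65, i.e. allow the heap to grow by about 65%
   between collections; with 1GB or more it saturates at 100. */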
806
807 return min_expand;
808 }
809
810 /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */
811 static int
812 ggc_min_heapsize_heuristic (void)
813 {
814 double phys_kbytes = physmem_total();
815 double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);
816
817 phys_kbytes /= 1024; /* Convert to Kbytes. */
818 limit_kbytes /= 1024;
819
820 /* The heuristic is RAM/8, with a lower bound of 4M and an upper
821 bound of 128M (when RAM >= 1GB). */
822 phys_kbytes /= 8;
823
824 #if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
825 /* Try not to overrun the RSS limit while doing garbage collection.
826 The RSS limit is only advisory, so no margin is subtracted. */
827 {
828 struct rlimit rlim;
829 if (getrlimit (RLIMIT_RSS, &rlim) == 0
830 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
831 phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
832 }
833 # endif
834
835 /* Don't blindly run over our data limit; do GC at least when the
836 *next* GC would be within 20Mb of the limit or within a quarter of
837 the limit, whichever is larger. If GCC does hit the data limit,
838 compilation will fail, so this tries to be conservative. */
839 limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
840 limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
841 phys_kbytes = MIN (phys_kbytes, limit_kbytes);
842
843 phys_kbytes = MAX (phys_kbytes, 4 * 1024);
844 phys_kbytes = MIN (phys_kbytes, 128 * 1024);
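/* Worked example: with 512MB of RAM and no constraining rlimits this yields
   512MB/8 = 64MB (65536 kB); machines with 1GB or more cap out at the 128MB
   upper bound, and very small ones never drop below 4MB. */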
845
846 return phys_kbytes;
847 }
848 #endif
849
850 void
851 init_ggc_heuristics (void)
852 {
853 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
854 set_param_value ("ggc-min-expand", ggc_min_expand_heuristic ());
855 set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic ());
856 #endif
857 }
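/* For context (a sketch of how a collector such as ggc-page consumes these
   parameters, paraphrased rather than quoted): collection only proceeds once
   the heap has grown by ggc-min-expand percent beyond the larger of the last
   post-collection size and ggc-min-heapsize, unless ggc_force_collect is set.
   The allocated_* names below are placeholders.  */
#if 0
size_t allocated_last_gc = MAX (allocated_after_last_gc,
				(size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
size_t min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
if (allocated_now < allocated_last_gc + min_expand && !ggc_force_collect)
  return;   /* Not enough growth yet; skip this collection.  */
#endif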
858
859 #ifdef GATHER_STATISTICS
860
861 /* Data structure used to store per-call-site statistics. */
862 struct loc_descriptor
863 {
864 const char *file;
865 int line;
866 const char *function;
867 int times;
868 size_t allocated;
869 size_t overhead;
870 size_t freed;
871 size_t collected;
872 };
873
874 /* Hashtable used for statistics. */
875 static htab_t loc_hash;
876
877 /* Hash table helper functions. */
878 static hashval_t
879 hash_descriptor (const void *p)
880 {
881 const struct loc_descriptor *const d = (const struct loc_descriptor *) p;
882
883 return htab_hash_pointer (d->function) | d->line;
884 }
885
886 static int
887 eq_descriptor (const void *p1, const void *p2)
888 {
889 const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
890 const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;
891
892 return (d->file == d2->file && d->line == d2->line
893 && d->function == d2->function);
894 }
895
896 /* Hash table mapping the address of an allocated object to its loc descriptor. */
897 static htab_t ptr_hash;
898 struct ptr_hash_entry
899 {
900 void *ptr;
901 struct loc_descriptor *loc;
902 size_t size;
903 };
904
905 /* Hash table helper functions. */
906 static hashval_t
907 hash_ptr (const void *p)
908 {
909 const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;
910
911 return htab_hash_pointer (d->ptr);
912 }
913
914 static int
915 eq_ptr (const void *p1, const void *p2)
916 {
917 const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;
918
919 return (p->ptr == p2);
920 }
921
922 /* Return the descriptor for a given call site, creating a new one if needed. */
923 static struct loc_descriptor *
924 loc_descriptor (const char *name, int line, const char *function)
925 {
926 struct loc_descriptor loc;
927 struct loc_descriptor **slot;
928
929 loc.file = name;
930 loc.line = line;
931 loc.function = function;
932 if (!loc_hash)
933 loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);
934
935 slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
936 if (*slot)
937 return *slot;
938 *slot = XCNEW (struct loc_descriptor);
939 (*slot)->file = name;
940 (*slot)->line = line;
941 (*slot)->function = function;
942 return *slot;
943 }
944
945 /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */
946 void
947 ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
948 const char *name, int line, const char *function)
949 {
950 struct loc_descriptor *loc = loc_descriptor (name, line, function);
951 struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
952 PTR *slot;
953
954 p->ptr = ptr;
955 p->loc = loc;
956 p->size = allocated + overhead;
957 if (!ptr_hash)
958 ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
959 slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
960 gcc_assert (!*slot);
961 *slot = p;
962
963 loc->times++;
964 loc->allocated+=allocated;
965 loc->overhead+=overhead;
966 }
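/* Illustrative sketch (not part of this file): an allocator built on this
   bookkeeping records each object right after carving it out, forwarding the
   caller's location from its own MEM_STAT_DECL parameters.  The names and the
   allocated/overhead split below are placeholders for one plausible
   convention, not GCC's exact one.  */
#if 0
void *result = carve_object (rounded_size);            /* hypothetical */
ggc_record_overhead (rounded_size,                      /* bytes reserved */
		     rounded_size - requested_size,     /* rounding waste */
		     result, _loc_name, _loc_line, _loc_function);
#endif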
967
968 /* Helper function for ggc_prune_overhead_list. See if the pointer in SLOT is
969 still marked and remove it from the hash table if it is not. */
970 static int
971 ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
972 {
973 struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
974 if (!ggc_marked_p (p->ptr))
975 {
976 p->loc->collected += p->size;
977 htab_clear_slot (ptr_hash, slot);
978 free (p);
979 }
980 return 1;
981 }
982
983 /* After live values have been marked, walk all recorded pointers and see if
984 they are still live. */
985 void
986 ggc_prune_overhead_list (void)
987 {
988 htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
989 }
990
991 /* Notice that the pointer has been freed. */
992 void
993 ggc_free_overhead (void *ptr)
994 {
995 PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
996 NO_INSERT);
997 struct ptr_hash_entry *p;
998 /* The pointer might not be found if a PCH read happened between the allocation
999 and the ggc_free () call. FIXME: account memory properly in the presence of
1000 PCH. */
1001 if (!slot)
1002 return;
1003 p = (struct ptr_hash_entry *) *slot;
1004 p->loc->freed += p->size;
1005 htab_clear_slot (ptr_hash, slot);
1006 free (p);
1007 }
1008
1009 /* Helper for qsort; sort descriptors by amount of memory consumed. */
1010 static int
1011 final_cmp_statistic (const void *loc1, const void *loc2)
1012 {
1013 const struct loc_descriptor *const l1 =
1014 *(const struct loc_descriptor *const *) loc1;
1015 const struct loc_descriptor *const l2 =
1016 *(const struct loc_descriptor *const *) loc2;
1017 long diff;
1018 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
1019 (l2->allocated + l2->overhead - l2->freed));
1020 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1021 }
1022
1023 /* Helper for qsort; sort descriptors by amount of memory consumed. */
1024 static int
1025 cmp_statistic (const void *loc1, const void *loc2)
1026 {
1027 const struct loc_descriptor *const l1 =
1028 *(const struct loc_descriptor *const *) loc1;
1029 const struct loc_descriptor *const l2 =
1030 *(const struct loc_descriptor *const *) loc2;
1031 long diff;
1032
1033 diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
1034 (l2->allocated + l2->overhead - l2->freed - l2->collected));
1035 if (diff)
1036 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1037 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
1038 (l2->allocated + l2->overhead - l2->freed));
1039 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1040 }
1041
1042 /* Collect an array of the descriptors from the hash table. */
1043 static struct loc_descriptor **loc_array;
1044 static int
1045 add_statistics (void **slot, void *b)
1046 {
1047 int *n = (int *)b;
1048 loc_array[*n] = (struct loc_descriptor *) *slot;
1049 (*n)++;
1050 return 1;
1051 }
1052
1053 /* Dump per-site memory statistics. */
1054 #endif
1055 void
1056 dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED)
1057 {
1058 #ifdef GATHER_STATISTICS
1059 int nentries = 0;
1060 char s[4096];
1061 size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
1062 int i;
1063
1064 ggc_force_collect = true;
1065 ggc_collect ();
1066
1067 loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
1068 fprintf (stderr, "-------------------------------------------------------\n");
1069 fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
1070 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1071 fprintf (stderr, "-------------------------------------------------------\n");
1072 htab_traverse (loc_hash, add_statistics, &nentries);
1073 qsort (loc_array, nentries, sizeof (*loc_array),
1074 final ? final_cmp_statistic : cmp_statistic);
1075 for (i = 0; i < nentries; i++)
1076 {
1077 struct loc_descriptor *d = loc_array[i];
1078 allocated += d->allocated;
1079 times += d->times;
1080 freed += d->freed;
1081 collected += d->collected;
1082 overhead += d->overhead;
1083 }
1084 for (i = 0; i < nentries; i++)
1085 {
1086 struct loc_descriptor *d = loc_array[i];
1087 if (d->allocated)
1088 {
1089 const char *s1 = d->file;
1090 const char *s2;
1091 while ((s2 = strstr (s1, "gcc/")))
1092 s1 = s2 + 4;
1093 sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
1094 s[48] = 0;
1095 fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
1096 (long)d->collected,
1097 (d->collected) * 100.0 / collected,
1098 (long)d->freed,
1099 (d->freed) * 100.0 / freed,
1100 (long)(d->allocated + d->overhead - d->freed - d->collected),
1101 (d->allocated + d->overhead - d->freed - d->collected) * 100.0
1102 / (allocated + overhead - freed - collected),
1103 (long)d->overhead,
1104 d->overhead * 100.0 / overhead,
1105 (long)d->times);
1106 }
1107 }
1108 fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
1109 "Total", (long)collected, (long)freed,
1110 (long)(allocated + overhead - freed - collected), (long)overhead,
1111 (long)times);
1112 fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
1113 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1114 fprintf (stderr, "-------------------------------------------------------\n");
1115 ggc_force_collect = false;
1116 #endif
1117 }