/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "diagnostic-core.h"
#include "params.h"
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "plugin.h"
#include "vec.h"
#include "timevar.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
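
/* Illustrative sketch, not part of the original file: a plugin normally
   reaches this function through the PLUGIN_REGISTER_GGC_ROOTS event
   rather than by calling it directly.  Assuming a hypothetical GTY-ed
   plugin global `my_plugin_decl' and the stock tree markers, the
   registration could look like:

     static GTY (()) tree my_plugin_decl;

     static const struct ggc_root_tab my_ggc_roots[] = {
       { &my_plugin_decl, 1, sizeof (my_plugin_decl),
         &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };

     register_callback (plugin_name, PLUGIN_REGISTER_GGC_ROOTS,
                        NULL, (void *) my_ggc_roots);

   The entry fields correspond to ggc_root_tab's base, nelt, stride,
   cb and pchw members, which ggc_mark_root_tab walks below.  */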

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                            MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
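
/* Illustrative pairing of the two macros above (an assumption about the
   intended use, mirroring similar statistics code in the collectors):

     fprintf (stream, "%lu%c", SCALE (nbytes), LABEL (nbytes));

   would print 20480 as "20k" and 20971520 as "20M", while values below
   10240 stay in plain bytes.  */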

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

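/* Hash a pointer by discarding its low-order bits, which are usually zero
   thanks to allocation alignment and therefore carry no information.  */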
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data *value_type;
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register a reordering function for an object already noted in the
   hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
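  /* Subtracting the two comparisons yields -1, 0 or 1 and, unlike
     subtracting the addresses themselves, cannot overflow.  */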
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "can%'t write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "can%'t write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
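  /* Worked example, assuming a 4096-byte granularity: if o (the position
     just past the mmi record) is 10000, then 10000 % 4096 == 1808, so
     mmi.offset becomes 4096 - 1808 == 2288 and finally 10000 + 2288
     == 12288 == 3 * 4096, the next granularity boundary.  */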
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error (input_location, "can%'t read PCH file: %m");

  /* Read in all the global pointers, in three easy nested loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error (input_location, "can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    fatal_error (input_location, "had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        fatal_error (input_location, "can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, indicating
   that relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
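  /* (addr == base) evaluates to 1 on a match and 0 otherwise, so this
     produces the 0 / -1 convention described above without a branch.  */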
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */
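/* The probe mapping is released again right away; the address is "ours"
   only in the sense that it was free at probe time, and the later
   gt_pch_use_address hook is expected to map the file there for real.  */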

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
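  /* Illustrative arithmetic: with 512MB of usable RAM this yields
     30 + 70 * 0.5 == 65, i.e. collect once the heap has grown by 65%;
     with 4GB the 70% term saturates and the result is 100.  */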
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
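  /* Illustrative arithmetic: 256MB of RAM gives a 32MB minimum heap,
     16MB is clamped up to the 4MB floor, and 1GB or more is clamped
     down to the 128MB ceiling, subject to the rlimit adjustments
     below.  */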
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
#endif

/* Set defaults for the GGC_MIN_EXPAND and GGC_MIN_HEAPSIZE parameters
   using the heuristics above; with GC checking enabled the built-in
   parameter defaults are kept instead.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}

/* Data structure used to store per-call-site statistics.  */
struct ggc_loc_descriptor
{
  const char *file;
  int line;
  const char *function;
  int times;
  size_t allocated;
  size_t overhead;
  size_t freed;
  size_t collected;
};

/* Hash table helper.  */

struct ggc_loc_desc_hasher : typed_noop_remove <ggc_loc_descriptor>
{
  typedef ggc_loc_descriptor *value_type;
  typedef ggc_loc_descriptor *compare_type;
  static inline hashval_t hash (const ggc_loc_descriptor *);
  static inline bool equal (const ggc_loc_descriptor *,
                            const ggc_loc_descriptor *);
};

inline hashval_t
ggc_loc_desc_hasher::hash (const ggc_loc_descriptor *d)
{
  return htab_hash_pointer (d->function) | d->line;
}

inline bool
ggc_loc_desc_hasher::equal (const ggc_loc_descriptor *d,
                            const ggc_loc_descriptor *d2)
{
  return (d->file == d2->file && d->line == d2->line
          && d->function == d2->function);
}

/* Hashtable used for statistics.  */
static hash_table<ggc_loc_desc_hasher> *loc_hash;

struct ggc_ptr_hash_entry
{
  void *ptr;
  struct ggc_loc_descriptor *loc;
  size_t size;
};

/* Helper for ptr_hash table.  */

struct ptr_hash_hasher : typed_noop_remove <ggc_ptr_hash_entry>
{
  typedef ggc_ptr_hash_entry *value_type;
  typedef void *compare_type;
  static inline hashval_t hash (const ggc_ptr_hash_entry *);
  static inline bool equal (const ggc_ptr_hash_entry *, const void *);
};

inline hashval_t
ptr_hash_hasher::hash (const ggc_ptr_hash_entry *d)
{
  return htab_hash_pointer (d->ptr);
}

inline bool
ptr_hash_hasher::equal (const ggc_ptr_hash_entry *p, const void *p2)
{
  return (p->ptr == p2);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static hash_table<ptr_hash_hasher> *ptr_hash;

/* Return descriptor for given call site, create new one if needed.  */
static struct ggc_loc_descriptor *
make_loc_descriptor (const char *name, int line, const char *function)
{
  struct ggc_loc_descriptor loc;
  struct ggc_loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = new hash_table<ggc_loc_desc_hasher> (10);

  slot = loc_hash->find_slot (&loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct ggc_loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
                     const char *name, int line, const char *function)
{
  struct ggc_loc_descriptor *loc = make_loc_descriptor (name, line, function);
  struct ggc_ptr_hash_entry *p = XNEW (struct ggc_ptr_hash_entry);
  ggc_ptr_hash_entry **slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = new hash_table<ptr_hash_hasher> (10);
  slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated += allocated;
  loc->overhead += overhead;
}

/* Helper function for prune_overhead_list.  See if SLOT is still marked
   and remove it from the hashtable if it is not.  */
int
ggc_prune_ptr (ggc_ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ggc_ptr_hash_entry *p = *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      ptr_hash->clear_slot (slot);
      free (p);
    }
  return 1;
}

/* After live values have been marked, walk all recorded pointers and see
   if they are still live.  */
void
ggc_prune_overhead_list (void)
{
  ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_ptr_hash_entry **slot
    = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
  struct ggc_ptr_hash_entry *p;
  /* The pointer might not be found if a PCH read happened between the
     allocation and the ggc_free () call.  FIXME: account memory properly
     in the presence of PCH.  */
  if (!slot)
    return;
  p = (struct ggc_ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  ptr_hash->clear_slot (slot);
  free (p);
}

/* Helper for qsort; sort descriptors by the amount of memory that was
   never explicitly freed (collected garbage still counts).  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct ggc_loc_descriptor *const l1 =
    *(const struct ggc_loc_descriptor *const *) loc1;
  const struct ggc_loc_descriptor *const l2 =
    *(const struct ggc_loc_descriptor *const *) loc2;
  long diff;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Helper for qsort; sort descriptors by the amount of memory still live
   (neither freed nor collected), breaking ties the way
   final_cmp_statistic does.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct ggc_loc_descriptor *const l1 =
    *(const struct ggc_loc_descriptor *const *) loc1;
  const struct ggc_loc_descriptor *const l2 =
    *(const struct ggc_loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
          (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
          (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}

/* Collect array of the descriptors from hashtable.  */
static struct ggc_loc_descriptor **loc_array;
int
ggc_add_statistics (ggc_loc_descriptor **slot, int *n)
{
  loc_array[*n] = *slot;
  (*n)++;
  return 1;
}

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct ggc_loc_descriptor *,
                        loc_hash->elements_with_deleted ());
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  loc_hash->traverse <int *, ggc_add_statistics> (&nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
         final ? final_cmp_statistic : cmp_statistic);
  for (i = 0; i < nentries; i++)
    {
      struct ggc_loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  for (i = 0; i < nentries; i++)
    {
      struct ggc_loc_descriptor *d = loc_array[i];
      if (d->allocated)
        {
          const char *s1 = d->file;
          const char *s2;
          while ((s2 = strstr (s1, "gcc/")))
            s1 = s2 + 4;
          sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
          s[48] = 0;
          fprintf (stderr,
                   "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n",
                   s,
                   (long)d->collected,
                   (d->collected) * 100.0 / collected,
                   (long)d->freed,
                   (d->freed) * 100.0 / freed,
                   (long)(d->allocated + d->overhead - d->freed - d->collected),
                   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
                   / (allocated + overhead - freed - collected),
                   (long)d->overhead,
                   d->overhead * 100.0 / overhead,
                   (long)d->times);
        }
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
           "Total", (long)collected, (long)freed,
           (long)(allocated + overhead - freed - collected), (long)overhead,
           (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
           "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
}