/* Support valgrind 3.3 for --enable-checking=valgrind.
   [gcc.git] gcc/ggc-common.c  */
1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Generic garbage collection (GC) functions and data, not specific to
22 any particular GC implementation. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "hashtab.h"
28 #include "ggc.h"
29 #include "toplev.h"
30 #include "params.h"
31 #include "hosthooks.h"
32 #include "hosthooks-def.h"
33
34 #ifdef HAVE_SYS_RESOURCE_H
35 # include <sys/resource.h>
36 #endif
37
38 #ifdef HAVE_MMAP_FILE
39 # include <sys/mman.h>
40 # ifdef HAVE_MINCORE
41 /* This is on Solaris. */
42 # include <sys/types.h>
43 # endif
44 #endif
45
46 #ifndef MAP_FAILED
47 # define MAP_FAILED ((void *)-1)
48 #endif
49
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* Statistics about the allocation.  Non-null only while
   ggc_print_common_statistics is gathering them.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the static helpers defined below.  */
static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);
static double ggc_rlimit_bound (double);
69 /* Maintain global roots that are preserved during GC. */
70
71 /* Process a slot of an htab by deleting it if it has not been marked. */
72
73 static int
74 ggc_htab_delete (void **slot, void *info)
75 {
76 const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
77
78 if (! (*r->marked_p) (*slot))
79 htab_clear_slot (*r->base, slot);
80 else
81 (*r->cb) (*slot);
82
83 return 1;
84 }
85
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  /* Deletable roots are simply cleared rather than marked, so their
     referents become garbage.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark every pointer slot of every ordinary root.  Each table entry
     describes NELT pointers starting at BASE, STRIDE bytes apart.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  The table object and its entry array
     are marked too so the htab itself survives collection.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
	{
	  ggc_set_mark (*cti->base);
	  htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
	  ggc_set_mark ((*cti->base)->entries);
	}
}
119
120 /* Allocate a block of memory, then clear it. */
121 void *
122 ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
123 {
124 void *buf = ggc_alloc_stat (size PASS_MEM_STAT);
125 memset (buf, 0, size);
126 return buf;
127 }
128
/* Resize a block of memory, possibly re-allocating it.  A null X acts
   like a plain allocation; shrinking is done in place.  Returns the
   (possibly moved) block; on a move the old block is freed.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc_stat (size PASS_MEM_STAT);

  /* NOTE: this is the size of the containing pool object, which may be
     larger than what was originally requested.  */
  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
174
/* Like ggc_alloc_cleared, but performs a multiplication.  Allocates
   zeroed space for S1 elements of S2 bytes each.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  /* Guard against silent wrap-around in S1 * S2, which would make the
     allocation smaller than the caller expects and corrupt the heap on
     first use.  */
  gcc_assert (s2 == 0 || s1 <= (size_t) -1 / s2);
  return ggc_alloc_cleared (s1 * s2);
}
181
/* Allocation callback for splay_tree_new_ggc: allocate SZ bytes from
   GC memory.  NL is the splay tree's user-data pointer, which must be
   null for GC-backed trees.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (nl == NULL);
  return ggc_alloc (sz);
}
189
/* Deallocation callback for splay_tree_new_ggc: GC memory is never
   freed explicitly, so this only validates that no user data was
   passed.  */
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
195
/* Print statistics that are independent of the collector in use.  */
/* Scale a byte count X down to a value below 10240 (bytes, Kb or Mb,
   matching LABEL below).  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
/* Unit suffix matching the scaling SCALE applied to X.  */
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
203
/* Print statistics that are independent of the collector in use.
   STATS is filled in during a forced collection; STREAM is currently
   unused.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
220 \f
/* Functions for saving and restoring GCable memory to disk.  */

/* Maps object addresses to their ptr_data records during gt_pch_save.  */
static htab_t saving_htab;

/* Everything we need to know about one object being saved to a PCH.  */
struct ptr_data
{
  void *obj;			/* The object in GC memory.  */
  void *note_ptr_cookie;	/* Cookie handed back to NOTE_PTR_FN.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside the object.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Object size in bytes.  */
  void *new_addr;		/* Address the object will occupy in the PCH.  */
  enum gt_types_enum type;	/* Type classification for the PCH allocator.  */
};
235
/* Hash a pointer by dropping the low-order bits, which are usually
   zero due to alignment.  The argument and the whole expansion are
   parenthesized so the macro is safe for any expression.  */
#define POINTER_HASH(x) ((hashval_t)((long)(x) >> 3))
237
/* Register an object in the hash table.  Returns nonzero for a new
   registration, zero if OBJ is a sentinel or already registered.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  /* NULL and (void *) 1 are sentinel values and are never saved or
     relocated.  */
  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      /* A repeat registration must agree with the first one.  */
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    /* Strings use the string walker; their size is the NUL-terminated
       length rather than a GC pool size.  */
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}
271
/* Register a reorder function for OBJ, which must already have been
   registered with gt_pch_note_object under the same cookie.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  /* Sentinel values have no registration to update.  */
  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
288
/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  /* Entries are keyed by the address of the object they describe.  */
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  /* P1 is a stored ptr_data; P2 is the raw object address used as the
     lookup key.  */
  return ((const struct ptr_data *)p1)->obj == p2;
}
302
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects to be saved.  */
  struct ptr_data **ptrs;	/* All objects, sorted by new address.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
313
/* Callbacks for htab_traverse.  */

/* Account for one object with the PCH allocator and bump the object
   count so gt_pch_save can size its pointer array.  */
static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
  state->count++;
  return 1;
}
328
/* Assign one object its address within the PCH image and append it to
   the traversal state's pointer array.  */
static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
341
342 /* Callback for qsort. */
343
344 static int
345 compare_ptr_data (const void *p1_p, const void *p2_p)
346 {
347 const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
348 const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
349 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
350 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
351 }
352
/* Callbacks for note_ptr_fn.  */

/* Rewrite the pointer at PTR_P to the address its target will occupy
   in the PCH image.  Sentinels (NULL and (void *) 1) are left alone.  */
static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  /* Every non-sentinel pointer must have been registered via
     gt_pch_note_object, so the lookup cannot fail.  */
  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
370
/* Write out, after relocation, the pointers in TAB.  Sentinel values
   are written unchanged; all other pointers are translated to their
   PCH-image addresses via saving_htab.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      /* Sentinels pass through untranslated.  */
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = htab_find_with_hash (saving_htab, ptr,
					     POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can't write PCH file: %m");
	    }
	}
}
402
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset of the mmapped area.  */
  size_t size;		/* Size of the mmapped area in bytes.  */
  void *preferred_base;	/* Address the area was laid out for.  */
};
411
412 /* Write out the state of the compiler to F. */
413
414 void
415 gt_pch_save (FILE *f)
416 {
417 const struct ggc_root_tab *const *rt;
418 const struct ggc_root_tab *rti;
419 size_t i;
420 struct traversal_state state;
421 char *this_object = NULL;
422 size_t this_object_size = 0;
423 struct mmap_info mmi;
424 const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();
425
426 gt_pch_save_stringpool ();
427
428 saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);
429
430 for (rt = gt_ggc_rtab; *rt; rt++)
431 for (rti = *rt; rti->base != NULL; rti++)
432 for (i = 0; i < rti->nelt; i++)
433 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
434
435 for (rt = gt_pch_cache_rtab; *rt; rt++)
436 for (rti = *rt; rti->base != NULL; rti++)
437 for (i = 0; i < rti->nelt; i++)
438 (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));
439
440 /* Prepare the objects for writing, determine addresses and such. */
441 state.f = f;
442 state.d = init_ggc_pch();
443 state.count = 0;
444 htab_traverse (saving_htab, call_count, &state);
445
446 mmi.size = ggc_pch_total_size (state.d);
447
448 /* Try to arrange things so that no relocation is necessary, but
449 don't try very hard. On most platforms, this will always work,
450 and on the rest it's a lot of work to do better.
451 (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
452 HOST_HOOKS_GT_PCH_USE_ADDRESS.) */
453 mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
454
455 ggc_pch_this_base (state.d, mmi.preferred_base);
456
457 state.ptrs = XNEWVEC (struct ptr_data *, state.count);
458 state.ptrs_i = 0;
459 htab_traverse (saving_htab, call_alloc, &state);
460 qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
461
462 /* Write out all the scalar variables. */
463 for (rt = gt_pch_scalar_rtab; *rt; rt++)
464 for (rti = *rt; rti->base != NULL; rti++)
465 if (fwrite (rti->base, rti->stride, 1, f) != 1)
466 fatal_error ("can't write PCH file: %m");
467
468 /* Write out all the global pointers, after translation. */
469 write_pch_globals (gt_ggc_rtab, &state);
470 write_pch_globals (gt_pch_cache_rtab, &state);
471
472 /* Pad the PCH file so that the mmapped area starts on an allocation
473 granularity (usually page) boundary. */
474 {
475 long o;
476 o = ftell (state.f) + sizeof (mmi);
477 if (o == -1)
478 fatal_error ("can't get position in PCH file: %m");
479 mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
480 if (mmi.offset == mmap_offset_alignment)
481 mmi.offset = 0;
482 mmi.offset += o;
483 }
484 if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
485 fatal_error ("can't write PCH file: %m");
486 if (mmi.offset != 0
487 && fseek (state.f, mmi.offset, SEEK_SET) != 0)
488 fatal_error ("can't write padding to PCH file: %m");
489
490 ggc_pch_prepare_write (state.d, state.f);
491
492 /* Actually write out the objects. */
493 for (i = 0; i < state.count; i++)
494 {
495 if (this_object_size < state.ptrs[i]->size)
496 {
497 this_object_size = state.ptrs[i]->size;
498 this_object = xrealloc (this_object, this_object_size);
499 }
500 memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
501 if (state.ptrs[i]->reorder_fn != NULL)
502 state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
503 state.ptrs[i]->note_ptr_cookie,
504 relocate_ptrs, &state);
505 state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
506 state.ptrs[i]->note_ptr_cookie,
507 relocate_ptrs, &state);
508 ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
509 state.ptrs[i]->new_addr, state.ptrs[i]->size,
510 state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
511 if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
512 memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
513 }
514 ggc_pch_finish (state.d, state.f);
515 gt_pch_fixup_stringpool ();
516
517 free (state.ptrs);
518 htab_delete (saving_htab);
519 }
520
/* Read the state of the compiler back in from F.  Mirrors the layout
   written by gt_pch_save; on any mismatch or I/O failure this calls
   fatal_error and does not return.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  /* The host hook returns -1 on failure, 0 if the memory is allocated
     but must be filled by reading, and positive if it mapped the file
     itself.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
580
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
591
592 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
593 Allocate SIZE bytes with malloc. Return 0 if the address we got is the
594 same as base, indicating that the memory has been allocated but needs to
595 be read in from the file. Return -1 if the address differs, to relocation
596 of the PCH file would be required. */
597
598 int
599 default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
600 size_t offset ATTRIBUTE_UNUSED)
601 {
602 void *addr = xmalloc (size);
603 return (addr == base) - 1;
604 }
605
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
615
616 #if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

  if (ret == MAP_FAILED)
    return NULL;

  /* Release the probe mapping immediately; only the address matters.  */
  munmap (ret, size);
  return ret;
}
635
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  /* MAP_FIXED is deliberately not used: it would silently clobber any
     existing mapping at BASE.  */
  addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
660 #endif /* HAVE_MMAP_FILE */
661
/* Modify the bound based on rlimits.  Returns LIMIT clamped to the
   smallest applicable resource limit, or LIMIT unchanged when no
   usable rlimit is available.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
694
/* Heuristic to set a default for GGC_MIN_EXPAND.  Returns a percentage
   between 30 and 100 scaled by available memory.  */
int
ggc_min_expand_heuristic (void)
{
  /* Start from physical RAM, clamped by any resource limits.  */
  double expand = ggc_rlimit_bound (physmem_total ());

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB),
     yielding a lower bound of 30% and an upper bound of 100% (when
     RAM >= 1GB).  */
  expand = expand / (1024*1024*1024) * 70;
  expand = MIN (expand, 70) + 30;

  return expand;
}
713
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  Returns a size in
   kilobytes, between 4M and 128M, derived from physical RAM and any
   resource limits.  */
int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
752
/* Install heuristic defaults for the GC tuning parameters.  When GC
   checking or always-collect is enabled, the params are left alone so
   collections stay frequent.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}
761
#ifdef GATHER_STATISTICS

/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;		/* Source file of the allocation call site.  */
  int line;			/* Line number of the call site.  */
  const char *function;		/* Function containing the call site.  */
  int times;			/* Number of allocations from this site.  */
  size_t allocated;		/* Total bytes requested.  */
  size_t overhead;		/* Total allocator overhead in bytes.  */
  size_t freed;			/* Bytes explicitly freed via ggc_free.  */
  size_t collected;		/* Bytes reclaimed by garbage collection.  */
};

/* Hashtable used for statistics.  */
static htab_t loc_hash;
779
/* Hash table helpers functions.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = p;

  /* Combine the function pointer's hash with the line number;
     collisions are resolved by eq_descriptor.  */
  return htab_hash_pointer (d->function) | d->line;
}

static int
eq_descriptor (const void *p1, const void *p2)
{
  const struct loc_descriptor *const d = p1;
  const struct loc_descriptor *const d2 = p2;

  /* File and function names compare by pointer identity: they come
     from __FILE__/__FUNCTION__ literals.  */
  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}
798
/* Hashtable converting address of allocated field to loc descriptor.  */
static htab_t ptr_hash;

/* One live tracked allocation.  */
struct ptr_hash_entry
{
  void *ptr;			/* The allocated object.  */
  struct loc_descriptor *loc;	/* The call site that allocated it.  */
  size_t size;			/* Allocation size including overhead.  */
};

/* Hash table helpers functions.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = p;

  return htab_hash_pointer (d->ptr);
}

static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = p1;

  /* P2 is the raw pointer used as the lookup key, not an entry.  */
  return (p->ptr == p2);
}
824
/* Return descriptor for given call site, create new one if needed.  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  /* The table is created lazily on the first recorded allocation.  */
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  /* The last argument (1) requests insertion if not found.  */
  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1);
  if (*slot)
    return *slot;
  *slot = xcalloc (sizeof (**slot), 1);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}
847
/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).
   PTR identifies the allocation so later frees/collections can be
   credited back to this call site.  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
		     const char *name, int line, const char *function)
{
  struct loc_descriptor *loc = loc_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->size = allocated + overhead;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
  /* The same address must not be recorded twice without a free.  */
  gcc_assert (!*slot);
  *slot = p;

  loc->times++;
  loc->allocated+=allocated;
  loc->overhead+=overhead;
}
870
/* Helper function for prune_overhead_list.  See if SLOT is still marked and
   remove it from hashtable if it is not.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = *slot;
  if (!ggc_marked_p (p->ptr))
    {
      /* The object was garbage-collected: credit its bytes to the call
	 site's "collected" total and drop the tracking entry.  */
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}
885
/* After live values has been marked, walk all recorded pointers and see if
   they are still live.  NOTE(review): assumes at least one allocation
   was recorded (ptr_hash non-null) — confirm against callers.  */
void
ggc_prune_overhead_list (void)
{
  htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
}
893
894 /* Notice that the pointer has been freed. */
895 void
896 ggc_free_overhead (void *ptr)
897 {
898 PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
899 NO_INSERT);
900 struct ptr_hash_entry *p = *slot;
901 p->loc->freed += p->size;
902 htab_clear_slot (ptr_hash, slot);
903 free (p);
904 }
905
906 /* Helper for qsort; sort descriptors by amount of memory consumed. */
907 static int
908 final_cmp_statistic (const void *loc1, const void *loc2)
909 {
910 struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1;
911 struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2;
912 long diff;
913 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
914 (l2->allocated + l2->overhead - l2->freed));
915 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
916 }
917
918 /* Helper for qsort; sort descriptors by amount of memory consumed. */
919 static int
920 cmp_statistic (const void *loc1, const void *loc2)
921 {
922 struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1;
923 struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2;
924 long diff;
925
926 diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
927 (l2->allocated + l2->overhead - l2->freed - l2->collected));
928 if (diff)
929 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
930 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
931 (l2->allocated + l2->overhead - l2->freed));
932 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
933 }
934
/* Collect array of the descriptors from hashtable.  */
struct loc_descriptor **loc_array;

/* htab_traverse callback: append the descriptor in SLOT to loc_array;
   B points at the running element count.  */
static int
add_statistics (void **slot, void *b)
{
  int *counter = (int *) b;

  loc_array[(*counter)++] = (struct loc_descriptor *) *slot;
  return 1;
}
945
946 /* Dump per-site memory statistics. */
947 #endif
948 void
949 dump_ggc_loc_statistics (bool final ATTRIBUTE_UNUSED)
950 {
951 #ifdef GATHER_STATISTICS
952 int nentries = 0;
953 char s[4096];
954 size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
955 int i;
956
957 ggc_force_collect = true;
958 ggc_collect ();
959
960 loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements);
961 fprintf (stderr, "-------------------------------------------------------\n");
962 fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
963 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
964 fprintf (stderr, "-------------------------------------------------------\n");
965 htab_traverse (loc_hash, add_statistics, &nentries);
966 qsort (loc_array, nentries, sizeof (*loc_array),
967 final ? final_cmp_statistic : cmp_statistic);
968 for (i = 0; i < nentries; i++)
969 {
970 struct loc_descriptor *d = loc_array[i];
971 allocated += d->allocated;
972 times += d->times;
973 freed += d->freed;
974 collected += d->collected;
975 overhead += d->overhead;
976 }
977 for (i = 0; i < nentries; i++)
978 {
979 struct loc_descriptor *d = loc_array[i];
980 if (d->allocated)
981 {
982 const char *s1 = d->file;
983 const char *s2;
984 while ((s2 = strstr (s1, "gcc/")))
985 s1 = s2 + 4;
986 sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
987 s[48] = 0;
988 fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
989 (long)d->collected,
990 (d->collected) * 100.0 / collected,
991 (long)d->freed,
992 (d->freed) * 100.0 / freed,
993 (long)(d->allocated + d->overhead - d->freed - d->collected),
994 (d->allocated + d->overhead - d->freed - d->collected) * 100.0
995 / (allocated + overhead - freed - collected),
996 (long)d->overhead,
997 d->overhead * 100.0 / overhead,
998 (long)d->times);
999 }
1000 }
1001 fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
1002 "Total", (long)collected, (long)freed,
1003 (long)(allocated + overhead - freed - collected), (long)overhead,
1004 (long)times);
1005 fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
1006 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1007 fprintf (stderr, "-------------------------------------------------------\n");
1008 ggc_force_collect = false;
1009 #endif
1010 }