mf-runtime.c (__mfu_check): Respect ignore_reads configuration.
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6 adapted from libiberty.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
14
15 In addition to the permissions in the GNU General Public License, the
16 Free Software Foundation gives you unlimited permission to link the
17 compiled version of this file into combinations with other programs,
18 and to distribute those combinations without any restriction coming
19 from the use of this file. (The General Public License restrictions
20 do apply in other respects; for example, they cover modification of
21 the file, and distribution when not linked into a combine
22 executable.)
23
24 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25 WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 for more details.
28
29 You should have received a copy of the GNU General Public License
30 along with GCC; see the file COPYING. If not, write to the Free
31 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
32 02111-1307, USA. */
33
34 #include "config.h"
35
36 /* These attempt to coax various unix flavours to declare all our
37 needed tidbits in the system headers. */
38 #if !defined(__FreeBSD__) && !defined(__APPLE__)
39 #define _POSIX_SOURCE
40 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
41 #define _GNU_SOURCE
42 #define _XOPEN_SOURCE
43 #define _BSD_TYPES
44 #define __EXTENSIONS__
45 #define _ALL_SOURCE
46 #define _LARGE_FILE_API
47 #define _XOPEN_SOURCE_EXTENDED 1
48
49 #include <stdio.h>
50 #include <stdlib.h>
51 #include <sys/types.h>
52 #include <sys/time.h>
53 #include <time.h>
54 #include <unistd.h>
55 #ifdef HAVE_EXECINFO_H
56 #include <execinfo.h>
57 #endif
58 #ifdef HAVE_SIGNAL_H
59 #include <signal.h>
60 #endif
61 #include <assert.h>
62
63 #include <string.h>
64 #include <limits.h>
65 #include <sys/types.h>
66 #include <signal.h>
67 #include <errno.h>
68 #include <ctype.h>
69
70 #include "mf-runtime.h"
71 #include "mf-impl.h"
72
73
74 /* ------------------------------------------------------------------------ */
75 /* Splay-tree implementation. */
76
77 typedef uintptr_t mfsplay_tree_key;
78 typedef void *mfsplay_tree_value;
79
80 /* Forward declaration for a node in the tree. */
81 typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
82
83 /* The type of a function used to iterate over the tree. */
84 typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
85
86 /* The nodes in the splay tree. */
87 struct mfsplay_tree_node_s
88 {
89 /* Data. */
90 mfsplay_tree_key key;
91 mfsplay_tree_value value;
92 /* Children. */
93 mfsplay_tree_node left;
94 mfsplay_tree_node right;
95 /* XXX: The addition of a parent pointer may eliminate some recursion. */
96 };
97
98 /* The splay tree itself. */
99 struct mfsplay_tree_s
100 {
101 /* The root of the tree. */
102 mfsplay_tree_node root;
103
104 /* The last key value for which the tree has been splayed, but not
105 since modified. */
106 mfsplay_tree_key last_splayed_key;
107 int last_splayed_key_p;
108
109 /* Statistics. */
110 unsigned num_keys;
111
112 /* Traversal recursion control flags. */
113 unsigned max_depth;
114 unsigned depth;
115 unsigned rebalance_p;
116 };
117 typedef struct mfsplay_tree_s *mfsplay_tree;
118
119 static mfsplay_tree mfsplay_tree_new (void);
120 static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
121 static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
122 static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
123 static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
124 static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
125 static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
126 static void mfsplay_tree_rebalance (mfsplay_tree sp);
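
/* A minimal illustrative sketch (not part of the runtime) of how the
   API above is used by the object-tracking code later in this file:
   nodes are keyed by an object's low address. The key value and the
   OBJ pointer here are hypothetical placeholders. */
#if 0
void
example_splay_usage (void *obj)
{
  mfsplay_tree t = mfsplay_tree_new ();
  mfsplay_tree_key k = (mfsplay_tree_key) 0x1000;
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, k, (mfsplay_tree_value) obj); /* add a node */
  n = mfsplay_tree_lookup (t, k);                       /* exact-key find */
  if (n != NULL)
    obj = (void *) n->value;
  mfsplay_tree_remove (t, k);                           /* drop the node */
}
#endif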
127
128 /* ------------------------------------------------------------------------ */
129 /* Utility macros */
130
131 #define CTOR __attribute__ ((constructor))
132 #define DTOR __attribute__ ((destructor))
133
134
135 /* Codes to describe the context in which a violation occurs. */
136 #define __MF_VIOL_UNKNOWN 0
137 #define __MF_VIOL_READ 1
138 #define __MF_VIOL_WRITE 2
139 #define __MF_VIOL_REGISTER 3
140 #define __MF_VIOL_UNREGISTER 4
141 #define __MF_VIOL_WATCH 5
142
143 /* Protect against recursive calls. */
144 #define BEGIN_RECURSION_PROTECT() do { \
145 if (UNLIKELY (__mf_state == reentrant)) { \
146 write (2, "mf: erroneous reentrancy detected in `", 38); \
147 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
148 write (2, "'\n", 2); \
149 abort (); } \
150 __mf_state = reentrant; \
151 } while (0)
152
153 #define END_RECURSION_PROTECT() do { \
154 __mf_state = active; \
155 } while (0)
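
/* These two are used in matched pairs around the unlocked __mfu_*
   entry points; see e.g. __mf_check further below:

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();
     UNLOCKTH ();
*/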
156
157
158
159 /* ------------------------------------------------------------------------ */
160 /* Required globals. */
161
162 #define LOOKUP_CACHE_MASK_DFL 1023
163 #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
164 #define LOOKUP_CACHE_SHIFT_DFL 2
165
166 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
167 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
168 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
169 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
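
/* Illustrative note (the authoritative __MF_CACHE_INDEX macro lives in
   mf-impl.h): a pointer P hashes into this direct-mapped cache as
   roughly (((uintptr_t) P >> __mf_lc_shift) & __mf_lc_mask), so with
   the defaults above (shift 2, mask 1023) the bytes at 0x1000 and
   0x1004 map to adjacent slots. This is only a sketch of the idea. */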
170
171 struct __mf_options __mf_opts;
172
173 int __mf_starting_p = 1;
174 #ifndef LIBMUDFLAPTH
175 enum __mf_state_enum __mf_state = active;
176 #else
177 /* See __mf_state_perthread() in mf-hooks.c. */
178 #endif
179
180
181 #ifdef LIBMUDFLAPTH
182 pthread_mutex_t __mf_biglock =
183 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
184 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
185 #else
186 PTHREAD_MUTEX_INITIALIZER;
187 #endif
188 #endif
189
190 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
191 libmudflap.la (built without threading support) can diagnose whether
192 the application is linked with -lpthread. See __mf_usage() below. */
193 #if HAVE_PTHREAD_H
194 #ifdef _POSIX_THREADS
195 #pragma weak pthread_join
196 #else
197 #define pthread_join NULL
198 #endif
199 const void *threads_active_p = (void *) pthread_join;
200 #endif
201
202
203 /* ------------------------------------------------------------------------ */
204 /* stats-related globals. */
205
206 static unsigned long __mf_count_check;
207 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
208 static unsigned long __mf_count_register;
209 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
210 static unsigned long __mf_count_unregister;
211 static unsigned long __mf_total_unregister_size;
212 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
213 static unsigned long __mf_sigusr1_received;
214 static unsigned long __mf_sigusr1_handled;
215 /* not static */ unsigned long __mf_reentrancy;
216 #ifdef LIBMUDFLAPTH
217 /* not static */ unsigned long __mf_lock_contention;
218 #endif
219
220
221 /* ------------------------------------------------------------------------ */
222 /* mode-check-related globals. */
223
224 typedef struct __mf_object
225 {
226 uintptr_t low, high; /* __mf_register parameters */
227 const char *name;
228 char type; /* __MF_TYPE_something */
229 char watching_p; /* Trigger a VIOL_WATCH on access? */
230 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
231 unsigned write_count; /* Likewise for __mf_check/write. */
232 unsigned liveness; /* A measure of recent checking activity. */
233 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
234
235 uintptr_t alloc_pc;
236 struct timeval alloc_time;
237 char **alloc_backtrace;
238 size_t alloc_backtrace_size;
239 #ifdef LIBMUDFLAPTH
240 pthread_t alloc_thread;
241 #endif
242
243 int deallocated_p;
244 uintptr_t dealloc_pc;
245 struct timeval dealloc_time;
246 char **dealloc_backtrace;
247 size_t dealloc_backtrace_size;
248 #ifdef LIBMUDFLAPTH
249 pthread_t dealloc_thread;
250 #endif
251 } __mf_object_t;
252
253 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
254 /* Actually stored as static vars within lookup function below. */
255
256 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
257 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
258 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
259
260
261 /* ------------------------------------------------------------------------ */
262 /* Forward function declarations */
263
264 void __mf_init () CTOR;
265 static void __mf_sigusr1_respond ();
266 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
267 __mf_object_t **objs, unsigned max_objs);
268 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
269 __mf_object_t **objs, unsigned max_objs, int type);
270 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
271 __mf_object_t **objs, unsigned max_objs);
272 static void __mf_adapt_cache ();
273 static void __mf_describe_object (__mf_object_t *obj);
274 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
275 static mfsplay_tree __mf_object_tree (int type);
276 static void __mf_link_object (__mf_object_t *node);
277 static void __mf_unlink_object (__mf_object_t *node);
278
279
280 /* ------------------------------------------------------------------------ */
281 /* Configuration engine */
282
283 static void
284 __mf_set_default_options ()
285 {
286 memset (& __mf_opts, 0, sizeof (__mf_opts));
287
288 __mf_opts.adapt_cache = 1000003;
289 __mf_opts.abbreviate = 1;
290 __mf_opts.verbose_violations = 1;
291 __mf_opts.free_queue_length = 4;
292 __mf_opts.persistent_count = 100;
293 __mf_opts.crumple_zone = 32;
294 __mf_opts.backtrace = 4;
295 __mf_opts.timestamps = 1;
296 __mf_opts.mudflap_mode = mode_check;
297 __mf_opts.violation_mode = viol_nop;
298 __mf_opts.heur_std_data = 1;
299 #ifdef LIBMUDFLAPTH
300 __mf_opts.thread_stack = 0;
301 #endif
302 }
303
304 static struct option
305 {
306 char *name;
307 char *description;
308 enum
309 {
310 set_option,
311 read_integer_option,
312 } type;
313 unsigned value;
314 unsigned *target;
315 }
316 options [] =
317 {
318 {"mode-nop",
319 "mudflaps do nothing",
320 set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
321 {"mode-populate",
322 "mudflaps populate object tree",
323 set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
324 {"mode-check",
325 "mudflaps check for memory violations",
326 set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
327 {"mode-violate",
328 "mudflaps always cause violations (diagnostic)",
329 set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},
330
331 {"viol-nop",
332 "violations do not change program execution",
333 set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
334 {"viol-abort",
335 "violations cause a call to abort()",
336 set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
337 {"viol-segv",
338 "violations are promoted to SIGSEGV signals",
339 set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
340 {"viol-gdb",
341 "violations fork a gdb process attached to current program",
342 set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
343 {"trace-calls",
344 "trace calls to mudflap runtime library",
345 set_option, 1, &__mf_opts.trace_mf_calls},
346 {"verbose-trace",
347 "trace internal events within mudflap runtime library",
348 set_option, 1, &__mf_opts.verbose_trace},
349 {"collect-stats",
350 "collect statistics on mudflap's operation",
351 set_option, 1, &__mf_opts.collect_stats},
352 #ifdef SIGUSR1
353 {"sigusr1-report",
354 "print report upon SIGUSR1",
355 set_option, 1, &__mf_opts.sigusr1_report},
356 #endif
357 {"internal-checking",
358 "perform more expensive internal checking",
359 set_option, 1, &__mf_opts.internal_checking},
360 {"print-leaks",
361 "print any memory leaks at program shutdown",
362 set_option, 1, &__mf_opts.print_leaks},
363 {"check-initialization",
364 "detect uninitialized object reads",
365 set_option, 1, &__mf_opts.check_initialization},
366 {"verbose-violations",
367 "print verbose messages when memory violations occur",
368 set_option, 1, &__mf_opts.verbose_violations},
369 {"abbreviate",
370 "abbreviate repetitive listings",
371 set_option, 1, &__mf_opts.abbreviate},
372 {"timestamps",
373 "track object lifetime timestamps",
374 set_option, 1, &__mf_opts.timestamps},
375 {"ignore-reads",
376 "ignore read accesses - assume okay",
377 set_option, 1, &__mf_opts.ignore_reads},
378 {"wipe-stack",
379 "wipe stack objects at unwind",
380 set_option, 1, &__mf_opts.wipe_stack},
381 {"wipe-heap",
382 "wipe heap objects at free",
383 set_option, 1, &__mf_opts.wipe_heap},
384 {"heur-proc-map",
385 "support /proc/self/map heuristics",
386 set_option, 1, &__mf_opts.heur_proc_map},
387 {"heur-stack-bound",
388 "enable a simple upper stack bound heuristic",
389 set_option, 1, &__mf_opts.heur_stack_bound},
390 {"heur-start-end",
391 "support _start.._end heuristics",
392 set_option, 1, &__mf_opts.heur_start_end},
393 {"heur-stdlib",
394 "register standard library data (argv, errno, stdin, ...)",
395 set_option, 1, &__mf_opts.heur_std_data},
396 {"free-queue-length",
397 "queue N deferred free() calls before performing them",
398 read_integer_option, 0, &__mf_opts.free_queue_length},
399 {"persistent-count",
400 "keep a history of N unregistered regions",
401 read_integer_option, 0, &__mf_opts.persistent_count},
402 {"crumple-zone",
403 "surround allocations with crumple zones of N bytes",
404 read_integer_option, 0, &__mf_opts.crumple_zone},
405 /* XXX: not type-safe.
406 {"lc-mask",
407 "set lookup cache size mask to N (2**M - 1)",
408 read_integer_option, 0, (int *)(&__mf_lc_mask)},
409 {"lc-shift",
410 "set lookup cache pointer shift",
411 read_integer_option, 0, (int *)(&__mf_lc_shift)},
412 */
413 {"lc-adapt",
414 "adapt mask/shift parameters after N cache misses",
415 read_integer_option, 1, &__mf_opts.adapt_cache},
416 {"backtrace",
417 "keep an N-level stack trace of each call context",
418 read_integer_option, 0, &__mf_opts.backtrace},
419 #ifdef LIBMUDFLAPTH
420 {"thread-stack",
421 "override thread stacks allocation: N kB",
422 read_integer_option, 0, &__mf_opts.thread_stack},
423 #endif
424 {0, 0, set_option, 0, NULL}
425 };
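
/* An illustrative setting built from the table above (the particular
   values here are arbitrary examples):

     $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-timestamps'

   Boolean options toggle with -name / -no-name; integer options take
   -name=N. See __mfu_set_options() below for the parser. */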
426
427 static void
428 __mf_usage ()
429 {
430 struct option *opt;
431
432 fprintf (stderr,
433 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
434 "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
435 "\n"
436 "The mudflap code can be controlled by an environment variable:\n"
437 "\n"
438 "$ export MUDFLAP_OPTIONS='<options>'\n"
439 "$ <mudflapped_program>\n"
440 "\n"
441 "where <options> is a space-separated list of \n"
442 "any of the following options. Use `-no-OPTION' to disable options.\n"
443 "\n",
444 #if HAVE_PTHREAD_H
445 (threads_active_p ? "multi-threaded " : "single-threaded "),
446 #else
447 "",
448 #endif
449 #if LIBMUDFLAPTH
450 "thread-aware "
451 #else
452 "thread-unaware "
453 #endif
454 );
455 /* XXX: The multi-threaded thread-unaware combination is bad. */
456
457 for (opt = options; opt->name; opt++)
458 {
459 int default_p = (opt->value == * opt->target);
460
461 switch (opt->type)
462 {
463 char buf[128];
464 case set_option:
465 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
466 if (default_p)
467 fprintf (stderr, " [active]\n");
468 else
469 fprintf (stderr, "\n");
470 break;
471 case read_integer_option:
472 /* Format "name=N", bounding the copy to fit BUF with its NUL. */
473 sprintf (buf, "%.124s=N", opt->name);
474 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
475 fprintf (stderr, " [%u]\n", * opt->target);
476 break;
477 default: abort();
478 }
479 }
480
481 fprintf (stderr, "\n");
482 }
483
484
485 int
486 __mf_set_options (const char *optstr)
487 {
488 int rc;
489 LOCKTH ();
490 BEGIN_RECURSION_PROTECT ();
491 rc = __mfu_set_options (optstr);
492 /* XXX: It's not really that easy. A change to a bunch of parameters
493 can require updating auxiliary state or risk crashing:
494 free_queue_length, crumple_zone ... */
495 END_RECURSION_PROTECT ();
496 UNLOCKTH ();
497 return rc;
498 }
499
500
501 int
502 __mfu_set_options (const char *optstr)
503 {
504 struct option *opts = 0;
505 char *nxt = 0;
506 long tmp = 0;
507 int rc = 0;
508 const char *saved_optstr = optstr;
509
510 /* XXX: bounds-check for optstr! */
511
512 while (*optstr)
513 {
514 switch (*optstr) {
515 case ' ':
516 case '\t':
517 case '\n':
518 optstr++;
519 break;
520
521 case '-':
522 if (*(optstr+1))
523 {
524 int negate = 0;
525 optstr++;
526
527 if (*optstr == '?' ||
528 strncmp (optstr, "help", 4) == 0)
529 {
530 /* Caller will print help and exit. */
531 return -1;
532 }
533
534 if (strncmp (optstr, "no-", 3) == 0)
535 {
536 negate = 1;
537 optstr = & optstr[3];
538 }
539
540 for (opts = options; opts->name; opts++)
541 {
542 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
543 {
544 optstr += strlen (opts->name);
545 assert (opts->target);
546 switch (opts->type)
547 {
548 case set_option:
549 if (negate)
550 *(opts->target) = 0;
551 else
552 *(opts->target) = opts->value;
553 break;
554 case read_integer_option:
555 if (! negate && (*optstr == '=' && *(optstr+1)))
556 {
557 optstr++;
558 tmp = strtol (optstr, &nxt, 10);
559 if ((optstr != nxt) && (tmp != LONG_MAX))
560 {
561 optstr = nxt;
562 *(opts->target) = (int)tmp;
563 }
564 }
565 else if (negate)
566 * opts->target = 0;
567 break;
568 }
569 }
570 }
571 }
572 break;
573
574 default:
575 fprintf (stderr,
576 "warning: unrecognized string '%s' in mudflap options\n",
577 optstr);
578 optstr += strlen (optstr);
579 rc = -1;
580 break;
581 }
582 }
583
584 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
585 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
586 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
587
588 /* Clear the lookup cache, in case the parameters got changed. */
589 /* XXX: race */
590 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
591 /* void slot 0 */
592 __mf_lookup_cache[0].low = MAXPTR;
593
594 TRACE ("set options from `%s'\n", saved_optstr);
595
596 /* Call this unconditionally, in case -sigusr1-report was toggled. */
597 __mf_sigusr1_respond ();
598
599 return rc;
600 }
601
602
603 #ifdef PIC
604
605 void
606 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
607 {
608 char *err;
609
610 assert (e);
611 if (e->pointer) return;
612
613 #if HAVE_DLVSYM
614 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
615 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
616 else
617 #endif
618 e->pointer = dlsym (RTLD_NEXT, e->name);
619
620 err = dlerror ();
621
622 if (err)
623 {
624 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
625 e->name, err);
626 abort ();
627 }
628 if (! e->pointer)
629 {
630 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
631 abort ();
632 }
633 }
634
635
636 static void
637 __mf_resolve_dynamics ()
638 {
639 int i;
640 for (i = 0; i < dyn_INITRESOLVE; i++)
641 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
642 }
643
644
645 /* NB: order must match enums in mf-impl.h */
646 struct __mf_dynamic_entry __mf_dynamic [] =
647 {
648 {NULL, "calloc", NULL},
649 {NULL, "free", NULL},
650 {NULL, "malloc", NULL},
651 {NULL, "mmap", NULL},
652 {NULL, "munmap", NULL},
653 {NULL, "realloc", NULL},
654 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
655 #ifdef LIBMUDFLAPTH
656 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
657 {NULL, "pthread_join", NULL},
658 {NULL, "pthread_exit", NULL}
659 #endif
660 };
661
662 #endif /* PIC */
663
664
665
666 /* ------------------------------------------------------------------------ */
667
668 /* Lookup & manage automatic initialization of the five or so splay trees. */
669 static mfsplay_tree
670 __mf_object_tree (int type)
671 {
672 static mfsplay_tree trees [__MF_TYPE_MAX+1];
673 assert (type >= 0 && type <= __MF_TYPE_MAX);
674 if (UNLIKELY (trees[type] == NULL))
675 trees[type] = mfsplay_tree_new ();
676 return trees[type];
677 }
678
679
680 /* not static */ void
681 __mf_init ()
682 {
683 char *ov = 0;
684
685 /* Return if initialization has already been done. */
686 if (LIKELY (__mf_starting_p == 0))
687 return;
688
689 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
690 #ifdef PIC
691 __mf_resolve_dynamics ();
692 #endif
693 __mf_starting_p = 0;
694
695 __mf_set_default_options ();
696
697 ov = getenv ("MUDFLAP_OPTIONS");
698 if (ov)
699 {
700 int rc = __mfu_set_options (ov);
701 if (rc < 0)
702 {
703 __mf_usage ();
704 exit (1);
705 }
706 }
707
708 /* Initialize to a non-zero description epoch. */
709 __mf_describe_object (NULL);
710
711 #define REG_RESERVED(obj) \
712 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
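
/* For example, REG_RESERVED (__mf_lc_mask) below expands to
   __mf_register (& __mf_lc_mask, sizeof (__mf_lc_mask),
   __MF_TYPE_NOACCESS, "__mf_lc_mask"). */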
713
714 REG_RESERVED (__mf_lookup_cache);
715 REG_RESERVED (__mf_lc_mask);
716 REG_RESERVED (__mf_lc_shift);
717 /* XXX: others of our statics? */
718
719 /* Prevent access to *NULL. */
720 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
721 __mf_lookup_cache[0].low = (uintptr_t) -1;
722 }
723
724
725
726 int
727 __wrap_main (int argc, char* argv[])
728 {
729 extern char **environ;
730 extern int main ();
731 extern int __real_main ();
732 static int been_here = 0;
733
734 if (__mf_opts.heur_std_data && ! been_here)
735 {
736 unsigned i;
737
738 been_here = 1;
739 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
740 for (i=0; i<argc; i++)
741 {
742 unsigned j = strlen (argv[i]);
743 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
744 }
745
746 for (i=0; ; i++)
747 {
748 char *e = environ[i];
749 unsigned j;
750 if (e == NULL) break;
751 j = strlen (environ[i]);
752 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
753 }
754 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
755
756 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
757
758 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
759 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
760 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
761
762 /* Make some effort to register ctype.h static arrays. */
763 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
764 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
765 with in mf-hooks2.c. */
766 }
767
768 #ifdef PIC
769 return main (argc, argv, environ);
770 #else
771 return __real_main (argc, argv, environ);
772 #endif
773 }
774
775
776
777 extern void __mf_fini () DTOR;
778 void __mf_fini ()
779 {
780 TRACE ("__mf_fini\n");
781 __mfu_report ();
782
783 #ifndef PIC
784 /* Since we didn't populate the tree for allocations in constructors
785 before __mf_init, we cannot check destructors after __mf_fini. */
786 __mf_opts.mudflap_mode = mode_nop;
787 #endif
788 }
789
790
791
792 /* ------------------------------------------------------------------------ */
793 /* __mf_check */
794
795 void __mf_check (void *ptr, size_t sz, int type, const char *location)
796 {
797 LOCKTH ();
798 BEGIN_RECURSION_PROTECT ();
799 __mfu_check (ptr, sz, type, location);
800 END_RECURSION_PROTECT ();
801 UNLOCKTH ();
802 }
803
804
805 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
806 {
807 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
808 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
809 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
810 uintptr_t ptr_low = (uintptr_t) ptr;
811 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
812 struct __mf_cache old_entry = *entry;
813
814 if (UNLIKELY (__mf_opts.sigusr1_report))
815 __mf_sigusr1_respond ();
816 if (UNLIKELY (__mf_opts.ignore_reads && type == __MF_CHECK_READ))
817 return;
818
819 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
820 ptr, entry_idx, (unsigned long)sz,
821 (type == __MF_CHECK_READ ? "read" : "write"), location);
822
823 switch (__mf_opts.mudflap_mode)
824 {
825 case mode_nop:
826 /* It is tempting to poison the cache here similarly to
827 mode_populate. However that eliminates a valuable
828 distinction between these two modes. mode_nop is useful to
829 let a user count & trace every single check / registration
830 call. mode_populate is useful to let a program run fast
831 while unchecked.
832 */
833 judgement = 1;
834 break;
835
836 case mode_populate:
837 entry->low = ptr_low;
838 entry->high = ptr_high;
839 judgement = 1;
840 break;
841
842 case mode_check:
843 {
844 unsigned heuristics = 0;
845
846 /* Advance aging/adaptation counters. */
847 static unsigned adapt_count;
848 adapt_count ++;
849 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
850 adapt_count > __mf_opts.adapt_cache))
851 {
852 adapt_count = 0;
853 __mf_adapt_cache ();
854 }
855
856 /* Looping only occurs if heuristics were triggered. */
857 while (judgement == 0)
858 {
859 DECLARE (void, free, void *p);
860 __mf_object_t* ovr_obj[1];
861 unsigned obj_count;
862 __mf_object_t** all_ovr_obj = NULL;
863 __mf_object_t** dealloc_me = NULL;
864 unsigned i;
865
866 /* Find all overlapping objects. Be optimistic that there is just one. */
867 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
868 if (UNLIKELY (obj_count > 1))
869 {
870 /* Allocate a real buffer and do the search again. */
871 DECLARE (void *, malloc, size_t c);
872 unsigned n;
873 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
874 obj_count));
875 if (all_ovr_obj == NULL) abort ();
876 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
877 assert (n == obj_count);
878 dealloc_me = all_ovr_obj;
879 }
880 else
881 {
882 all_ovr_obj = ovr_obj;
883 dealloc_me = NULL;
884 }
885
886 /* Update object statistics. */
887 for (i = 0; i < obj_count; i++)
888 {
889 __mf_object_t *obj = all_ovr_obj[i];
890 assert (obj != NULL);
891 if (type == __MF_CHECK_READ)
892 obj->read_count ++;
893 else
894 obj->write_count ++;
895 obj->liveness ++;
896 }
897
898 /* Iterate over the various objects. There are a number of special cases. */
899 for (i = 0; i < obj_count; i++)
900 {
901 __mf_object_t *obj = all_ovr_obj[i];
902
903 /* Any __MF_TYPE_NOACCESS hit is bad. */
904 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
905 judgement = -1;
906
907 /* Any object with a watch flag is bad. */
908 if (UNLIKELY (obj->watching_p))
909 judgement = -2; /* trigger VIOL_WATCH */
910
911 /* A read from an uninitialized object is bad. */
912 if (UNLIKELY (__mf_opts.check_initialization
913 /* reading */
914 && type == __MF_CHECK_READ
915 /* not written */
916 && obj->write_count == 0
917 /* uninitialized (heap) */
918 && obj->type == __MF_TYPE_HEAP))
919 judgement = -1;
920 }
921
922 /* We now know that the access spans only valid objects (one or more). */
923 if (LIKELY (judgement >= 0))
924 for (i = 0; i < obj_count; i++)
925 {
926 __mf_object_t *obj = all_ovr_obj[i];
927
928 /* Is this access entirely contained within this object? */
929 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
930 {
931 /* Valid access. */
932 entry->low = obj->low;
933 entry->high = obj->high;
934 judgement = 1;
935 }
936 }
937
938 /* This access runs off the end of one valid object. That
939 could be okay, if other valid objects fill in all the
940 holes. We allow this only for HEAP and GUESS type
941 objects. Accesses to STATIC and STACK variables
942 should not be allowed to span. */
943 if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
944 {
945 unsigned uncovered = 0;
946 for (i = 0; i < obj_count; i++)
947 {
948 __mf_object_t *obj = all_ovr_obj[i];
949 int j, uncovered_low_p, uncovered_high_p;
950 uintptr_t ptr_lower, ptr_higher;
951
952 uncovered_low_p = ptr_low < obj->low;
953 ptr_lower = CLAMPSUB (obj->low, 1);
954 uncovered_high_p = ptr_high > obj->high;
955 ptr_higher = CLAMPADD (obj->high, 1);
956
957 for (j = 0; j < obj_count; j++)
958 {
959 __mf_object_t *obj2 = all_ovr_obj[j];
960
961 if (i == j) continue;
962
963 /* Filter out objects that cannot be spanned across. */
964 if (obj2->type == __MF_TYPE_STACK
965 || obj2->type == __MF_TYPE_STATIC)
966 continue;
967
968 /* Consider a side "covered" if obj2 includes
969 the next byte on that side. */
970 if (uncovered_low_p
971 && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
972 uncovered_low_p = 0;
973 if (uncovered_high_p
974 && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
975 uncovered_high_p = 0;
976 }
977
978 if (uncovered_low_p || uncovered_high_p)
979 uncovered ++;
980 }
981
982 /* Success if no overlapping objects are uncovered. */
983 if (uncovered == 0)
984 judgement = 1;
985 }
986
987
988 if (dealloc_me != NULL)
989 CALL_REAL (free, dealloc_me);
990
991 /* If the judgment is still unknown at this stage, loop
992 around at most one more time. */
993 if (judgement == 0)
994 {
995 if (heuristics++ < 2) /* XXX parametrize this number? */
996 judgement = __mf_heuristic_check (ptr_low, ptr_high);
997 else
998 judgement = -1;
999 }
1000 }
1001
1002 }
1003 break;
1004
1005 case mode_violate:
1006 judgement = -1;
1007 break;
1008 }
1009
1010 if (__mf_opts.collect_stats)
1011 {
1012 __mf_count_check ++;
1013
1014 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1015 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1016 __mf_lookup_cache_reusecount [entry_idx] ++;
1017 }
1018
1019 if (UNLIKELY (judgement < 0))
1020 __mf_violation (ptr, sz,
1021 (uintptr_t) __builtin_return_address (0), location,
1022 ((judgement == -1) ?
1023 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1024 __MF_VIOL_WATCH));
1025 }
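
/* Illustrative context, not code generated in this file: the
   compiler's -fmudflap instrumentation is assumed to lower a checked
   access such as

     tmp = a[i];

   into approximately

     __mf_check (& a[i], sizeof (a[i]), __MF_CHECK_READ, "foo.c:12 (bar)");
     tmp = a[i];

   so the cache probe and tree search above run once per instrumented
   access. */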
1026
1027
1028 static __mf_object_t *
1029 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1030 const char *name, uintptr_t pc)
1031 {
1032 DECLARE (void *, calloc, size_t c, size_t n);
1033
1034 __mf_object_t *new_obj;
1035 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1036 new_obj->low = low;
1037 new_obj->high = high;
1038 new_obj->type = type;
1039 new_obj->name = name;
1040 new_obj->alloc_pc = pc;
1041 #if HAVE_GETTIMEOFDAY
1042 if (__mf_opts.timestamps)
1043 gettimeofday (& new_obj->alloc_time, NULL);
1044 #endif
1045 #if LIBMUDFLAPTH
1046 new_obj->alloc_thread = pthread_self ();
1047 #endif
1048
1049 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1050 new_obj->alloc_backtrace_size =
1051 __mf_backtrace (& new_obj->alloc_backtrace,
1052 (void *) pc, 2);
1053
1054 __mf_link_object (new_obj);
1055 return new_obj;
1056 }
1057
1058
1059 static void
1060 __mf_uncache_object (__mf_object_t *old_obj)
1061 {
1062 /* Remove any low/high pointers for this object from the lookup cache. */
1063
1064 /* Can it possibly exist in the cache? */
1065 if (LIKELY (old_obj->read_count + old_obj->write_count))
1066 {
1067 uintptr_t low = old_obj->low;
1068 uintptr_t high = old_obj->high;
1069 unsigned idx_low = __MF_CACHE_INDEX (low);
1070 unsigned idx_high = __MF_CACHE_INDEX (high);
1071 unsigned i;
1072 for (i = idx_low; i <= idx_high; i++)
1073 {
1074 struct __mf_cache *entry = & __mf_lookup_cache [i];
1075 /* NB: the "||" in the following test permits this code to
1076 tolerate the situation introduced by __mf_check over
1077 contiguous objects, where a cache entry spans several
1078 objects. */
1079 if (entry->low == low || entry->high == high)
1080 {
1081 entry->low = MAXPTR;
1082 entry->high = MINPTR;
1083 }
1084 }
1085 }
1086 }
1087
1088
1089 void
1090 __mf_register (void *ptr, size_t sz, int type, const char *name)
1091 {
1092 LOCKTH ();
1093 BEGIN_RECURSION_PROTECT ();
1094 __mfu_register (ptr, sz, type, name);
1095 END_RECURSION_PROTECT ();
1096 UNLOCKTH ();
1097 }
1098
1099
1100 void
1101 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1102 {
1103 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1104 ptr, (unsigned long) sz, type, name ? name : "");
1105
1106 if (__mf_opts.collect_stats)
1107 {
1108 __mf_count_register ++;
1109 __mf_total_register_size [(type < 0) ? 0 :
1110 (type > __MF_TYPE_MAX) ? 0 :
1111 type] += sz;
1112 }
1113
1114 if (UNLIKELY (__mf_opts.sigusr1_report))
1115 __mf_sigusr1_respond ();
1116
1117 switch (__mf_opts.mudflap_mode)
1118 {
1119 case mode_nop:
1120 break;
1121
1122 case mode_violate:
1123 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1124 __MF_VIOL_REGISTER);
1125 break;
1126
1127 case mode_populate:
1128 /* Clear the cache. */
1129 /* XXX: why the entire cache? */
1130 /* XXX: race */
1131 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1132 /* void slot 0 */
1133 __mf_lookup_cache[0].low = MAXPTR;
1134 break;
1135
1136 case mode_check:
1137 {
1138 __mf_object_t *ovr_objs [1];
1139 unsigned num_overlapping_objs;
1140 uintptr_t low = (uintptr_t) ptr;
1141 uintptr_t high = CLAMPSZ (ptr, sz);
1142 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1143
1144 /* Treat unknown size indication as 1. */
1145 if (UNLIKELY (sz == 0)) sz = 1;
1146
1147 /* Look only for objects of the same type. This permits e.g. a registration
1148 of a STATIC overlapping with a GUESS, or a HEAP with a NOACCESS. Harmful
1149 overlaps will nevertheless be detected later, at __mf_check time. */
1150 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1151
1152 /* Handle overlaps. */
1153 if (UNLIKELY (num_overlapping_objs > 0))
1154 {
1155 __mf_object_t *ovr_obj = ovr_objs[0];
1156
1157 /* Accept certain specific duplication pairs. */
1158 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1159 && ovr_obj->low == low
1160 && ovr_obj->high == high
1161 && ovr_obj->type == type)
1162 {
1163 /* Duplicate registration for static objects may come
1164 from distinct compilation units. */
1165 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1166 (void *) low, (void *) high,
1167 (ovr_obj->name ? ovr_obj->name : ""));
1168 break;
1169 }
1170
1171 /* Alas, a genuine violation. */
1172 else
1173 {
1174 /* Two or more *real* mappings here. */
1175 __mf_violation ((void *) ptr, sz,
1176 (uintptr_t) __builtin_return_address (0), NULL,
1177 __MF_VIOL_REGISTER);
1178 }
1179 }
1180 else /* No overlapping objects: AOK. */
1181 __mf_insert_new_object (low, high, type, name, pc);
1182
1183 /* We could conceivably call __mf_check() here to prime the cache,
1184 but then the read_count/write_count fields would not be reliable. */
1185 break;
1186 }
1187 } /* end switch (__mf_opts.mudflap_mode) */
1188 }
1189
1190
1191 void
1192 __mf_unregister (void *ptr, size_t sz, int type)
1193 {
1194 LOCKTH ();
1195 BEGIN_RECURSION_PROTECT ();
1196 __mfu_unregister (ptr, sz, type);
1197 END_RECURSION_PROTECT ();
1198 UNLOCKTH ();
1199 }
1200
1201
1202 void
1203 __mfu_unregister (void *ptr, size_t sz, int type)
1204 {
1205 DECLARE (void, free, void *ptr);
1206
1207 if (UNLIKELY (__mf_opts.sigusr1_report))
1208 __mf_sigusr1_respond ();
1209
1210 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1211
1212 switch (__mf_opts.mudflap_mode)
1213 {
1214 case mode_nop:
1215 break;
1216
1217 case mode_violate:
1218 __mf_violation (ptr, sz,
1219 (uintptr_t) __builtin_return_address (0), NULL,
1220 __MF_VIOL_UNREGISTER);
1221 break;
1222
1223 case mode_populate:
1224 /* Clear the cache. */
1225 /* XXX: race */
1226 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1227 /* void slot 0 */
1228 __mf_lookup_cache[0].low = MAXPTR;
1229 break;
1230
1231 case mode_check:
1232 {
1233 __mf_object_t *old_obj = NULL;
1234 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1235 __mf_object_t *objs[1] = {NULL};
1236 unsigned num_overlapping_objs;
1237
1238 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1239 CLAMPSZ (ptr, sz), objs, 1, type);
1240
1241 /* Special case for HEAP_I - see the free & realloc hooks. They don't
1242 know whether the input region was HEAP or HEAP_I before
1243 unmapping it. Here we give HEAP a try in case HEAP_I
1244 failed. */
1245 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1246 {
1247 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1248 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1249 }
1250
1251 old_obj = objs[0];
1252 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1253 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1254 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1255 {
1256 __mf_violation (ptr, sz,
1257 (uintptr_t) __builtin_return_address (0), NULL,
1258 __MF_VIOL_UNREGISTER);
1259 break;
1260 }
1261
1262 __mf_unlink_object (old_obj);
1263 __mf_uncache_object (old_obj);
1264
1265 /* Wipe buffer contents if desired. */
1266 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1267 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1268 || old_obj->type == __MF_TYPE_HEAP_I)))
1269 {
1270 memset ((void *) old_obj->low,
1271 0,
1272 (size_t) (old_obj->high - old_obj->low + 1));
1273 }
1274
1275 /* Manage the object cemetery. */
1276 if (__mf_opts.persistent_count > 0 &&
1277 old_obj->type >= 0 &&
1278 old_obj->type <= __MF_TYPE_MAX_CEM)
1279 {
1280 old_obj->deallocated_p = 1;
1281 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1282 #if HAVE_GETTIMEOFDAY
1283 if (__mf_opts.timestamps)
1284 gettimeofday (& old_obj->dealloc_time, NULL);
1285 #endif
1286 #ifdef LIBMUDFLAPTH
1287 old_obj->dealloc_thread = pthread_self ();
1288 #endif
1289
1290 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1291 old_obj->dealloc_backtrace_size =
1292 __mf_backtrace (& old_obj->dealloc_backtrace,
1293 NULL, 2);
1294
1295 /* Encourage this object to be displayed again in the current epoch. */
1296 old_obj->description_epoch --;
1297
1298 /* Put this object into the cemetery. This may require this plot to
1299 be recycled, and the previous resident to be designated del_obj. */
1300 {
1301 unsigned row = old_obj->type;
1302 unsigned plot = __mf_object_dead_head [row];
1303
1304 del_obj = __mf_object_cemetary [row][plot];
1305 __mf_object_cemetary [row][plot] = old_obj;
1306 plot ++;
1307 if (plot == __mf_opts.persistent_count) plot = 0;
1308 __mf_object_dead_head [row] = plot;
1309 }
1310 }
1311 else
1312 del_obj = old_obj;
1313
1314 if (__mf_opts.print_leaks)
1315 {
1316 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1317 (old_obj->type == __MF_TYPE_HEAP
1318 || old_obj->type == __MF_TYPE_HEAP_I))
1319 {
1320 fprintf (stderr,
1321 "*******\n"
1322 "mudflap warning: unaccessed registered object:\n");
1323 __mf_describe_object (old_obj);
1324 }
1325 }
1326
1327 if (del_obj != NULL) /* May or may not equal old_obj. */
1328 {
1329 if (__mf_opts.backtrace > 0)
1330 {
1331 CALL_REAL(free, del_obj->alloc_backtrace);
1332 if (__mf_opts.persistent_count > 0)
1333 {
1334 CALL_REAL(free, del_obj->dealloc_backtrace);
1335 }
1336 }
1337 CALL_REAL(free, del_obj);
1338 }
1339
1340 break;
1341 }
1342 } /* end switch (__mf_opts.mudflap_mode) */
1343
1344
1345 if (__mf_opts.collect_stats)
1346 {
1347 __mf_count_unregister ++;
1348 __mf_total_unregister_size += sz;
1349 }
1350 }
1351
1352
1353
1354 struct tree_stats
1355 {
1356 unsigned obj_count;
1357 unsigned long total_size;
1358 unsigned live_obj_count;
1359 double total_weight;
1360 double weighted_size;
1361 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1362 };
1363
1364
1365
1366 static int
1367 __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1368 {
1369 __mf_object_t *obj = (__mf_object_t *) n->value;
1370 struct tree_stats *s = (struct tree_stats *) param;
1371
1372 assert (obj != NULL && s != NULL);
1373
1374 /* Exclude never-accessed objects. */
1375 if (obj->read_count + obj->write_count)
1376 {
1377 s->obj_count ++;
1378 s->total_size += (obj->high - obj->low + 1);
1379
1380 if (obj->liveness)
1381 {
1382 unsigned i;
1383 uintptr_t addr;
1384
1385 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1386 (void *) obj->low, obj->liveness, obj->name); */
1387
1388 s->live_obj_count ++;
1389 s->total_weight += (double) obj->liveness;
1390 s->weighted_size +=
1391 (double) (obj->high - obj->low + 1) *
1392 (double) obj->liveness;
1393
1394 addr = obj->low;
1395 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1396 {
1397 unsigned bit = addr & 1;
1398 s->weighted_address_bits[i][bit] += obj->liveness;
1399 addr = addr >> 1;
1400 }
1401
1402 /* Age the liveness value. */
1403 obj->liveness >>= 1;
1404 }
1405 }
1406
1407 return 0;
1408 }
1409
1410
1411 static void
1412 __mf_adapt_cache ()
1413 {
1414 struct tree_stats s;
1415 uintptr_t new_mask = 0;
1416 unsigned char new_shift;
1417 float cache_utilization;
1418 float max_value;
1419 static float smoothed_new_shift = -1.0;
1420 unsigned i;
1421
1422 memset (&s, 0, sizeof (s));
1423
1424 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1425 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1426 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1427 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1428 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1429
1430 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1431 empty tree. Just leave the cache alone in such cases, rather
1432 than risk dying by division-by-zero. */
1433 if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
1434 return;
1435
1436 /* Guess a good value for the shift parameter by finding an address bit that is a
1437 good discriminant of lively objects. */
1438 max_value = 0.0;
1439 for (i=0; i<sizeof (uintptr_t)*8; i++)
1440 {
1441 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1442 if (max_value < value) max_value = value;
1443 }
1444 for (i=0; i<sizeof (uintptr_t)*8; i++)
1445 {
1446 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1447 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1448 if (value >= max_value * shoulder_factor)
1449 break;
1450 }
1451 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1452 /* Converge toward this slowly to reduce flapping. */
1453 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1454 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1455 assert (new_shift < sizeof (uintptr_t)*8);
1456
1457 /* Count number of used buckets. */
1458 cache_utilization = 0.0;
1459 for (i = 0; i < (1 + __mf_lc_mask); i++)
1460 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1461 cache_utilization += 1.0;
1462 cache_utilization /= (1 + __mf_lc_mask);
1463
1464 new_mask |= 0xffff; /* XXX: force a large cache. */
1465 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1466
1467 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1468 "util=%u%% m=%p s=%u\n",
1469 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1470 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1471
1472 /* We should reinitialize cache if its parameters have changed. */
1473 if (new_mask != __mf_lc_mask ||
1474 new_shift != __mf_lc_shift)
1475 {
1476 __mf_lc_mask = new_mask;
1477 __mf_lc_shift = new_shift;
1478 /* XXX: race */
1479 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1480 /* void slot 0 */
1481 __mf_lookup_cache[0].low = MAXPTR;
1482 }
1483 }
1484
1485
1486
1487 /* __mf_find_object[s] */
1488
1489 /* Find overlapping live objects within [low,high]. Return up to
1490 max_objs of their pointers in objs[]. Return total count of
1491 overlaps (may exceed max_objs). */
1492
1493 unsigned
1494 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1495 __mf_object_t **objs, unsigned max_objs, int type)
1496 {
1497 unsigned count = 0;
1498 mfsplay_tree t = __mf_object_tree (type);
1499 mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1500 int direction;
1501
1502 mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1503 /* An exact match for base address implies a hit. */
1504 if (n != NULL)
1505 {
1506 if (count < max_objs)
1507 objs[count] = (__mf_object_t *) n->value;
1508 count ++;
1509 }
1510
1511 /* Iterate left then right near this key value to find all overlapping objects. */
1512 for (direction = 0; direction < 2; direction ++)
1513 {
1514 /* Reset search origin. */
1515 k = (mfsplay_tree_key) ptr_low;
1516
1517 while (1)
1518 {
1519 __mf_object_t *obj;
1520
1521 n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1522 if (n == NULL) break;
1523 obj = (__mf_object_t *) n->value;
1524
1525 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1526 break;
1527
1528 if (count < max_objs)
1529 objs[count] = (__mf_object_t *) n->value;
1530 count ++;
1531
1532 k = (mfsplay_tree_key) obj->low;
1533 }
1534 }
1535
1536 return count;
1537 }
1538
1539
1540 unsigned
1541 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1542 __mf_object_t **objs, unsigned max_objs)
1543 {
1544 int type;
1545 unsigned count = 0;
1546
1547 /* Search each splay tree for overlaps. */
1548 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1549 {
1550 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1551 if (c > max_objs)
1552 {
1553 max_objs = 0;
1554 objs = NULL;
1555 }
1556 else /* NB: c may equal 0 */
1557 {
1558 max_objs -= c;
1559 objs += c;
1560 }
1561 count += c;
1562 }
1563
1564 return count;
1565 }
1566
1567
1568
1569 /* __mf_link_object */
1570
1571 static void
1572 __mf_link_object (__mf_object_t *node)
1573 {
1574 mfsplay_tree t = __mf_object_tree (node->type);
1575 mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1576 }
1577
1578 /* __mf_unlink_object */
1579
1580 static void
1581 __mf_unlink_object (__mf_object_t *node)
1582 {
1583 mfsplay_tree t = __mf_object_tree (node->type);
1584 mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1585 }
1586
1587 /* __mf_find_dead_objects */
1588
1589 /* Find overlapping dead objects within [low,high]. Return up to
1590 max_objs of their pointers in objs[]. Return total count of
1591 overlaps (may exceed max_objs). */
1592
1593 static unsigned
1594 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1595 __mf_object_t **objs, unsigned max_objs)
1596 {
1597 if (__mf_opts.persistent_count > 0)
1598 {
1599 unsigned count = 0;
1600 unsigned recollection = 0;
1601 unsigned row = 0;
1602
1603 assert (low <= high);
1604 assert (max_objs == 0 || objs != NULL);
1605
1606 /* Widen the search from the most recent plots in each row, looking
1607 backward in time. */
1608 recollection = 0;
1609 while (recollection < __mf_opts.persistent_count)
1610 {
1611 count = 0;
1612
1613 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1614 {
1615 unsigned plot;
1616 unsigned i;
1617
1618 plot = __mf_object_dead_head [row];
1619 for (i = 0; i <= recollection; i ++)
1620 {
1621 __mf_object_t *obj;
1622
1623 /* Look backward through row: it's a circular buffer. */
1624 if (plot > 0) plot --;
1625 else plot = __mf_opts.persistent_count - 1;
1626
1627 obj = __mf_object_cemetary [row][plot];
1628 if (obj && obj->low <= high && obj->high >= low)
1629 {
1630 /* Found an overlapping dead object! */
1631 if (count < max_objs)
1632 objs [count] = obj;
1633 count ++;
1634 }
1635 }
1636 }
1637
1638 if (count)
1639 break;
1640
1641 /* Look farther back in time. */
1642 recollection = (recollection * 2) + 1;
1643 }
1644
1645 return count;
1646 } else {
1647 return 0;
1648 }
1649 }
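
/* Note on the widening schedule above: RECOLLECTION takes the values
   0, 1, 3, 7, 15, ... (r -> 2r+1), so each failed pass roughly doubles
   how far back each cemetery row is searched, up to persistent_count. */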
1650
1651 /* __mf_describe_object */
1652
1653 static void
1654 __mf_describe_object (__mf_object_t *obj)
1655 {
1656 static unsigned epoch = 0;
1657 if (obj == NULL)
1658 {
1659 epoch ++;
1660 return;
1661 }
1662
1663 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1664 {
1665 fprintf (stderr,
1666 "mudflap %sobject %p: name=`%s'\n",
1667 (obj->deallocated_p ? "dead " : ""),
1668 (void *) obj, (obj->name ? obj->name : ""));
1669 return;
1670 }
1671 else
1672 obj->description_epoch = epoch;
1673
1674 fprintf (stderr,
1675 "mudflap %sobject %p: name=`%s'\n"
1676 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1677 "alloc time=%lu.%06lu pc=%p"
1678 #ifdef LIBMUDFLAPTH
1679 " thread=%u"
1680 #endif
1681 "\n",
1682 (obj->deallocated_p ? "dead " : ""),
1683 (void *) obj, (obj->name ? obj->name : ""),
1684 (void *) obj->low, (void *) obj->high,
1685 (unsigned long) (obj->high - obj->low + 1),
1686 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1687 obj->type == __MF_TYPE_HEAP ? "heap" :
1688 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1689 obj->type == __MF_TYPE_STACK ? "stack" :
1690 obj->type == __MF_TYPE_STATIC ? "static" :
1691 obj->type == __MF_TYPE_GUESS ? "guess" :
1692 "unknown"),
1693 obj->read_count, obj->write_count, obj->liveness,
1694 obj->watching_p ? " watching" : "",
1695 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1696 (void *) obj->alloc_pc
1697 #ifdef LIBMUDFLAPTH
1698 , (unsigned) obj->alloc_thread
1699 #endif
1700 );
1701
1702 if (__mf_opts.backtrace > 0)
1703 {
1704 unsigned i;
1705 for (i=0; i<obj->alloc_backtrace_size; i++)
1706 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1707 }
1708
1709 if (__mf_opts.persistent_count > 0)
1710 {
1711 if (obj->deallocated_p)
1712 {
1713 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1714 #ifdef LIBMUDFLAPTH
1715 " thread=%u"
1716 #endif
1717 "\n",
1718 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1719 (void *) obj->dealloc_pc
1720 #ifdef LIBMUDFLAPTH
1721 , (unsigned) obj->dealloc_thread
1722 #endif
1723 );
1724
1725
1726 if (__mf_opts.backtrace > 0)
1727 {
1728 unsigned i;
1729 for (i=0; i<obj->dealloc_backtrace_size; i++)
1730 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1731 }
1732 }
1733 }
1734 }
1735
1736
1737 static int
1738 __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1739 {
1740 __mf_object_t *node = (__mf_object_t *) n->value;
1741 unsigned *count = (unsigned *) param;
1742
1743 if (count != NULL)
1744 (*count) ++;
1745 
1746 fprintf (stderr, "Leaked object %u:\n", (count != NULL ? *count : 0u));
1747 __mf_describe_object (node);
1748
1749 return 0;
1750 }
1751
1752
1753 static unsigned
1754 __mf_report_leaks ()
1755 {
1756 unsigned count = 0;
1757
1758 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1759 __mf_report_leaks_fn, & count);
1760 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1761 __mf_report_leaks_fn, & count);
1762
1763 return count;
1764 }
1765
1766 /* ------------------------------------------------------------------------ */
1767 /* __mf_report */
1768
1769 void
1770 __mf_report ()
1771 {
1772 LOCKTH ();
1773 BEGIN_RECURSION_PROTECT ();
1774 __mfu_report ();
1775 END_RECURSION_PROTECT ();
1776 UNLOCKTH ();
1777 }
1778
1779 void
1780 __mfu_report ()
1781 {
1782 if (__mf_opts.collect_stats)
1783 {
1784 fprintf (stderr,
1785 "*******\n"
1786 "mudflap stats:\n"
1787 "calls to __mf_check: %lu\n"
1788 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1789 " __mf_unregister: %lu [%luB]\n"
1790 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1791 __mf_count_check,
1792 __mf_count_register,
1793 __mf_total_register_size[0], __mf_total_register_size[1],
1794 __mf_total_register_size[2], __mf_total_register_size[3],
1795 __mf_total_register_size[4], /* XXX */
1796 __mf_count_unregister, __mf_total_unregister_size,
1797 __mf_count_violation[0], __mf_count_violation[1],
1798 __mf_count_violation[2], __mf_count_violation[3],
1799 __mf_count_violation[4]);
1800
1801 fprintf (stderr,
1802 "calls with reentrancy: %lu\n", __mf_reentrancy);
1803 #ifdef LIBMUDFLAPTH
1804 fprintf (stderr,
1805 " lock contention: %lu\n", __mf_lock_contention);
1806 #endif
1807
1808 /* Lookup cache stats. */
1809 {
1810 unsigned i;
1811 unsigned max_reuse = 0;
1812 unsigned num_used = 0;
1813 unsigned num_unused = 0;
1814
1815 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1816 {
1817 if (__mf_lookup_cache_reusecount[i])
1818 num_used ++;
1819 else
1820 num_unused ++;
1821 if (max_reuse < __mf_lookup_cache_reusecount[i])
1822 max_reuse = __mf_lookup_cache_reusecount[i];
1823 }
1824 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1825 num_used, num_unused, max_reuse);
1826 }
1827
1828 {
1829 unsigned live_count;
1830 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1831 fprintf (stderr, "number of live objects: %u\n", live_count);
1832 }
1833
1834 if (__mf_opts.persistent_count > 0)
1835 {
1836 unsigned dead_count = 0;
1837 unsigned row, plot;
1838 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1839 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1840 if (__mf_object_cemetary [row][plot] != 0)
1841 dead_count ++;
1842 fprintf (stderr, " zombie objects: %u\n", dead_count);
1843 }
1844 }
1845 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1846 {
1847 unsigned l;
1848 extern void * __mf_wrap_alloca_indirect (size_t c);
1849
1850 /* Free up any remaining alloca()'d blocks. */
1851 __mf_wrap_alloca_indirect (0);
1852 __mf_describe_object (NULL); /* Reset description epoch. */
1853 l = __mf_report_leaks ();
1854 fprintf (stderr, "number of leaked objects: %u\n", l);
1855 }
1856 }
1857
1858 /* __mf_backtrace */
1859
1860 size_t
1861 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1862 {
1863 void ** pc_array;
1864 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1865 unsigned remaining_size;
1866 unsigned omitted_size = 0;
1867 unsigned i;
1868 DECLARE (void, free, void *ptr);
1869 DECLARE (void *, calloc, size_t c, size_t n);
1870 DECLARE (void *, malloc, size_t n);
1871
1872 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1873 #ifdef HAVE_BACKTRACE
1874 pc_array_size = backtrace (pc_array, pc_array_size);
1875 #else
1876 #define FETCH(n) do { if (pc_array_size >= n) { \
1877 pc_array[n] = __builtin_return_address(n); \
1878 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1879
1880 /* Unroll some calls to __builtin_return_address, because it
1881 accepts only a literal integer argument. */
1882 FETCH (0);
1883 #if 0
1884 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1885 rather than simply returning 0. :-( */
1886 FETCH (1);
1887 FETCH (2);
1888 FETCH (3);
1889 FETCH (4);
1890 FETCH (5);
1891 FETCH (6);
1892 FETCH (7);
1893 FETCH (8);
1894 if (pc_array_size > 8) pc_array_size = 9;
1895 #else
1896 if (pc_array_size > 0) pc_array_size = 1;
1897 #endif
1898
1899 #undef FETCH
1900 #endif
1901
1902 /* We want to trim the first few levels of the stack traceback,
1903 since they contain libmudflap wrappers and junk. If pc_array[]
1904 ends up containing a non-NULL guess_pc, then trim everything
1905 before that. Otherwise, omit the first guess_omit_levels
1906 entries. */
1907
1908 if (guess_pc != NULL)
1909 for (i=0; i<pc_array_size; i++)
1910 if (pc_array [i] == guess_pc)
1911 omitted_size = i;
1912
1913 if (omitted_size == 0) /* No match? */
1914 if (pc_array_size > guess_omit_levels)
1915 omitted_size = guess_omit_levels;
1916
1917 remaining_size = pc_array_size - omitted_size;
1918
1919 #ifdef HAVE_BACKTRACE_SYMBOLS
1920 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1921 #else
1922 {
1923 /* Let's construct a buffer by hand. It will have <remaining_size>
1924 char*'s at the front, pointing at individual strings immediately
1925 afterwards. */
1926 void *buffer;
1927 char *chars;
1928 char **pointers;
1929 enum { perline = 30 };
1930 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1931 pointers = (char **) buffer;
1932 chars = (char *)buffer + (remaining_size * sizeof (char *));
1933 for (i = 0; i < remaining_size; i++)
1934 {
1935 pointers[i] = chars;
1936 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1937 chars = chars + perline;
1938 }
1939 *symbols = pointers;
1940 }
1941 #endif
1942 CALL_REAL (free, pc_array);
1943
1944 return remaining_size;
1945 }
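/* A minimal usage sketch (hypothetical caller, not part of this
   file): the symbol array comes from an allocation made behind
   libmudflap's back, so it must be released with the wrapped free,
   exactly as __mf_violation does below. */
#if 0
{
  char **symbols;
  size_t i, n;

  n = __mf_backtrace (&symbols, NULL, 0);
  for (i = 0; i < n; i++)
    fprintf (stderr, " %s\n", symbols[i]);
  CALL_REAL (free, symbols);  /* requires a DECLARE of free in scope */
}
#endif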
1946
1947 /* ------------------------------------------------------------------------ */
1948 /* __mf_violation */
1949
1950 void
1951 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1952 const char *location, int type)
1953 {
1954 char buf [128];
1955 static unsigned violation_number;
1956 DECLARE(void, free, void *ptr);
1957
1958 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1959 (void *) pc,
1960 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
1961
1962 if (__mf_opts.collect_stats)
1963 __mf_count_violation [(type < 0) ? 0 :
1964 (type > __MF_VIOL_WATCH) ? 0 :
1965 type] ++;
1966
1967 /* Print out a basic warning message. */
1968 if (__mf_opts.verbose_violations)
1969 {
1970 unsigned dead_p;
1971 unsigned num_helpful = 0;
1972 struct timeval now = { 0, 0 };
1973 #if HAVE_GETTIMEOFDAY
1974 gettimeofday (& now, NULL);
1975 #endif
1976
1977 violation_number ++;
1978 fprintf (stderr,
1979 "*******\n"
1980 "mudflap violation %u (%s): time=%lu.%06lu "
1981 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1982 violation_number,
1983 ((type == __MF_VIOL_READ) ? "check/read" :
1984 (type == __MF_VIOL_WRITE) ? "check/write" :
1985 (type == __MF_VIOL_REGISTER) ? "register" :
1986 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1987 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1988 now.tv_sec, now.tv_usec,
1989 (void *) ptr, (unsigned long)sz, (void *) pc,
1990 (location != NULL ? " location=`" : ""),
1991 (location != NULL ? location : ""),
1992 (location != NULL ? "'" : ""));
1993
1994 if (__mf_opts.backtrace > 0)
1995 {
1996 char ** symbols;
1997 unsigned i, num;
1998
1999 num = __mf_backtrace (& symbols, (void *) pc, 2);
2000 /* Note: backtrace_symbols calls malloc(). But since we're inside
2001 __mf_violation, presumably reached via __mf_check, the recursion
2002 guard keeps the new string out of the object database. */
2003
2004 for (i=0; i<num; i++)
2005 fprintf (stderr, " %s\n", symbols[i]);
2006
2007 /* Calling free() here would trigger a violation. */
2008 CALL_REAL(free, symbols);
2009 }
2010
2011
2012 /* Look for nearby objects. For this, we start with s_low/s_high
2013 pointing to the given area, looking for overlapping objects.
2014 If none show up, widen the search area and keep looking. */
2015
2016 if (sz == 0) sz = 1;
2017
2018 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2019 {
2020 enum {max_objs = 3}; /* magic */
2021 __mf_object_t *objs[max_objs];
2022 unsigned num_objs = 0;
2023 uintptr_t s_low, s_high;
2024 unsigned tries = 0;
2025 unsigned i;
2026
2027 s_low = (uintptr_t) ptr;
2028 s_high = CLAMPSZ (ptr, sz);
2029
2030 while (tries < 16) /* magic */
2031 {
2032 if (dead_p)
2033 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2034 else
2035 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2036
2037 if (num_objs) /* good enough */
2038 break;
2039
2040 tries ++;
2041
2042 /* XXX: tune this search strategy. It's too dependent on sz,
2043 which can vary from 1 to very large values (e.g. when checking
2044 array indexes). */
2045 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2046 s_high = CLAMPADD (s_high, (sz * tries * tries));
2047 }
2048
2049 for (i = 0; i < min (num_objs, max_objs); i++)
2050 {
2051 __mf_object_t *obj = objs[i];
2052 uintptr_t low = (uintptr_t) ptr;
2053 uintptr_t high = CLAMPSZ (ptr, sz);
2054 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2055 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2056 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2057 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2058 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2059 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2060
2061 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2062 num_helpful + i + 1,
2063 (before1 ? before1 : after1 ? after1 : into1),
2064 (before1 ? "before" : after1 ? "after" : "into"),
2065 (before2 ? before2 : after2 ? after2 : into2),
2066 (before2 ? "before" : after2 ? "after" : "into"));
2067 __mf_describe_object (obj);
2068 }
2069 num_helpful += num_objs;
2070 }
2071
2072 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2073 }
2074
2075 /* How to finally handle this violation? */
2076 switch (__mf_opts.violation_mode)
2077 {
2078 case viol_nop:
2079 break;
2080 case viol_segv:
2081 kill (getpid(), SIGSEGV);
2082 break;
2083 case viol_abort:
2084 abort ();
2085 break;
2086 case viol_gdb:
2087
2088 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2089 system (buf);
2090 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2091 instead, and let the forked child execlp() gdb. That way, this
2092 subject process can be resumed under the supervision of gdb.
2093 This can't happen now, since system() only returns when gdb
2094 dies. In that case, we need to beware of starting a second
2095 concurrent gdb child upon the next violation. (But if the first
2096 gdb dies, then starting a new one is appropriate.) */
2097 break;
2098 }
2099 }
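/* A sketch of the fork()-based variant described in the XXX note
   above. Assumptions: GDB_WAIT_PARAMETER is the hypothetical delay
   named in that note, and gdb_pid is a hypothetical guard against
   spawning a second concurrent gdb. Liveness checking via
   kill(pid, 0) is approximate, since a zombie child still "exists"
   until reaped. Disabled, like the unrolled FETCH block above. */
#if 0
static pid_t gdb_pid;  /* hypothetical: pid of a previously forked gdb */

case viol_gdb:
  if (gdb_pid == 0 || (kill (gdb_pid, 0) != 0 && errno == ESRCH))
    {
      gdb_pid = fork ();
      if (gdb_pid == 0)            /* child: turn into gdb */
        {
          char pidbuf[32];
          snprintf (pidbuf, sizeof pidbuf, "--pid=%u",
                    (unsigned) getppid ());
          execlp ("gdb", "gdb", pidbuf, (char *) NULL);
          _exit (127);             /* execlp failed */
        }
      sleep (GDB_WAIT_PARAMETER);  /* let gdb attach, then resume */
    }
  break;
#endif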
2100
2101 /* ------------------------------------------------------------------------ */
2102
2103
2104 unsigned __mf_watch (void *ptr, size_t sz)
2105 {
2106 unsigned rc;
2107 LOCKTH ();
2108 BEGIN_RECURSION_PROTECT ();
2109 rc = __mf_watch_or_not (ptr, sz, 1);
2110 END_RECURSION_PROTECT ();
2111 UNLOCKTH ();
2112 return rc;
2113 }
2114
2115 unsigned __mf_unwatch (void *ptr, size_t sz)
2116 {
2117 unsigned rc;
2118 LOCKTH ();
BEGIN_RECURSION_PROTECT (); /* match the protection used in __mf_watch */
2119 rc = __mf_watch_or_not (ptr, sz, 0);
END_RECURSION_PROTECT ();
2120 UNLOCKTH ();
2121 return rc;
2122 }
2123
2124
2125 static unsigned
2126 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2127 {
2128 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2129 uintptr_t ptr_low = (uintptr_t) ptr;
2130 unsigned count = 0;
2131
2132 TRACE ("%s ptr=%p size=%lu\n",
2133 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2134
2135 switch (__mf_opts.mudflap_mode)
2136 {
2137 case mode_nop:
2138 case mode_populate:
2139 case mode_violate:
2140 count = 0;
2141 break;
2142
2143 case mode_check:
2144 {
2145 __mf_object_t **all_ovr_objs;
2146 unsigned obj_count;
2147 unsigned n;
2148 DECLARE (void *, malloc, size_t c);
2149 DECLARE (void, free, void *p);
2150
2151 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2152 VERBOSE_TRACE (" %u:", obj_count);
2153
2154 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2155 if (all_ovr_objs == NULL) abort ();
2156 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2157 assert (n == obj_count);
2158
2159 for (n = 0; n < obj_count; n ++)
2160 {
2161 __mf_object_t *obj = all_ovr_objs[n];
2162
2163 VERBOSE_TRACE (" [%p]", (void *) obj);
2164 if (obj->watching_p != flag)
2165 {
2166 obj->watching_p = flag;
2167 count ++;
2168
2169 /* Remove object from cache, to ensure next access
2170 goes through __mf_check(). */
2171 if (flag)
2172 __mf_uncache_object (obj);
2173 }
2174 }
2175 CALL_REAL (free, all_ovr_objs);
2176 }
2177 break;
2178 }
2179
2180 return count;
2181 }
2182
2183
2184 void
2185 __mf_sigusr1_handler (int num)
2186 {
2187 __mf_sigusr1_received ++;
2188 }
2189
2190 /* Install or remove SIGUSR1 handler as necessary.
2191 Also, respond to a received pending SIGUSR1. */
2192 void
2193 __mf_sigusr1_respond ()
2194 {
2195 static int handler_installed;
2196
2197 #ifdef SIGUSR1
2198 /* Manage handler */
2199 if (__mf_opts.sigusr1_report && ! handler_installed)
2200 {
2201 signal (SIGUSR1, __mf_sigusr1_handler);
2202 handler_installed = 1;
2203 }
2204 else if(! __mf_opts.sigusr1_report && handler_installed)
2205 {
2206 signal (SIGUSR1, SIG_DFL);
2207 handler_installed = 0;
2208 }
2209 #endif
2210
2211 /* Manage enqueued signals */
2212 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2213 {
2214 __mf_sigusr1_handled ++;
2215 assert (__mf_state == reentrant);
2216 __mfu_report ();
2217 handler_installed = 0; /* We may need to re-install the handler: SysV signal() semantics reset it to SIG_DFL once it fires. */
2218 }
2219 }
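/* A portability sketch, not part of this file: installing the handler
   with POSIX sigaction() instead of signal() would give reliable
   (BSD-style) semantics on SysV platforms too, making the
   handler_installed reset above unnecessary. */
#if 0
struct sigaction sa;
memset (&sa, 0, sizeof sa);
sa.sa_handler = __mf_sigusr1_handler;
sigemptyset (&sa.sa_mask);
sa.sa_flags = 0;              /* no SA_RESETHAND: handler persists */
sigaction (SIGUSR1, &sa, NULL);
#endif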
2220
2221
2222 /* XXX: provide an alternative __assert_fail function that cannot
2223 fail due to libmudflap infinite recursion. */
2224 #ifndef NDEBUG
2225
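/* Render the decimal digits of N into a small stack buffer, back to
   front, and emit them with a raw write(2). No printf, no malloc:
   nothing here can re-enter libmudflap while an assertion failure is
   being reported. */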
2226 static void
2227 write_itoa (int fd, unsigned n)
2228 {
2229 enum x { bufsize = sizeof(n)*4 };
2230 char buf [bufsize];
2231 unsigned i;
2232
2233 for (i=0; i<bufsize-1; i++)
2234 {
2235 unsigned digit = n % 10;
2236 buf[bufsize-2-i] = digit + '0';
2237 n /= 10;
2238 if (n == 0)
2239 {
2240 char *m = & buf [bufsize-2-i];
2241 buf[bufsize-1] = '\0';
2242 write (fd, m, strlen(m));
2243 break;
2244 }
2245 }
2246 }
2247
2248
2249 void
2250 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2251 {
2252 #define write2(string) write (2, (string), strlen ((string)))
2253 write2("mf");
2254 #ifdef LIBMUDFLAPTH
2255 write2("(");
2256 write_itoa (2, (unsigned) pthread_self ());
2257 write2(")");
2258 #endif
2259 write2(": assertion failure: `");
2260 write (2, msg, strlen (msg));
2261 write2("' in ");
2262 write (2, func, strlen (func));
2263 write2(" at ");
2264 write (2, file, strlen (file));
2265 write2(":");
2266 write_itoa (2, line);
2267 write2("\n");
2268 #undef write2
2269 abort ();
2270 }
2271
2272
2273 #endif
2274
2275
2276
2277 /* Adapted splay tree code, originally from libiberty. It has been
2278 specialized for libmudflap as requested by RMS. */
2279
2280 static void
2281 mfsplay_tree_free (void *p)
2282 {
2283 DECLARE (void, free, void *p);
2284 CALL_REAL (free, p);
2285 }
2286
2287 static void *
2288 mfsplay_tree_xmalloc (size_t s)
2289 {
2290 DECLARE (void *, malloc, size_t s);
2291 void *p = CALL_REAL (malloc, s);
/* Callers never check the result, so honour the "x" (never NULL) convention. */
if (p == NULL) abort ();
return p;
2292 }
2293
2294
2295 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2296 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2297 mfsplay_tree_key,
2298 mfsplay_tree_node *,
2299 mfsplay_tree_node *,
2300 mfsplay_tree_node *);
2301
2302
2303 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2304 and grandparent, respectively, of NODE. */
2305
2306 static mfsplay_tree_node
2307 mfsplay_tree_splay_helper (mfsplay_tree sp,
2308 mfsplay_tree_key key,
2309 mfsplay_tree_node * node,
2310 mfsplay_tree_node * parent,
2311 mfsplay_tree_node * grandparent)
2312 {
2313 mfsplay_tree_node *next;
2314 mfsplay_tree_node n;
2315 int comparison;
2316
2317 n = *node;
2318
2319 if (!n)
2320 return *parent;
2321
2322 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2323
2324 if (comparison == 0)
2325 /* We've found the target. */
2326 next = 0;
2327 else if (comparison < 0)
2328 /* The target is to the left. */
2329 next = &n->left;
2330 else
2331 /* The target is to the right. */
2332 next = &n->right;
2333
2334 if (next)
2335 {
2336 /* Check whether our recursion depth is too high. Abort this search,
2337 and signal that a rebalance is required to continue. */
2338 if (sp->depth > sp->max_depth)
2339 {
2340 sp->rebalance_p = 1;
2341 return n;
2342 }
2343
2344 /* Continue down the tree. */
2345 sp->depth ++;
2346 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2347 sp->depth --;
2348
2349 /* The recursive call will change the place to which NODE
2350 points. */
2351 if (*node != n || sp->rebalance_p)
2352 return n;
2353 }
2354
2355 if (!parent)
2356 /* NODE is the root. We are done. */
2357 return n;
2358
2359 /* First, handle the case where there is no grandparent (i.e.,
2360 *PARENT is the root of the tree.) */
2361 if (!grandparent)
2362 {
2363 if (n == (*parent)->left)
2364 {
2365 *node = n->right;
2366 n->right = *parent;
2367 }
2368 else
2369 {
2370 *node = n->left;
2371 n->left = *parent;
2372 }
2373 *parent = n;
2374 return n;
2375 }
2376
2377 /* Next handle the cases where both N and *PARENT are left children,
2378 or where both are right children. */
2379 if (n == (*parent)->left && *parent == (*grandparent)->left)
2380 {
2381 mfsplay_tree_node p = *parent;
2382
2383 (*grandparent)->left = p->right;
2384 p->right = *grandparent;
2385 p->left = n->right;
2386 n->right = p;
2387 *grandparent = n;
2388 return n;
2389 }
2390 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2391 {
2392 mfsplay_tree_node p = *parent;
2393
2394 (*grandparent)->right = p->left;
2395 p->left = *grandparent;
2396 p->right = n->left;
2397 n->left = p;
2398 *grandparent = n;
2399 return n;
2400 }
2401
2402 /* Finally, deal with the case where N is a left child, but *PARENT
2403 is a right child, or vice versa. */
2404 if (n == (*parent)->left)
2405 {
2406 (*parent)->left = n->right;
2407 n->right = *parent;
2408 (*grandparent)->right = n->left;
2409 n->left = *grandparent;
2410 *grandparent = n;
2411 return n;
2412 }
2413 else
2414 {
2415 (*parent)->right = n->left;
2416 n->left = *parent;
2417 (*grandparent)->left = n->right;
2418 n->right = *grandparent;
2419 *grandparent = n;
2420 return n;
2421 }
2422 }
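/* For reference, the left/left "zig-zig" case above performs the
   classic double rotation; with subtrees a-d:

          g                n
         / \              / \
        p   d            a   p
       / \        =>        / \
      n   c                b   g
     / \                      / \
    a   b                    c   d
*/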
2423
2424
2425
2426 static int
2427 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2428 {
2429 mfsplay_tree_node **p = array_ptr;
2430 *(*p) = n;
2431 (*p)++;
2432 return 0;
2433 }
2434
2435
2436 static mfsplay_tree_node
2437 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2438 unsigned high)
2439 {
2440 unsigned middle = low + (high - low) / 2;
2441 mfsplay_tree_node n = array[middle];
2442
2443 /* Note that since we're producing a balanced binary tree, it is not a problem
2444 that this function is recursive. */
2445 if (low + 1 <= middle)
2446 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2447 else
2448 n->left = NULL;
2449
2450 if (middle + 1 <= high)
2451 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2452 else
2453 n->right = NULL;
2454
2455 return n;
2456 }
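/* For example, seven nodes whose sorted keys occupy array[0..6] are
   relinked with array[3] as the root:

             3
           /   \
          1     5
         / \   / \
        0   2 4   6
*/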
2457
2458
2459 /* Rebalance the entire tree. Do this by copying all the node
2460 pointers into an array, then cleverly re-linking them. */
2461 static void
2462 mfsplay_tree_rebalance (mfsplay_tree sp)
2463 {
2464 mfsplay_tree_node *all_nodes, *all_nodes_1;
2465
2466 if (sp->num_keys <= 2)
2467 return;
2468
2469 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2470
2471 /* Traverse all nodes to copy their addresses into this array. */
2472 all_nodes_1 = all_nodes;
2473 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2474 (void *) &all_nodes_1);
2475
2476 /* Relink all the nodes. */
2477 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2478
2479 mfsplay_tree_free (all_nodes);
2480 }
2481
2482
2483 /* Splay SP around KEY. */
2484 static void
2485 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2486 {
2487 if (sp->root == 0)
2488 return;
2489
2490 /* If we just splayed the tree with the same key, do nothing. */
2491 if (sp->last_splayed_key_p &&
2492 (sp->last_splayed_key == key))
2493 return;
2494
2495 /* Compute a maximum recursion depth for a splay tree with NUM nodes.
2496 The idea is to limit excessive stack usage if we're facing
2497 degenerate access patterns. Unfortunately such patterns can occur
2498 e.g. during static initialization, where many static objects might
2499 be registered in increasing address sequence, or during a case where
2500 large tree-like heap data structures are allocated quickly.
2501
2502 On x86, at roughly 80 bytes per helper frame, 2500 frames come to roughly 200K of stack usage.
2503 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2504 sp->max_depth = 2500;
2505 sp->rebalance_p = sp->depth = 0;
2506
2507 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2508 if (sp->rebalance_p)
2509 {
2510 mfsplay_tree_rebalance (sp);
2511
2512 sp->rebalance_p = sp->depth = 0;
2513 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2514
2515 if (sp->rebalance_p)
2516 abort ();
2517 }
2518
2519
2520 /* Cache this splay key. */
2521 sp->last_splayed_key = key;
2522 sp->last_splayed_key_p = 1;
2523 }
2524
2525
2526
2527 /* Allocate a new splay tree. */
2528 static mfsplay_tree
2529 mfsplay_tree_new ()
2530 {
2531 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2532 sp->root = NULL;
2533 sp->last_splayed_key_p = 0;
2534 sp->num_keys = 0;
2535
2536 return sp;
2537 }
2538
2539
2540
2541 /* Insert a new node (associating KEY with DATA) into SP. If a
2542 previous node with the indicated KEY exists, its data is replaced
2543 with the new value. Returns the new node. */
2544 static mfsplay_tree_node
2545 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2546 {
2547 int comparison = 0;
2548
2549 mfsplay_tree_splay (sp, key);
2550
2551 if (sp->root)
2552 comparison = ((sp->root->key > key) ? 1 :
2553 ((sp->root->key < key) ? -1 : 0));
2554
2555 if (sp->root && comparison == 0)
2556 {
2557 /* If the root of the tree already has the indicated KEY, just
2558 replace the value with VALUE. */
2559 sp->root->value = value;
2560 }
2561 else
2562 {
2563 /* Create a new node, and insert it at the root. */
2564 mfsplay_tree_node node;
2565
2566 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2567 node->key = key;
2568 node->value = value;
2569 sp->num_keys++;
2570 if (!sp->root)
2571 node->left = node->right = 0;
2572 else if (comparison < 0)
2573 {
2574 node->left = sp->root;
2575 node->right = node->left->right;
2576 node->left->right = 0;
2577 }
2578 else
2579 {
2580 node->right = sp->root;
2581 node->left = node->right->left;
2582 node->right->left = 0;
2583 }
2584
2585 sp->root = node;
2586 sp->last_splayed_key_p = 0;
2587 }
2588
2589 return sp->root;
2590 }
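/* For example, inserting a KEY greater than the splayed root R
   (comparison < 0): the new node N adopts R as its left child and
   takes over R's old right subtree T:

        R                N
       / \      =>      / \
      L   T            R   T
                      /
                     L
*/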
2591
2592 /* Remove KEY from SP. It is not an error if it did not exist. */
2593
2594 static void
2595 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2596 {
2597 mfsplay_tree_splay (sp, key);
2598 sp->last_splayed_key_p = 0;
2599 if (sp->root && (sp->root->key == key))
2600 {
2601 mfsplay_tree_node left, right;
2602 left = sp->root->left;
2603 right = sp->root->right;
2604 /* Delete the root node itself. */
2605 mfsplay_tree_free (sp->root);
2606 sp->num_keys--;
2607 /* One of the children is now the root. Doesn't matter much
2608 which, so long as we preserve the properties of the tree. */
2609 if (left)
2610 {
2611 sp->root = left;
2612 /* If there was a right child as well, hang it off the
2613 right-most leaf of the left child. */
2614 if (right)
2615 {
2616 while (left->right)
2617 left = left->right;
2618 left->right = right;
2619 }
2620 }
2621 else
2622 sp->root = right;
2623 }
2624 }
2625
2626 /* Look up KEY in SP, returning its node if present, and NULL
2627 otherwise. */
2628
2629 static mfsplay_tree_node
2630 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2631 {
2632 mfsplay_tree_splay (sp, key);
2633 if (sp->root && (sp->root->key == key))
2634 return sp->root;
2635 else
2636 return 0;
2637 }
2638
2639
2640 /* Return the node immediately preceding KEY, or NULL if there is
2641 no predecessor. KEY need not be present in the tree. */
2642
2643 static mfsplay_tree_node
2644 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2645 {
2646 int comparison;
2647 mfsplay_tree_node node;
2648 /* If the tree is empty, there is certainly no predecessor. */
2649 if (!sp->root)
2650 return NULL;
2651 /* Splay the tree around KEY. That will leave either the KEY
2652 itself, its predecessor, or its successor at the root. */
2653 mfsplay_tree_splay (sp, key);
2654 comparison = ((sp->root->key > key) ? 1 :
2655 ((sp->root->key < key) ? -1 : 0));
2656
2657 /* If the predecessor is at the root, just return it. */
2658 if (comparison < 0)
2659 return sp->root;
2660 /* Otherwise, find the rightmost element of the left subtree. */
2661 node = sp->root->left;
2662 if (node)
2663 while (node->right)
2664 node = node->right;
2665 return node;
2666 }
2667
2668 /* Return the node immediately following KEY, or NULL if there is
2669 no successor. KEY need not be present in the tree. */
2670
2671 static mfsplay_tree_node
2672 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2673 {
2674 int comparison;
2675 mfsplay_tree_node node;
2676 /* If the tree is empty, there is certainly no successor. */
2677 if (!sp->root)
2678 return NULL;
2679 /* Splay the tree around KEY. That will leave either the KEY
2680 itself, its predecessor, or its successor at the root. */
2681 mfsplay_tree_splay (sp, key);
2682 comparison = ((sp->root->key > key) ? 1 :
2683 ((sp->root->key < key) ? -1 : 0));
2684 /* If the successor is at the root, just return it. */
2685 if (comparison > 0)
2686 return sp->root;
2687 /* Otherwise, find the leftmost element of the right subtree. */
2688 node = sp->root->right;
2689 if (node)
2690 while (node->left)
2691 node = node->left;
2692 return node;
2693 }
2694
2695 /* Call FN, passing it the DATA, for every node in SP, following an
2696 in-order traversal. If FN ever returns a non-zero value, the
2697 iteration ceases immediately, and the value is returned.
2698 Otherwise, this function returns 0.
2699
2700 This function simulates recursion using dynamically allocated
2701 arrays, since it may be called from mfsplay_tree_rebalance(), which
2702 in turn means that the tree is already uncomfortably deep for stack
2703 space limits. */
2704 static int
2705 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2706 {
2707 mfsplay_tree_node *stack1;
2708 char *stack2;
2709 unsigned sp;
2710 int val = 0;
2711 enum s { s_left, s_here, s_right, s_up };
2712
2713 if (st->root == NULL) /* => num_keys == 0 */
2714 return 0;
2715
2716 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2717 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2718
2719 sp = 0;
2720 stack1 [sp] = st->root;
2721 stack2 [sp] = s_left;
2722
2723 while (1)
2724 {
2725 mfsplay_tree_node n;
2726 enum s s;
2727
2728 n = stack1 [sp];
2729 s = stack2 [sp];
2730
2731 /* Handle each of the four possible states separately. */
2732
2733 /* 1: We're here to traverse the left subtree (if any). */
2734 if (s == s_left)
2735 {
2736 stack2 [sp] = s_here;
2737 if (n->left != NULL)
2738 {
2739 sp ++;
2740 stack1 [sp] = n->left;
2741 stack2 [sp] = s_left;
2742 }
2743 }
2744
2745 /* 2: We're here to traverse this node. */
2746 else if (s == s_here)
2747 {
2748 stack2 [sp] = s_right;
2749 val = (*fn) (n, data);
2750 if (val) break;
2751 }
2752
2753 /* 3: We're here to traverse the right subtree (if any). */
2754 else if (s == s_right)
2755 {
2756 stack2 [sp] = s_up;
2757 if (n->right != NULL)
2758 {
2759 sp ++;
2760 stack1 [sp] = n->right;
2761 stack2 [sp] = s_left;
2762 }
2763 }
2764
2765 /* 4: We're here after both subtrees (if any) have been traversed. */
2766 else if (s == s_up)
2767 {
2768 /* Pop the stack. */
2769 if (sp == 0) break; /* Popping off the root node: we're finished! */
2770 sp --;
2771 }
2772
2773 else
2774 abort ();
2775 }
2776
2777 mfsplay_tree_free (stack1);
2778 mfsplay_tree_free (stack2);
2779 return val;
2780 }
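/* For example, on a two-node tree with root R and left child A, the
   state machine runs: push R/s_left; descend to A; visit A (s_here);
   A has no right child (s_right); pop A (s_up); visit R; R has no
   right child; pop R and finish -- yielding the in-order sequence
   A, R without any recursion. */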