libmudflap/mf-runtime.c
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6 adapted from libiberty.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
14
15 In addition to the permissions in the GNU General Public License, the
16 Free Software Foundation gives you unlimited permission to link the
17 compiled version of this file into combinations with other programs,
18 and to distribute those combinations without any restriction coming
19 from the use of this file. (The General Public License restrictions
20 do apply in other respects; for example, they cover modification of
21 the file, and distribution when not linked into a combine
22 executable.)
23
24 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25 WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 for more details.
28
29 You should have received a copy of the GNU General Public License
30 along with GCC; see the file COPYING. If not, write to the Free
31 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
32 02110-1301, USA. */
33
34 #include "config.h"
35
36 /* These attempt to coax various unix flavours to declare all our
37 needed tidbits in the system headers. */
38 #if !defined(__FreeBSD__) && !defined(__APPLE__)
39 #define _POSIX_SOURCE
40 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
41 #define _GNU_SOURCE
42 #define _XOPEN_SOURCE
43 #define _BSD_TYPES
44 #define __EXTENSIONS__
45 #define _ALL_SOURCE
46 #define _LARGE_FILE_API
47 #define _XOPEN_SOURCE_EXTENDED 1
48
49 #include <stdio.h>
50 #include <stdlib.h>
51 #include <sys/types.h>
52 #include <sys/time.h>
53 #include <time.h>
54 #include <unistd.h>
55 #ifdef HAVE_EXECINFO_H
56 #include <execinfo.h>
57 #endif
58 #ifdef HAVE_SIGNAL_H
59 #include <signal.h>
60 #endif
61 #include <assert.h>
62
63 #include <string.h>
64 #include <limits.h>
65 #include <sys/types.h>
66 #include <signal.h>
67 #include <errno.h>
68 #include <ctype.h>
69
70 #include "mf-runtime.h"
71 #include "mf-impl.h"
72
73
74 /* ------------------------------------------------------------------------ */
75 /* Splay-tree implementation. */
76
77 typedef uintptr_t mfsplay_tree_key;
78 typedef void *mfsplay_tree_value;
79
80 /* Forward declaration for a node in the tree. */
81 typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
82
83 /* The type of a function used to iterate over the tree. */
84 typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
85
86 /* The nodes in the splay tree. */
87 struct mfsplay_tree_node_s
88 {
89 /* Data. */
90 mfsplay_tree_key key;
91 mfsplay_tree_value value;
92 /* Children. */
93 mfsplay_tree_node left;
94 mfsplay_tree_node right;
95 /* XXX: The addition of a parent pointer may eliminate some recursion. */
96 };
97
98 /* The splay tree itself. */
99 struct mfsplay_tree_s
100 {
101 /* The root of the tree. */
102 mfsplay_tree_node root;
103
104 /* The last key value for which the tree has been splayed, but not
105 since modified. */
106 mfsplay_tree_key last_splayed_key;
107 int last_splayed_key_p;
108
109 /* Statistics. */
110 unsigned num_keys;
111
112 /* Traversal recursion control flags. */
113 unsigned max_depth;
114 unsigned depth;
115 unsigned rebalance_p;
116 };
117 typedef struct mfsplay_tree_s *mfsplay_tree;
118
119 static mfsplay_tree mfsplay_tree_new (void);
120 static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
121 static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
122 static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
123 static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
124 static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
125 static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
126 static void mfsplay_tree_rebalance (mfsplay_tree sp);
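/* Illustrative only -- not part of the runtime.  A minimal sketch of how
   the helpers declared above are combined later in this file: objects are
   keyed by their low (base) address, looked up exactly, and walked with a
   callback.  All names below ("example_*", the fake key 0x1000) are
   hypothetical. */
#if 0
static int
example_visit (mfsplay_tree_node n, void *param)
{
  /* n->key is the base address; n->value is the stored payload pointer. */
  unsigned *count = (unsigned *) param;
  (*count) ++;
  return 0;  /* Keep iterating; the runtime's own callbacks all return 0. */
}

static void
example_splay_usage (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  unsigned count = 0;
  int payload = 42;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, (mfsplay_tree_value) &payload);
  if (mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000) != NULL)
    mfsplay_tree_foreach (t, example_visit, &count);  /* count becomes 1 */
  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
}
#endif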
127
128 /* ------------------------------------------------------------------------ */
129 /* Utility macros */
130
131 #define CTOR __attribute__ ((constructor))
132 #define DTOR __attribute__ ((destructor))
133
134
135 /* Codes to describe the context in which a violation occurs. */
136 #define __MF_VIOL_UNKNOWN 0
137 #define __MF_VIOL_READ 1
138 #define __MF_VIOL_WRITE 2
139 #define __MF_VIOL_REGISTER 3
140 #define __MF_VIOL_UNREGISTER 4
141 #define __MF_VIOL_WATCH 5
142
143 /* Protect against recursive calls. */
144
145 static void
146 begin_recursion_protect1 (const char *pf)
147 {
148 if (__mf_get_state () == reentrant)
149 {
150 write (2, "mf: erroneous reentrancy detected in `", 38);
151 write (2, pf, strlen(pf));
152 write (2, "'\n", 2);
153 abort ();
154 }
155 __mf_set_state (reentrant);
156 }
157
158 #define BEGIN_RECURSION_PROTECT() \
159 begin_recursion_protect1 (__PRETTY_FUNCTION__)
160
161 #define END_RECURSION_PROTECT() \
162 __mf_set_state (active)
163
164 /* ------------------------------------------------------------------------ */
165 /* Required globals. */
166
167 #define LOOKUP_CACHE_MASK_DFL 1023
168 #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
169 #define LOOKUP_CACHE_SHIFT_DFL 2
170
171 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
172 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
173 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
174 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
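/* Illustrative only: a sketch of the shift/mask indexing that these
   globals support.  The real index macro (__MF_CACHE_INDEX) lives in the
   mudflap headers, so the exact expression below is an assumption about
   its shape, shown only to make the cache parameters concrete. */
#if 0
static inline struct __mf_cache *
example_cache_slot (void *ptr)
{
  /* Drop the low __mf_lc_shift bits, then keep the bits selected by
     __mf_lc_mask. */
  uintptr_t idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
  return & __mf_lookup_cache [idx];
}

static inline int
example_cache_hit_p (void *ptr, size_t sz)
{
  /* A hit means the cached [low,high] range already covers the access,
     letting instrumented code skip the full __mf_check call. */
  struct __mf_cache *e = example_cache_slot (ptr);
  return (e->low <= (uintptr_t) ptr
          && ((uintptr_t) ptr + sz - 1) <= e->high);
}
#endif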
175
176 struct __mf_options __mf_opts;
177 int __mf_starting_p = 1;
178
179 #ifdef LIBMUDFLAPTH
180 #ifdef HAVE_TLS
181 __thread enum __mf_state_enum __mf_state_1 = reentrant;
182 #endif
183 #else
184 enum __mf_state_enum __mf_state_1 = reentrant;
185 #endif
186
187 #ifdef LIBMUDFLAPTH
188 pthread_mutex_t __mf_biglock =
189 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
190 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
191 #else
192 PTHREAD_MUTEX_INITIALIZER;
193 #endif
194 #endif
195
196 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
197 the plain libmudflap.la (built without threading support) can diagnose whether
198 the application is linked with -lpthread. See __mf_usage() below. */
199 #if HAVE_PTHREAD_H
200 #ifdef _POSIX_THREADS
201 #pragma weak pthread_join
202 #else
203 #define pthread_join NULL
204 #endif
205 #endif
206
207
208 /* ------------------------------------------------------------------------ */
209 /* stats-related globals. */
210
211 static unsigned long __mf_count_check;
212 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
213 static unsigned long __mf_count_register;
214 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
215 static unsigned long __mf_count_unregister;
216 static unsigned long __mf_total_unregister_size;
217 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
218 static unsigned long __mf_sigusr1_received;
219 static unsigned long __mf_sigusr1_handled;
220 /* not static */ unsigned long __mf_reentrancy;
221 #ifdef LIBMUDFLAPTH
222 /* not static */ unsigned long __mf_lock_contention;
223 #endif
224
225
226 /* ------------------------------------------------------------------------ */
227 /* mode-check-related globals. */
228
229 typedef struct __mf_object
230 {
231 uintptr_t low, high; /* __mf_register parameters */
232 const char *name;
233 char type; /* __MF_TYPE_something */
234 char watching_p; /* Trigger a VIOL_WATCH on access? */
235 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
236 unsigned write_count; /* Likewise for __mf_check/write. */
237 unsigned liveness; /* A measure of recent checking activity. */
238 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
239
240 uintptr_t alloc_pc;
241 struct timeval alloc_time;
242 char **alloc_backtrace;
243 size_t alloc_backtrace_size;
244 #ifdef LIBMUDFLAPTH
245 pthread_t alloc_thread;
246 #endif
247
248 int deallocated_p;
249 uintptr_t dealloc_pc;
250 struct timeval dealloc_time;
251 char **dealloc_backtrace;
252 size_t dealloc_backtrace_size;
253 #ifdef LIBMUDFLAPTH
254 pthread_t dealloc_thread;
255 #endif
256 } __mf_object_t;
257
258 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
259 /* Actually stored as static vars within lookup function below. */
260
261 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
262 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
263 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
264
265
266 /* ------------------------------------------------------------------------ */
267 /* Forward function declarations */
268
269 void __mf_init () CTOR;
270 static void __mf_sigusr1_respond ();
271 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
272 __mf_object_t **objs, unsigned max_objs);
273 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
274 __mf_object_t **objs, unsigned max_objs, int type);
275 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
276 __mf_object_t **objs, unsigned max_objs);
277 static void __mf_adapt_cache ();
278 static void __mf_describe_object (__mf_object_t *obj);
279 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
280 static mfsplay_tree __mf_object_tree (int type);
281 static void __mf_link_object (__mf_object_t *node);
282 static void __mf_unlink_object (__mf_object_t *node);
283
284
285 /* ------------------------------------------------------------------------ */
286 /* Configuration engine */
287
288 static void
289 __mf_set_default_options ()
290 {
291 memset (& __mf_opts, 0, sizeof (__mf_opts));
292
293 __mf_opts.adapt_cache = 1000003;
294 __mf_opts.abbreviate = 1;
295 __mf_opts.verbose_violations = 1;
296 __mf_opts.free_queue_length = 4;
297 __mf_opts.persistent_count = 100;
298 __mf_opts.crumple_zone = 32;
299 __mf_opts.backtrace = 4;
300 __mf_opts.timestamps = 1;
301 __mf_opts.mudflap_mode = mode_check;
302 __mf_opts.violation_mode = viol_nop;
303 #ifdef HAVE___LIBC_FREERES
304 __mf_opts.call_libc_freeres = 1;
305 #endif
306 __mf_opts.heur_std_data = 1;
307 #ifdef LIBMUDFLAPTH
308 __mf_opts.thread_stack = 0;
309 #endif
310 }
311
312 static struct option
313 {
314 char *name;
315 char *description;
316 enum
317 {
318 set_option,
319 read_integer_option,
320 } type;
321 unsigned value;
322 unsigned *target;
323 }
324 options [] =
325 {
326 {"mode-nop",
327 "mudflaps do nothing",
328 set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
329 {"mode-populate",
330 "mudflaps populate object tree",
331 set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
332 {"mode-check",
333 "mudflaps check for memory violations",
334 set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
335 {"mode-violate",
336 "mudflaps always cause violations (diagnostic)",
337 set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},
338
339 {"viol-nop",
340 "violations do not change program execution",
341 set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
342 {"viol-abort",
343 "violations cause a call to abort()",
344 set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
345 {"viol-segv",
346 "violations are promoted to SIGSEGV signals",
347 set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
348 {"viol-gdb",
349 "violations fork a gdb process attached to current program",
350 set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
351 {"trace-calls",
352 "trace calls to mudflap runtime library",
353 set_option, 1, &__mf_opts.trace_mf_calls},
354 {"verbose-trace",
355 "trace internal events within mudflap runtime library",
356 set_option, 1, &__mf_opts.verbose_trace},
357 {"collect-stats",
358 "collect statistics on mudflap's operation",
359 set_option, 1, &__mf_opts.collect_stats},
360 #ifdef SIGUSR1
361 {"sigusr1-report",
362 "print report upon SIGUSR1",
363 set_option, 1, &__mf_opts.sigusr1_report},
364 #endif
365 {"internal-checking",
366 "perform more expensive internal checking",
367 set_option, 1, &__mf_opts.internal_checking},
368 {"print-leaks",
369 "print any memory leaks at program shutdown",
370 set_option, 1, &__mf_opts.print_leaks},
371 #ifdef HAVE___LIBC_FREERES
372 {"libc-freeres",
373 "call glibc __libc_freeres at shutdown for better leak data",
374 set_option, 1, &__mf_opts.call_libc_freeres},
375 #endif
376 {"check-initialization",
377 "detect uninitialized object reads",
378 set_option, 1, &__mf_opts.check_initialization},
379 {"verbose-violations",
380 "print verbose messages when memory violations occur",
381 set_option, 1, &__mf_opts.verbose_violations},
382 {"abbreviate",
383 "abbreviate repetitive listings",
384 set_option, 1, &__mf_opts.abbreviate},
385 {"timestamps",
386 "track object lifetime timestamps",
387 set_option, 1, &__mf_opts.timestamps},
388 {"ignore-reads",
389 "ignore read accesses - assume okay",
390 set_option, 1, &__mf_opts.ignore_reads},
391 {"wipe-stack",
392 "wipe stack objects at unwind",
393 set_option, 1, &__mf_opts.wipe_stack},
394 {"wipe-heap",
395 "wipe heap objects at free",
396 set_option, 1, &__mf_opts.wipe_heap},
397 {"heur-proc-map",
398 "support /proc/self/map heuristics",
399 set_option, 1, &__mf_opts.heur_proc_map},
400 {"heur-stack-bound",
401 "enable a simple upper stack bound heuristic",
402 set_option, 1, &__mf_opts.heur_stack_bound},
403 {"heur-start-end",
404 "support _start.._end heuristics",
405 set_option, 1, &__mf_opts.heur_start_end},
406 {"heur-stdlib",
407 "register standard library data (argv, errno, stdin, ...)",
408 set_option, 1, &__mf_opts.heur_std_data},
409 {"free-queue-length",
410 "queue N deferred free() calls before performing them",
411 read_integer_option, 0, &__mf_opts.free_queue_length},
412 {"persistent-count",
413 "keep a history of N unregistered regions",
414 read_integer_option, 0, &__mf_opts.persistent_count},
415 {"crumple-zone",
416 "surround allocations with crumple zones of N bytes",
417 read_integer_option, 0, &__mf_opts.crumple_zone},
418 /* XXX: not type-safe.
419 {"lc-mask",
420 "set lookup cache size mask to N (2**M - 1)",
421 read_integer_option, 0, (int *)(&__mf_lc_mask)},
422 {"lc-shift",
423 "set lookup cache pointer shift",
424 read_integer_option, 0, (int *)(&__mf_lc_shift)},
425 */
426 {"lc-adapt",
427 "adapt mask/shift parameters after N cache misses",
428 read_integer_option, 1, &__mf_opts.adapt_cache},
429 {"backtrace",
430 "keep an N-level stack trace of each call context",
431 read_integer_option, 0, &__mf_opts.backtrace},
432 #ifdef LIBMUDFLAPTH
433 {"thread-stack",
434 "override thread stacks allocation: N kB",
435 read_integer_option, 0, &__mf_opts.thread_stack},
436 #endif
437 {0, 0, set_option, 0, NULL}
438 };
439
440 static void
441 __mf_usage ()
442 {
443 struct option *opt;
444
445 fprintf (stderr,
446 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
447 "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
448 "\n"
449 "The mudflap code can be controlled by an environment variable:\n"
450 "\n"
451 "$ export MUDFLAP_OPTIONS='<options>'\n"
452 "$ <mudflapped_program>\n"
453 "\n"
454 "where <options> is a space-separated list of \n"
455 "any of the following options. Use `-no-OPTION' to disable options.\n"
456 "\n",
457 #if HAVE_PTHREAD_H
458 (pthread_join ? "multi-threaded " : "single-threaded "),
459 #else
460 "",
461 #endif
462 #if LIBMUDFLAPTH
463 "thread-aware "
464 #else
465 "thread-unaware "
466 #endif
467 );
468 /* XXX: The multi-threaded thread-unaware combination is bad. */
469
470 for (opt = options; opt->name; opt++)
471 {
472 int default_p = (opt->value == * opt->target);
473
474 switch (opt->type)
475 {
476 char buf[128];
477 case set_option:
478 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
479 if (default_p)
480 fprintf (stderr, " [active]\n");
481 else
482 fprintf (stderr, "\n");
483 break;
484 case read_integer_option:
485 strncpy (buf, opt->name, 128);
486 strncpy (buf + strlen (opt->name), "=N", 2);
487 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
488 fprintf (stderr, " [%d]\n", * opt->target);
489 break;
490 default: abort();
491 }
492 }
493
494 fprintf (stderr, "\n");
495 }
496
497
498 int
499 __mf_set_options (const char *optstr)
500 {
501 int rc;
502 LOCKTH ();
503 BEGIN_RECURSION_PROTECT ();
504 rc = __mfu_set_options (optstr);
505 /* XXX: It's not really that easy. A change to a bunch of parameters
506 can require updating auxiliary state or risk crashing:
507 free_queue_length, crumple_zone ... */
508 END_RECURSION_PROTECT ();
509 UNLOCKTH ();
510 return rc;
511 }
512
513
514 int
515 __mfu_set_options (const char *optstr)
516 {
517 struct option *opts = 0;
518 char *nxt = 0;
519 long tmp = 0;
520 int rc = 0;
521 const char *saved_optstr = optstr;
522
523 /* XXX: bounds-check for optstr! */
524
525 while (*optstr)
526 {
527 switch (*optstr) {
528 case ' ':
529 case '\t':
530 case '\n':
531 optstr++;
532 break;
533
534 case '-':
535 if (*(optstr+1))
536 {
537 int negate = 0;
538 optstr++;
539
540 if (*optstr == '?' ||
541 strncmp (optstr, "help", 4) == 0)
542 {
543 /* Caller will print help and exit. */
544 return -1;
545 }
546
547 if (strncmp (optstr, "no-", 3) == 0)
548 {
549 negate = 1;
550 optstr = & optstr[3];
551 }
552
553 for (opts = options; opts->name; opts++)
554 {
555 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
556 {
557 optstr += strlen (opts->name);
558 assert (opts->target);
559 switch (opts->type)
560 {
561 case set_option:
562 if (negate)
563 *(opts->target) = 0;
564 else
565 *(opts->target) = opts->value;
566 break;
567 case read_integer_option:
568 if (! negate && (*optstr == '=' && *(optstr+1)))
569 {
570 optstr++;
571 tmp = strtol (optstr, &nxt, 10);
572 if ((optstr != nxt) && (tmp != LONG_MAX))
573 {
574 optstr = nxt;
575 *(opts->target) = (int)tmp;
576 }
577 }
578 else if (negate)
579 * opts->target = 0;
580 break;
581 }
582 }
583 }
584 }
585 break;
586
587 default:
588 fprintf (stderr,
589 "warning: unrecognized string '%s' in mudflap options\n",
590 optstr);
591 optstr += strlen (optstr);
592 rc = -1;
593 break;
594 }
595 }
596
597 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
598 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
599 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
600
601 /* Clear the lookup cache, in case the parameters got changed. */
602 /* XXX: race */
603 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
604 /* void slot 0 */
605 __mf_lookup_cache[0].low = MAXPTR;
606
607 TRACE ("set options from `%s'\n", saved_optstr);
608
609 /* Call this unconditionally, in case -sigusr1-report was toggled. */
610 __mf_sigusr1_respond ();
611
612 return rc;
613 }
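/* Illustrative only: the option syntax the parser above accepts.  The same
   words go into the MUDFLAP_OPTIONS environment variable; the particular
   values below are made-up examples, not recommended settings. */
#if 0
static void
example_set_options (void)
{
  /* Boolean options are turned on with "-name" and off with "-no-name";
     integer-valued options use "-name=N".  "-help" (or any unrecognized
     word) makes the call return a negative value, which lets the caller
     print __mf_usage and exit. */
  __mfu_set_options ("-mode-check -viol-abort -backtrace=8 -no-abbreviate");
}
#endif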
614
615
616 #ifdef PIC
617
618 void
619 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
620 {
621 char *err;
622
623 assert (e);
624 if (e->pointer) return;
625
626 #if HAVE_DLVSYM
627 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
628 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
629 else
630 #endif
631 e->pointer = dlsym (RTLD_NEXT, e->name);
632
633 err = dlerror ();
634
635 if (err)
636 {
637 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
638 e->name, err);
639 abort ();
640 }
641 if (! e->pointer)
642 {
643 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
644 abort ();
645 }
646 }
647
648
649 static void
650 __mf_resolve_dynamics ()
651 {
652 int i;
653 for (i = 0; i < dyn_INITRESOLVE; i++)
654 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
655 }
656
657
658 /* NB: order must match enums in mf-impl.h */
659 struct __mf_dynamic_entry __mf_dynamic [] =
660 {
661 {NULL, "calloc", NULL},
662 {NULL, "free", NULL},
663 {NULL, "malloc", NULL},
664 {NULL, "mmap", NULL},
665 {NULL, "munmap", NULL},
666 {NULL, "realloc", NULL},
667 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
668 #ifdef LIBMUDFLAPTH
669 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
670 {NULL, "pthread_join", NULL},
671 {NULL, "pthread_exit", NULL}
672 #endif
673 };
674
675 #endif /* PIC */
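/* Illustrative only: the DECLARE/CALL_REAL pattern (macros from mf-impl.h)
   that the rest of this file uses to reach the underlying libc symbols
   resolved by __mf_resolve_dynamics above.  It mirrors existing call sites
   such as the malloc/free calls in __mfu_check below; it adds nothing new. */
#if 0
static void *
example_unwrapped_alloc (size_t n)
{
  DECLARE (void *, malloc, size_t n);
  /* Call the real malloc, bypassing the mudflap wrapper -- important
     inside the runtime itself, where re-entering a wrapper would recurse. */
  return CALL_REAL (malloc, n);
}
#endif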
676
677
678
679 /* ------------------------------------------------------------------------ */
680
681 /* Lookup & manage automatic initialization of the five or so splay trees. */
682 static mfsplay_tree
683 __mf_object_tree (int type)
684 {
685 static mfsplay_tree trees [__MF_TYPE_MAX+1];
686 assert (type >= 0 && type <= __MF_TYPE_MAX);
687 if (UNLIKELY (trees[type] == NULL))
688 trees[type] = mfsplay_tree_new ();
689 return trees[type];
690 }
691
692
693 /* not static */void
694 __mf_init ()
695 {
696 char *ov = 0;
697
698 /* Return if initialization has already been done. */
699 if (LIKELY (__mf_starting_p == 0))
700 return;
701
702 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
703 #ifdef PIC
704 __mf_resolve_dynamics ();
705 #endif
706 __mf_starting_p = 0;
707
708 __mf_set_state (active);
709
710 __mf_set_default_options ();
711
712 ov = getenv ("MUDFLAP_OPTIONS");
713 if (ov)
714 {
715 int rc = __mfu_set_options (ov);
716 if (rc < 0)
717 {
718 __mf_usage ();
719 exit (1);
720 }
721 }
722
723 /* Initialize to a non-zero description epoch. */
724 __mf_describe_object (NULL);
725
726 #define REG_RESERVED(obj) \
727 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
728
729 REG_RESERVED (__mf_lookup_cache);
730 REG_RESERVED (__mf_lc_mask);
731 REG_RESERVED (__mf_lc_shift);
732 /* XXX: others of our statics? */
733
734 /* Prevent access to *NULL. */
735 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
736 __mf_lookup_cache[0].low = (uintptr_t) -1;
737 }
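/* Illustrative only: what one REG_RESERVED line in __mf_init above expands
   to, so the intent is visible without consulting the macro:

     __mf_register (& __mf_lc_mask, sizeof (__mf_lc_mask),
                    __MF_TYPE_NOACCESS, "__mf_lc_mask");

   i.e. the runtime's own globals become no-access objects, so instrumented
   application code that strays into them is reported as a violation. */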
738
739
740
741 int
742 __wrap_main (int argc, char* argv[])
743 {
744 extern char **environ;
745 extern int main ();
746 extern int __real_main ();
747 static int been_here = 0;
748
749 if (__mf_opts.heur_std_data && ! been_here)
750 {
751 unsigned i;
752
753 been_here = 1;
754 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
755 for (i=0; i<argc; i++)
756 {
757 unsigned j = strlen (argv[i]);
758 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
759 }
760
761 for (i=0; ; i++)
762 {
763 char *e = environ[i];
764 unsigned j;
765 if (e == NULL) break;
766 j = strlen (environ[i]);
767 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
768 }
769 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
770
771 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
772
773 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
774 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
775 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
776
777 /* Make some effort to register ctype.h static arrays. */
778 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
779 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
780 with in mf-hooks2.c. */
781 }
782
783 #ifdef PIC
784 return main (argc, argv, environ);
785 #else
786 return __real_main (argc, argv, environ);
787 #endif
788 }
789
790
791
792 extern void __mf_fini () DTOR;
793 void __mf_fini ()
794 {
795 TRACE ("__mf_fini\n");
796 __mfu_report ();
797
798 #ifndef PIC
799 /* Since we didn't populate the tree for allocations in constructors
800 before __mf_init, we cannot check destructors after __mf_fini. */
801 __mf_opts.mudflap_mode = mode_nop;
802 #endif
803 }
804
805
806
807 /* ------------------------------------------------------------------------ */
808 /* __mf_check */
809
810 void __mf_check (void *ptr, size_t sz, int type, const char *location)
811 {
812 LOCKTH ();
813 BEGIN_RECURSION_PROTECT ();
814 __mfu_check (ptr, sz, type, location);
815 END_RECURSION_PROTECT ();
816 UNLOCKTH ();
817 }
818
819
820 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
821 {
822 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
823 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
824 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
825 uintptr_t ptr_low = (uintptr_t) ptr;
826 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
827 struct __mf_cache old_entry = *entry;
828
829 if (UNLIKELY (__mf_opts.sigusr1_report))
830 __mf_sigusr1_respond ();
831 if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
832 return;
833
834 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
835 ptr, entry_idx, (unsigned long)sz,
836 (type == 0 ? "read" : "write"), location);
837
838 switch (__mf_opts.mudflap_mode)
839 {
840 case mode_nop:
841 /* It is tempting to poison the cache here similarly to
842 mode_populate. However that eliminates a valuable
843 distinction between these two modes. mode_nop is useful to
844 let a user count & trace every single check / registration
845 call. mode_populate is useful to let a program run fast
846 while unchecked.
847 */
848 judgement = 1;
849 break;
850
851 case mode_populate:
852 entry->low = ptr_low;
853 entry->high = ptr_high;
854 judgement = 1;
855 break;
856
857 case mode_check:
858 {
859 unsigned heuristics = 0;
860
861 /* Advance aging/adaptation counters. */
862 static unsigned adapt_count;
863 adapt_count ++;
864 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
865 adapt_count > __mf_opts.adapt_cache))
866 {
867 adapt_count = 0;
868 __mf_adapt_cache ();
869 }
870
871 /* Looping only occurs if heuristics were triggered. */
872 while (judgement == 0)
873 {
874 DECLARE (void, free, void *p);
875 __mf_object_t* ovr_obj[1];
876 unsigned obj_count;
877 __mf_object_t** all_ovr_obj = NULL;
878 __mf_object_t** dealloc_me = NULL;
879 unsigned i;
880
881 /* Find all overlapping objects. Be optimistic that there is just one. */
882 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
883 if (UNLIKELY (obj_count > 1))
884 {
885 /* Allocate a real buffer and do the search again. */
886 DECLARE (void *, malloc, size_t c);
887 unsigned n;
888 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
889 obj_count));
890 if (all_ovr_obj == NULL) abort ();
891 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
892 assert (n == obj_count);
893 dealloc_me = all_ovr_obj;
894 }
895 else
896 {
897 all_ovr_obj = ovr_obj;
898 dealloc_me = NULL;
899 }
900
901 /* Update object statistics. */
902 for (i = 0; i < obj_count; i++)
903 {
904 __mf_object_t *obj = all_ovr_obj[i];
905 assert (obj != NULL);
906 if (type == __MF_CHECK_READ)
907 obj->read_count ++;
908 else
909 obj->write_count ++;
910 obj->liveness ++;
911 }
912
913 /* Iterate over the various objects. There are a number of special cases. */
914 for (i = 0; i < obj_count; i++)
915 {
916 __mf_object_t *obj = all_ovr_obj[i];
917
918 /* Any __MF_TYPE_NOACCESS hit is bad. */
919 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
920 judgement = -1;
921
922 /* Any object with a watch flag is bad. */
923 if (UNLIKELY (obj->watching_p))
924 judgement = -2; /* trigger VIOL_WATCH */
925
926 /* A read from an uninitialized object is bad. */
927 if (UNLIKELY (__mf_opts.check_initialization
928 /* reading */
929 && type == __MF_CHECK_READ
930 /* not written */
931 && obj->write_count == 0
932 /* uninitialized (heap) */
933 && obj->type == __MF_TYPE_HEAP))
934 judgement = -1;
935 }
936
937 /* We now know that the access spans no invalid objects. */
938 if (LIKELY (judgement >= 0))
939 for (i = 0; i < obj_count; i++)
940 {
941 __mf_object_t *obj = all_ovr_obj[i];
942
943 /* Is this access entirely contained within this object? */
944 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
945 {
946 /* Valid access. */
947 entry->low = obj->low;
948 entry->high = obj->high;
949 judgement = 1;
950 }
951 }
952
953 /* This access runs off the end of one valid object. That
954 could be okay, if other valid objects fill in all the
955 holes. We allow this only for HEAP and GUESS type
956 objects. Accesses to STATIC and STACK variables
957 should not be allowed to span. */
958 if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
959 {
960 unsigned uncovered = 0;
961 for (i = 0; i < obj_count; i++)
962 {
963 __mf_object_t *obj = all_ovr_obj[i];
964 int j, uncovered_low_p, uncovered_high_p;
965 uintptr_t ptr_lower, ptr_higher;
966
967 uncovered_low_p = ptr_low < obj->low;
968 ptr_lower = CLAMPSUB (obj->low, 1);
969 uncovered_high_p = ptr_high > obj->high;
970 ptr_higher = CLAMPADD (obj->high, 1);
971
972 for (j = 0; j < obj_count; j++)
973 {
974 __mf_object_t *obj2 = all_ovr_obj[j];
975
976 if (i == j) continue;
977
978 /* Filter out objects that cannot be spanned across. */
979 if (obj2->type == __MF_TYPE_STACK
980 || obj2->type == __MF_TYPE_STATIC)
981 continue;
982
983 /* Consider a side "covered" if obj2 includes
984 the next byte on that side. */
985 if (uncovered_low_p
986 && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
987 uncovered_low_p = 0;
988 if (uncovered_high_p
989 && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
990 uncovered_high_p = 0;
991 }
992
993 if (uncovered_low_p || uncovered_high_p)
994 uncovered ++;
995 }
996
997 /* Success if no overlapping objects are uncovered. */
998 if (uncovered == 0)
999 judgement = 1;
1000 }
1001
1002
1003 if (dealloc_me != NULL)
1004 CALL_REAL (free, dealloc_me);
1005
1006 /* If the judgment is still unknown at this stage, loop
1007 around at most one more time. */
1008 if (judgement == 0)
1009 {
1010 if (heuristics++ < 2) /* XXX parametrize this number? */
1011 judgement = __mf_heuristic_check (ptr_low, ptr_high);
1012 else
1013 judgement = -1;
1014 }
1015 }
1016
1017 }
1018 break;
1019
1020 case mode_violate:
1021 judgement = -1;
1022 break;
1023 }
1024
1025 if (__mf_opts.collect_stats)
1026 {
1027 __mf_count_check ++;
1028
1029 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1030 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1031 __mf_lookup_cache_reusecount [entry_idx] ++;
1032 }
1033
1034 if (UNLIKELY (judgement < 0))
1035 __mf_violation (ptr, sz,
1036 (uintptr_t) __builtin_return_address (0), location,
1037 ((judgement == -1) ?
1038 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1039 __MF_VIOL_WATCH));
1040 }
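/* Illustrative only: hand-written calls showing what the mode_check logic
   above accepts and flags.  In real use the compiler emits these calls;
   "buf" and the location strings are invented for the example. */
#if 0
static void
example_check_semantics (void)
{
  char buf[16];
  __mf_register (buf, sizeof (buf), __MF_TYPE_STACK, "example buf");

  /* Entirely inside a registered object: accepted, and the matching
     lookup-cache slot is primed with the object's bounds. */
  __mf_check (buf, sizeof (buf), __MF_CHECK_READ, "example ok");

  /* Runs one byte past the object with nothing registered there: flagged
     as __MF_VIOL_READ unless a heuristic vouches for the extra byte. */
  __mf_check (buf, sizeof (buf) + 1, __MF_CHECK_READ, "example overflow");

  __mf_unregister (buf, sizeof (buf), __MF_TYPE_STACK);
}
#endif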
1041
1042
1043 static __mf_object_t *
1044 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1045 const char *name, uintptr_t pc)
1046 {
1047 DECLARE (void *, calloc, size_t c, size_t n);
1048
1049 __mf_object_t *new_obj;
1050 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1051 new_obj->low = low;
1052 new_obj->high = high;
1053 new_obj->type = type;
1054 new_obj->name = name;
1055 new_obj->alloc_pc = pc;
1056 #if HAVE_GETTIMEOFDAY
1057 if (__mf_opts.timestamps)
1058 gettimeofday (& new_obj->alloc_time, NULL);
1059 #endif
1060 #if LIBMUDFLAPTH
1061 new_obj->alloc_thread = pthread_self ();
1062 #endif
1063
1064 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1065 new_obj->alloc_backtrace_size =
1066 __mf_backtrace (& new_obj->alloc_backtrace,
1067 (void *) pc, 2);
1068
1069 __mf_link_object (new_obj);
1070 return new_obj;
1071 }
1072
1073
1074 static void
1075 __mf_uncache_object (__mf_object_t *old_obj)
1076 {
1077 /* Remove any low/high pointers for this object from the lookup cache. */
1078
1079 /* Can it possibly exist in the cache? */
1080 if (LIKELY (old_obj->read_count + old_obj->write_count))
1081 {
1082 /* As reported by Herman ten Brugge, we need to scan the entire
1083 cache for entries that may hit this object. */
1084 uintptr_t low = old_obj->low;
1085 uintptr_t high = old_obj->high;
1086 struct __mf_cache *entry = & __mf_lookup_cache [0];
1087 unsigned i;
1088 for (i = 0; i <= __mf_lc_mask; i++, entry++)
1089 {
1090 /* NB: the "||" in the following test permits this code to
1091 tolerate the situation introduced by __mf_check over
1092 contiguous objects, where a cache entry spans several
1093 objects. */
1094 if (entry->low == low || entry->high == high)
1095 {
1096 entry->low = MAXPTR;
1097 entry->high = MINPTR;
1098 }
1099 }
1100 }
1101 }
1102
1103
1104 void
1105 __mf_register (void *ptr, size_t sz, int type, const char *name)
1106 {
1107 LOCKTH ();
1108 BEGIN_RECURSION_PROTECT ();
1109 __mfu_register (ptr, sz, type, name);
1110 END_RECURSION_PROTECT ();
1111 UNLOCKTH ();
1112 }
1113
1114
1115 void
1116 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1117 {
1118 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1119 ptr, (unsigned long) sz, type, name ? name : "");
1120
1121 if (__mf_opts.collect_stats)
1122 {
1123 __mf_count_register ++;
1124 __mf_total_register_size [(type < 0) ? 0 :
1125 (type > __MF_TYPE_MAX) ? 0 :
1126 type] += sz;
1127 }
1128
1129 if (UNLIKELY (__mf_opts.sigusr1_report))
1130 __mf_sigusr1_respond ();
1131
1132 switch (__mf_opts.mudflap_mode)
1133 {
1134 case mode_nop:
1135 break;
1136
1137 case mode_violate:
1138 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1139 __MF_VIOL_REGISTER);
1140 break;
1141
1142 case mode_populate:
1143 /* Clear the cache. */
1144 /* XXX: why the entire cache? */
1145 /* XXX: race */
1146 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1147 /* void slot 0 */
1148 __mf_lookup_cache[0].low = MAXPTR;
1149 break;
1150
1151 case mode_check:
1152 {
1153 __mf_object_t *ovr_objs [1];
1154 unsigned num_overlapping_objs;
1155 uintptr_t low = (uintptr_t) ptr;
1156 uintptr_t high = CLAMPSZ (ptr, sz);
1157 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1158
1159 /* Treat unknown size indication as 1. */
1160 if (UNLIKELY (sz == 0)) sz = 1;
1161
1162 /* Look for objects only of the same type. This will e.g. permit a registration
1163 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1164 __mf_check time, however, harmful overlaps will be detected. */
1165 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1166
1167 /* Handle overlaps. */
1168 if (UNLIKELY (num_overlapping_objs > 0))
1169 {
1170 __mf_object_t *ovr_obj = ovr_objs[0];
1171
1172 /* Accept certain specific duplication pairs. */
1173 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1174 && ovr_obj->low == low
1175 && ovr_obj->high == high
1176 && ovr_obj->type == type)
1177 {
1178 /* Duplicate registration for static objects may come
1179 from distinct compilation units. */
1180 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1181 (void *) low, (void *) high,
1182 (ovr_obj->name ? ovr_obj->name : ""));
1183 break;
1184 }
1185
1186 /* Alas, a genuine violation. */
1187 else
1188 {
1189 /* Two or more *real* mappings here. */
1190 __mf_violation ((void *) ptr, sz,
1191 (uintptr_t) __builtin_return_address (0), NULL,
1192 __MF_VIOL_REGISTER);
1193 }
1194 }
1195 else /* No overlapping objects: AOK. */
1196 __mf_insert_new_object (low, high, type, name, pc);
1197
1198 /* We could conceivably call __mf_check() here to prime the cache,
1199 but then the read_count/write_count field is not reliable. */
1200 break;
1201 }
1202 } /* end switch (__mf_opts.mudflap_mode) */
1203 }
1204
1205
1206 void
1207 __mf_unregister (void *ptr, size_t sz, int type)
1208 {
1209 LOCKTH ();
1210 BEGIN_RECURSION_PROTECT ();
1211 __mfu_unregister (ptr, sz, type);
1212 END_RECURSION_PROTECT ();
1213 UNLOCKTH ();
1214 }
1215
1216
1217 void
1218 __mfu_unregister (void *ptr, size_t sz, int type)
1219 {
1220 DECLARE (void, free, void *ptr);
1221
1222 if (UNLIKELY (__mf_opts.sigusr1_report))
1223 __mf_sigusr1_respond ();
1224
1225 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1226
1227 switch (__mf_opts.mudflap_mode)
1228 {
1229 case mode_nop:
1230 break;
1231
1232 case mode_violate:
1233 __mf_violation (ptr, sz,
1234 (uintptr_t) __builtin_return_address (0), NULL,
1235 __MF_VIOL_UNREGISTER);
1236 break;
1237
1238 case mode_populate:
1239 /* Clear the cache. */
1240 /* XXX: race */
1241 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1242 /* void slot 0 */
1243 __mf_lookup_cache[0].low = MAXPTR;
1244 break;
1245
1246 case mode_check:
1247 {
1248 __mf_object_t *old_obj = NULL;
1249 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1250 __mf_object_t *objs[1] = {NULL};
1251 unsigned num_overlapping_objs;
1252
1253 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1254 CLAMPSZ (ptr, sz), objs, 1, type);
1255
1256 /* Special case for HEAP_I - see free & realloc hook. They don't
1257 know whether the input region was HEAP or HEAP_I before
1258 unmapping it. Here we give HEAP a try in case HEAP_I
1259 failed. */
1260 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1261 {
1262 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1263 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1264 }
1265
1266 old_obj = objs[0];
1267 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1268 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1269 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1270 {
1271 __mf_violation (ptr, sz,
1272 (uintptr_t) __builtin_return_address (0), NULL,
1273 __MF_VIOL_UNREGISTER);
1274 break;
1275 }
1276
1277 __mf_unlink_object (old_obj);
1278 __mf_uncache_object (old_obj);
1279
1280 /* Wipe buffer contents if desired. */
1281 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1282 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1283 || old_obj->type == __MF_TYPE_HEAP_I)))
1284 {
1285 memset ((void *) old_obj->low,
1286 0,
1287 (size_t) (old_obj->high - old_obj->low + 1));
1288 }
1289
1290 /* Manage the object cemetary. */
1291 if (__mf_opts.persistent_count > 0
1292 && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
1293 {
1294 old_obj->deallocated_p = 1;
1295 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1296 #if HAVE_GETTIMEOFDAY
1297 if (__mf_opts.timestamps)
1298 gettimeofday (& old_obj->dealloc_time, NULL);
1299 #endif
1300 #ifdef LIBMUDFLAPTH
1301 old_obj->dealloc_thread = pthread_self ();
1302 #endif
1303
1304 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1305 old_obj->dealloc_backtrace_size =
1306 __mf_backtrace (& old_obj->dealloc_backtrace,
1307 NULL, 2);
1308
1309 /* Encourage this object to be displayed again in current epoch. */
1310 old_obj->description_epoch --;
1311
1312 /* Put this object into the cemetary. This may require this plot to
1313 be recycled, and the previous resident to be designated del_obj. */
1314 {
1315 unsigned row = old_obj->type;
1316 unsigned plot = __mf_object_dead_head [row];
1317
1318 del_obj = __mf_object_cemetary [row][plot];
1319 __mf_object_cemetary [row][plot] = old_obj;
1320 plot ++;
1321 if (plot == __mf_opts.persistent_count) plot = 0;
1322 __mf_object_dead_head [row] = plot;
1323 }
1324 }
1325 else
1326 del_obj = old_obj;
1327
1328 if (__mf_opts.print_leaks)
1329 {
1330 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1331 (old_obj->type == __MF_TYPE_HEAP
1332 || old_obj->type == __MF_TYPE_HEAP_I))
1333 {
1334 /* The problem with a warning message here is that we may not
1335 be privy to accesses to such objects that occur within
1336 uninstrumented libraries. */
1337 #if 0
1338 fprintf (stderr,
1339 "*******\n"
1340 "mudflap warning: unaccessed registered object:\n");
1341 __mf_describe_object (old_obj);
1342 #endif
1343 }
1344 }
1345
1346 if (del_obj != NULL) /* May or may not equal old_obj. */
1347 {
1348 if (__mf_opts.backtrace > 0)
1349 {
1350 CALL_REAL(free, del_obj->alloc_backtrace);
1351 if (__mf_opts.persistent_count > 0)
1352 {
1353 CALL_REAL(free, del_obj->dealloc_backtrace);
1354 }
1355 }
1356 CALL_REAL(free, del_obj);
1357 }
1358
1359 break;
1360 }
1361 } /* end switch (__mf_opts.mudflap_mode) */
1362
1363
1364 if (__mf_opts.collect_stats)
1365 {
1366 __mf_count_unregister ++;
1367 __mf_total_unregister_size += sz;
1368 }
1369 }
1370
1371
1372
1373 struct tree_stats
1374 {
1375 unsigned obj_count;
1376 unsigned long total_size;
1377 unsigned live_obj_count;
1378 double total_weight;
1379 double weighted_size;
1380 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1381 };
1382
1383
1384
1385 static int
1386 __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1387 {
1388 __mf_object_t *obj = (__mf_object_t *) n->value;
1389 struct tree_stats *s = (struct tree_stats *) param;
1390
1391 assert (obj != NULL && s != NULL);
1392
1393 /* Exclude never-accessed objects. */
1394 if (obj->read_count + obj->write_count)
1395 {
1396 s->obj_count ++;
1397 s->total_size += (obj->high - obj->low + 1);
1398
1399 if (obj->liveness)
1400 {
1401 unsigned i;
1402 uintptr_t addr;
1403
1404 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1405 (void *) obj->low, obj->liveness, obj->name); */
1406
1407 s->live_obj_count ++;
1408 s->total_weight += (double) obj->liveness;
1409 s->weighted_size +=
1410 (double) (obj->high - obj->low + 1) *
1411 (double) obj->liveness;
1412
1413 addr = obj->low;
1414 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1415 {
1416 unsigned bit = addr & 1;
1417 s->weighted_address_bits[i][bit] += obj->liveness;
1418 addr = addr >> 1;
1419 }
1420
1421 /* Age the liveness value. */
1422 obj->liveness >>= 1;
1423 }
1424 }
1425
1426 return 0;
1427 }
1428
1429
1430 static void
1431 __mf_adapt_cache ()
1432 {
1433 struct tree_stats s;
1434 uintptr_t new_mask = 0;
1435 unsigned char new_shift;
1436 float cache_utilization;
1437 float max_value;
1438 static float smoothed_new_shift = -1.0;
1439 unsigned i;
1440
1441 memset (&s, 0, sizeof (s));
1442
1443 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1444 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1445 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1446 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1447 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1448
1449 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1450 empty tree. Just leave the cache alone in such cases, rather
1451 than risk dying by division-by-zero. */
1452 if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
1453 return;
1454
1455 /* Guess a good value for the shift parameter by finding an address bit that is a
1456 good discriminant of lively objects. */
1457 max_value = 0.0;
1458 for (i=0; i<sizeof (uintptr_t)*8; i++)
1459 {
1460 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1461 if (max_value < value) max_value = value;
1462 }
1463 for (i=0; i<sizeof (uintptr_t)*8; i++)
1464 {
1465 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1466 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1467 if (value >= max_value * shoulder_factor)
1468 break;
1469 }
1470 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1471 /* Converge toward this slowly to reduce flapping. */
1472 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1473 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1474 assert (new_shift < sizeof (uintptr_t)*8);
1475
1476 /* Count number of used buckets. */
1477 cache_utilization = 0.0;
1478 for (i = 0; i < (1 + __mf_lc_mask); i++)
1479 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1480 cache_utilization += 1.0;
1481 cache_utilization /= (1 + __mf_lc_mask);
1482
1483 new_mask |= 0xffff; /* XXX: force a large cache. */
1484 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1485
1486 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1487 "util=%u%% m=%p s=%u\n",
1488 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1489 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1490
1491 /* We should reinitialize cache if its parameters have changed. */
1492 if (new_mask != __mf_lc_mask ||
1493 new_shift != __mf_lc_shift)
1494 {
1495 __mf_lc_mask = new_mask;
1496 __mf_lc_shift = new_shift;
1497 /* XXX: race */
1498 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1499 /* void slot 0 */
1500 __mf_lookup_cache[0].low = MAXPTR;
1501 }
1502 }
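/* Illustrative only: a worked instance of the smoothing step above, with
   made-up numbers.  Starting from the default shift of 2, one adaptation
   round that picks address bit i = 6 gives 0.9*2.0 + 0.1*6.0 = 2.4, which
   still rounds (2.4 + 0.5, truncated) to a shift of 2; a second such round
   gives 0.9*2.4 + 0.6 = 2.76, which rounds to 3.  The 0.9/0.1 weighting is
   what damps the "flapping" mentioned in the comment. */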
1503
1504
1505
1506 /* __mf_find_object[s] */
1507
1508 /* Find overlapping live objects between [low,high]. Return up to
1509 max_objs of their pointers in objs[]. Return total count of
1510 overlaps (may exceed max_objs). */
1511
1512 unsigned
1513 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1514 __mf_object_t **objs, unsigned max_objs, int type)
1515 {
1516 unsigned count = 0;
1517 mfsplay_tree t = __mf_object_tree (type);
1518 mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1519 int direction;
1520
1521 mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1522 /* An exact match for base address implies a hit. */
1523 if (n != NULL)
1524 {
1525 if (count < max_objs)
1526 objs[count] = (__mf_object_t *) n->value;
1527 count ++;
1528 }
1529
1530 /* Iterate left then right near this key value to find all overlapping objects. */
1531 for (direction = 0; direction < 2; direction ++)
1532 {
1533 /* Reset search origin. */
1534 k = (mfsplay_tree_key) ptr_low;
1535
1536 while (1)
1537 {
1538 __mf_object_t *obj;
1539
1540 n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1541 if (n == NULL) break;
1542 obj = (__mf_object_t *) n->value;
1543
1544 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1545 break;
1546
1547 if (count < max_objs)
1548 objs[count] = (__mf_object_t *) n->value;
1549 count ++;
1550
1551 k = (mfsplay_tree_key) obj->low;
1552 }
1553 }
1554
1555 return count;
1556 }
1557
1558
1559 unsigned
1560 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1561 __mf_object_t **objs, unsigned max_objs)
1562 {
1563 int type;
1564 unsigned count = 0;
1565
1566 /* Search each splay tree for overlaps. */
1567 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1568 {
1569 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1570 if (c > max_objs)
1571 {
1572 max_objs = 0;
1573 objs = NULL;
1574 }
1575 else /* NB: C may equal 0 */
1576 {
1577 max_objs -= c;
1578 objs += c;
1579 }
1580 count += c;
1581 }
1582
1583 return count;
1584 }
1585
1586
1587
1588 /* __mf_link_object */
1589
1590 static void
1591 __mf_link_object (__mf_object_t *node)
1592 {
1593 mfsplay_tree t = __mf_object_tree (node->type);
1594 mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1595 }
1596
1597 /* __mf_unlink_object */
1598
1599 static void
1600 __mf_unlink_object (__mf_object_t *node)
1601 {
1602 mfsplay_tree t = __mf_object_tree (node->type);
1603 mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1604 }
1605
1606 /* __mf_find_dead_objects */
1607
1608 /* Find overlapping dead objects between [low,high]. Return up to
1609 max_objs of their pointers in objs[]. Return total count of
1610 overlaps (may exceed max_objs). */
1611
1612 static unsigned
1613 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1614 __mf_object_t **objs, unsigned max_objs)
1615 {
1616 if (__mf_opts.persistent_count > 0)
1617 {
1618 unsigned count = 0;
1619 unsigned recollection = 0;
1620 unsigned row = 0;
1621
1622 assert (low <= high);
1623 assert (max_objs == 0 || objs != NULL);
1624
1625 /* Widen the search from the most recent plots in each row, looking
1626 backward in time. */
1627 recollection = 0;
1628 while (recollection < __mf_opts.persistent_count)
1629 {
1630 count = 0;
1631
1632 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1633 {
1634 unsigned plot;
1635 unsigned i;
1636
1637 plot = __mf_object_dead_head [row];
1638 for (i = 0; i <= recollection; i ++)
1639 {
1640 __mf_object_t *obj;
1641
1642 /* Look backward through row: it's a circular buffer. */
1643 if (plot > 0) plot --;
1644 else plot = __mf_opts.persistent_count - 1;
1645
1646 obj = __mf_object_cemetary [row][plot];
1647 if (obj && obj->low <= high && obj->high >= low)
1648 {
1649 /* Found an overlapping dead object! */
1650 if (count < max_objs)
1651 objs [count] = obj;
1652 count ++;
1653 }
1654 }
1655 }
1656
1657 if (count)
1658 break;
1659
1660 /* Look farther back in time. */
1661 recollection = (recollection * 2) + 1;
1662 }
1663
1664 return count;
1665 } else {
1666 return 0;
1667 }
1668 }
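/* Illustrative only: the widening search above grows recollection as
   0, 1, 3, 7, 15, ..., so each pass examines the most recent 1, 2, 4, 8,
   16, ... plots per cemetery row -- i.e. it looks exponentially farther
   back in time until an overlapping dead object turns up or the whole
   history (persistent_count plots) has been scanned. */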
1669
1670 /* __mf_describe_object */
1671
1672 static void
1673 __mf_describe_object (__mf_object_t *obj)
1674 {
1675 static unsigned epoch = 0;
1676 if (obj == NULL)
1677 {
1678 epoch ++;
1679 return;
1680 }
1681
1682 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1683 {
1684 fprintf (stderr,
1685 "mudflap %sobject %p: name=`%s'\n",
1686 (obj->deallocated_p ? "dead " : ""),
1687 (void *) obj, (obj->name ? obj->name : ""));
1688 return;
1689 }
1690 else
1691 obj->description_epoch = epoch;
1692
1693 fprintf (stderr,
1694 "mudflap %sobject %p: name=`%s'\n"
1695 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1696 "alloc time=%lu.%06lu pc=%p"
1697 #ifdef LIBMUDFLAPTH
1698 " thread=%u"
1699 #endif
1700 "\n",
1701 (obj->deallocated_p ? "dead " : ""),
1702 (void *) obj, (obj->name ? obj->name : ""),
1703 (void *) obj->low, (void *) obj->high,
1704 (unsigned long) (obj->high - obj->low + 1),
1705 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1706 obj->type == __MF_TYPE_HEAP ? "heap" :
1707 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1708 obj->type == __MF_TYPE_STACK ? "stack" :
1709 obj->type == __MF_TYPE_STATIC ? "static" :
1710 obj->type == __MF_TYPE_GUESS ? "guess" :
1711 "unknown"),
1712 obj->read_count, obj->write_count, obj->liveness,
1713 obj->watching_p ? " watching" : "",
1714 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1715 (void *) obj->alloc_pc
1716 #ifdef LIBMUDFLAPTH
1717 , (unsigned) obj->alloc_thread
1718 #endif
1719 );
1720
1721 if (__mf_opts.backtrace > 0)
1722 {
1723 unsigned i;
1724 for (i=0; i<obj->alloc_backtrace_size; i++)
1725 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1726 }
1727
1728 if (__mf_opts.persistent_count > 0)
1729 {
1730 if (obj->deallocated_p)
1731 {
1732 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1733 #ifdef LIBMUDFLAPTH
1734 " thread=%u"
1735 #endif
1736 "\n",
1737 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1738 (void *) obj->dealloc_pc
1739 #ifdef LIBMUDFLAPTH
1740 , (unsigned) obj->dealloc_thread
1741 #endif
1742 );
1743
1744
1745 if (__mf_opts.backtrace > 0)
1746 {
1747 unsigned i;
1748 for (i=0; i<obj->dealloc_backtrace_size; i++)
1749 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1750 }
1751 }
1752 }
1753 }
1754
1755
1756 static int
1757 __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1758 {
1759 __mf_object_t *node = (__mf_object_t *) n->value;
1760 unsigned *count = (unsigned *) param;
1761
1762 if (count != NULL)
1763 (*count) ++;
1764
1765 fprintf (stderr, "Leaked object %u:\n", (*count));
1766 __mf_describe_object (node);
1767
1768 return 0;
1769 }
1770
1771
1772 static unsigned
1773 __mf_report_leaks ()
1774 {
1775 unsigned count = 0;
1776
1777 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1778 __mf_report_leaks_fn, & count);
1779 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1780 __mf_report_leaks_fn, & count);
1781
1782 return count;
1783 }
1784
1785 /* ------------------------------------------------------------------------ */
1786 /* __mf_report */
1787
1788 void
1789 __mf_report ()
1790 {
1791 LOCKTH ();
1792 BEGIN_RECURSION_PROTECT ();
1793 __mfu_report ();
1794 END_RECURSION_PROTECT ();
1795 UNLOCKTH ();
1796 }
1797
1798 void
1799 __mfu_report ()
1800 {
1801 if (__mf_opts.collect_stats)
1802 {
1803 fprintf (stderr,
1804 "*******\n"
1805 "mudflap stats:\n"
1806 "calls to __mf_check: %lu\n"
1807 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1808 " __mf_unregister: %lu [%luB]\n"
1809 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1810 __mf_count_check,
1811 __mf_count_register,
1812 __mf_total_register_size[0], __mf_total_register_size[1],
1813 __mf_total_register_size[2], __mf_total_register_size[3],
1814 __mf_total_register_size[4], /* XXX */
1815 __mf_count_unregister, __mf_total_unregister_size,
1816 __mf_count_violation[0], __mf_count_violation[1],
1817 __mf_count_violation[2], __mf_count_violation[3],
1818 __mf_count_violation[4]);
1819
1820 fprintf (stderr,
1821 "calls with reentrancy: %lu\n", __mf_reentrancy);
1822 #ifdef LIBMUDFLAPTH
1823 fprintf (stderr,
1824 " lock contention: %lu\n", __mf_lock_contention);
1825 #endif
1826
1827 /* Lookup cache stats. */
1828 {
1829 unsigned i;
1830 unsigned max_reuse = 0;
1831 unsigned num_used = 0;
1832 unsigned num_unused = 0;
1833
1834 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1835 {
1836 if (__mf_lookup_cache_reusecount[i])
1837 num_used ++;
1838 else
1839 num_unused ++;
1840 if (max_reuse < __mf_lookup_cache_reusecount[i])
1841 max_reuse = __mf_lookup_cache_reusecount[i];
1842 }
1843 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1844 num_used, num_unused, max_reuse);
1845 }
1846
1847 {
1848 unsigned live_count;
1849 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1850 fprintf (stderr, "number of live objects: %u\n", live_count);
1851 }
1852
1853 if (__mf_opts.persistent_count > 0)
1854 {
1855 unsigned dead_count = 0;
1856 unsigned row, plot;
1857 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1858 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1859 if (__mf_object_cemetary [row][plot] != 0)
1860 dead_count ++;
1861 fprintf (stderr, " zombie objects: %u\n", dead_count);
1862 }
1863 }
1864 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1865 {
1866 unsigned l;
1867 extern void * __mf_wrap_alloca_indirect (size_t c);
1868
1869 /* Free up any remaining alloca()'d blocks. */
1870 __mf_wrap_alloca_indirect (0);
1871 #ifdef HAVE___LIBC_FREERES
1872 if (__mf_opts.call_libc_freeres)
1873 {
1874 extern void __libc_freeres (void);
1875 __libc_freeres ();
1876 }
1877 #endif
1878
1879 __mf_describe_object (NULL); /* Reset description epoch. */
1880 l = __mf_report_leaks ();
1881 fprintf (stderr, "number of leaked objects: %u\n", l);
1882 }
1883 }
1884
1885 /* __mf_backtrace */
1886
1887 size_t
1888 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1889 {
1890 void ** pc_array;
1891 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1892 unsigned remaining_size;
1893 unsigned omitted_size = 0;
1894 unsigned i;
1895 DECLARE (void, free, void *ptr);
1896 DECLARE (void *, calloc, size_t c, size_t n);
1897 DECLARE (void *, malloc, size_t n);
1898
1899 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1900 #ifdef HAVE_BACKTRACE
1901 pc_array_size = backtrace (pc_array, pc_array_size);
1902 #else
1903 #define FETCH(n) do { if (pc_array_size >= n) { \
1904 pc_array[n] = __builtin_return_address(n); \
1905 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1906
1907 /* Unroll some calls to __builtin_return_address because that builtin
1908 only takes a literal integer parameter. */
1909 FETCH (0);
1910 #if 0
1911 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1912 rather than simply returning 0. :-( */
1913 FETCH (1);
1914 FETCH (2);
1915 FETCH (3);
1916 FETCH (4);
1917 FETCH (5);
1918 FETCH (6);
1919 FETCH (7);
1920 FETCH (8);
1921 if (pc_array_size > 8) pc_array_size = 9;
1922 #else
1923 if (pc_array_size > 0) pc_array_size = 1;
1924 #endif
1925
1926 #undef FETCH
1927 #endif
1928
1929 /* We want to trim the first few levels of the stack traceback,
1930 since they contain libmudflap wrappers and junk. If pc_array[]
1931 ends up containing a non-NULL guess_pc, then trim everything
1932 before that. Otherwise, omit the first guess_omit_levels
1933 entries. */
1934
1935 if (guess_pc != NULL)
1936 for (i=0; i<pc_array_size; i++)
1937 if (pc_array [i] == guess_pc)
1938 omitted_size = i;
1939
1940 if (omitted_size == 0) /* No match? */
1941 if (pc_array_size > guess_omit_levels)
1942 omitted_size = guess_omit_levels;
1943
1944 remaining_size = pc_array_size - omitted_size;
1945
1946 #ifdef HAVE_BACKTRACE_SYMBOLS
1947 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1948 #else
1949 {
1950 /* Let's construct a buffer by hand. It will have <remaining_size>
1951 char*'s at the front, pointing at individual strings immediately
1952 afterwards. */
1953 void *buffer;
1954 char *chars;
1955 char **pointers;
1956 enum { perline = 30 };
1957 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1958 pointers = (char **) buffer;
1959 chars = (char *)buffer + (remaining_size * sizeof (char *));
1960 for (i = 0; i < remaining_size; i++)
1961 {
1962 pointers[i] = chars;
1963 sprintf (chars, "[%p]", pc_array [omitted_size + i]);
1964 chars = chars + perline;
1965 }
1966 *symbols = pointers;
1967 }
1968 #endif
1969 CALL_REAL (free, pc_array);
1970
1971 return remaining_size;
1972 }
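/* The following is an illustrative usage sketch for __mf_backtrace, not
   part of the runtime proper; the helper name is hypothetical.  Note that
   both the backtrace_symbols path and the hand-built fallback above hand
   back a single heap block (pointer array plus strings), so one free() of
   the returned array releases everything.  */
#if 0
static void
example_print_backtrace (void)
{
  char **symbols;
  size_t i, n;

  /* Ask for a trace anchored at our caller; omit two wrapper levels.  */
  n = __mf_backtrace (&symbols, __builtin_return_address (0), 2);
  for (i = 0; i < n; i++)
    fprintf (stderr, "frame %u: %s\n", (unsigned) i, symbols[i]);
  free (symbols);   /* Both code paths yield one contiguous allocation.  */
}
#endif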
1973
1974 /* ------------------------------------------------------------------------ */
1975 /* __mf_violation */
1976
1977 void
1978 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1979 const char *location, int type)
1980 {
1981 char buf [128];
1982 static unsigned violation_number;
1983 DECLARE(void, free, void *ptr);
1984
1985 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1986 (void *) pc,
1987 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
1988
1989 if (__mf_opts.collect_stats)
1990 __mf_count_violation [(type < 0) ? 0 :
1991 (type > __MF_VIOL_WATCH) ? 0 :
1992 type] ++;
1993
1994 /* Print out a basic warning message. */
1995 if (__mf_opts.verbose_violations)
1996 {
1997 unsigned dead_p;
1998 unsigned num_helpful = 0;
1999 struct timeval now = { 0, 0 };
2000 #if HAVE_GETTIMEOFDAY
2001 gettimeofday (& now, NULL);
2002 #endif
2003
2004 violation_number ++;
2005 fprintf (stderr,
2006 "*******\n"
2007 "mudflap violation %u (%s): time=%lu.%06lu "
2008 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2009 violation_number,
2010 ((type == __MF_VIOL_READ) ? "check/read" :
2011 (type == __MF_VIOL_WRITE) ? "check/write" :
2012 (type == __MF_VIOL_REGISTER) ? "register" :
2013 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2014 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2015 now.tv_sec, now.tv_usec,
2016 (void *) ptr, (unsigned long)sz, (void *) pc,
2017 (location != NULL ? " location=`" : ""),
2018 (location != NULL ? location : ""),
2019 (location != NULL ? "'" : ""));
2020
2021 if (__mf_opts.backtrace > 0)
2022 {
2023 char ** symbols;
2024 unsigned i, num;
2025
2026 num = __mf_backtrace (& symbols, (void *) pc, 2);
2027 /* Note: backtrace_symbols calls malloc(). But since we're in
2028 __mf_violation and presumably __mf_check, it'll detect
2029 recursion, and not put the new string into the database. */
2030
2031 for (i=0; i<num; i++)
2032 fprintf (stderr, " %s\n", symbols[i]);
2033
2034 /* Calling free() here would trigger a violation. */
2035 CALL_REAL(free, symbols);
2036 }
2037
2038
2039 /* Look for nearby objects. We start with s_low/s_high pointing to
2040 the given area and look for overlapping objects; if none show up,
2041 we widen the search area and keep looking (see the sketch after this function). */
2042
2043 if (sz == 0) sz = 1;
2044
2045 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2046 {
2047 enum {max_objs = 3}; /* magic */
2048 __mf_object_t *objs[max_objs];
2049 unsigned num_objs = 0;
2050 uintptr_t s_low, s_high;
2051 unsigned tries = 0;
2052 unsigned i;
2053
2054 s_low = (uintptr_t) ptr;
2055 s_high = CLAMPSZ (ptr, sz);
2056
2057 while (tries < 16) /* magic */
2058 {
2059 if (dead_p)
2060 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2061 else
2062 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2063
2064 if (num_objs) /* good enough */
2065 break;
2066
2067 tries ++;
2068
2069 /* XXX: tune this search strategy. It's too dependent on
2070 sz, which can vary from 1 to very big (when array index
2071 checking) numbers. */
2072 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2073 s_high = CLAMPADD (s_high, (sz * tries * tries));
2074 }
2075
2076 for (i = 0; i < min (num_objs, max_objs); i++)
2077 {
2078 __mf_object_t *obj = objs[i];
2079 uintptr_t low = (uintptr_t) ptr;
2080 uintptr_t high = CLAMPSZ (ptr, sz);
2081 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2082 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2083 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2084 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2085 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2086 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2087
2088 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2089 num_helpful + i + 1,
2090 (before1 ? before1 : after1 ? after1 : into1),
2091 (before1 ? "before" : after1 ? "after" : "into"),
2092 (before2 ? before2 : after2 ? after2 : into2),
2093 (before2 ? "before" : after2 ? "after" : "into"));
2094 __mf_describe_object (obj);
2095 }
2096 num_helpful += num_objs;
2097 }
2098
2099 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2100 }
2101
2102 /* How to finally handle this violation? */
2103 switch (__mf_opts.violation_mode)
2104 {
2105 case viol_nop:
2106 break;
2107 case viol_segv:
2108 kill (getpid(), SIGSEGV);
2109 break;
2110 case viol_abort:
2111 abort ();
2112 break;
2113 case viol_gdb:
2114
2115 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2116 system (buf);
2117 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2118 instead, and let the forked child execlp() gdb. That way, this
2119 subject process can be resumed under the supervision of gdb.
2120 This can't happen now, since system() only returns when gdb
2121 dies. In that case, we need to beware of starting a second
2122 concurrent gdb child upon the next violation. (But if the first
2123 gdb dies, then starting a new one is appropriate.) */
2124 break;
2125 }
2126 }
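/* A minimal standalone sketch of the "nearby object" search used in
   __mf_violation above, with a hypothetical find_in_range callback standing
   in for __mf_find_objects / __mf_find_dead_objects and without the CLAMP*
   overflow clamping.  The window around the accessed region grows
   quadratically with the retry count until something overlaps.  */
#if 0
static unsigned
example_widening_search (uintptr_t low, uintptr_t high, size_t sz,
                         unsigned (*find_in_range) (uintptr_t, uintptr_t))
{
  unsigned tries;
  for (tries = 1; tries <= 16; tries++)
    {
      unsigned hits = find_in_range (low, high);
      if (hits)
        return hits;               /* Good enough; report these objects.  */
      low -= sz * tries * tries;   /* The real code clamps at MINPTR ...  */
      high += sz * tries * tries;  /* ... and at MAXPTR.  */
    }
  return 0;
}
#endif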
2127
2128 /* ------------------------------------------------------------------------ */
2129
2130
2131 unsigned __mf_watch (void *ptr, size_t sz)
2132 {
2133 unsigned rc;
2134 LOCKTH ();
2135 BEGIN_RECURSION_PROTECT ();
2136 rc = __mf_watch_or_not (ptr, sz, 1);
2137 END_RECURSION_PROTECT ();
2138 UNLOCKTH ();
2139 return rc;
2140 }
2141
2142 unsigned __mf_unwatch (void *ptr, size_t sz)
2143 {
2144 unsigned rc;
2145 LOCKTH ();
2146 rc = __mf_watch_or_not (ptr, sz, 0);
2147 UNLOCKTH ();
2148 return rc;
2149 }
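/* Illustrative usage of the public watch interface above, assuming code
   compiled with -fmudflap and linked against libmudflap; the function name
   is hypothetical.  Once a registered object is watched, the next checked
   access to it is reported as a __MF_VIOL_WATCH violation instead of
   passing silently.  */
#if 0
static void
example_watchpoint (void)
{
  static int counter;
  __mf_watch (&counter, sizeof counter);    /* Arm the watchpoint.  */
  counter++;                                /* Reported as a "watch" violation.  */
  __mf_unwatch (&counter, sizeof counter);  /* Disarm it again.  */
}
#endif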
2150
2151
2152 static unsigned
2153 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2154 {
2155 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2156 uintptr_t ptr_low = (uintptr_t) ptr;
2157 unsigned count = 0;
2158
2159 TRACE ("%s ptr=%p size=%lu\n",
2160 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2161
2162 switch (__mf_opts.mudflap_mode)
2163 {
2164 case mode_nop:
2165 case mode_populate:
2166 case mode_violate:
2167 count = 0;
2168 break;
2169
2170 case mode_check:
2171 {
2172 __mf_object_t **all_ovr_objs;
2173 unsigned obj_count;
2174 unsigned n;
2175 DECLARE (void *, malloc, size_t c);
2176 DECLARE (void, free, void *p);
2177
2178 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2179 VERBOSE_TRACE (" %u:", obj_count);
2180
2181 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2182 if (all_ovr_objs == NULL) abort ();
2183 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2184 assert (n == obj_count);
2185
2186 for (n = 0; n < obj_count; n ++)
2187 {
2188 __mf_object_t *obj = all_ovr_objs[n];
2189
2190 VERBOSE_TRACE (" [%p]", (void *) obj);
2191 if (obj->watching_p != flag)
2192 {
2193 obj->watching_p = flag;
2194 count ++;
2195
2196 /* Remove object from cache, to ensure next access
2197 goes through __mf_check(). */
2198 if (flag)
2199 __mf_uncache_object (obj);
2200 }
2201 }
2202 CALL_REAL (free, all_ovr_objs);
2203 }
2204 break;
2205 }
2206
2207 return count;
2208 }
2209
2210
2211 void
2212 __mf_sigusr1_handler (int num)
2213 {
2214 __mf_sigusr1_received ++;
2215 }
2216
2217 /* Install or remove the SIGUSR1 handler as necessary.
2218 Also respond to any pending SIGUSR1 that has been received. */
2219 void
2220 __mf_sigusr1_respond ()
2221 {
2222 static int handler_installed;
2223
2224 #ifdef SIGUSR1
2225 /* Manage handler */
2226 if (__mf_opts.sigusr1_report && ! handler_installed)
2227 {
2228 signal (SIGUSR1, __mf_sigusr1_handler);
2229 handler_installed = 1;
2230 }
2231 else if(! __mf_opts.sigusr1_report && handler_installed)
2232 {
2233 signal (SIGUSR1, SIG_DFL);
2234 handler_installed = 0;
2235 }
2236 #endif
2237
2238 /* Manage enqueued signals */
2239 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2240 {
2241 __mf_sigusr1_handled ++;
2242 assert (__mf_get_state () == reentrant);
2243 __mfu_report ();
2244 handler_installed = 0; /* We may need to re-install the handler; SysV-style signals reset it after delivery. */
2245 }
2246 }
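/* Sketch of how an external test harness might exercise the SIGUSR1 path
   above, assuming the instrumented process was started with MUDFLAP_OPTIONS
   including the sigusr1-report option so that the handler gets installed;
   the function name is hypothetical.  The handler itself only counts the
   signal; the actual report is emitted the next time the runtime passes
   through __mf_sigusr1_respond.  */
#if 0
#include <signal.h>
#include <sys/types.h>

static void
example_request_report (pid_t instrumented_pid)
{
  kill (instrumented_pid, SIGUSR1);   /* __mfu_report runs at the next hook.  */
}
#endif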
2247
2248
2249 /* XXX: provide an alternative __assert_fail function that cannot
2250 fail due to libmudflap infinite recursion. */
2251 #ifndef NDEBUG
2252
2253 static void
2254 write_itoa (int fd, unsigned n)
2255 {
2256 enum x { bufsize = sizeof(n)*4 };
2257 char buf [bufsize];
2258 unsigned i;
2259
2260 for (i=0; i<bufsize-1; i++)
2261 {
2262 unsigned digit = n % 10;
2263 buf[bufsize-2-i] = digit + '0';
2264 n /= 10;
2265 if (n == 0)
2266 {
2267 char *m = & buf [bufsize-2-i];
2268 buf[bufsize-1] = '\0';
2269 write (fd, m, strlen(m));
2270 break;
2271 }
2272 }
2273 }
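/* Worked example for write_itoa above: the buffer is filled from its tail,
   so the most significant digit ends up first without a reversal pass.
   For n == 407 the loop stores '7', '0', '4' at decreasing indices, the
   terminating NUL goes at the very end of the buffer, and the three
   characters "407" are written to FD.  A call such as write_itoa (2, 407)
   therefore prints "407" on stderr using only async-signal-safe write().  */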
2274
2275
2276 void
2277 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2278 {
2279 #define write2(string) write (2, (string), strlen ((string)));
2280 write2("mf");
2281 #ifdef LIBMUDFLAPTH
2282 write2("(");
2283 write_itoa (2, (unsigned) pthread_self ());
2284 write2(")");
2285 #endif
2286 write2(": assertion failure: `");
2287 write (2, msg, strlen (msg));
2288 write2("' in ");
2289 write (2, func, strlen (func));
2290 write2(" at ");
2291 write (2, file, strlen (file));
2292 write2(":");
2293 write_itoa (2, line);
2294 write2("\n");
2295 #undef write2
2296 abort ();
2297 }
2298
2299
2300 #endif
2301
2302
2303
2304 /* Adapted splay tree code, originally from libiberty. It has been
2305 specialized for libmudflap as requested by RMS. */
2306
2307 static void
2308 mfsplay_tree_free (void *p)
2309 {
2310 DECLARE (void, free, void *p);
2311 CALL_REAL (free, p);
2312 }
2313
2314 static void *
2315 mfsplay_tree_xmalloc (size_t s)
2316 {
2317 DECLARE (void *, malloc, size_t s);
2318 return CALL_REAL (malloc, s);
2319 }
2320
2321
2322 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2323 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2324 mfsplay_tree_key,
2325 mfsplay_tree_node *,
2326 mfsplay_tree_node *,
2327 mfsplay_tree_node *);
2328
2329
2330 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2331 and grandparent, respectively, of NODE. */
2332
2333 static mfsplay_tree_node
2334 mfsplay_tree_splay_helper (mfsplay_tree sp,
2335 mfsplay_tree_key key,
2336 mfsplay_tree_node * node,
2337 mfsplay_tree_node * parent,
2338 mfsplay_tree_node * grandparent)
2339 {
2340 mfsplay_tree_node *next;
2341 mfsplay_tree_node n;
2342 int comparison;
2343
2344 n = *node;
2345
2346 if (!n)
2347 return *parent;
2348
2349 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2350
2351 if (comparison == 0)
2352 /* We've found the target. */
2353 next = 0;
2354 else if (comparison < 0)
2355 /* The target is to the left. */
2356 next = &n->left;
2357 else
2358 /* The target is to the right. */
2359 next = &n->right;
2360
2361 if (next)
2362 {
2363 /* Check whether our recursion depth is too high. Abort this search,
2364 and signal that a rebalance is required to continue. */
2365 if (sp->depth > sp->max_depth)
2366 {
2367 sp->rebalance_p = 1;
2368 return n;
2369 }
2370
2371 /* Continue down the tree. */
2372 sp->depth ++;
2373 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2374 sp->depth --;
2375
2376 /* The recursive call will change the place to which NODE
2377 points. */
2378 if (*node != n || sp->rebalance_p)
2379 return n;
2380 }
2381
2382 if (!parent)
2383 /* NODE is the root. We are done. */
2384 return n;
2385
2386 /* First, handle the case where there is no grandparent (i.e.,
2387 *PARENT is the root of the tree.) */
2388 if (!grandparent)
2389 {
2390 if (n == (*parent)->left)
2391 {
2392 *node = n->right;
2393 n->right = *parent;
2394 }
2395 else
2396 {
2397 *node = n->left;
2398 n->left = *parent;
2399 }
2400 *parent = n;
2401 return n;
2402 }
2403
2404 /* Next handle the cases where both N and *PARENT are left children,
2405 or where both are right children. */
2406 if (n == (*parent)->left && *parent == (*grandparent)->left)
2407 {
2408 mfsplay_tree_node p = *parent;
2409
2410 (*grandparent)->left = p->right;
2411 p->right = *grandparent;
2412 p->left = n->right;
2413 n->right = p;
2414 *grandparent = n;
2415 return n;
2416 }
2417 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2418 {
2419 mfsplay_tree_node p = *parent;
2420
2421 (*grandparent)->right = p->left;
2422 p->left = *grandparent;
2423 p->right = n->left;
2424 n->left = p;
2425 *grandparent = n;
2426 return n;
2427 }
2428
2429 /* Finally, deal with the case where N is a left child, but *PARENT
2430 is a right child, or vice versa. */
2431 if (n == (*parent)->left)
2432 {
2433 (*parent)->left = n->right;
2434 n->right = *parent;
2435 (*grandparent)->right = n->left;
2436 n->left = *grandparent;
2437 *grandparent = n;
2438 return n;
2439 }
2440 else
2441 {
2442 (*parent)->right = n->left;
2443 n->left = *parent;
2444 (*grandparent)->left = n->right;
2445 n->right = *grandparent;
2446 *grandparent = n;
2447 return n;
2448 }
2449 }
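/* Worked example for the left-left ("zig-zig") case above: with node N,
   parent P and grandparent G, and subtrees a..d,

             G                    N
            / \                  / \
           P   d      ==>       a   P
          / \                      / \
         N   c                    b   G
        / \                          / \
       a   b                        c   d

   i.e. N is hoisted two levels in a single step, shortening the access
   path; the right-right case is the mirror image, and the mixed cases
   below perform the corresponding "zig-zag" double rotation.  */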
2450
2451
2452
2453 static int
2454 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2455 {
2456 mfsplay_tree_node **p = array_ptr;
2457 *(*p) = n;
2458 (*p)++;
2459 return 0;
2460 }
2461
2462
2463 static mfsplay_tree_node
2464 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2465 unsigned high)
2466 {
2467 unsigned middle = low + (high - low) / 2;
2468 mfsplay_tree_node n = array[middle];
2469
2470 /* Note that since we're producing a balanced binary tree, the recursion
2471 depth is only about log2 (num_keys), so recursing here is not a problem. */
2472 if (low + 1 <= middle)
2473 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2474 else
2475 n->left = NULL;
2476
2477 if (middle + 1 <= high)
2478 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2479 else
2480 n->right = NULL;
2481
2482 return n;
2483 }
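/* Worked example for the helper above: given a sorted array of seven node
   pointers a[0..6], the midpoint recursion picks a[3] as the root, a[1]
   and a[5] as its children, and a[0], a[2], a[4], a[6] as leaves:

                 a[3]
                /    \
             a[1]    a[5]
             /  \    /  \
          a[0] a[2] a[4] a[6]

   so the relinked tree has minimal height for the given key count.  */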
2484
2485
2486 /* Rebalance the entire tree. Do this by copying all the node
2487 pointers into an array, then cleverly re-linking them. */
2488 static void
2489 mfsplay_tree_rebalance (mfsplay_tree sp)
2490 {
2491 mfsplay_tree_node *all_nodes, *all_nodes_1;
2492
2493 if (sp->num_keys <= 2)
2494 return;
2495
2496 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2497
2498 /* Traverse all nodes to copy their addresses into this array. */
2499 all_nodes_1 = all_nodes;
2500 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2501 (void *) &all_nodes_1);
2502
2503 /* Relink all the nodes. */
2504 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2505
2506 mfsplay_tree_free (all_nodes);
2507 }
2508
2509
2510 /* Splay SP around KEY. */
2511 static void
2512 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2513 {
2514 if (sp->root == 0)
2515 return;
2516
2517 /* If we just splayed the tree with the same key, do nothing. */
2518 if (sp->last_splayed_key_p &&
2519 (sp->last_splayed_key == key))
2520 return;
2521
2522 /* Set a maximum recursion depth for the splay operation. The idea
2523 is to limit excessive stack usage if we're facing degenerate
2524 access patterns. Unfortunately such patterns can occur e.g.
2525 during static initialization, where many static objects might be
2526 registered in increasing address sequence, or when large tree-like
2527 heap data structures are allocated quickly.
2528
2529 On x86, a depth of 2500 corresponds to roughly 200K of stack usage.
2530 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2531 sp->max_depth = 2500;
2532 sp->rebalance_p = sp->depth = 0;
2533
2534 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2535 if (sp->rebalance_p)
2536 {
2537 mfsplay_tree_rebalance (sp);
2538
2539 sp->rebalance_p = sp->depth = 0;
2540 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2541
2542 if (sp->rebalance_p)
2543 abort ();
2544 }
2545
2546
2547 /* Cache this splay key. */
2548 sp->last_splayed_key = key;
2549 sp->last_splayed_key_p = 1;
2550 }
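/* Back-of-the-envelope check for the depth limit above, assuming (as the
   comment does for x86) on the order of 80 bytes of stack per
   mfsplay_tree_splay_helper frame: 2500 nested calls come to roughly
   2500 * 80 = 200000 bytes, i.e. the "roughly 200K of stack usage"
   mentioned there.  */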
2551
2552
2553
2554 /* Allocate a new splay tree. */
2555 static mfsplay_tree
2556 mfsplay_tree_new ()
2557 {
2558 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2559 sp->root = NULL;
2560 sp->last_splayed_key_p = 0;
2561 sp->num_keys = 0;
2562
2563 return sp;
2564 }
2565
2566
2567
2568 /* Insert a new node (associating KEY with VALUE) into SP. If a
2569 previous node with the indicated KEY exists, its value is replaced
2570 with VALUE. Returns the resulting root node. */
2571 static mfsplay_tree_node
2572 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2573 {
2574 int comparison = 0;
2575
2576 mfsplay_tree_splay (sp, key);
2577
2578 if (sp->root)
2579 comparison = ((sp->root->key > key) ? 1 :
2580 ((sp->root->key < key) ? -1 : 0));
2581
2582 if (sp->root && comparison == 0)
2583 {
2584 /* If the root of the tree already has the indicated KEY, just
2585 replace the value with VALUE. */
2586 sp->root->value = value;
2587 }
2588 else
2589 {
2590 /* Create a new node, and insert it at the root. */
2591 mfsplay_tree_node node;
2592
2593 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2594 node->key = key;
2595 node->value = value;
2596 sp->num_keys++;
2597 if (!sp->root)
2598 node->left = node->right = 0;
2599 else if (comparison < 0)
2600 {
2601 node->left = sp->root;
2602 node->right = node->left->right;
2603 node->left->right = 0;
2604 }
2605 else
2606 {
2607 node->right = sp->root;
2608 node->left = node->right->left;
2609 node->right->left = 0;
2610 }
2611
2612 sp->root = node;
2613 sp->last_splayed_key_p = 0;
2614 }
2615
2616 return sp->root;
2617 }
2618
2619 /* Remove KEY from SP. It is not an error if it did not exist. */
2620
2621 static void
2622 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2623 {
2624 mfsplay_tree_splay (sp, key);
2625 sp->last_splayed_key_p = 0;
2626 if (sp->root && (sp->root->key == key))
2627 {
2628 mfsplay_tree_node left, right;
2629 left = sp->root->left;
2630 right = sp->root->right;
2631 /* Delete the root node itself. */
2632 mfsplay_tree_free (sp->root);
2633 sp->num_keys--;
2634 /* One of the children is now the root. Doesn't matter much
2635 which, so long as we preserve the properties of the tree. */
2636 if (left)
2637 {
2638 sp->root = left;
2639 /* If there was a right child as well, hang it off the
2640 right-most leaf of the left child. */
2641 if (right)
2642 {
2643 while (left->right)
2644 left = left->right;
2645 left->right = right;
2646 }
2647 }
2648 else
2649 sp->root = right;
2650 }
2651 }
2652
2653 /* Look up KEY in SP, returning its node if present, and NULL
2654 otherwise. */
2655
2656 static mfsplay_tree_node
2657 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2658 {
2659 mfsplay_tree_splay (sp, key);
2660 if (sp->root && (sp->root->key == key))
2661 return sp->root;
2662 else
2663 return 0;
2664 }
2665
2666
2667 /* Return the node with the largest key less than KEY, or NULL if
2668 there is no predecessor. KEY need not be present in the tree. */
2669
2670 static mfsplay_tree_node
2671 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2672 {
2673 int comparison;
2674 mfsplay_tree_node node;
2675 /* If the tree is empty, there is certainly no predecessor. */
2676 if (!sp->root)
2677 return NULL;
2678 /* Splay the tree around KEY. That will leave either the KEY
2679 itself, its predecessor, or its successor at the root. */
2680 mfsplay_tree_splay (sp, key);
2681 comparison = ((sp->root->key > key) ? 1 :
2682 ((sp->root->key < key) ? -1 : 0));
2683
2684 /* If the predecessor is at the root, just return it. */
2685 if (comparison < 0)
2686 return sp->root;
2687 /* Otherwise, find the rightmost element of the left subtree. */
2688 node = sp->root->left;
2689 if (node)
2690 while (node->right)
2691 node = node->right;
2692 return node;
2693 }
2694
2695 /* Return the node with the smallest key greater than KEY, or NULL
2696 if there is no successor. KEY need not be present in the tree. */
2697
2698 static mfsplay_tree_node
2699 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2700 {
2701 int comparison;
2702 mfsplay_tree_node node;
2703 /* If the tree is empty, there is certainly no successor. */
2704 if (!sp->root)
2705 return NULL;
2706 /* Splay the tree around KEY. That will leave either the KEY
2707 itself, its predecessor, or its successor at the root. */
2708 mfsplay_tree_splay (sp, key);
2709 comparison = ((sp->root->key > key) ? 1 :
2710 ((sp->root->key < key) ? -1 : 0));
2711 /* If the successor is at the root, just return it. */
2712 if (comparison > 0)
2713 return sp->root;
2714 /* Otherwise, find the leftmost element of the right subtree. */
2715 node = sp->root->right;
2716 if (node)
2717 while (node->left)
2718 node = node->left;
2719 return node;
2720 }
2721
2722 /* Call FN, passing it DATA, for every node in SP, following an
2723 in-order traversal. If FN ever returns a non-zero value, the
2724 iteration ceases immediately, and that value is returned.
2725 Otherwise, this function returns 0.
2726
2727 This function simulates recursion using dynamically allocated
2728 arrays, since it may be called from mfsplay_tree_rebalance(), which
2729 in turn means that the tree is already uncomfortably deep for stack
2730 space limits. */
2731 static int
2732 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2733 {
2734 mfsplay_tree_node *stack1;
2735 char *stack2;
2736 unsigned sp;
2737 int val = 0;
2738 enum s { s_left, s_here, s_right, s_up };
2739
2740 if (st->root == NULL) /* => num_keys == 0 */
2741 return 0;
2742
2743 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2744 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2745
2746 sp = 0;
2747 stack1 [sp] = st->root;
2748 stack2 [sp] = s_left;
2749
2750 while (1)
2751 {
2752 mfsplay_tree_node n;
2753 enum s s;
2754
2755 n = stack1 [sp];
2756 s = stack2 [sp];
2757
2758 /* Handle each of the four possible states separately. */
2759
2760 /* 1: We're here to traverse the left subtree (if any). */
2761 if (s == s_left)
2762 {
2763 stack2 [sp] = s_here;
2764 if (n->left != NULL)
2765 {
2766 sp ++;
2767 stack1 [sp] = n->left;
2768 stack2 [sp] = s_left;
2769 }
2770 }
2771
2772 /* 2: We're here to traverse this node. */
2773 else if (s == s_here)
2774 {
2775 stack2 [sp] = s_right;
2776 val = (*fn) (n, data);
2777 if (val) break;
2778 }
2779
2780 /* 3: We're here to traverse the right subtree (if any). */
2781 else if (s == s_right)
2782 {
2783 stack2 [sp] = s_up;
2784 if (n->right != NULL)
2785 {
2786 sp ++;
2787 stack1 [sp] = n->right;
2788 stack2 [sp] = s_left;
2789 }
2790 }
2791
2792 /* 4: We're here after both subtrees (if any) have been traversed. */
2793 else if (s == s_up)
2794 {
2795 /* Pop the stack. */
2796 if (sp == 0) break; /* Popping off the root node: we're finished! */
2797 sp --;
2798 }
2799
2800 else
2801 abort ();
2802 }
2803
2804 mfsplay_tree_free (stack1);
2805 mfsplay_tree_free (stack2);
2806 return val;
2807 }
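/* Closing usage sketch for the adapted splay-tree interface above, not part
   of the runtime; the function names are hypothetical.  The object database
   uses addresses as keys in exactly this fashion.  */
#if 0
static int
example_count_node (mfsplay_tree_node n, void *data)
{
  (void) n;
  (*(unsigned *) data)++;
  return 0;                     /* Non-zero would stop the traversal.  */
}

static void
example_splay_usage (void)
{
  unsigned count = 0;
  int payload = 42;
  mfsplay_tree t = mfsplay_tree_new ();

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, &payload);
  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x2000, &payload);

  if (mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000) != NULL)
    mfsplay_tree_foreach (t, example_count_node, &count);   /* count == 2 */

  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x2000);
}
#endif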