re PR target/37438 (ICE in extract_insn, at recog.c:2027 for i{4,5}86)
[gcc.git] / libmudflap / mf-hooks1.c
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
21
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
26
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
30 02110-1301, USA. */
31
32
33 #include "config.h"
34
35 #ifndef HAVE_SOCKLEN_T
36 #define socklen_t int
37 #endif
38
39
40 /* These attempt to coax various unix flavours to declare all our
41 needed tidbits in the system headers. */
42 #if !defined(__FreeBSD__) && !defined(__APPLE__)
43 #define _POSIX_SOURCE
44 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
45 #define _GNU_SOURCE
46 #define _XOPEN_SOURCE
47 #define _BSD_TYPES
48 #define __EXTENSIONS__
49 #define _ALL_SOURCE
50 #define _LARGE_FILE_API
51 #define _XOPEN_SOURCE_EXTENDED 1
52
53 #include <string.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <unistd.h>
59 #include <assert.h>
60 #include <errno.h>
61 #include <limits.h>
62 #include <time.h>
63
64 #include "mf-runtime.h"
65 #include "mf-impl.h"
66
67 #ifdef _MUDFLAP
68 #error "Do not compile this file with -fmudflap!"
69 #endif
70
71
72 /* Memory allocation related hook functions. Some of these are
73 intercepted via linker wrapping or symbol interposition. Others
74 use plain macros in mf-runtime.h. */
75
76
77 #if PIC
78
/* Bootstrap allocation pool: a few fixed-size, zero-initialized static
   buffers handed out by __mf_0fn_malloc before the real malloc can be
   resolved.  Buffers are never reclaimed (see __mf_0fn_free).  */
enum { BS = 4096, NB=10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant.  Returns a free BS-byte static buffer,
   or NULL if the request is too large or the pool is exhausted.  */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  /* Hoist the size check out of the loop; it does not depend on I.
     Also fix the off-by-one: a request of exactly BS bytes fits in a
     BS-byte buffer and was previously rejected.  */
  if (c > BS)
    return NULL;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i])
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
100 #endif
101
102
103 #undef malloc
104 WRAPPER(void *, malloc, size_t c)
105 {
106 size_t size_with_crumple_zones;
107 DECLARE(void *, malloc, size_t c);
108 void *result;
109 BEGIN_PROTECT (malloc, c);
110
111 size_with_crumple_zones =
112 CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
113 __mf_opts.crumple_zone));
114 BEGIN_MALLOC_PROTECT ();
115 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
116 END_MALLOC_PROTECT ();
117
118 if (LIKELY(result))
119 {
120 result += __mf_opts.crumple_zone;
121 __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
122 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
123 }
124
125 return result;
126 }
127
128
#if PIC
/* A special bootstrap variant.  (Guard changed from #ifdef PIC to
   #if PIC for consistency with the other bootstrap stubs in this
   file.)  The backing buffers are zero-initialized static storage and
   are never reused, so no explicit clearing is needed here.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  /* Guard the multiplication against overflow, as real calloc does;
     an overflowed product could silently under-allocate.  */
  if (n != 0 && c > ((size_t) -1) / n)
    return NULL;
  return __mf_0fn_malloc (c * n);
}
#endif
137
138
139 #undef calloc
140 WRAPPER(void *, calloc, size_t c, size_t n)
141 {
142 size_t size_with_crumple_zones;
143 DECLARE(void *, calloc, size_t, size_t);
144 DECLARE(void *, malloc, size_t);
145 DECLARE(void *, memset, void *, int, size_t);
146 char *result;
147 BEGIN_PROTECT (calloc, c, n);
148
149 size_with_crumple_zones =
150 CLAMPADD((c * n), /* XXX: CLAMPMUL */
151 CLAMPADD(__mf_opts.crumple_zone,
152 __mf_opts.crumple_zone));
153 BEGIN_MALLOC_PROTECT ();
154 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
155 END_MALLOC_PROTECT ();
156
157 if (LIKELY(result))
158 memset (result, 0, size_with_crumple_zones);
159
160 if (LIKELY(result))
161 {
162 result += __mf_opts.crumple_zone;
163 __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
164 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
165 }
166
167 return result;
168 }
169
170
171 #if PIC
172 /* A special bootstrap variant. */
/* Bootstrap realloc: unconditionally fails.  Nothing handed out during
   bootstrap ever needs resizing, so callers simply see an allocation
   failure and take their fallback path.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
178 #endif
179
180
#undef realloc
/* Wrapper for realloc: resize BUF -- a pointer previously returned by
   one of these wrappers, i.e. pointing just past a leading crumple
   zone -- to C user bytes.  The old registration is dropped and the
   new block registered in its place, with heap wiping suppressed for
   the duration so the unregister does not scrub live data.  */
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  /* Step back to the true start of the underlying allocation (the
     leading crumple zone) before handing it to the real realloc.  */
  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  /* New size = user bytes plus a crumple zone on each side
     (saturating addition).  */
  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
			 __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  /* Drop the old registration even if the real realloc failed; the
     unlocked __mfu_* variants are used because we hold the lock.  */
  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      /* Return a pointer past the new leading crumple zone and
	 register the user-visible region.  */
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
    }

  /* Restore previous setting. */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
227
228
229 #if PIC
230 /* A special bootstrap variant. */
/* Bootstrap free: a deliberate no-op.  Bootstrap allocations live in
   static storage (__mf_0fn_bufs) and are never reclaimed.  */
void
__mf_0fn_free (void *buf)
{
  /* Intentionally empty.  */
}
236 #endif
237
#undef free
/* Wrapper for free: unregister BUF, then either free the underlying
   block immediately or park it in a fixed-size circular queue
   (__mf_opts.free_queue_length slots) to delay reuse of the storage,
   which helps catch use-after-free.  */
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s. */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  /* free(NULL) is a no-op.  */
  if (UNLIKELY(buf == NULL))
    return;

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
	       (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
    {
      VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
      return;
    }
#endif

  /* Lazily zero the deferral queue the first time through, under the
     lock so concurrent first calls don't race.  */
  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
	      __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      /* Evict the oldest queued pointer (if the slot is occupied) to
	 make room for BUF, stepping back over its leading crumple
	 zone to recover the real allocation start.  */
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      /* Do the real free outside the lock.  */
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
319
320
321 /* We can only wrap mmap if the target supports it. Likewise for munmap.
322 We assume we have both if we have mmap. */
323 #ifdef HAVE_MMAP
324
325 #if PIC
326 /* A special bootstrap variant. */
/* Bootstrap mmap: always reports failure by returning the
   conventional mmap error value (void *) -1, i.e. MAP_FAILED, so
   early callers take their error paths.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
332 #endif
333
334
335 #undef mmap
336 WRAPPER(void *, mmap,
337 void *start, size_t length, int prot,
338 int flags, int fd, off_t offset)
339 {
340 DECLARE(void *, mmap, void *, size_t, int,
341 int, int, off_t);
342 void *result;
343 BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);
344
345 result = CALL_REAL (mmap, start, length, prot,
346 flags, fd, offset);
347
348 /*
349 VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
350 (uintptr_t) start, (uintptr_t) length,
351 (uintptr_t) result);
352 */
353
354 if (result != (void *)-1)
355 {
356 /* Register each page as a heap object. Why not register it all
357 as a single segment? That's so that a later munmap() call
358 can unmap individual pages. XXX: would __MF_TYPE_GUESS make
359 this more automatic? */
360 size_t ps = getpagesize ();
361 uintptr_t base = (uintptr_t) result;
362 uintptr_t offset;
363
364 for (offset=0; offset<length; offset+=ps)
365 {
366 /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
367 /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
368 appropriate for unaccessed mmap pages? */
369 __mf_register ((void *) CLAMPADD (base, offset), ps,
370 __MF_TYPE_HEAP_I, "mmap page");
371 }
372 }
373
374 return result;
375 }
376
377
378 #if PIC
379 /* A special bootstrap variant. */
/* Bootstrap munmap: always reports failure (-1); there is nothing the
   bootstrap allocator could have mapped.  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
385 #endif
386
387
388 #undef munmap
389 WRAPPER(int , munmap, void *start, size_t length)
390 {
391 DECLARE(int, munmap, void *, size_t);
392 int result;
393 BEGIN_PROTECT (munmap, start, length);
394
395 result = CALL_REAL (munmap, start, length);
396
397 /*
398 VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
399 (uintptr_t) start, (uintptr_t) length,
400 (uintptr_t) result);
401 */
402
403 if (result == 0)
404 {
405 /* Unregister each page as a heap object. */
406 size_t ps = getpagesize ();
407 uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
408 uintptr_t offset;
409
410 for (offset=0; offset<length; offset+=ps)
411 __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
412 }
413 return result;
414 }
415 #endif /* HAVE_MMAP */
416
417
/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  Emulates alloca by
   heap-allocating C bytes and freeing them automatically once the
   caller's stack frame is observed to have been popped.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  /* Current frame address stands in for the caller's stack depth.  */
  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  /* NOTE(review): comparing addresses of objects in different frames
     is not portable C; assumed valid on the supported targets.  */
  while (alloca_history &&
	 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      /* Unregister, then release both the user block and its tracking
	 node via the real free.  */
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block. */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation. */
    {
      /* Allocate the tracking node first; if it fails, no user block
	 is attempted at all.  */
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
						    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
	{
	  BEGIN_MALLOC_PROTECT ();
	  result = CALL_REAL (malloc, c);
	  END_MALLOC_PROTECT ();
	  if (UNLIKELY (result == NULL))
	    {
	      /* User allocation failed: roll back the tracking node.  */
	      BEGIN_MALLOC_PROTECT ();
	      CALL_REAL (free, track);
	      END_MALLOC_PROTECT ();
	      /* Too bad.  XXX: What about errno? */
	    }
	  else
	    {
	      /* Success: register the user block and push it onto the
		 head of the tracking list.  */
	      __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
	      track->ptr = result;
	      track->stack = stack;
	      track->next = alloca_history;
	      alloca_history = track;
	    }
	}
    }

  return result;
}
494
495
#undef alloca
/* Wrapper for alloca: defer to the indirect helper so that __mf_fini
   can also reach it to flush any still-pending alloca allocations.  */
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}
501