1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 # include "gc_priv.h"
18
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* In some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
26 # define __KERNEL__
27 # include <asm/signal.h>
28 # undef __KERNEL__
29 # else
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
35 # if 2 <= __GLIBC__
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
46 # endif
47 # endif
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
49 # include <sys/types.h>
50 # if !defined(MSWIN32) && !defined(SUNOS4)
51 # include <unistd.h>
52 # endif
53 # endif
54
55 # include <stdio.h>
56 # include <signal.h>
57
58 /* Blatantly OS dependent routines, except for those that are related */
59 /* to dynamic loading. */
60
61 # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
62 # define NEED_FIND_LIMIT
63 # endif
64
65 # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
66 # define NEED_FIND_LIMIT
67 # endif
68
69 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
70 # define NEED_FIND_LIMIT
71 # endif
72
73 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
74 # define NEED_FIND_LIMIT
75 # endif
76
77 # if defined(LINUX) && \
78 (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
79 || defined(MIPS))
80 # define NEED_FIND_LIMIT
81 # endif
82
83 #ifdef NEED_FIND_LIMIT
84 # include <setjmp.h>
85 #endif
86
87 #ifdef FREEBSD
88 # include <machine/trap.h>
89 #endif
90
91 #ifdef AMIGA
92 # include <proto/exec.h>
93 # include <proto/dos.h>
94 # include <dos/dosextens.h>
95 # include <workbench/startup.h>
96 #endif
97
98 #ifdef MSWIN32
99 # define WIN32_LEAN_AND_MEAN
100 # define NOSERVICE
101 # include <windows.h>
102 #endif
103
104 #ifdef MACOS
105 # include <Processes.h>
106 #endif
107
108 #ifdef IRIX5
109 # include <sys/uio.h>
110 # include <malloc.h> /* for locking */
111 #endif
112 #ifdef USE_MMAP
113 # include <sys/types.h>
114 # include <sys/mman.h>
115 # include <sys/stat.h>
116 # include <fcntl.h>
117 #endif
118
119 #ifdef SUNOS5SIGS
120 # include <sys/siginfo.h>
121 # undef setjmp
122 # undef longjmp
123 # define setjmp(env) sigsetjmp(env, 1)
124 # define longjmp(env, val) siglongjmp(env, val)
125 # define jmp_buf sigjmp_buf
126 #endif
127
128 #ifdef DJGPP
129 /* Apparently necessary for djgpp 2.01. May cause problems with */
130 /* other versions. */
131 typedef long unsigned int caddr_t;
132 #endif
133
134 #ifdef PCR
135 # include "il/PCR_IL.h"
136 # include "th/PCR_ThCtl.h"
137 # include "mm/PCR_MM.h"
138 #endif
139
140 #if !defined(NO_EXECUTE_PERMISSION)
141 # define OPT_PROT_EXEC PROT_EXEC
142 #else
143 # define OPT_PROT_EXEC 0
144 #endif
145
146 #if defined(SEARCH_FOR_DATA_START)
147 /* The following doesn't work if the GC is in a dynamic library. */
148 /* The I386 case can be handled without a search. The Alpha case */
149 /* used to be handled differently as well, but the rules changed */
150 /* for recent Linux versions. This seems to be the easiest way to */
151 /* cover all versions. */
152 ptr_t GC_data_start;
153
154 extern char * GC_copyright[]; /* Any data symbol would do. */
155
156 void GC_init_linux_data_start()
157 {
158 extern ptr_t GC_find_limit();
159
160 GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
161 }
162 #endif
163
164 # ifdef ECOS
165
166 # ifndef ECOS_GC_MEMORY_SIZE
167 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
168 # endif /* ECOS_GC_MEMORY_SIZE */
169
170 // setjmp() function, as described in ANSI para 7.6.1.1
171 #define setjmp( __env__ ) hal_setjmp( __env__ )
172
173 // FIXME: This is a simple way of allocating memory which is
174 // compatible with ECOS early releases. Later releases use a more
175 // sophisticated means of allocating memory than this simple static
176 // allocator, but this method is at least bound to work.
177 static char memory[ECOS_GC_MEMORY_SIZE];
178 static char *brk = memory;
179
180 static void *tiny_sbrk(ptrdiff_t increment)
181 {
182 void *p = brk;
183
184 brk += increment;
185
186 if (brk > memory + sizeof memory)
187 {
188 brk -= increment;
189 return NULL;
190 }
191
192 return p;
193 }
194 #define sbrk tiny_sbrk
195 # endif /* ECOS */
196
197 # ifdef OS2
198
199 # include <stddef.h>
200
201 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
202
203 struct exe_hdr {
204 unsigned short magic_number;
205 unsigned short padding[29];
206 long new_exe_offset;
207 };
208
209 #define E_MAGIC(x) (x).magic_number
210 #define EMAGIC 0x5A4D
211 #define E_LFANEW(x) (x).new_exe_offset
212
213 struct e32_exe {
214 unsigned char magic_number[2];
215 unsigned char byte_order;
216 unsigned char word_order;
217 unsigned long exe_format_level;
218 unsigned short cpu;
219 unsigned short os;
220 unsigned long padding1[13];
221 unsigned long object_table_offset;
222 unsigned long object_count;
223 unsigned long padding2[31];
224 };
225
226 #define E32_MAGIC1(x) (x).magic_number[0]
227 #define E32MAGIC1 'L'
228 #define E32_MAGIC2(x) (x).magic_number[1]
229 #define E32MAGIC2 'X'
230 #define E32_BORDER(x) (x).byte_order
231 #define E32LEBO 0
232 #define E32_WORDER(x) (x).word_order
233 #define E32LEWO 0
234 #define E32_CPU(x) (x).cpu
235 #define E32CPU286 1
236 #define E32_OBJTAB(x) (x).object_table_offset
237 #define E32_OBJCNT(x) (x).object_count
238
239 struct o32_obj {
240 unsigned long size;
241 unsigned long base;
242 unsigned long flags;
243 unsigned long pagemap;
244 unsigned long mapsize;
245 unsigned long reserved;
246 };
247
248 #define O32_FLAGS(x) (x).flags
249 #define OBJREAD 0x0001L
250 #define OBJWRITE 0x0002L
251 #define OBJINVALID 0x0080L
252 #define O32_SIZE(x) (x).size
253 #define O32_BASE(x) (x).base
254
255 # else /* IBM's compiler */
256
257 /* A kludge to get around what appears to be a header file bug */
258 # ifndef WORD
259 # define WORD unsigned short
260 # endif
261 # ifndef DWORD
262 # define DWORD unsigned long
263 # endif
264
265 # define EXE386 1
266 # include <newexe.h>
267 # include <exe386.h>
268
269 # endif /* __IBMC__ */
270
271 # define INCL_DOSEXCEPTIONS
272 # define INCL_DOSPROCESS
273 # define INCL_DOSERRORS
274 # define INCL_DOSMODULEMGR
275 # define INCL_DOSMEMMGR
276 # include <os2.h>
277
278
279 /* Disable and enable signals during nontrivial allocations */
280
281 void GC_disable_signals(void)
282 {
283 ULONG nest;
284
285 DosEnterMustComplete(&nest);
286 if (nest != 1) ABORT("nested GC_disable_signals");
287 }
288
289 void GC_enable_signals(void)
290 {
291 ULONG nest;
292
293 DosExitMustComplete(&nest);
294 if (nest != 0) ABORT("GC_enable_signals");
295 }
296
297
298 # else
299
300 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
301 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
302 && !defined(NO_SIGSET)
303
304 # if defined(sigmask) && !defined(UTS4)
305 /* Use the traditional BSD interface */
306 # define SIGSET_T int
307 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
308 # define SIG_FILL(set) (set) = 0x7fffffff
309 /* Setting the leading bit appears to provoke a bug in some */
310 /* longjmp implementations. Most systems appear not to have */
311 /* a signal 32. */
312 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
313 # else
314 /* Use POSIX/SYSV interface */
315 # define SIGSET_T sigset_t
316 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
317 # define SIG_FILL(set) sigfillset(&set)
318 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
319 # endif
320
321 static GC_bool mask_initialized = FALSE;
322
323 static SIGSET_T new_mask;
324
325 static SIGSET_T old_mask;
326
327 static SIGSET_T dummy;
328
329 #if defined(PRINTSTATS) && !defined(THREADS)
330 # define CHECK_SIGNALS
331 int GC_sig_disabled = 0;
332 #endif
333
334 void GC_disable_signals()
335 {
336 if (!mask_initialized) {
337 SIG_FILL(new_mask);
338
339 SIG_DEL(new_mask, SIGSEGV);
340 SIG_DEL(new_mask, SIGILL);
341 SIG_DEL(new_mask, SIGQUIT);
342 # ifdef SIGBUS
343 SIG_DEL(new_mask, SIGBUS);
344 # endif
345 # ifdef SIGIOT
346 SIG_DEL(new_mask, SIGIOT);
347 # endif
348 # ifdef SIGEMT
349 SIG_DEL(new_mask, SIGEMT);
350 # endif
351 # ifdef SIGTRAP
352 SIG_DEL(new_mask, SIGTRAP);
353 # endif
354 mask_initialized = TRUE;
355 }
356 # ifdef CHECK_SIGNALS
357 if (GC_sig_disabled != 0) ABORT("Nested disables");
358 GC_sig_disabled++;
359 # endif
360 SIGSETMASK(old_mask,new_mask);
361 }
362
363 void GC_enable_signals()
364 {
365 # ifdef CHECK_SIGNALS
366 if (GC_sig_disabled != 1) ABORT("Unmatched enable");
367 GC_sig_disabled--;
368 # endif
369 SIGSETMASK(dummy,old_mask);
370 }
371
372 # endif /* !PCR */
373
374 # endif /*!OS/2 */
375
376 /* Ivan Demakov: simplest way (to me) */
377 #if defined (DOS4GW) || defined (NO_SIGSET)
378 void GC_disable_signals() { }
379 void GC_enable_signals() { }
380 #endif
381
382 /* Find the page size */
383 word GC_page_size;
384
385 # ifdef MSWIN32
386 void GC_setpagesize()
387 {
388 SYSTEM_INFO sysinfo;
389
390 GetSystemInfo(&sysinfo);
391 GC_page_size = sysinfo.dwPageSize;
392 }
393
394 # else
395 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
396 || defined(USE_MUNMAP)
397 void GC_setpagesize()
398 {
399 GC_page_size = GETPAGESIZE();
400 }
401 # else
402 /* It's acceptable to fake it. */
403 void GC_setpagesize()
404 {
405 GC_page_size = HBLKSIZE;
406 }
407 # endif
408 # endif
409
410 /*
411 * Find the base of the stack.
412 * Used only in single-threaded environment.
413 * With threads, GC_mark_roots needs to know how to do this.
414 * Called with allocator lock held.
415 */
416 # ifdef MSWIN32
417 # define is_writable(prot) ((prot) == PAGE_READWRITE \
418 || (prot) == PAGE_WRITECOPY \
419 || (prot) == PAGE_EXECUTE_READWRITE \
420 || (prot) == PAGE_EXECUTE_WRITECOPY)
421 /* Return the number of bytes that are writable starting at p. */
422 /* The pointer p is assumed to be page aligned. */
423 /* If base is not 0, *base becomes the beginning of the */
424 /* allocation region containing p. */
425 word GC_get_writable_length(ptr_t p, ptr_t *base)
426 {
427 MEMORY_BASIC_INFORMATION buf;
428 word result;
429 word protect;
430
431 result = VirtualQuery(p, &buf, sizeof(buf));
432 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
433 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
434 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
435 if (!is_writable(protect)) {
436 return(0);
437 }
438 if (buf.State != MEM_COMMIT) return(0);
439 return(buf.RegionSize);
440 }
441
442 ptr_t GC_get_stack_base()
443 {
444 int dummy;
445 ptr_t sp = (ptr_t)(&dummy);
446 ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
447 word size = GC_get_writable_length(trunc_sp, 0);
448
449 return(trunc_sp + size);
450 }
451
452
453 # else
454
455 # ifdef OS2
456
457 ptr_t GC_get_stack_base()
458 {
459 PTIB ptib;
460 PPIB ppib;
461
462 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
463 GC_err_printf0("DosGetInfoBlocks failed\n");
464 ABORT("DosGetInfoBlocks failed\n");
465 }
466 return((ptr_t)(ptib -> tib_pstacklimit));
467 }
468
469 # else
470
471 # ifdef AMIGA
472
473 ptr_t GC_get_stack_base()
474 {
475 struct Process *proc = (struct Process*)SysBase->ThisTask;
476
477 /* Reference: Amiga Guru Book Pages: 42,567,574 */
478 if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
479 && proc->pr_CLI != NULL) {
480 /* first ULONG is StackSize */
481 /*longPtr = proc->pr_ReturnAddr;
482 size = longPtr[0];*/
483
484 return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
485 } else {
486 return (char *)proc->pr_Task.tc_SPUpper;
487 }
488 }
489
490 #if 0 /* old version */
491 ptr_t GC_get_stack_base()
492 {
493 extern struct WBStartup *_WBenchMsg;
494 extern long __base;
495 extern long __stack;
496 struct Task *task;
497 struct Process *proc;
498 struct CommandLineInterface *cli;
499 long size;
500
501 if ((task = FindTask(0)) == 0) {
502 GC_err_puts("Cannot find own task structure\n");
503 ABORT("task missing");
504 }
505 proc = (struct Process *)task;
506 cli = BADDR(proc->pr_CLI);
507
508 if (_WBenchMsg != 0 || cli == 0) {
509 size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
510 } else {
511 size = cli->cli_DefaultStack * 4;
512 }
513 return (ptr_t)(__base + GC_max(size, __stack));
514 }
515 #endif /* 0 */
516
517 # else /* !AMIGA, !OS2, ... */
518
519 # ifdef NEED_FIND_LIMIT
520 /* Some tools to implement HEURISTIC2 */
521 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
522 /* static */ jmp_buf GC_jmp_buf;
523
524 /*ARGSUSED*/
525 void GC_fault_handler(sig)
526 int sig;
527 {
528 longjmp(GC_jmp_buf, 1);
529 }
530
531 # ifdef __STDC__
532 typedef void (*handler)(int);
533 # else
534 typedef void (*handler)();
535 # endif
536
537 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
538 static struct sigaction old_segv_act;
539 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
540 static struct sigaction old_bus_act;
541 # endif
542 # else
543 static handler old_segv_handler, old_bus_handler;
544 # endif
545
546 void GC_setup_temporary_fault_handler()
547 {
548 # ifndef ECOS
549 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
550 struct sigaction act;
551
552 act.sa_handler = GC_fault_handler;
553 act.sa_flags = SA_RESTART | SA_NODEFER;
554 /* The presence of SA_NODEFER represents yet another gross */
555 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
556 /* interact correctly with -lthread. We hide the confusion */
557 /* by making sure that signal handling doesn't affect the */
558 /* signal mask. */
559
560 (void) sigemptyset(&act.sa_mask);
561 # ifdef IRIX_THREADS
562 /* Older versions have a bug related to retrieving and */
563 /* setting a handler at the same time. */
564 (void) sigaction(SIGSEGV, 0, &old_segv_act);
565 (void) sigaction(SIGSEGV, &act, 0);
566 # else
567 (void) sigaction(SIGSEGV, &act, &old_segv_act);
568 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
569 || defined(HPUX)
570 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
571 /* Pthreads doesn't exist under Irix 5.x, so we */
572 /* don't have to worry in the threads case. */
573 (void) sigaction(SIGBUS, &act, &old_bus_act);
574 # endif
575 # endif /* IRIX_THREADS */
576 # else
577 old_segv_handler = signal(SIGSEGV, GC_fault_handler);
578 # ifdef SIGBUS
579 old_bus_handler = signal(SIGBUS, GC_fault_handler);
580 # endif
581 # endif
582 # endif /* ECOS */
583 }
584
585 void GC_reset_fault_handler()
586 {
587 # ifndef ECOS
588 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
589 (void) sigaction(SIGSEGV, &old_segv_act, 0);
590 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
591 || defined(HPUX)
592 (void) sigaction(SIGBUS, &old_bus_act, 0);
593 # endif
594 # else
595 (void) signal(SIGSEGV, old_segv_handler);
596 # ifdef SIGBUS
597 (void) signal(SIGBUS, old_bus_handler);
598 # endif
599 # endif
600 # endif /* ECOS */
601 }
602
603 /* Return the first nonaddressable location > p (up) or */
604 /* the smallest location q s.t. [q,p] is addressable (!up). */
605 ptr_t GC_find_limit(p, up)
606 ptr_t p;
607 GC_bool up;
608 {
609 # ifndef ECOS
610 static VOLATILE ptr_t result;
611 /* Needs to be static, since otherwise it may not be */
612 /* preserved across the longjmp. Can safely be */
613 /* static since it's only called once, with the */
614 /* allocation lock held. */
615
616
617 GC_setup_temporary_fault_handler();
618 if (setjmp(GC_jmp_buf) == 0) {
619 result = (ptr_t)(((word)(p))
620 & ~(MIN_PAGE_SIZE-1));
621 for (;;) {
622 if (up) {
623 result += MIN_PAGE_SIZE;
624 } else {
625 result -= MIN_PAGE_SIZE;
626 }
627 GC_noop1((word)(*result));
628 }
629 }
630 GC_reset_fault_handler();
631 if (!up) {
632 result += MIN_PAGE_SIZE;
633 }
634 return(result);
635 # else /* ECOS */
636 abort();
637 # endif /* ECOS */
638 }
639 # endif
640
641 # ifndef ECOS
642
643 #ifdef LINUX_STACKBOTTOM
644
645 # define STAT_SKIP 27 /* Number of fields preceding startstack */
646 /* field in /proc/self/stat */
647
648 ptr_t GC_linux_stack_base(void)
649 {
650 FILE *f;
651 char c;
652 word result = 0;
653 int i;
654
655 f = fopen("/proc/self/stat", "r");
656 if (NULL == f) ABORT("Couldn't open /proc/self/stat");
657 c = getc(f);
658 /* Skip the required number of fields. This number is hopefully */
659 /* constant across all Linux implementations. */
660 for (i = 0; i < STAT_SKIP; ++i) {
661 while (isspace(c)) c = getc(f);
662 while (!isspace(c)) c = getc(f);
663 }
664 while (isspace(c)) c = getc(f);
665 while (isdigit(c)) {
666 result *= 10;
667 result += c - '0';
668 c = getc(f);
669 }
670 if (result < 0x10000000) ABORT("Absurd stack bottom value");
671 return (ptr_t)result;
672 }
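/* A guarded-out sketch of the same /proc/self/stat lookup using fscanf;   */
/* the field position (the 28th field, "startstack" in proc(5)) matches    */
/* the hand-rolled loop above.  The function name is illustrative only     */
/* and this variant is not part of the collector.                          */
#if 0
ptr_t GC_linux_stack_base_scanf(void)
{
    FILE *f = fopen("/proc/self/stat", "r");
    unsigned long result = 0;
    int i;

    if (NULL == f) ABORT("Couldn't open /proc/self/stat");
    /* Skip the STAT_SKIP whitespace-delimited fields preceding startstack. */
    for (i = 0; i < STAT_SKIP; ++i) {
	if (fscanf(f, "%*s") == EOF) ABORT("Malformed /proc/self/stat");
    }
    if (fscanf(f, "%lu", &result) != 1) ABORT("Malformed /proc/self/stat");
    fclose(f);
    return (ptr_t)result;
}
#endif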
673
674 #endif /* LINUX_STACKBOTTOM */
675
676 ptr_t GC_get_stack_base()
677 {
678 word dummy;
679 ptr_t result;
680
681 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
682
683 # if defined(STACKBASE)
684 extern ptr_t STACKBASE;
685 return(STACKBASE);
686 # else
687 # ifdef STACKBOTTOM
688 return(STACKBOTTOM);
689 # else
690 # ifdef HEURISTIC1
691 # ifdef STACK_GROWS_DOWN
692 result = (ptr_t)((((word)(&dummy))
693 + STACKBOTTOM_ALIGNMENT_M1)
694 & ~STACKBOTTOM_ALIGNMENT_M1);
695 # else
696 result = (ptr_t)(((word)(&dummy))
697 & ~STACKBOTTOM_ALIGNMENT_M1);
698 # endif
699 # endif /* HEURISTIC1 */
700 # ifdef LINUX_STACKBOTTOM
701 result = GC_linux_stack_base();
702 # endif
703 # ifdef HEURISTIC2
704 # ifdef STACK_GROWS_DOWN
705 result = GC_find_limit((ptr_t)(&dummy), TRUE);
706 # ifdef HEURISTIC2_LIMIT
707 if (result > HEURISTIC2_LIMIT
708 && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
709 result = HEURISTIC2_LIMIT;
710 }
711 # endif
712 # else
713 result = GC_find_limit((ptr_t)(&dummy), FALSE);
714 # ifdef HEURISTIC2_LIMIT
715 if (result < HEURISTIC2_LIMIT
716 && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
717 result = HEURISTIC2_LIMIT;
718 }
719 # endif
720 # endif
721
722 # endif /* HEURISTIC2 */
723 # ifdef STACK_GROWS_DOWN
724 if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
725 # endif
726 return(result);
727 # endif /* STACKBOTTOM */
728 # endif /* STACKBASE */
729 }
730 # endif /* ECOS */
731
732 # endif /* ! AMIGA */
733 # endif /* ! OS2 */
734 # endif /* ! MSWIN32 */
735
736 /*
737 * Register static data segment(s) as roots.
738 * If more data segments are added later then they need to be registered
739 * at that point (as we do with SunOS dynamic loading),
740 * or GC_mark_roots needs to check for them (as we do with PCR).
741 * Called with allocator lock held.
742 */
743
744 # ifdef OS2
745
746 void GC_register_data_segments()
747 {
748 PTIB ptib;
749 PPIB ppib;
750 HMODULE module_handle;
751 # define PBUFSIZ 512
752 UCHAR path[PBUFSIZ];
753 FILE * myexefile;
754 struct exe_hdr hdrdos; /* MSDOS header. */
755 struct e32_exe hdr386; /* Real header for my executable */
756 struct o32_obj seg; /* Current segment */
757 int nsegs;
758
759
760 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
761 GC_err_printf0("DosGetInfoBlocks failed\n");
762 ABORT("DosGetInfoBlocks failed\n");
763 }
764 module_handle = ppib -> pib_hmte;
765 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
766 GC_err_printf0("DosQueryModuleName failed\n");
767 ABORT("DosGetInfoBlocks failed\n");
768 }
769 myexefile = fopen(path, "rb");
770 if (myexefile == 0) {
771 GC_err_puts("Couldn't open executable ");
772 GC_err_puts(path); GC_err_puts("\n");
773 ABORT("Failed to open executable\n");
774 }
775 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
776 GC_err_puts("Couldn't read MSDOS header from ");
777 GC_err_puts(path); GC_err_puts("\n");
778 ABORT("Couldn't read MSDOS header");
779 }
780 if (E_MAGIC(hdrdos) != EMAGIC) {
781 GC_err_puts("Executable has wrong DOS magic number: ");
782 GC_err_puts(path); GC_err_puts("\n");
783 ABORT("Bad DOS magic number");
784 }
785 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
786 GC_err_puts("Seek to new header failed in ");
787 GC_err_puts(path); GC_err_puts("\n");
788 ABORT("Bad DOS magic number");
789 }
790 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
791 GC_err_puts("Couldn't read MSDOS header from ");
792 GC_err_puts(path); GC_err_puts("\n");
793 ABORT("Couldn't read OS/2 header");
794 }
795 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
796 GC_err_puts("Executable has wrong OS/2 magic number:");
797 GC_err_puts(path); GC_err_puts("\n");
798 ABORT("Bad OS/2 magic number");
799 }
800 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
801 GC_err_puts("Executable %s has wrong byte order: ");
802 GC_err_puts(path); GC_err_puts("\n");
803 ABORT("Bad byte order");
804 }
805 if ( E32_CPU(hdr386) == E32CPU286) {
806 GC_err_puts("GC can't handle 80286 executables: ");
807 GC_err_puts(path); GC_err_puts("\n");
808 EXIT();
809 }
810 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
811 SEEK_SET) != 0) {
812 GC_err_puts("Seek to object table failed: ");
813 GC_err_puts(path); GC_err_puts("\n");
814 ABORT("Seek to object table failed");
815 }
816 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
817 int flags;
818 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
819 GC_err_puts("Couldn't read obj table entry from ");
820 GC_err_puts(path); GC_err_puts("\n");
821 ABORT("Couldn't read obj table entry");
822 }
823 flags = O32_FLAGS(seg);
824 if (!(flags & OBJWRITE)) continue;
825 if (!(flags & OBJREAD)) continue;
826 if (flags & OBJINVALID) {
827 GC_err_printf0("Object with invalid pages?\n");
828 continue;
829 }
830 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
831 }
832 }
833
834 # else
835
836 # ifdef MSWIN32
837 /* Unfortunately, we have to handle win32s very differently from NT, */
838 /* since VirtualQuery has very different semantics. In particular, */
839 /* under win32s a VirtualQuery call on an unmapped page returns an */
840 /* invalid result. Under NT, GC_register_data_segments is a noop and */
841 /* all real work is done by GC_register_dynamic_libraries. Under */
842 /* win32s, we cannot find the data segments associated with dll's. */
843 /* We register the main data segment here. */
844 GC_bool GC_win32s = FALSE; /* We're running under win32s. */
845
846 GC_bool GC_is_win32s()
847 {
848 DWORD v = GetVersion();
849
850 /* Check that this is not NT, and Windows major version <= 3 */
851 return ((v & 0x80000000) && (v & 0xff) <= 3);
852 }
853
854 void GC_init_win32()
855 {
856 GC_win32s = GC_is_win32s();
857 }
858
859 /* Return the smallest address a such that VirtualQuery */
860 /* returns correct results for all addresses between a and start. */
861 /* Assumes VirtualQuery returns correct information for start. */
862 ptr_t GC_least_described_address(ptr_t start)
863 {
864 MEMORY_BASIC_INFORMATION buf;
865 SYSTEM_INFO sysinfo;
866 DWORD result;
867 LPVOID limit;
868 ptr_t p;
869 LPVOID q;
870
871 GetSystemInfo(&sysinfo);
872 limit = sysinfo.lpMinimumApplicationAddress;
873 p = (ptr_t)((word)start & ~(GC_page_size - 1));
874 for (;;) {
875 q = (LPVOID)(p - GC_page_size);
876 if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
877 result = VirtualQuery(q, &buf, sizeof(buf));
878 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
879 p = (ptr_t)(buf.AllocationBase);
880 }
881 return(p);
882 }
883
884 /* Is p the start of either the malloc heap, or of one of our */
885 /* heap sections? */
886 GC_bool GC_is_heap_base (ptr_t p)
887 {
888
889 register unsigned i;
890
891 # ifndef REDIRECT_MALLOC
892 static ptr_t malloc_heap_pointer = 0;
893
894 if (0 == malloc_heap_pointer) {
895 MEMORY_BASIC_INFORMATION buf;
896 register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
897
898 if (result != sizeof(buf)) {
899 ABORT("Weird VirtualQuery result");
900 }
901 malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
902 }
903 if (p == malloc_heap_pointer) return(TRUE);
904 # endif
905 for (i = 0; i < GC_n_heap_bases; i++) {
906 if (GC_heap_bases[i] == p) return(TRUE);
907 }
908 return(FALSE);
909 }
910
911 void GC_register_root_section(ptr_t static_root)
912 {
913 MEMORY_BASIC_INFORMATION buf;
914 SYSTEM_INFO sysinfo;
915 DWORD result;
916 DWORD protect;
917 LPVOID p;
918 char * base;
919 char * limit, * new_limit;
920
921 if (!GC_win32s) return;
922 p = base = limit = GC_least_described_address(static_root);
923 GetSystemInfo(&sysinfo);
924 while (p < sysinfo.lpMaximumApplicationAddress) {
925 result = VirtualQuery(p, &buf, sizeof(buf));
926 if (result != sizeof(buf) || buf.AllocationBase == 0
927 || GC_is_heap_base(buf.AllocationBase)) break;
928 new_limit = (char *)p + buf.RegionSize;
929 protect = buf.Protect;
930 if (buf.State == MEM_COMMIT
931 && is_writable(protect)) {
932 if ((char *)p == limit) {
933 limit = new_limit;
934 } else {
935 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
936 base = p;
937 limit = new_limit;
938 }
939 }
940 if (p > (LPVOID)new_limit /* overflow */) break;
941 p = (LPVOID)new_limit;
942 }
943 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
944 }
945
946 void GC_register_data_segments()
947 {
948 static char dummy;
949
950 GC_register_root_section((ptr_t)(&dummy));
951 }
952 # else
953 # ifdef AMIGA
954
955 void GC_register_data_segments()
956 {
957 struct Process *proc;
958 struct CommandLineInterface *cli;
959 BPTR myseglist;
960 ULONG *data;
961
962 int num;
963
964
965 # ifdef __GNUC__
966 ULONG dataSegSize;
967 GC_bool found_segment = FALSE;
968 extern char __data_size[];
969
970 dataSegSize=__data_size+8;
971 /* Can't find the location of __data_size, because
972 it's possible that it is inside the segment. */
973
974 # endif
975
976 proc= (struct Process*)SysBase->ThisTask;
977
978 /* Reference: Amiga Guru Book Pages: 538ff,565,573
979 and XOper.asm */
980 if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
981 if (proc->pr_CLI == NULL) {
982 myseglist = proc->pr_SegList;
983 } else {
984 /* ProcLoaded 'Loaded as a command: '*/
985 cli = BADDR(proc->pr_CLI);
986 myseglist = cli->cli_Module;
987 }
988 } else {
989 ABORT("Not a Process.");
990 }
991
992 if (myseglist == NULL) {
993 ABORT("Arrrgh.. can't find segments, aborting");
994 }
995
996 /* xoper hunks Shell Process */
997
998 num=0;
999 for (data = (ULONG *)BADDR(myseglist); data != NULL;
1000 data = (ULONG *)BADDR(data[0])) {
1001 if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1002 ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1003 # ifdef __GNUC__
1004 if (dataSegSize == data[-1]) {
1005 found_segment = TRUE;
1006 }
1007 # endif
1008 GC_add_roots_inner((char *)&data[1],
1009 ((char *)&data[1]) + data[-1], FALSE);
1010 }
1011 ++num;
1012 } /* for */
1013 # ifdef __GNUC__
1014 if (!found_segment) {
1015 ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
1016 }
1017 # endif
1018 }
1019
1020 #if 0 /* old version */
1021 void GC_register_data_segments()
1022 {
1023 extern struct WBStartup *_WBenchMsg;
1024 struct Process *proc;
1025 struct CommandLineInterface *cli;
1026 BPTR myseglist;
1027 ULONG *data;
1028
1029 if ( _WBenchMsg != 0 ) {
1030 if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
1031 GC_err_puts("No seglist from workbench\n");
1032 return;
1033 }
1034 } else {
1035 if ((proc = (struct Process *)FindTask(0)) == 0) {
1036 GC_err_puts("Cannot find process structure\n");
1037 return;
1038 }
1039 if ((cli = BADDR(proc->pr_CLI)) == 0) {
1040 GC_err_puts("No CLI\n");
1041 return;
1042 }
1043 if ((myseglist = cli->cli_Module) == 0) {
1044 GC_err_puts("No seglist from CLI\n");
1045 return;
1046 }
1047 }
1048
1049 for (data = (ULONG *)BADDR(myseglist); data != 0;
1050 data = (ULONG *)BADDR(data[0])) {
1051 # ifdef AMIGA_SKIP_SEG
1052 if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1053 ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1054 # else
1055 {
1056 # endif /* AMIGA_SKIP_SEG */
1057 GC_add_roots_inner((char *)&data[1],
1058 ((char *)&data[1]) + data[-1], FALSE);
1059 }
1060 }
1061 }
1062 #endif /* old version */
1063
1064
1065 # else
1066
1067 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1068 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1069 char * GC_SysVGetDataStart(max_page_size, etext_addr)
1070 int max_page_size;
1071 int * etext_addr;
1072 {
1073 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1074 & ~(sizeof(word) - 1);
1075 /* etext rounded to word boundary */
1076 word next_page = ((text_end + (word)max_page_size - 1)
1077 & ~((word)max_page_size - 1));
1078 word page_offset = (text_end & ((word)max_page_size - 1));
1079 VOLATILE char * result = (char *)(next_page + page_offset);
1080 /* Note that this isn't equivalent to just adding */
1081 /* max_page_size to &etext if &etext is at a page boundary */
1082
1083 GC_setup_temporary_fault_handler();
1084 if (setjmp(GC_jmp_buf) == 0) {
1085 /* Try writing to the address. */
1086 *result = *result;
1087 GC_reset_fault_handler();
1088 } else {
1089 GC_reset_fault_handler();
1090 /* We got here via a longjmp. The address is not readable. */
1091 /* This is known to happen under Solaris 2.4 + gcc, which places */
1092 /* string constants in the text segment, but after etext. */
1093 /* Use plan B. Note that we now know there is a gap between */
1094 /* text and data segments, so plan A bought us something. */
1095 result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1096 }
1097 return((char *)result);
1098 }
1099 # endif
1100
1101
1102 void GC_register_data_segments()
1103 {
1104 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1105 && !defined(MACOSX)
1106 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1107 /* As of Solaris 2.3, the Solaris threads implementation */
1108 /* allocates the data structure for the initial thread with */
1109 /* sbrk at process startup. It needs to be scanned, so that */
1110 /* we don't lose some malloc allocated data structures */
1111 /* hanging from it. We're on thin ice here ... */
1112 extern caddr_t sbrk();
1113
1114 GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1115 # else
1116 GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1117 # endif
1118 # endif
1119 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1120 GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1121 # endif
1122 # if defined(MACOS)
1123 {
1124 # if defined(THINK_C)
1125 extern void* GC_MacGetDataStart(void);
1126 /* globals begin above stack and end at a5. */
1127 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1128 (ptr_t)LMGetCurrentA5(), FALSE);
1129 # else
1130 # if defined(__MWERKS__)
1131 # if !__POWERPC__
1132 extern void* GC_MacGetDataStart(void);
1133 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1134 # if __option(far_data)
1135 extern void* GC_MacGetDataEnd(void);
1136 # endif
1137 /* globals begin above stack and end at a5. */
1138 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1139 (ptr_t)LMGetCurrentA5(), FALSE);
1140 /* MATTHEW: Handle Far Globals */
1141 # if __option(far_data)
1142 /* Far globals follow the QD globals: */
1143 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1144 (ptr_t)GC_MacGetDataEnd(), FALSE);
1145 # endif
1146 # else
1147 extern char __data_start__[], __data_end__[];
1148 GC_add_roots_inner((ptr_t)&__data_start__,
1149 (ptr_t)&__data_end__, FALSE);
1150 # endif /* __POWERPC__ */
1151 # endif /* __MWERKS__ */
1152 # endif /* !THINK_C */
1153 }
1154 # endif /* MACOS */
1155
1156 /* Dynamic libraries are added at every collection, since they may */
1157 /* change. */
1158 }
1159
1160 # endif /* ! AMIGA */
1161 # endif /* ! MSWIN32 */
1162 # endif /* ! OS2 */
1163
1164 /*
1165 * Auxiliary routines for obtaining memory from OS.
1166 */
1167
1168 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1169 && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1170
1171 # ifdef SUNOS4
1172 extern caddr_t sbrk();
1173 # endif
1174 # ifdef __STDC__
1175 # define SBRK_ARG_T ptrdiff_t
1176 # else
1177 # define SBRK_ARG_T int
1178 # endif
1179
1180 # ifdef RS6000
1181 /* The compiler seems to generate speculative reads one past the end of */
1182 /* an allocated object. Hence we need to make sure that the page */
1183 /* following the last heap page is also mapped. */
1184 ptr_t GC_unix_get_mem(bytes)
1185 word bytes;
1186 {
1187 caddr_t cur_brk = (caddr_t)sbrk(0);
1188 caddr_t result;
1189 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1190 static caddr_t my_brk_val = 0;
1191
1192 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1193 if (lsbs != 0) {
1194 if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1195 }
1196 if (cur_brk == my_brk_val) {
1197 /* Use the extra block we allocated last time. */
1198 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1199 if (result == (caddr_t)(-1)) return(0);
1200 result -= GC_page_size;
1201 } else {
1202 result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1203 if (result == (caddr_t)(-1)) return(0);
1204 }
1205 my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1206 return((ptr_t)result);
1207 }
1208
1209 #else /* Not RS6000 */
1210
1211 #if defined(USE_MMAP)
1212 /* Tested only under IRIX5 and Solaris 2 */
1213
1214 #ifdef USE_MMAP_FIXED
1215 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1216 /* Seems to yield better performance on Solaris 2, but can */
1217 /* be unreliable if something is already mapped at the address. */
1218 #else
1219 # define GC_MMAP_FLAGS MAP_PRIVATE
1220 #endif
1221
1222 ptr_t GC_unix_get_mem(bytes)
1223 word bytes;
1224 {
1225 static GC_bool initialized = FALSE;
1226 static int fd;
1227 void *result;
1228 static ptr_t last_addr = HEAP_START;
1229
1230 if (!initialized) {
1231 fd = open("/dev/zero", O_RDONLY);
1232 initialized = TRUE;
1233 }
1234 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1235 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1236 GC_MMAP_FLAGS, fd, 0/* offset */);
1237 if (result == MAP_FAILED) return(0);
1238 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1239 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1240 return((ptr_t)result);
1241 }
1242
1243 #else /* Not RS6000, not USE_MMAP */
1244 ptr_t GC_unix_get_mem(bytes)
1245 word bytes;
1246 {
1247 ptr_t result;
1248 # ifdef IRIX5
1249 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1250 /* The equivalent may be needed on other systems as well. */
1251 __LOCK_MALLOC();
1252 # endif
1253 {
1254 ptr_t cur_brk = (ptr_t)sbrk(0);
1255 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1256
1257 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1258 if (lsbs != 0) {
1259 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1260 }
1261 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1262 if (result == (ptr_t)(-1)) result = 0;
1263 }
1264 # ifdef IRIX5
1265 __UNLOCK_MALLOC();
1266 # endif
1267 return(result);
1268 }
1269
1270 #endif /* Not USE_MMAP */
1271 #endif /* Not RS6000 */
1272
1273 # endif /* UN*X */
1274
1275 # ifdef OS2
1276
1277 void * os2_alloc(size_t bytes)
1278 {
1279 void * result;
1280
1281 if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1282 PAG_WRITE | PAG_COMMIT)
1283 != NO_ERROR) {
1284 return(0);
1285 }
1286 if (result == 0) return(os2_alloc(bytes));
1287 return(result);
1288 }
1289
1290 # endif /* OS2 */
1291
1292
1293 # ifdef MSWIN32
1294 word GC_n_heap_bases = 0;
1295
1296 ptr_t GC_win32_get_mem(bytes)
1297 word bytes;
1298 {
1299 ptr_t result;
1300
1301 if (GC_win32s) {
1302 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1303 /* There are also unconfirmed rumors of other */
1304 /* problems, so we dodge the issue. */
1305 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1306 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1307 } else {
1308 result = (ptr_t) VirtualAlloc(NULL, bytes,
1309 MEM_COMMIT | MEM_RESERVE,
1310 PAGE_EXECUTE_READWRITE);
1311 }
1312 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1313 /* If I read the documentation correctly, this can */
1314 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1315 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1316 GC_heap_bases[GC_n_heap_bases++] = result;
1317 return(result);
1318 }
1319
1320 void GC_win32_free_heap ()
1321 {
1322 if (GC_win32s) {
1323 while (GC_n_heap_bases > 0) {
1324 GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1325 GC_heap_bases[GC_n_heap_bases] = 0;
1326 }
1327 }
1328 }
1329
1330
1331 # endif
1332
1333 #ifdef USE_MUNMAP
1334
1335 /* For now, this only works on some Unix-like systems. If you */
1336 /* have something else, don't define USE_MUNMAP. */
1337 /* We assume ANSI C to support this feature. */
1338 #include <unistd.h>
1339 #include <sys/mman.h>
1340 #include <sys/stat.h>
1341 #include <sys/types.h>
1342 #include <fcntl.h>
1343
1344 /* Compute a page aligned starting address for the unmap */
1345 /* operation on a block of size bytes starting at start. */
1346 /* Return 0 if the block is too small to make this feasible. */
1347 ptr_t GC_unmap_start(ptr_t start, word bytes)
1348 {
1349 ptr_t result = start;
1350 /* Round start to next page boundary. */
1351 result += GC_page_size - 1;
1352 result = (ptr_t)((word)result & ~(GC_page_size - 1));
1353 if (result + GC_page_size > start + bytes) return 0;
1354 return result;
1355 }
1356
1357 /* Compute end address for an unmap operation on the indicated */
1358 /* block. */
1359 ptr_t GC_unmap_end(ptr_t start, word bytes)
1360 {
1361 ptr_t end_addr = start + bytes;
1362 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1363 return end_addr;
1364 }
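/* A guarded-out illustration of the rounding above, assuming a 4K page    */
/* size; the concrete addresses are made up for the example.  A block      */
/* [0x10010, 0x13010) fully contains only the pages [0x11000, 0x13000),    */
/* so that is the range GC_unmap/GC_remap actually operate on.             */
#if 0
void GC_unmap_range_example(void)
{
    GC_ASSERT(GC_page_size == 0x1000);
    GC_ASSERT(GC_unmap_start((ptr_t)0x10010, 0x3000) == (ptr_t)0x11000);
    GC_ASSERT(GC_unmap_end((ptr_t)0x10010, 0x3000) == (ptr_t)0x13000);
}
#endif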
1365
1366 /* We assume that GC_remap is called on exactly the same range */
1367 /* as a previous call to GC_unmap. It is safe to consistently */
1368 /* round the endpoints in both places. */
1369 void GC_unmap(ptr_t start, word bytes)
1370 {
1371 ptr_t start_addr = GC_unmap_start(start, bytes);
1372 ptr_t end_addr = GC_unmap_end(start, bytes);
1373 word len = end_addr - start_addr;
1374 if (0 == start_addr) return;
1375 if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1376 GC_unmapped_bytes += len;
1377 }
1378
1379
1380 void GC_remap(ptr_t start, word bytes)
1381 {
1382 static int zero_descr = -1;
1383 ptr_t start_addr = GC_unmap_start(start, bytes);
1384 ptr_t end_addr = GC_unmap_end(start, bytes);
1385 word len = end_addr - start_addr;
1386 ptr_t result;
1387
1388 if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1389 if (0 == start_addr) return;
1390 result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1391 MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1392 if (result != start_addr) {
1393 ABORT("mmap remapping failed");
1394 }
1395 GC_unmapped_bytes -= len;
1396 }
1397
1398 /* Two adjacent blocks have already been unmapped and are about to */
1399 /* be merged. Unmap the whole block. This typically requires */
1400 /* that we unmap a small section in the middle that was not previously */
1401 /* unmapped due to alignment constraints. */
1402 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1403 {
1404 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1405 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1406 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1407 ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1408 ptr_t start_addr = end1_addr;
1409 ptr_t end_addr = start2_addr;
1410 word len;
1411 GC_ASSERT(start1 + bytes1 == start2);
1412 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1413 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1414 if (0 == start_addr) return;
1415 len = end_addr - start_addr;
1416 if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1417 GC_unmapped_bytes += len;
1418 }
1419
1420 #endif /* USE_MUNMAP */
1421
1422 /* Routine for pushing any additional roots. In THREADS */
1423 /* environment, this is also responsible for marking from */
1424 /* thread stacks. In the SRC_M3 case, it also handles */
1425 /* global variables. */
1426 #ifndef THREADS
1427 void (*GC_push_other_roots)() = 0;
1428 #else /* THREADS */
1429
1430 # ifdef PCR
1431 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1432 {
1433 struct PCR_ThCtl_TInfoRep info;
1434 PCR_ERes result;
1435
1436 info.ti_stkLow = info.ti_stkHi = 0;
1437 result = PCR_ThCtl_GetInfo(t, &info);
1438 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1439 return(result);
1440 }
1441
1442 /* Push the contents of an old object. We treat this as stack */
1443 /* data only because that makes it robust against mark stack */
1444 /* overflow. */
1445 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1446 {
1447 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1448 return(PCR_ERes_okay);
1449 }
1450
1451
1452 void GC_default_push_other_roots()
1453 {
1454 /* Traverse data allocated by previous memory managers. */
1455 {
1456 extern struct PCR_MM_ProcsRep * GC_old_allocator;
1457
1458 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1459 GC_push_old_obj, 0)
1460 != PCR_ERes_okay) {
1461 ABORT("Old object enumeration failed");
1462 }
1463 }
1464 /* Traverse all thread stacks. */
1465 if (PCR_ERes_IsErr(
1466 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1467 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1468 ABORT("Thread stack marking failed\n");
1469 }
1470 }
1471
1472 # endif /* PCR */
1473
1474 # ifdef SRC_M3
1475
1476 # ifdef ALL_INTERIOR_POINTERS
1477 --> misconfigured
1478 # endif
1479
1480
1481 extern void ThreadF__ProcessStacks();
1482
1483 void GC_push_thread_stack(start, stop)
1484 word start, stop;
1485 {
1486 GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1487 }
1488
1489 /* Push routine with M3 specific calling convention. */
1490 GC_m3_push_root(dummy1, p, dummy2, dummy3)
1491 word *p;
1492 ptr_t dummy1, dummy2;
1493 int dummy3;
1494 {
1495 word q = *p;
1496
1497 if ((ptr_t)(q) >= GC_least_plausible_heap_addr
1498 && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
1499 GC_push_one_checked(q,FALSE);
1500 }
1501 }
1502
1503 /* M3 set equivalent to RTHeap.TracedRefTypes */
1504 typedef struct { int elts[1]; } RefTypeSet;
1505 RefTypeSet GC_TracedRefTypes = {{0x1}};
1506
1507 /* From finalize.c */
1508 extern void GC_push_finalizer_structures();
1509
1510 /* From stubborn.c: */
1511 # ifdef STUBBORN_ALLOC
1512 extern GC_PTR * GC_changing_list_start;
1513 # endif
1514
1515
1516 void GC_default_push_other_roots()
1517 {
1518 /* Use the M3 provided routine for finding static roots. */
1519 /* This is a bit dubious, since it presumes no C roots. */
1520 /* We handle the collector roots explicitly. */
1521 {
1522 # ifdef STUBBORN_ALLOC
1523 GC_push_one(GC_changing_list_start);
1524 # endif
1525 GC_push_finalizer_structures();
1526 RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1527 }
1528 if (GC_words_allocd > 0) {
1529 ThreadF__ProcessStacks(GC_push_thread_stack);
1530 }
1531 /* Otherwise this isn't absolutely necessary, and we have */
1532 /* startup ordering problems. */
1533 }
1534
1535 # endif /* SRC_M3 */
1536
1537 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1538 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1539 || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1540
1541 extern void GC_push_all_stacks();
1542
1543 void GC_default_push_other_roots()
1544 {
1545 GC_push_all_stacks();
1546 }
1547
1548 # endif /* SOLARIS_THREADS || ... */
1549
1550 void (*GC_push_other_roots)() = GC_default_push_other_roots;
1551
1552 #endif
1553
1554 /*
1555 * Routines for accessing dirty bits on virtual pages.
1556 * We plan to eventually implement four strategies for doing so:
1557 * DEFAULT_VDB: A simple dummy implementation that treats every page
1558 * as possibly dirty. This makes incremental collection
1559 * useless, but the implementation is still correct.
1560 * PCR_VDB: Use PPCR's virtual dirty bit facility.
1561 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1562 * works under some SVR4 variants. Even then, it may be
1563 * too slow to be entirely satisfactory. Requires reading
1564 * dirty bits for entire address space. Implementations tend
1565 * to assume that the client is a (slow) debugger.
1566 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1567 * dirtied pages. The implementation (and implementability)
1568 * is highly system dependent. This usually fails when system
1569 * calls write to a protected page. We prevent the read system
1570 * call from doing so. It is the client's responsibility to
1571 * make sure that other system calls are similarly protected
1572 * or write only to the stack.
1573 */
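/* A guarded-out client-side usage sketch: whichever of the strategies     */
/* below was compiled in, a client just requests incremental collection    */
/* through the public gc.h interface and the collector picks the dirty     */
/* bit implementation for it.  The loop body is illustrative only.         */
#if 0
#include "gc.h"

int main(void)
{
    GC_enable_incremental();		/* Start maintaining dirty bits. */
    for (;;) {
	void * p = GC_MALLOC(64);	/* May also perform a small	 */
					/* incremental mark step.	 */
	if (0 == p) break;
    }
    return 0;
}
#endif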
1574
1575 GC_bool GC_dirty_maintained = FALSE;
1576
1577 # ifdef DEFAULT_VDB
1578
1579 /* All of the following assume the allocation lock is held, and */
1580 /* signals are disabled. */
1581
1582 /* The client asserts that unallocated pages in the heap are never */
1583 /* written. */
1584
1585 /* Initialize virtual dirty bit implementation. */
1586 void GC_dirty_init()
1587 {
1588 GC_dirty_maintained = TRUE;
1589 }
1590
1591 /* Retrieve system dirty bits for heap to a local buffer. */
1592 /* Restore the system's notion of which pages are dirty. */
1593 void GC_read_dirty()
1594 {}
1595
1596 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1597 /* If the actual page size is different, this returns TRUE if any */
1598 /* of the pages overlapping h are dirty. This routine may err on the */
1599 /* side of labelling pages as dirty (and this implementation does). */
1600 /*ARGSUSED*/
1601 GC_bool GC_page_was_dirty(h)
1602 struct hblk *h;
1603 {
1604 return(TRUE);
1605 }
1606
1607 /*
1608 * The following two routines are typically less crucial. They matter
1609 * most with large dynamic libraries, or if we can't accurately identify
1610 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1611 * versions are adequate.
1612 */
1613
1614 /* Could any valid GC heap pointer ever have been written to this page? */
1615 /*ARGSUSED*/
1616 GC_bool GC_page_was_ever_dirty(h)
1617 struct hblk *h;
1618 {
1619 return(TRUE);
1620 }
1621
1622 /* Reset the n pages starting at h to "was never dirty" status. */
1623 void GC_is_fresh(h, n)
1624 struct hblk *h;
1625 word n;
1626 {
1627 }
1628
1629 /* A call hints that h is about to be written. */
1630 /* May speed up some dirty bit implementations. */
1631 /*ARGSUSED*/
1632 void GC_write_hint(h)
1633 struct hblk *h;
1634 {
1635 }
1636
1637 # endif /* DEFAULT_VDB */
1638
1639
1640 # ifdef MPROTECT_VDB
1641
1642 /*
1643 * See DEFAULT_VDB for interface descriptions.
1644 */
1645
1646 /*
1647 * This implementation maintains dirty bits itself by catching write
1648 * faults and keeping track of them. We assume nobody else catches
1649 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1650 * except as a result of a read system call. This means clients must
1651 * either ensure that system calls do not touch the heap, or must
1652 * provide their own wrappers analogous to the one for read.
1653 * We assume the page size is a multiple of HBLKSIZE.
1654 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1655 * tried to use portable code where easily possible. It is known
1656 * not to work under a number of other systems.
1657 */
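/* A guarded-out sketch of the kind of wrapper a client might supply for   */
/* system calls that write into the heap, as required above.  Touching     */
/* each destination page from user code first takes any write fault here,  */
/* where GC_write_fault_handler can unprotect the page, instead of inside  */
/* the kernel.  The wrapper name is an illustrative assumption; this is    */
/* not the collector's own read wrapper.                                   */
#if 0
#include <unistd.h>

ssize_t client_read(int fd, void * buf, size_t nbyte)
{
    VOLATILE char * p = (VOLATILE char *)buf;
    size_t i;

    for (i = 0; i < nbyte; i += GC_page_size) {
	p[i] = p[i];		/* Dirty (and unprotect) this page.	*/
    }
    if (nbyte > 0) p[nbyte-1] = p[nbyte-1];
    return read(fd, buf, nbyte);
}
#endif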
1658
1659 # ifndef MSWIN32
1660
1661 # include <sys/mman.h>
1662 # include <signal.h>
1663 # include <sys/syscall.h>
1664
1665 # define PROTECT(addr, len) \
1666 if (mprotect((caddr_t)(addr), (size_t)(len), \
1667 PROT_READ | OPT_PROT_EXEC) < 0) { \
1668 ABORT("mprotect failed"); \
1669 }
1670 # define UNPROTECT(addr, len) \
1671 if (mprotect((caddr_t)(addr), (size_t)(len), \
1672 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1673 ABORT("un-mprotect failed"); \
1674 }
1675
1676 # else
1677
1678 # include <signal.h>
1679
1680 static DWORD protect_junk;
1681 # define PROTECT(addr, len) \
1682 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1683 &protect_junk)) { \
1684 DWORD last_error = GetLastError(); \
1685 GC_printf1("Last error code: %lx\n", last_error); \
1686 ABORT("VirtualProtect failed"); \
1687 }
1688 # define UNPROTECT(addr, len) \
1689 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1690 &protect_junk)) { \
1691 ABORT("un-VirtualProtect failed"); \
1692 }
1693
1694 # endif
1695
1696 #if defined(SUNOS4) || defined(FREEBSD)
1697 typedef void (* SIG_PF)();
1698 #endif
1699 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1700 # ifdef __STDC__
1701 typedef void (* SIG_PF)(int);
1702 # else
1703 typedef void (* SIG_PF)();
1704 # endif
1705 #endif
1706 #if defined(MSWIN32)
1707 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1708 # undef SIG_DFL
1709 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1710 #endif
1711
1712 #if defined(IRIX5) || defined(OSF1)
1713 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1714 #endif
1715 #if defined(SUNOS5SIGS)
1716 # ifdef HPUX
1717 # define SIGINFO __siginfo
1718 # else
1719 # define SIGINFO siginfo
1720 # endif
1721 # ifdef __STDC__
1722 typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1723 # else
1724 typedef void (* REAL_SIG_PF)();
1725 # endif
1726 #endif
1727 #if defined(LINUX)
1728 # include <linux/version.h>
1729 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1730 typedef struct sigcontext s_c;
1731 # else
1732 typedef struct sigcontext_struct s_c;
1733 # endif
1734 # if defined(ALPHA) || defined(M68K)
1735 typedef void (* REAL_SIG_PF)(int, int, s_c *);
1736 # else
1737 # if defined(IA64)
1738 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1739 # else
1740 typedef void (* REAL_SIG_PF)(int, s_c);
1741 # endif
1742 # endif
1743 # ifdef ALPHA
1744 /* Retrieve fault address from sigcontext structure by decoding */
1745 /* instruction. */
1746 char * get_fault_addr(s_c *sc) {
1747 unsigned instr;
1748 word faultaddr;
1749
1750 instr = *((unsigned *)(sc->sc_pc));
1751 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1752 faultaddr += (word) (((int)instr << 16) >> 16);
1753 return (char *)faultaddr;
1754 }
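/* Worked example (illustrative) of the decoding above: Alpha memory-      */
/* format instructions keep the base register in bits 20..16 and a signed  */
/* 16-bit displacement in bits 15..0.  For a faulting store such as        */
/* "stl r1, -8(r2)", (instr >> 16) & 0x1f yields 2 and the sign-extended   */
/* displacement is -8, so the fault address is sc->sc_regs[2] - 8.  The    */
/* concrete instruction is made up for the example.                        */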
1755 # endif /* ALPHA */
1756 # endif
1757
1758 SIG_PF GC_old_bus_handler;
1759 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1760
1761 /*ARGSUSED*/
1762 # if defined (SUNOS4) || defined(FREEBSD)
1763 void GC_write_fault_handler(sig, code, scp, addr)
1764 int sig, code;
1765 struct sigcontext *scp;
1766 char * addr;
1767 # ifdef SUNOS4
1768 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1769 # define CODE_OK (FC_CODE(code) == FC_PROT \
1770 || (FC_CODE(code) == FC_OBJERR \
1771 && FC_ERRNO(code) == FC_PROT))
1772 # endif
1773 # ifdef FREEBSD
1774 # define SIG_OK (sig == SIGBUS)
1775 # define CODE_OK (code == BUS_PAGE_FAULT)
1776 # endif
1777 # endif
1778 # if defined(IRIX5) || defined(OSF1)
1779 # include <errno.h>
1780 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
1781 # define SIG_OK (sig == SIGSEGV)
1782 # ifdef OSF1
1783 # define CODE_OK (code == 2 /* experimentally determined */)
1784 # endif
1785 # ifdef IRIX5
1786 # define CODE_OK (code == EACCES)
1787 # endif
1788 # endif
1789 # if defined(LINUX)
1790 # if defined(ALPHA) || defined(M68K)
1791 void GC_write_fault_handler(int sig, int code, s_c * sc)
1792 # else
1793 # if defined(IA64)
1794 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
1795 # else
1796 void GC_write_fault_handler(int sig, s_c sc)
1797 # endif
1798 # endif
1799 # define SIG_OK (sig == SIGSEGV)
1800 # define CODE_OK TRUE
1801 /* Empirically c.trapno == 14, on IA32, but is that useful? */
1802 /* Should probably consider alignment issues on other */
1803 /* architectures. */
1804 # endif
1805 # if defined(SUNOS5SIGS)
1806 # ifdef __STDC__
1807 void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
1808 # else
1809 void GC_write_fault_handler(sig, scp, context)
1810 int sig;
1811 struct SIGINFO *scp;
1812 void * context;
1813 # endif
1814 # ifdef HPUX
1815 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1816 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
1817 || (scp -> si_code == BUS_ADRERR) \
1818 || (scp -> si_code == BUS_UNKNOWN) \
1819 || (scp -> si_code == SEGV_UNKNOWN) \
1820 || (scp -> si_code == BUS_OBJERR)
1821 # else
1822 # define SIG_OK (sig == SIGSEGV)
1823 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
1824 # endif
1825 # endif
1826 # if defined(MSWIN32)
1827 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
1828 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1829 EXCEPTION_ACCESS_VIOLATION)
1830 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1831 /* Write fault */
1832 # endif
1833 {
1834 register unsigned i;
1835 # ifdef IRIX5
1836 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
1837 # endif
1838 # if defined(OSF1) && defined(ALPHA)
1839 char * addr = (char *) (scp -> sc_traparg_a0);
1840 # endif
1841 # ifdef SUNOS5SIGS
1842 char * addr = (char *) (scp -> si_addr);
1843 # endif
1844 # ifdef LINUX
1845 # ifdef I386
1846 char * addr = (char *) (sc.cr2);
1847 # else
1848 # if defined(M68K)
1849 char * addr = NULL;
1850
1851 struct sigcontext *scp = (struct sigcontext *)(&sc);
1852
1853 int format = (scp->sc_formatvec >> 12) & 0xf;
1854 unsigned long *framedata = (unsigned long *)(scp + 1);
1855 unsigned long ea;
1856
1857 if (format == 0xa || format == 0xb) {
1858 /* 68020/030 */
1859 ea = framedata[2];
1860 } else if (format == 7) {
1861 /* 68040 */
1862 ea = framedata[3];
1863 } else if (format == 4) {
1864 /* 68060 */
1865 ea = framedata[0];
1866 if (framedata[1] & 0x08000000) {
1867 /* correct addr on misaligned access */
1868 ea = (ea+4095)&(~4095);
1869 }
1870 }
1871 addr = (char *)ea;
1872 # else
1873 # ifdef ALPHA
1874 char * addr = get_fault_addr(sc);
1875 # else
1876 # ifdef IA64
1877 char * addr = si -> si_addr;
1878 /* I believe this is claimed to work on all platforms for */
1879 /* Linux 2.3.47 and later. Hopefully we don't have to */
1880 /* worry about earlier kernels on IA64. */
1881 # else
1882 # if defined(POWERPC)
1883 char * addr = (char *) (sc.regs->dar);
1884 # else
1885 --> architecture not supported
1886 # endif
1887 # endif
1888 # endif
1889 # endif
1890 # endif
1891 # endif
1892 # if defined(MSWIN32)
1893 char * addr = (char *) (exc_info -> ExceptionRecord
1894 -> ExceptionInformation[1]);
1895 # define sig SIGSEGV
1896 # endif
1897
1898 if (SIG_OK && CODE_OK) {
1899 register struct hblk * h =
1900 (struct hblk *)((word)addr & ~(GC_page_size-1));
1901 GC_bool in_allocd_block;
1902
1903 # ifdef SUNOS5SIGS
1904 /* Address is only within the correct physical page. */
1905 in_allocd_block = FALSE;
1906 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1907 if (HDR(h+i) != 0) {
1908 in_allocd_block = TRUE;
1909 }
1910 }
1911 # else
1912 in_allocd_block = (HDR(addr) != 0);
1913 # endif
1914 if (!in_allocd_block) {
1915 /* Heap blocks now begin and end on page boundaries */
1916 SIG_PF old_handler;
1917
1918 if (sig == SIGSEGV) {
1919 old_handler = GC_old_segv_handler;
1920 } else {
1921 old_handler = GC_old_bus_handler;
1922 }
1923 if (old_handler == SIG_DFL) {
1924 # ifndef MSWIN32
1925 GC_err_printf1("Segfault at 0x%lx\n", addr);
1926 ABORT("Unexpected bus error or segmentation fault");
1927 # else
1928 return(EXCEPTION_CONTINUE_SEARCH);
1929 # endif
1930 } else {
1931 # if defined (SUNOS4) || defined(FREEBSD)
1932 (*old_handler) (sig, code, scp, addr);
1933 return;
1934 # endif
1935 # if defined (SUNOS5SIGS)
1936 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
1937 return;
1938 # endif
1939 # if defined (LINUX)
1940 # if defined(ALPHA) || defined(M68K)
1941 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
1942 # else
1943 # if defined(IA64)
1944 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
1945 # else
1946 (*(REAL_SIG_PF)old_handler) (sig, sc);
1947 # endif
1948 # endif
1949 return;
1950 # endif
1951 # if defined (IRIX5) || defined(OSF1)
1952 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
1953 return;
1954 # endif
1955 # ifdef MSWIN32
1956 return((*old_handler)(exc_info));
1957 # endif
1958 }
1959 }
1960 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1961 register int index = PHT_HASH(h+i);
1962
1963 set_pht_entry_from_index(GC_dirty_pages, index);
1964 }
1965 UNPROTECT(h, GC_page_size);
1966 # if defined(OSF1) || defined(LINUX)
1967 /* These reset the signal handler each time by default. */
1968 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
1969 # endif
1970 /* The write may not take place before dirty bits are read. */
1971 /* But then we'll fault again ... */
1972 # ifdef MSWIN32
1973 return(EXCEPTION_CONTINUE_EXECUTION);
1974 # else
1975 return;
1976 # endif
1977 }
1978 #ifdef MSWIN32
1979 return EXCEPTION_CONTINUE_SEARCH;
1980 #else
1981 GC_err_printf1("Segfault at 0x%lx\n", addr);
1982 ABORT("Unexpected bus error or segmentation fault");
1983 #endif
1984 }
1985
1986 /*
1987 * We hold the allocation lock. We expect block h to be written
1988 * shortly.
1989 */
1990 void GC_write_hint(h)
1991 struct hblk *h;
1992 {
1993 register struct hblk * h_trunc;
1994 register unsigned i;
1995 register GC_bool found_clean;
1996
1997 if (!GC_dirty_maintained) return;
1998 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1999 found_clean = FALSE;
2000 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2001 register int index = PHT_HASH(h_trunc+i);
2002
2003 if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
2004 found_clean = TRUE;
2005 set_pht_entry_from_index(GC_dirty_pages, index);
2006 }
2007 }
2008 if (found_clean) {
2009 UNPROTECT(h_trunc, GC_page_size);
2010 }
2011 }
2012
2013 void GC_dirty_init()
2014 {
2015 #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2016 struct sigaction act, oldact;
2017 # ifdef IRIX5
2018 act.sa_flags = SA_RESTART;
2019 act.sa_handler = GC_write_fault_handler;
2020 # else
2021 act.sa_flags = SA_RESTART | SA_SIGINFO;
2022 act.sa_sigaction = GC_write_fault_handler;
2023 # endif
2024 (void)sigemptyset(&act.sa_mask);
2025 #endif
2026 # ifdef PRINTSTATS
2027 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2028 # endif
2029 GC_dirty_maintained = TRUE;
2030 if (GC_page_size % HBLKSIZE != 0) {
2031 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2032 ABORT("Page size not multiple of HBLKSIZE");
2033 }
2034 # if defined(SUNOS4) || defined(FREEBSD)
2035 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2036 if (GC_old_bus_handler == SIG_IGN) {
2037 GC_err_printf0("Previously ignored bus error!?");
2038 GC_old_bus_handler = SIG_DFL;
2039 }
2040 if (GC_old_bus_handler != SIG_DFL) {
2041 # ifdef PRINTSTATS
2042 GC_err_printf0("Replaced other SIGBUS handler\n");
2043 # endif
2044 }
2045 # endif
2046 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2047 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2048 if (GC_old_segv_handler == SIG_IGN) {
2049 GC_err_printf0("Previously ignored segmentation violation!?");
2050 GC_old_segv_handler = SIG_DFL;
2051 }
2052 if (GC_old_segv_handler != SIG_DFL) {
2053 # ifdef PRINTSTATS
2054 GC_err_printf0("Replaced other SIGSEGV handler\n");
2055 # endif
2056 }
2057 # endif
2058 # if defined(SUNOS5SIGS) || defined(IRIX5)
2059 # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
2060 sigaction(SIGSEGV, 0, &oldact);
2061 sigaction(SIGSEGV, &act, 0);
2062 # else
2063 sigaction(SIGSEGV, &act, &oldact);
2064 # endif
2065 # if defined(_sigargs)
2066 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2067 /* sa_sigaction. */
2068 GC_old_segv_handler = oldact.sa_handler;
2069 # else /* Irix 6.x or SUNOS5SIGS */
2070 if (oldact.sa_flags & SA_SIGINFO) {
2071 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2072 } else {
2073 GC_old_segv_handler = oldact.sa_handler;
2074 }
2075 # endif
2076 if (GC_old_segv_handler == SIG_IGN) {
2077 GC_err_printf0("Previously ignored segmentation violation!?");
2078 GC_old_segv_handler = SIG_DFL;
2079 }
2080 if (GC_old_segv_handler != SIG_DFL) {
2081 # ifdef PRINTSTATS
2082 GC_err_printf0("Replaced other SIGSEGV handler\n");
2083 # endif
2084 }
2085 # ifdef HPUX
2086 sigaction(SIGBUS, &act, &oldact);
2087 GC_old_bus_handler = oldact.sa_handler;
2088 if (GC_old_bus_handler != SIG_DFL) {
2089 # ifdef PRINTSTATS
2090 GC_err_printf0("Replaced other SIGBUS handler\n");
2091 # endif
2092 }
2093 # endif
2094 # endif
2095 # if defined(MSWIN32)
2096 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2097 if (GC_old_segv_handler != NULL) {
2098 # ifdef PRINTSTATS
2099 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2100 # endif
2101 } else {
2102 GC_old_segv_handler = SIG_DFL;
2103 }
2104 # endif
2105 }
2106
2107
2108
2109 void GC_protect_heap()
2110 {
2111 ptr_t start;
2112 word len;
2113 unsigned i;
2114
2115 for (i = 0; i < GC_n_heap_sects; i++) {
2116 start = GC_heap_sects[i].hs_start;
2117 len = GC_heap_sects[i].hs_bytes;
2118 PROTECT(start, len);
2119 }
2120 }
2121
2122 /* We assume that either the world is stopped or it's OK to lose dirty */
2123 /* bits while this is happening (as in GC_enable_incremental). */
2124 void GC_read_dirty()
2125 {
2126 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2127 (sizeof GC_dirty_pages));
2128 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2129 GC_protect_heap();
2130 }
2131
2132 GC_bool GC_page_was_dirty(h)
2133 struct hblk * h;
2134 {
2135 register word index = PHT_HASH(h);
2136
2137 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2138 }
2139
2140 /*
2141 * Acquiring the allocation lock here is dangerous, since this
2142 * can be called from within GC_call_with_alloc_lock, and the cord
2143 * package does so. On systems that allow nested lock acquisition, this
2144 * happens to work.
2145 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2146 */
2147
2148 void GC_begin_syscall()
2149 {
2150 if (!I_HOLD_LOCK()) LOCK();
2151 }
2152
2153 void GC_end_syscall()
2154 {
2155 if (!I_HOLD_LOCK()) UNLOCK();
2156 }
2157
2158 void GC_unprotect_range(addr, len)
2159 ptr_t addr;
2160 word len;
2161 {
2162 struct hblk * start_block;
2163 struct hblk * end_block;
2164 register struct hblk *h;
2165 ptr_t obj_start;
2166
2167 if (!GC_incremental) return;
2168 obj_start = GC_base(addr);
2169 if (obj_start == 0) return;
2170 if (GC_base(addr + len - 1) != obj_start) {
2171 ABORT("GC_unprotect_range(range bigger than object)");
2172 }
2173 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2174 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
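/* Extend end_block to the last heap block within its page. */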
2175 end_block += GC_page_size/HBLKSIZE - 1;
2176 for (h = start_block; h <= end_block; h++) {
2177 register word index = PHT_HASH(h);
2178
2179 set_pht_entry_from_index(GC_dirty_pages, index);
2180 }
2181 UNPROTECT(start_block,
2182 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2183 }
2184
2185 #if !defined(MSWIN32) && !defined(LINUX_THREADS)
2186 /* Replacement for the UNIX read() system call. */
2187 /* Other calls that write to the heap            */
2188 /* should be handled similarly.                  */
2189 # if defined(__STDC__) && !defined(SUNOS4)
2190 # include <unistd.h>
2191 # include <sys/uio.h>
2192 ssize_t read(int fd, void *buf, size_t nbyte)
2193 # else
2194 # ifndef LINT
2195 int read(fd, buf, nbyte)
2196 # else
2197 int GC_read(fd, buf, nbyte)
2198 # endif
2199 int fd;
2200 char *buf;
2201 int nbyte;
2202 # endif
2203 {
2204 int result;
2205
2206 GC_begin_syscall();
2207 GC_unprotect_range(buf, (word)nbyte);
2208 # if defined(IRIX5) || defined(LINUX_THREADS)
2209 /* Indirect system call may not always be easily available. */
2210 /* We could call _read, but that would interfere with the */
2211 /* libpthread interception of read. */
2212 /* On Linux, we have to be careful with the linuxthreads */
2213 /* read interception. */
2214 {
2215 struct iovec iov;
2216
2217 iov.iov_base = buf;
2218 iov.iov_len = nbyte;
2219 result = readv(fd, &iov, 1);
2220 }
2221 # else
2222 result = syscall(SYS_read, fd, buf, nbyte);
2223 # endif
2224 GC_end_syscall();
2225 return(result);
2226 }
2227 #endif /* !MSWIN32 && !LINUX_THREADS */
2228
2229 #ifdef USE_LD_WRAP
2230 /* We use the GNU ld call wrapping facility. */
2231 /* This requires that the linker be invoked with "--wrap read". */
2232 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2233 /* I'm not sure that this actually wraps whatever version of read */
2234 /* is called by stdio. That code also mentions __read. */
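/* For illustration only, a link line using this facility might look   */
/* like the following (file names here are hypothetical):              */
/*   gcc -o myprog myprog.o gc.a -Wl,"--wrap read"                     */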
2235 # include <unistd.h>
2236 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2237 {
2238 int result;
2239
2240 GC_begin_syscall();
2241 GC_unprotect_range(buf, (word)nbyte);
2242 result = __real_read(fd, buf, nbyte);
2243 GC_end_syscall();
2244 return(result);
2245 }
2246
2247 /* We should probably also do this for __read, or whatever stdio */
2248 /* actually calls. */
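/* A minimal sketch of such a wrapper, assuming "--wrap __read" is     */
/* also passed to the linker (not enabled here):                       */
/*                                                                     */
/*   ssize_t __wrap___read(int fd, void *buf, size_t nbyte)            */
/*   {                                                                 */
/*       ssize_t result;                                               */
/*                                                                     */
/*       GC_begin_syscall();                                           */
/*       GC_unprotect_range(buf, (word)nbyte);                         */
/*       result = __real___read(fd, buf, nbyte);                       */
/*       GC_end_syscall();                                             */
/*       return(result);                                               */
/*   }                                                                 */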
2249 #endif
2250
2251 /*ARGSUSED*/
2252 GC_bool GC_page_was_ever_dirty(h)
2253 struct hblk *h;
2254 {
2255 return(TRUE);
2256 }
2257
2258 /* Reset the n pages starting at h to "was never dirty" status. */
2259 /*ARGSUSED*/
2260 void GC_is_fresh(h, n)
2261 struct hblk *h;
2262 word n;
2263 {
2264 }
2265
2266 # endif /* MPROTECT_VDB */
2267
2268 # ifdef PROC_VDB
2269
2270 /*
2271 * See DEFAULT_VDB for interface descriptions.
2272 */
2273
2274 /*
2275 * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2276 * from which we can read page modified bits. This facility is far from
2277 * optimal (e.g. we would like to get the info for only some of the
2278 * address space), but it avoids intercepting system calls.
2279 */
2280
2281 #include <errno.h>
2282 #include <sys/types.h>
2283 #include <sys/signal.h>
2284 #include <sys/fault.h>
2285 #include <sys/syscall.h>
2286 #include <sys/procfs.h>
2287 #include <sys/stat.h>
2288 #include <fcntl.h>
2289
2290 #define INITIAL_BUF_SZ 4096
2291 word GC_proc_buf_size = INITIAL_BUF_SZ;
2292 char *GC_proc_buf;
2293
2294 #ifdef SOLARIS_THREADS
2295 /* We don't have exact sp values for threads. So we count on */
2296 /* occasionally declaring stack pages to be fresh. Thus we */
2297 /* need a real implementation of GC_is_fresh. We can't clear */
2298 /* entries in GC_written_pages, since that would declare all */
2299 /* pages with the given hash address to be fresh. */
2300 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2301 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2302 /* Collisions are dropped. */
2303
2304 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2305 # define ADD_FRESH_PAGE(h) \
2306 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2307 # define PAGE_IS_FRESH(h) \
2308 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2309 #endif
2310
2311 /* Add all pages in pht2 to pht1 */
2312 void GC_or_pages(pht1, pht2)
2313 page_hash_table pht1, pht2;
2314 {
2315 register int i;
2316
2317 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2318 }
2319
2320 int GC_proc_fd;
2321
2322 void GC_dirty_init()
2323 {
2324 int fd;
2325 char buf[30];
2326
2327 GC_dirty_maintained = TRUE;
2328 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2329 register int i;
2330
2331 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2332 # ifdef PRINTSTATS
2333 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2334 (unsigned long)
2335 (GC_words_allocd + GC_words_allocd_before_gc));
2336 # endif
2337 }
2338 sprintf(buf, "/proc/%d", getpid());
2339 fd = open(buf, O_RDONLY);
2340 if (fd < 0) {
2341 ABORT("/proc open failed");
2342 }
2343 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2344 close(fd);
2345 if (GC_proc_fd < 0) {
2346 ABORT("/proc ioctl failed");
2347 }
2348 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2349 # ifdef SOLARIS_THREADS
2350 GC_fresh_pages = (struct hblk **)
2351 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2352 if (GC_fresh_pages == 0) {
2353 GC_err_printf0("No space for fresh pages\n");
2354 EXIT();
2355 }
2356 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2357 # endif
2358 }
2359
2360 /* Ignore write hints. They don't help us here. */
2361 /*ARGSUSED*/
2362 void GC_write_hint(h)
2363 struct hblk *h;
2364 {
2365 }
2366
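/* READ is used below to read from GC_proc_fd.  Under SOLARIS_THREADS   */
/* the raw system call is used, presumably to bypass any user-level     */
/* read() interception (such as the replacement defined earlier).       */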
2367 #ifdef SOLARIS_THREADS
2368 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2369 #else
2370 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2371 #endif
2372
2373 void GC_read_dirty()
2374 {
2375 unsigned long ps, np;
2376 int nmaps;
2377 ptr_t vaddr;
2378 struct prasmap * map;
2379 char * bufp;
2380 ptr_t current_addr, limit;
2381 int i;
2382 int dummy;
2383
2384 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2385
2386 bufp = GC_proc_buf;
2387 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2388 # ifdef PRINTSTATS
2389 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2390 GC_proc_buf_size);
2391 # endif
2392 {
2393 /* Retry with larger buffer. */
2394 word new_size = 2 * GC_proc_buf_size;
2395 char * new_buf = GC_scratch_alloc(new_size);
2396
2397 if (new_buf != 0) {
2398 GC_proc_buf = bufp = new_buf;
2399 GC_proc_buf_size = new_size;
2400 }
2401 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2402 WARN("Insufficient space for /proc read\n", 0);
2403 /* Punt: */
2404 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2405 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2406 # ifdef SOLARIS_THREADS
2407 BZERO(GC_fresh_pages,
2408 MAX_FRESH_PAGES * sizeof (struct hblk *));
2409 # endif
2410 return;
2411 }
2412 }
2413 }
2414 /* Copy dirty bits into GC_grungy_pages */
2415 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2416 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2417 nmaps, PG_REFERENCED, PG_MODIFIED); */
2418 bufp = bufp + sizeof(struct prpageheader);
2419 for (i = 0; i < nmaps; i++) {
2420 map = (struct prasmap *)bufp;
2421 vaddr = (ptr_t)(map -> pr_vaddr);
2422 ps = map -> pr_pagesize;
2423 np = map -> pr_npage;
2424 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2425 limit = vaddr + ps * np;
2426 bufp += sizeof (struct prasmap);
2427 for (current_addr = vaddr;
2428 current_addr < limit; current_addr += ps){
2429 if ((*bufp++) & PG_MODIFIED) {
2430 register struct hblk * h = (struct hblk *) current_addr;
2431
2432 while ((ptr_t)h < current_addr + ps) {
2433 register word index = PHT_HASH(h);
2434
2435 set_pht_entry_from_index(GC_grungy_pages, index);
2436 # ifdef SOLARIS_THREADS
2437 {
2438 register int slot = FRESH_PAGE_SLOT(h);
2439
2440 if (GC_fresh_pages[slot] == h) {
2441 GC_fresh_pages[slot] = 0;
2442 }
2443 }
2444 # endif
2445 h++;
2446 }
2447 }
2448 }
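/* Round bufp up to the next long-word boundary before reading the next prasmap. */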
2449 bufp += sizeof(long) - 1;
2450 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2451 }
2452 /* Update GC_written_pages. */
2453 GC_or_pages(GC_written_pages, GC_grungy_pages);
2454 # ifdef SOLARIS_THREADS
2455 /* Make sure that old stacks are considered completely clean */
2456 /* unless written again. */
2457 GC_old_stacks_are_fresh();
2458 # endif
2459 }
2460
2461 #undef READ
2462
2463 GC_bool GC_page_was_dirty(h)
2464 struct hblk *h;
2465 {
2466 register word index = PHT_HASH(h);
2467 register GC_bool result;
2468
2469 result = get_pht_entry_from_index(GC_grungy_pages, index);
2470 # ifdef SOLARIS_THREADS
2471 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2472 /* This happens only if the page was declared fresh since */
2473 /* the read_dirty call, e.g. because it's in an unused     */
2474 /* thread stack.  It's OK to treat it as clean in that     */
2475 /* case, and it's consistent with                          */
2476 /* GC_page_was_ever_dirty.                                 */
2477 # endif
2478 return(result);
2479 }
2480
2481 GC_bool GC_page_was_ever_dirty(h)
2482 struct hblk *h;
2483 {
2484 register word index = PHT_HASH(h);
2485 register GC_bool result;
2486
2487 result = get_pht_entry_from_index(GC_written_pages, index);
2488 # ifdef SOLARIS_THREADS
2489 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2490 # endif
2491 return(result);
2492 }
2493
2494 /* Caller holds allocation lock. */
2495 void GC_is_fresh(h, n)
2496 struct hblk *h;
2497 word n;
2498 {
2499
2500 register word index;
2501
2502 # ifdef SOLARIS_THREADS
2503 register word i;
2504
2505 if (GC_fresh_pages != 0) {
2506 for (i = 0; i < n; i++) {
2507 ADD_FRESH_PAGE(h + i);
2508 }
2509 }
2510 # endif
2511 }
2512
2513 # endif /* PROC_VDB */
2514
2515
2516 # ifdef PCR_VDB
2517
2518 # include "vd/PCR_VD.h"
2519
2520 # define NPAGES (32*1024) /* 128 MB */
2521
2522 PCR_VD_DB GC_grungy_bits[NPAGES];
2523
2524 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2525 /* HBLKSIZE aligned. */
2526
2527 void GC_dirty_init()
2528 {
2529 GC_dirty_maintained = TRUE;
2530 /* For the time being, we assume the heap generally grows up */
2531 GC_vd_base = GC_heap_sects[0].hs_start;
2532 if (GC_vd_base == 0) {
2533 ABORT("Bad initial heap segment");
2534 }
2535 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2536 != PCR_ERes_okay) {
2537 ABORT("dirty bit initialization failed");
2538 }
2539 }
2540
2541 void GC_read_dirty()
2542 {
2543 /* lazily enable dirty bits on newly added heap sects */
2544 {
2545 static int onhs = 0;
2546 int nhs = GC_n_heap_sects;
2547 for( ; onhs < nhs; onhs++ ) {
2548 PCR_VD_WriteProtectEnable(
2549 GC_heap_sects[onhs].hs_start,
2550 GC_heap_sects[onhs].hs_bytes );
2551 }
2552 }
2553
2554
2555 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2556 != PCR_ERes_okay) {
2557 ABORT("dirty bit read failed");
2558 }
2559 }
2560
2561 GC_bool GC_page_was_dirty(h)
2562 struct hblk *h;
2563 {
2564 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2565 return(TRUE);
2566 }
2567 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2568 }
2569
2570 /*ARGSUSED*/
2571 void GC_write_hint(h)
2572 struct hblk *h;
2573 {
2574 PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2575 PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2576 }
2577
2578 # endif /* PCR_VDB */
2579
2580 /*
2581 * Call stack save code for debugging.
2582 * Should probably be in mach_dep.c, but that requires reorganization.
2583 */
2584 #if defined(SPARC)
2585 # if defined(LINUX)
2586 struct frame {
2587 long fr_local[8];
2588 long fr_arg[6];
2589 struct frame *fr_savfp;
2590 long fr_savpc;
2591 # ifndef __arch64__
2592 char *fr_stret;
2593 # endif
2594 long fr_argd[6];
2595 long fr_argx[0];
2596 };
2597 # else
2598 # if defined(SUNOS4)
2599 # include <machine/frame.h>
2600 # else
2601 # if defined (DRSNX)
2602 # include <sys/sparc/frame.h>
2603 # else
2604 # if defined(OPENBSD)
2605 # include <frame.h>
2606 # else
2607 # include <sys/frame.h>
2608 # endif
2609 # endif
2610 # endif
2611 # endif
2612 # if NARGS > 6
2613 --> We only know how to get the first 6 arguments
2614 # endif
2615
2616 #ifdef SAVE_CALL_CHAIN
2617 /* Fill in the pc and argument information for up to NFRAMES of my */
2618 /* callers.  Ignore my frame and my caller's frame.                */
2619
2620 #ifdef OPENBSD
2621 # define FR_SAVFP fr_fp
2622 # define FR_SAVPC fr_pc
2623 #else
2624 # define FR_SAVFP fr_savfp
2625 # define FR_SAVPC fr_savpc
2626 #endif
2627
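/* On 64-bit SPARC (V9) the saved frame pointers carry a constant      */
/* "stack bias" of 2047, which must be added back before dereferencing. */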
2628 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
2629 #define BIAS 2047
2630 #else
2631 #define BIAS 0
2632 #endif
2633
2634 void GC_save_callers (info)
2635 struct callinfo info[NFRAMES];
2636 {
2637 struct frame *frame;
2638 struct frame *fp;
2639 int nframes = 0;
2640 word GC_save_regs_in_stack();
2641
2642 frame = (struct frame *) GC_save_regs_in_stack ();
2643
2644 for (fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
2645 fp != 0 && nframes < NFRAMES;
2646 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
2647 register int i;
2648
2649 info[nframes].ci_pc = fp->FR_SAVPC;
2650 for (i = 0; i < NARGS; i++) {
2651 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]); /* complemented, apparently to avoid false pointer references */
2652 }
2653 }
2654 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
2655 }
2656
2657 #endif /* SAVE_CALL_CHAIN */
2658 #endif /* SPARC */
2659
2660
2661