Imported version 5.0alpha6.
[gcc.git] / boehm-gc / os_dep.c
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 # include "gc_priv.h"
18
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* In some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
26 # define __KERNEL__
27 # include <asm/signal.h>
28 # undef __KERNEL__
29 # else
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
35 # if 2 <= __GLIBC__
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
46 # endif
47 # endif
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
49 # include <sys/types.h>
50 # if !defined(MSWIN32) && !defined(SUNOS4)
51 # include <unistd.h>
52 # endif
53 # endif
54
55 # include <stdio.h>
56 # include <signal.h>
57
58 /* Blatantly OS dependent routines, except for those that are related */
59 /* to dynamic loading. */
60
61 # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
62 # define NEED_FIND_LIMIT
63 # endif
64
65 # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
66 # define NEED_FIND_LIMIT
67 # endif
68
69 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
70 # define NEED_FIND_LIMIT
71 # endif
72
73 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
74 # define NEED_FIND_LIMIT
75 # endif
76
77 # if defined(LINUX) && \
78 (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
79 || defined(MIPS))
80 # define NEED_FIND_LIMIT
81 # endif
82
83 #ifdef NEED_FIND_LIMIT
84 # include <setjmp.h>
85 #endif
86
87 #ifdef FREEBSD
88 # include <machine/trap.h>
89 #endif
90
91 #ifdef AMIGA
92 # include <proto/exec.h>
93 # include <proto/dos.h>
94 # include <dos/dosextens.h>
95 # include <workbench/startup.h>
96 #endif
97
98 #ifdef MSWIN32
99 # define WIN32_LEAN_AND_MEAN
100 # define NOSERVICE
101 # include <windows.h>
102 #endif
103
104 #ifdef MACOS
105 # include <Processes.h>
106 #endif
107
108 #ifdef IRIX5
109 # include <sys/uio.h>
110 # include <malloc.h> /* for locking */
111 #endif
112 #ifdef USE_MMAP
113 # include <sys/types.h>
114 # include <sys/mman.h>
115 # include <sys/stat.h>
116 # include <fcntl.h>
117 #endif
118
119 #ifdef SUNOS5SIGS
120 # include <sys/siginfo.h>
121 # undef setjmp
122 # undef longjmp
123 # define setjmp(env) sigsetjmp(env, 1)
124 # define longjmp(env, val) siglongjmp(env, val)
125 # define jmp_buf sigjmp_buf
126 #endif
127
128 #ifdef DJGPP
129 /* Apparently necessary for djgpp 2.01. May cause problems with */
130 /* other versions. */
131 typedef long unsigned int caddr_t;
132 #endif
133
134 #ifdef PCR
135 # include "il/PCR_IL.h"
136 # include "th/PCR_ThCtl.h"
137 # include "mm/PCR_MM.h"
138 #endif
139
140 #if !defined(NO_EXECUTE_PERMISSION)
141 # define OPT_PROT_EXEC PROT_EXEC
142 #else
143 # define OPT_PROT_EXEC 0
144 #endif
145
146 #if defined(SEARCH_FOR_DATA_START)
147 /* The following doesn't work if the GC is in a dynamic library. */
148 /* The I386 case can be handled without a search. The Alpha case */
149 /* used to be handled differently as well, but the rules changed */
150 /* for recent Linux versions. This seems to be the easiest way to */
151 /* cover all versions. */
152 ptr_t GC_data_start;
153
154 extern char * GC_copyright[]; /* Any data symbol would do. */
155
156 void GC_init_linux_data_start()
157 {
158 extern ptr_t GC_find_limit();
159
160 GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
161 }
162 #endif
163
164 # ifdef ECOS
165
166 # ifndef ECOS_GC_MEMORY_SIZE
167 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
168 # endif /* ECOS_GC_MEMORY_SIZE */
169
170 // setjmp() function, as described in ANSI para 7.6.1.1
171 #define setjmp( __env__ ) hal_setjmp( __env__ )
172
173 // FIXME: This is a simple way of allocating memory which is
174 // compatible with ECOS early releases. Later releases use a more
175 // sophisticated means of allocating memory than this simple static
176 // allocator, but this method is at least bound to work.
177 static char memory[ECOS_GC_MEMORY_SIZE];
178 static char *brk = memory;
179
180 static void *tiny_sbrk(ptrdiff_t increment)
181 {
182 void *p = brk;
183
184 brk += increment;
185
186 if (brk > memory + sizeof memory)
187 {
188 brk -= increment;
189 return NULL;
190 }
191
192 return p;
193 }
194 #define sbrk tiny_sbrk
195 # endif /* ECOS */
196
197 # ifdef OS2
198
199 # include <stddef.h>
200
201 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
202
203 struct exe_hdr {
204 unsigned short magic_number;
205 unsigned short padding[29];
206 long new_exe_offset;
207 };
208
209 #define E_MAGIC(x) (x).magic_number
210 #define EMAGIC 0x5A4D
211 #define E_LFANEW(x) (x).new_exe_offset
212
213 struct e32_exe {
214 unsigned char magic_number[2];
215 unsigned char byte_order;
216 unsigned char word_order;
217 unsigned long exe_format_level;
218 unsigned short cpu;
219 unsigned short os;
220 unsigned long padding1[13];
221 unsigned long object_table_offset;
222 unsigned long object_count;
223 unsigned long padding2[31];
224 };
225
226 #define E32_MAGIC1(x) (x).magic_number[0]
227 #define E32MAGIC1 'L'
228 #define E32_MAGIC2(x) (x).magic_number[1]
229 #define E32MAGIC2 'X'
230 #define E32_BORDER(x) (x).byte_order
231 #define E32LEBO 0
232 #define E32_WORDER(x) (x).word_order
233 #define E32LEWO 0
234 #define E32_CPU(x) (x).cpu
235 #define E32CPU286 1
236 #define E32_OBJTAB(x) (x).object_table_offset
237 #define E32_OBJCNT(x) (x).object_count
238
239 struct o32_obj {
240 unsigned long size;
241 unsigned long base;
242 unsigned long flags;
243 unsigned long pagemap;
244 unsigned long mapsize;
245 unsigned long reserved;
246 };
247
248 #define O32_FLAGS(x) (x).flags
249 #define OBJREAD 0x0001L
250 #define OBJWRITE 0x0002L
251 #define OBJINVALID 0x0080L
252 #define O32_SIZE(x) (x).size
253 #define O32_BASE(x) (x).base
254
255 # else /* IBM's compiler */
256
257 /* A kludge to get around what appears to be a header file bug */
258 # ifndef WORD
259 # define WORD unsigned short
260 # endif
261 # ifndef DWORD
262 # define DWORD unsigned long
263 # endif
264
265 # define EXE386 1
266 # include <newexe.h>
267 # include <exe386.h>
268
269 # endif /* __IBMC__ */
270
271 # define INCL_DOSEXCEPTIONS
272 # define INCL_DOSPROCESS
273 # define INCL_DOSERRORS
274 # define INCL_DOSMODULEMGR
275 # define INCL_DOSMEMMGR
276 # include <os2.h>
277
278
279 /* Disable and enable signals during nontrivial allocations */
280
281 void GC_disable_signals(void)
282 {
283 ULONG nest;
284
285 DosEnterMustComplete(&nest);
286 if (nest != 1) ABORT("nested GC_disable_signals");
287 }
288
289 void GC_enable_signals(void)
290 {
291 ULONG nest;
292
293 DosExitMustComplete(&nest);
294 if (nest != 0) ABORT("GC_enable_signals");
295 }
296
297
298 # else
299
300 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
301 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
302 && !defined(NO_SIGSET)
303
304 # if defined(sigmask) && !defined(UTS4)
305 /* Use the traditional BSD interface */
306 # define SIGSET_T int
307 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
308 # define SIG_FILL(set) (set) = 0x7fffffff
309 /* Setting the leading bit appears to provoke a bug in some */
310 /* longjmp implementations. Most systems appear not to have */
311 /* a signal 32. */
312 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
313 # else
314 /* Use POSIX/SYSV interface */
315 # define SIGSET_T sigset_t
316 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
317 # define SIG_FILL(set) sigfillset(&set)
318 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
319 # endif
320
321 static GC_bool mask_initialized = FALSE;
322
323 static SIGSET_T new_mask;
324
325 static SIGSET_T old_mask;
326
327 static SIGSET_T dummy;
328
329 #if defined(PRINTSTATS) && !defined(THREADS)
330 # define CHECK_SIGNALS
331 int GC_sig_disabled = 0;
332 #endif
333
334 void GC_disable_signals()
335 {
336 if (!mask_initialized) {
337 SIG_FILL(new_mask);
338
339 SIG_DEL(new_mask, SIGSEGV);
340 SIG_DEL(new_mask, SIGILL);
341 SIG_DEL(new_mask, SIGQUIT);
342 # ifdef SIGBUS
343 SIG_DEL(new_mask, SIGBUS);
344 # endif
345 # ifdef SIGIOT
346 SIG_DEL(new_mask, SIGIOT);
347 # endif
348 # ifdef SIGEMT
349 SIG_DEL(new_mask, SIGEMT);
350 # endif
351 # ifdef SIGTRAP
352 SIG_DEL(new_mask, SIGTRAP);
353 # endif
354 mask_initialized = TRUE;
355 }
356 # ifdef CHECK_SIGNALS
357 if (GC_sig_disabled != 0) ABORT("Nested disables");
358 GC_sig_disabled++;
359 # endif
360 SIGSETMASK(old_mask,new_mask);
361 }
362
363 void GC_enable_signals()
364 {
365 # ifdef CHECK_SIGNALS
366 if (GC_sig_disabled != 1) ABORT("Unmatched enable");
367 GC_sig_disabled--;
368 # endif
369 SIGSETMASK(dummy,old_mask);
370 }
371
372 # endif /* !PCR */
373
374 # endif /*!OS/2 */
375
376 /* Ivan Demakov: simplest way (to me) */
377 #if defined (DOS4GW) || defined (NO_SIGSET)
378 void GC_disable_signals() { }
379 void GC_enable_signals() { }
380 #endif
381
382 /* Find the page size */
383 word GC_page_size;
384
385 # ifdef MSWIN32
386 void GC_setpagesize()
387 {
388 SYSTEM_INFO sysinfo;
389
390 GetSystemInfo(&sysinfo);
391 GC_page_size = sysinfo.dwPageSize;
392 }
393
394 # else
395 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
396 || defined(USE_MUNMAP)
397 void GC_setpagesize()
398 {
399 GC_page_size = GETPAGESIZE();
400 }
401 # else
402 /* It's acceptable to fake it. */
403 void GC_setpagesize()
404 {
405 GC_page_size = HBLKSIZE;
406 }
407 # endif
408 # endif
409
410 /*
411 * Find the base of the stack.
412 * Used only in single-threaded environment.
413 * With threads, GC_mark_roots needs to know how to do this.
414 * Called with allocator lock held.
415 */
416 # ifdef MSWIN32
417 # define is_writable(prot) ((prot) == PAGE_READWRITE \
418 || (prot) == PAGE_WRITECOPY \
419 || (prot) == PAGE_EXECUTE_READWRITE \
420 || (prot) == PAGE_EXECUTE_WRITECOPY)
421 /* Return the number of bytes that are writable starting at p. */
422 /* The pointer p is assumed to be page aligned. */
423 /* If base is not 0, *base becomes the beginning of the */
424 /* allocation region containing p. */
425 word GC_get_writable_length(ptr_t p, ptr_t *base)
426 {
427 MEMORY_BASIC_INFORMATION buf;
428 word result;
429 word protect;
430
431 result = VirtualQuery(p, &buf, sizeof(buf));
432 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
433 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
434 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
435 if (!is_writable(protect)) {
436 return(0);
437 }
438 if (buf.State != MEM_COMMIT) return(0);
439 return(buf.RegionSize);
440 }
441
442 ptr_t GC_get_stack_base()
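/* Illustrative arithmetic only (not from the original source): with a */
/* 4K page size and sp == 0x12f8a0, trunc_sp below is 0x12f000; if     */
/* GC_get_writable_length reports 0x11000 writable bytes starting      */
/* there, the stack base returned is 0x140000.                         */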
443 {
444 int dummy;
445 ptr_t sp = (ptr_t)(&dummy);
446 ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
447 word size = GC_get_writable_length(trunc_sp, 0);
448
449 return(trunc_sp + size);
450 }
451
452
453 # else
454
455 # ifdef OS2
456
457 ptr_t GC_get_stack_base()
458 {
459 PTIB ptib;
460 PPIB ppib;
461
462 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
463 GC_err_printf0("DosGetInfoBlocks failed\n");
464 ABORT("DosGetInfoBlocks failed\n");
465 }
466 return((ptr_t)(ptib -> tib_pstacklimit));
467 }
468
469 # else
470
471 # ifdef AMIGA
472
473 ptr_t GC_get_stack_base()
474 {
475 struct Process *proc = (struct Process*)SysBase->ThisTask;
476
477 /* Reference: Amiga Guru Book Pages: 42,567,574 */
478 if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
479 && proc->pr_CLI != NULL) {
480 /* first ULONG is StackSize */
481 /*longPtr = proc->pr_ReturnAddr;
482 size = longPtr[0];*/
483
484 return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
485 } else {
486 return (char *)proc->pr_Task.tc_SPUpper;
487 }
488 }
489
490 #if 0 /* old version */
491 ptr_t GC_get_stack_base()
492 {
493 extern struct WBStartup *_WBenchMsg;
494 extern long __base;
495 extern long __stack;
496 struct Task *task;
497 struct Process *proc;
498 struct CommandLineInterface *cli;
499 long size;
500
501 if ((task = FindTask(0)) == 0) {
502 GC_err_puts("Cannot find own task structure\n");
503 ABORT("task missing");
504 }
505 proc = (struct Process *)task;
506 cli = BADDR(proc->pr_CLI);
507
508 if (_WBenchMsg != 0 || cli == 0) {
509 size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
510 } else {
511 size = cli->cli_DefaultStack * 4;
512 }
513 return (ptr_t)(__base + GC_max(size, __stack));
514 }
515 #endif /* 0 */
516
517 # else /* !AMIGA, !OS2, ... */
518
519 # ifdef NEED_FIND_LIMIT
520 /* Some tools to implement HEURISTIC2 */
521 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
522 /* static */ jmp_buf GC_jmp_buf;
523
524 /*ARGSUSED*/
525 void GC_fault_handler(sig)
526 int sig;
527 {
528 longjmp(GC_jmp_buf, 1);
529 }
530
531 # ifdef __STDC__
532 typedef void (*handler)(int);
533 # else
534 typedef void (*handler)();
535 # endif
536
537 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
538 static struct sigaction old_segv_act;
539 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
540 static struct sigaction old_bus_act;
541 # endif
542 # else
543 static handler old_segv_handler, old_bus_handler;
544 # endif
545
546 void GC_setup_temporary_fault_handler()
547 {
548 # ifndef ECOS
549 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
550 struct sigaction act;
551
552 act.sa_handler = GC_fault_handler;
553 act.sa_flags = SA_RESTART | SA_NODEFER;
554 /* The presence of SA_NODEFER represents yet another gross */
555 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
556 /* interact correctly with -lthread. We hide the confusion */
557 /* by making sure that signal handling doesn't affect the */
558 /* signal mask. */
559
560 (void) sigemptyset(&act.sa_mask);
561 # ifdef IRIX_THREADS
562 /* Older versions have a bug related to retrieving and */
563 /* setting a handler at the same time. */
564 (void) sigaction(SIGSEGV, 0, &old_segv_act);
565 (void) sigaction(SIGSEGV, &act, 0);
566 # else
567 (void) sigaction(SIGSEGV, &act, &old_segv_act);
568 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
569 || defined(HPUX)
570 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
571 /* Pthreads doesn't exist under Irix 5.x, so we */
572 /* don't have to worry in the threads case. */
573 (void) sigaction(SIGBUS, &act, &old_bus_act);
574 # endif
575 # endif /* IRIX_THREADS */
576 # else
577 old_segv_handler = signal(SIGSEGV, GC_fault_handler);
578 # ifdef SIGBUS
579 old_bus_handler = signal(SIGBUS, GC_fault_handler);
580 # endif
581 # endif
582 # endif /* ECOS */
583 }
584
585 void GC_reset_fault_handler()
586 {
587 # ifndef ECOS
588 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
589 (void) sigaction(SIGSEGV, &old_segv_act, 0);
590 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
591 || defined(HPUX)
592 (void) sigaction(SIGBUS, &old_bus_act, 0);
593 # endif
594 # else
595 (void) signal(SIGSEGV, old_segv_handler);
596 # ifdef SIGBUS
597 (void) signal(SIGBUS, old_bus_handler);
598 # endif
599 # endif
600 # endif /* ECOS */
601 }
602
603 /* Return the first nonaddressable location > p (up) or */
604 /* the smallest location q s.t. [q,p] is addressable (!up). */
605 ptr_t GC_find_limit(p, up)
606 ptr_t p;
607 GC_bool up;
608 {
609 # ifndef ECOS
610 static VOLATILE ptr_t result;
611 /* Needs to be static, since otherwise it may not be */
612 /* preserved across the longjmp. Can safely be */
613 /* static since it's only called once, with the */
614 /* allocation lock held. */
615
616
617 GC_setup_temporary_fault_handler();
618 if (setjmp(GC_jmp_buf) == 0) {
619 result = (ptr_t)(((word)(p))
620 & ~(MIN_PAGE_SIZE-1));
621 for (;;) {
622 if (up) {
623 result += MIN_PAGE_SIZE;
624 } else {
625 result -= MIN_PAGE_SIZE;
626 }
627 GC_noop1((word)(*result));
628 }
629 }
630 GC_reset_fault_handler();
631 if (!up) {
632 result += MIN_PAGE_SIZE;
633 }
634 return(result);
635 # else /* ECOS */
636 abort();
637 # endif /* ECOS */
638 }
639 # endif
640
641 # ifndef ECOS
642
643 #ifdef LINUX_STACKBOTTOM
644
645 # define STAT_SKIP 27 /* Number of fields preceding startstack */
646 /* field in /proc/self/stat */
647
648 ptr_t GC_linux_stack_base(void)
649 {
650 FILE *f;
651 char c;
652 word result = 0;
653 int i;
654
655 f = fopen("/proc/self/stat", "r");
656 if (NULL == f) ABORT("Couldn't open /proc/self/stat");
657 c = getc(f);
658 /* Skip the required number of fields. This number is hopefully */
659 /* constant across all Linux implementations. */
660 for (i = 0; i < STAT_SKIP; ++i) {
661 while (isspace(c)) c = getc(f);
662 while (!isspace(c)) c = getc(f);
663 }
664 while (isspace(c)) c = getc(f);
665 while (isdigit(c)) {
666 result *= 10;
667 result += c - '0';
668 c = getc(f);
669 }
670 if (result < 0x10000000) ABORT("Absurd stack bottom value");
671 return (ptr_t)result;
672 }
673
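/* A minimal standalone sketch of the same parse, for reference only.  */
/* Like the code above, it assumes that "startstack" is the 28th       */
/* whitespace-separated field of /proc/self/stat (see proc(5)) and     */
/* that no earlier field contains embedded whitespace. It is not part  */
/* of the collector.                                                   */
#if 0
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/self/stat", "r");
    unsigned long start_stack = 0;
    int i;

    if (NULL == f) return 1;
    for (i = 0; i < 27; i++) (void)fscanf(f, "%*s"); /* skip leading fields */
    (void)fscanf(f, "%lu", &start_stack);            /* field 28: startstack */
    fclose(f);
    printf("stack bottom = 0x%lx\n", start_stack);
    return 0;
}
#endif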
674 #endif /* LINUX_STACKBOTTOM */
675
676 ptr_t GC_get_stack_base()
677 {
678 word dummy;
679 ptr_t result;
680
681 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
682
683 # if defined(STACKBASE)
684 extern ptr_t STACKBASE;
685 return(STACKBASE);
686 # else
687 # ifdef STACKBOTTOM
688 return(STACKBOTTOM);
689 # else
690 # ifdef HEURISTIC1
691 # ifdef STACK_GROWS_DOWN
692 result = (ptr_t)((((word)(&dummy))
693 + STACKBOTTOM_ALIGNMENT_M1)
694 & ~STACKBOTTOM_ALIGNMENT_M1);
695 # else
696 result = (ptr_t)(((word)(&dummy))
697 & ~STACKBOTTOM_ALIGNMENT_M1);
698 # endif
699 # endif /* HEURISTIC1 */
700 # ifdef LINUX_STACKBOTTOM
701 result = GC_linux_stack_base();
702 # endif
703 # ifdef HEURISTIC2
704 # ifdef STACK_GROWS_DOWN
705 result = GC_find_limit((ptr_t)(&dummy), TRUE);
706 # ifdef HEURISTIC2_LIMIT
707 if (result > HEURISTIC2_LIMIT
708 && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
709 result = HEURISTIC2_LIMIT;
710 }
711 # endif
712 # else
713 result = GC_find_limit((ptr_t)(&dummy), FALSE);
714 # ifdef HEURISTIC2_LIMIT
715 if (result < HEURISTIC2_LIMIT
716 && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
717 result = HEURISTIC2_LIMIT;
718 }
719 # endif
720 # endif
721
722 # endif /* HEURISTIC2 */
723 # ifdef STACK_GROWS_DOWN
724 if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
725 # endif
726 return(result);
727 # endif /* STACKBOTTOM */
728 # endif /* STACKBASE */
729 }
730 # endif /* ECOS */
731
732 # endif /* ! AMIGA */
733 # endif /* ! OS2 */
734 # endif /* ! MSWIN32 */
735
736 /*
737 * Register static data segment(s) as roots.
738 * If more data segments are added later then they need to be registered
739 * at that point (as we do with SunOS dynamic loading),
740 * or GC_mark_roots needs to check for them (as we do with PCR).
741 * Called with allocator lock held.
742 */
743
744 # ifdef OS2
745
746 void GC_register_data_segments()
747 {
748 PTIB ptib;
749 PPIB ppib;
750 HMODULE module_handle;
751 # define PBUFSIZ 512
752 UCHAR path[PBUFSIZ];
753 FILE * myexefile;
754 struct exe_hdr hdrdos; /* MSDOS header. */
755 struct e32_exe hdr386; /* Real header for my executable */
756 struct o32_obj seg; /* Current segment */
757 int nsegs;
758
759
760 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
761 GC_err_printf0("DosGetInfoBlocks failed\n");
762 ABORT("DosGetInfoBlocks failed\n");
763 }
764 module_handle = ppib -> pib_hmte;
765 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
766 GC_err_printf0("DosQueryModuleName failed\n");
767 ABORT("DosGetInfoBlocks failed\n");
768 }
769 myexefile = fopen(path, "rb");
770 if (myexefile == 0) {
771 GC_err_puts("Couldn't open executable ");
772 GC_err_puts(path); GC_err_puts("\n");
773 ABORT("Failed to open executable\n");
774 }
775 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
776 GC_err_puts("Couldn't read MSDOS header from ");
777 GC_err_puts(path); GC_err_puts("\n");
778 ABORT("Couldn't read MSDOS header");
779 }
780 if (E_MAGIC(hdrdos) != EMAGIC) {
781 GC_err_puts("Executable has wrong DOS magic number: ");
782 GC_err_puts(path); GC_err_puts("\n");
783 ABORT("Bad DOS magic number");
784 }
785 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
786 GC_err_puts("Seek to new header failed in ");
787 GC_err_puts(path); GC_err_puts("\n");
788 ABORT("Bad DOS magic number");
789 }
790 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
791 GC_err_puts("Couldn't read MSDOS header from ");
792 GC_err_puts(path); GC_err_puts("\n");
793 ABORT("Couldn't read OS/2 header");
794 }
795 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
796 GC_err_puts("Executable has wrong OS/2 magic number:");
797 GC_err_puts(path); GC_err_puts("\n");
798 ABORT("Bad OS/2 magic number");
799 }
800 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
801 GC_err_puts("Executable %s has wrong byte order: ");
802 GC_err_puts(path); GC_err_puts("\n");
803 ABORT("Bad byte order");
804 }
805 if ( E32_CPU(hdr386) == E32CPU286) {
806 GC_err_puts("GC can't handle 80286 executables: ");
807 GC_err_puts(path); GC_err_puts("\n");
808 EXIT();
809 }
810 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
811 SEEK_SET) != 0) {
812 GC_err_puts("Seek to object table failed: ");
813 GC_err_puts(path); GC_err_puts("\n");
814 ABORT("Seek to object table failed");
815 }
816 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
817 int flags;
818 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
819 GC_err_puts("Couldn't read obj table entry from ");
820 GC_err_puts(path); GC_err_puts("\n");
821 ABORT("Couldn't read obj table entry");
822 }
823 flags = O32_FLAGS(seg);
824 if (!(flags & OBJWRITE)) continue;
825 if (!(flags & OBJREAD)) continue;
826 if (flags & OBJINVALID) {
827 GC_err_printf0("Object with invalid pages?\n");
828 continue;
829 }
830 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
831 }
832 }
833
834 # else
835
836 # ifdef MSWIN32
837 /* Unfortunately, we have to handle win32s very differently from NT, */
838 /* since VirtualQuery has very different semantics. In particular, */
839 /* under win32s a VirtualQuery call on an unmapped page returns an */
840 /* invalid result. Under NT, GC_register_data_segments is a noop and */
841 /* all real work is done by GC_register_dynamic_libraries. Under */
842 /* win32s, we cannot find the data segments associated with dll's. */
843 /* We register the main data segment here. */
844 GC_bool GC_win32s = FALSE; /* We're running under win32s. */
845
846 GC_bool GC_is_win32s()
847 {
848 DWORD v = GetVersion();
849
850 /* Check that this is not NT, and Windows major version <= 3 */
851 return ((v & 0x80000000) && (v & 0xff) <= 3);
852 }
853
854 void GC_init_win32()
855 {
856 GC_win32s = GC_is_win32s();
857 }
858
859 /* Return the smallest address a such that VirtualQuery */
860 /* returns correct results for all addresses between a and start. */
861 /* Assumes VirtualQuery returns correct information for start. */
862 ptr_t GC_least_described_address(ptr_t start)
863 {
864 MEMORY_BASIC_INFORMATION buf;
865 SYSTEM_INFO sysinfo;
866 DWORD result;
867 LPVOID limit;
868 ptr_t p;
869 LPVOID q;
870
871 GetSystemInfo(&sysinfo);
872 limit = sysinfo.lpMinimumApplicationAddress;
873 p = (ptr_t)((word)start & ~(GC_page_size - 1));
874 for (;;) {
875 q = (LPVOID)(p - GC_page_size);
876 if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
877 result = VirtualQuery(q, &buf, sizeof(buf));
878 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
879 p = (ptr_t)(buf.AllocationBase);
880 }
881 return(p);
882 }
883
884 /* Is p the start of either the malloc heap, or of one of our */
885 /* heap sections? */
886 GC_bool GC_is_heap_base (ptr_t p)
887 {
888
889 register unsigned i;
890
891 # ifndef REDIRECT_MALLOC
892 static ptr_t malloc_heap_pointer = 0;
893
894 if (0 == malloc_heap_pointer) {
895 MEMORY_BASIC_INFORMATION buf;
896 register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
897
898 if (result != sizeof(buf)) {
899 ABORT("Weird VirtualQuery result");
900 }
901 malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
902 }
903 if (p == malloc_heap_pointer) return(TRUE);
904 # endif
905 for (i = 0; i < GC_n_heap_bases; i++) {
906 if (GC_heap_bases[i] == p) return(TRUE);
907 }
908 return(FALSE);
909 }
910
911 void GC_register_root_section(ptr_t static_root)
912 {
913 MEMORY_BASIC_INFORMATION buf;
914 SYSTEM_INFO sysinfo;
915 DWORD result;
916 DWORD protect;
917 LPVOID p;
918 char * base;
919 char * limit, * new_limit;
920
921 if (!GC_win32s) return;
922 p = base = limit = GC_least_described_address(static_root);
923 GetSystemInfo(&sysinfo);
924 while (p < sysinfo.lpMaximumApplicationAddress) {
925 result = VirtualQuery(p, &buf, sizeof(buf));
926 if (result != sizeof(buf) || buf.AllocationBase == 0
927 || GC_is_heap_base(buf.AllocationBase)) break;
928 new_limit = (char *)p + buf.RegionSize;
929 protect = buf.Protect;
930 if (buf.State == MEM_COMMIT
931 && is_writable(protect)) {
932 if ((char *)p == limit) {
933 limit = new_limit;
934 } else {
935 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
936 base = p;
937 limit = new_limit;
938 }
939 }
940 if (p > (LPVOID)new_limit /* overflow */) break;
941 p = (LPVOID)new_limit;
942 }
943 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
944 }
945
946 void GC_register_data_segments()
947 {
948 static char dummy;
949
950 GC_register_root_section((ptr_t)(&dummy));
951 }
952 # else
953 # ifdef AMIGA
954
955 void GC_register_data_segments()
956 {
957 struct Process *proc;
958 struct CommandLineInterface *cli;
959 BPTR myseglist;
960 ULONG *data;
961
962 int num;
963
964
965 # ifdef __GNUC__
966 ULONG dataSegSize;
967 GC_bool found_segment = FALSE;
968 extern char __data_size[];
969
970 dataSegSize=__data_size+8;
971 /* Can't find the location of __data_size, because
972 it's possible that it is inside the segment. */
973
974 # endif
975
976 proc= (struct Process*)SysBase->ThisTask;
977
978 /* Reference: Amiga Guru Book Pages: 538ff,565,573
979 and XOper.asm */
980 if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
981 if (proc->pr_CLI == NULL) {
982 myseglist = proc->pr_SegList;
983 } else {
984 /* ProcLoaded 'Loaded as a command: '*/
985 cli = BADDR(proc->pr_CLI);
986 myseglist = cli->cli_Module;
987 }
988 } else {
989 ABORT("Not a Process.");
990 }
991
992 if (myseglist == NULL) {
993 ABORT("Arrrgh.. can't find segments, aborting");
994 }
995
996 /* xoper hunks Shell Process */
997
998 num=0;
999 for (data = (ULONG *)BADDR(myseglist); data != NULL;
1000 data = (ULONG *)BADDR(data[0])) {
1001 if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1002 ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1003 # ifdef __GNUC__
1004 if (dataSegSize == data[-1]) {
1005 found_segment = TRUE;
1006 }
1007 # endif
1008 GC_add_roots_inner((char *)&data[1],
1009 ((char *)&data[1]) + data[-1], FALSE);
1010 }
1011 ++num;
1012 } /* for */
1013 # ifdef __GNUC__
1014 if (!found_segment) {
1015 ABORT("Can't find correct Segments.\nSolution: Use a newer version of ixemul.library");
1016 }
1017 # endif
1018 }
1019
1020 #if 0 /* old version */
1021 void GC_register_data_segments()
1022 {
1023 extern struct WBStartup *_WBenchMsg;
1024 struct Process *proc;
1025 struct CommandLineInterface *cli;
1026 BPTR myseglist;
1027 ULONG *data;
1028
1029 if ( _WBenchMsg != 0 ) {
1030 if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
1031 GC_err_puts("No seglist from workbench\n");
1032 return;
1033 }
1034 } else {
1035 if ((proc = (struct Process *)FindTask(0)) == 0) {
1036 GC_err_puts("Cannot find process structure\n");
1037 return;
1038 }
1039 if ((cli = BADDR(proc->pr_CLI)) == 0) {
1040 GC_err_puts("No CLI\n");
1041 return;
1042 }
1043 if ((myseglist = cli->cli_Module) == 0) {
1044 GC_err_puts("No seglist from CLI\n");
1045 return;
1046 }
1047 }
1048
1049 for (data = (ULONG *)BADDR(myseglist); data != 0;
1050 data = (ULONG *)BADDR(data[0])) {
1051 # ifdef AMIGA_SKIP_SEG
1052 if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1053 ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1054 # else
1055 {
1056 # endif /* AMIGA_SKIP_SEG */
1057 GC_add_roots_inner((char *)&data[1],
1058 ((char *)&data[1]) + data[-1], FALSE);
1059 }
1060 }
1061 }
1062 #endif /* old version */
1063
1064
1065 # else
1066
1067 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
1068 char * GC_SysVGetDataStart(max_page_size, etext_addr)
1069 int max_page_size;
1070 int * etext_addr;
1071 {
1072 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1073 & ~(sizeof(word) - 1);
1074 /* etext rounded to word boundary */
1075 word next_page = ((text_end + (word)max_page_size - 1)
1076 & ~((word)max_page_size - 1));
1077 word page_offset = (text_end & ((word)max_page_size - 1));
1078 VOLATILE char * result = (char *)(next_page + page_offset);
1079 /* Note that this isn't equivalent to just adding */
1080 /* max_page_size to &etext if &etext is at a page boundary */
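    /* For example (illustrative numbers only), with max_page_size of  */
    /* 0x1000: if etext_addr is 0x11234, next_page is 0x12000 and      */
    /* page_offset is 0x234, so we probe 0x12234; if etext_addr is     */
    /* already page aligned at 0x11000, we probe 0x11000 itself rather */
    /* than 0x12000.                                                   */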
1081
1082 GC_setup_temporary_fault_handler();
1083 if (setjmp(GC_jmp_buf) == 0) {
1084 /* Try writing to the address. */
1085 *result = *result;
1086 GC_reset_fault_handler();
1087 } else {
1088 GC_reset_fault_handler();
1089 /* We got here via a longjmp. The address is not readable. */
1090 /* This is known to happen under Solaris 2.4 + gcc, which places */
1091 /* string constants in the text segment, but after etext. */
1092 /* Use plan B. Note that we now know there is a gap between */
1093 /* text and data segments, so plan A bought us something. */
1094 result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1095 }
1096 return((char *)result);
1097 }
1098 # endif
1099
1100
1101 void GC_register_data_segments()
1102 {
1103 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1104 && !defined(MACOSX)
1105 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1106 /* As of Solaris 2.3, the Solaris threads implementation */
1107 /* allocates the data structure for the initial thread with */
1108 /* sbrk at process startup. It needs to be scanned, so that */
1109 /* we don't lose some malloc allocated data structures */
1110 /* hanging from it. We're on thin ice here ... */
1111 extern caddr_t sbrk();
1112
1113 GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1114 # else
1115 GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1116 # endif
1117 # endif
1118 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1119 GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1120 # endif
1121 # if defined(MACOS)
1122 {
1123 # if defined(THINK_C)
1124 extern void* GC_MacGetDataStart(void);
1125 /* globals begin above stack and end at a5. */
1126 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1127 (ptr_t)LMGetCurrentA5(), FALSE);
1128 # else
1129 # if defined(__MWERKS__)
1130 # if !__POWERPC__
1131 extern void* GC_MacGetDataStart(void);
1132 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1133 # if __option(far_data)
1134 extern void* GC_MacGetDataEnd(void);
1135 # endif
1136 /* globals begin above stack and end at a5. */
1137 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1138 (ptr_t)LMGetCurrentA5(), FALSE);
1139 /* MATTHEW: Handle Far Globals */
1140 # if __option(far_data)
1141 /* Far globals follow the QD globals: */
1142 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1143 (ptr_t)GC_MacGetDataEnd(), FALSE);
1144 # endif
1145 # else
1146 extern char __data_start__[], __data_end__[];
1147 GC_add_roots_inner((ptr_t)&__data_start__,
1148 (ptr_t)&__data_end__, FALSE);
1149 # endif /* __POWERPC__ */
1150 # endif /* __MWERKS__ */
1151 # endif /* !THINK_C */
1152 }
1153 # endif /* MACOS */
1154
1155 /* Dynamic libraries are added at every collection, since they may */
1156 /* change. */
1157 }
1158
1159 # endif /* ! AMIGA */
1160 # endif /* ! MSWIN32 */
1161 # endif /* ! OS2 */
1162
1163 /*
1164 * Auxiliary routines for obtaining memory from OS.
1165 */
1166
1167 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1168 && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1169
1170 # ifdef SUNOS4
1171 extern caddr_t sbrk();
1172 # endif
1173 # ifdef __STDC__
1174 # define SBRK_ARG_T ptrdiff_t
1175 # else
1176 # define SBRK_ARG_T int
1177 # endif
1178
1179 # ifdef RS6000
1180 /* The compiler seems to generate speculative reads one past the end of */
1181 /* an allocated object. Hence we need to make sure that the page */
1182 /* following the last heap page is also mapped. */
1183 ptr_t GC_unix_get_mem(bytes)
1184 word bytes;
1185 {
1186 caddr_t cur_brk = (caddr_t)sbrk(0);
1187 caddr_t result;
1188 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1189 static caddr_t my_brk_val = 0;
1190
1191 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1192 if (lsbs != 0) {
1193 if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1194 }
1195 if (cur_brk == my_brk_val) {
1196 /* Use the extra block we allocated last time. */
1197 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1198 if (result == (caddr_t)(-1)) return(0);
1199 result -= GC_page_size;
1200 } else {
1201 result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1202 if (result == (caddr_t)(-1)) return(0);
1203 }
1204 my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1205 return((ptr_t)result);
1206 }
1207
1208 #else /* Not RS6000 */
1209
1210 #if defined(USE_MMAP)
1211 /* Tested only under IRIX5 and Solaris 2 */
1212
1213 #ifdef USE_MMAP_FIXED
1214 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1215 /* Seems to yield better performance on Solaris 2, but can */
1216 /* be unreliable if something is already mapped at the address. */
1217 #else
1218 # define GC_MMAP_FLAGS MAP_PRIVATE
1219 #endif
1220
1221 ptr_t GC_unix_get_mem(bytes)
1222 word bytes;
1223 {
1224 static GC_bool initialized = FALSE;
1225 static int fd;
1226 void *result;
1227 static ptr_t last_addr = HEAP_START;
1228
1229 if (!initialized) {
1230 fd = open("/dev/zero", O_RDONLY);
1231 initialized = TRUE;
1232 }
1233 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1234 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1235 GC_MMAP_FLAGS, fd, 0/* offset */);
1236 if (result == MAP_FAILED) return(0);
1237 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1238 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1239 return((ptr_t)result);
1240 }
1241
1242 #else /* Not RS6000, not USE_MMAP */
1243 ptr_t GC_unix_get_mem(bytes)
1244 word bytes;
1245 {
1246 ptr_t result;
1247 # ifdef IRIX5
1248 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1249 /* The equivalent may be needed on other systems as well. */
1250 __LOCK_MALLOC();
1251 # endif
1252 {
1253 ptr_t cur_brk = (ptr_t)sbrk(0);
1254 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1255
1256 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1257 if (lsbs != 0) {
1258 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1259 }
1260 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1261 if (result == (ptr_t)(-1)) result = 0;
1262 }
1263 # ifdef IRIX5
1264 __UNLOCK_MALLOC();
1265 # endif
1266 return(result);
1267 }
1268
1269 #endif /* Not USE_MMAP */
1270 #endif /* Not RS6000 */
1271
1272 # endif /* UN*X */
1273
1274 # ifdef OS2
1275
1276 void * os2_alloc(size_t bytes)
1277 {
1278 void * result;
1279
1280 if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1281 PAG_WRITE | PAG_COMMIT)
1282 != NO_ERROR) {
1283 return(0);
1284 }
1285 if (result == 0) return(os2_alloc(bytes));
1286 return(result);
1287 }
1288
1289 # endif /* OS2 */
1290
1291
1292 # ifdef MSWIN32
1293 word GC_n_heap_bases = 0;
1294
1295 ptr_t GC_win32_get_mem(bytes)
1296 word bytes;
1297 {
1298 ptr_t result;
1299
1300 if (GC_win32s) {
1301 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1302 /* There are also unconfirmed rumors of other */
1303 /* problems, so we dodge the issue. */
1304 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1305 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1306 } else {
1307 result = (ptr_t) VirtualAlloc(NULL, bytes,
1308 MEM_COMMIT | MEM_RESERVE,
1309 PAGE_EXECUTE_READWRITE);
1310 }
1311 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1312 /* If I read the documentation correctly, this can */
1313 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1314 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1315 GC_heap_bases[GC_n_heap_bases++] = result;
1316 return(result);
1317 }
1318
1319 void GC_win32_free_heap ()
1320 {
1321 if (GC_win32s) {
1322 while (GC_n_heap_bases > 0) {
1323 GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1324 GC_heap_bases[GC_n_heap_bases] = 0;
1325 }
1326 }
1327 }
1328
1329
1330 # endif
1331
1332 #ifdef USE_MUNMAP
1333
1334 /* For now, this only works on some Unix-like systems. If you */
1335 /* have something else, don't define USE_MUNMAP. */
1336 /* We assume ANSI C to support this feature. */
1337 #include <unistd.h>
1338 #include <sys/mman.h>
1339 #include <sys/stat.h>
1340 #include <sys/types.h>
1341 #include <fcntl.h>
1342
1343 /* Compute a page aligned starting address for the unmap */
1344 /* operation on a block of size bytes starting at start. */
1345 /* Return 0 if the block is too small to make this feasible. */
1346 ptr_t GC_unmap_start(ptr_t start, word bytes)
1347 {
1348 ptr_t result = start;
1349 /* Round start to next page boundary. */
1350 result += GC_page_size - 1;
1351 result = (ptr_t)((word)result & ~(GC_page_size - 1));
1352 if (result + GC_page_size > start + bytes) return 0;
1353 return result;
1354 }
1355
1356 /* Compute end address for an unmap operation on the indicated */
1357 /* block. */
1358 ptr_t GC_unmap_end(ptr_t start, word bytes)
1359 {
1360 ptr_t end_addr = start + bytes;
1361 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1362 return end_addr;
1363 }
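
/* A worked example (not from the original source), assuming a 4K page */
/* size: for start = 0x20010 and bytes = 0x2ff0 the block spans        */
/* [0x20010, 0x23000). GC_unmap_start rounds 0x20010 up to 0x21000 and */
/* GC_unmap_end leaves 0x23000 unchanged, so only the two whole pages  */
/* in [0x21000, 0x23000) are unmapped; the partial pages at either end */
/* stay mapped.                                                        */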
1364
1365 /* We assume that GC_remap is called on exactly the same range */
1366 /* as a previous call to GC_unmap. It is safe to consistently */
1367 /* round the endpoints in both places. */
1368 void GC_unmap(ptr_t start, word bytes)
1369 {
1370 ptr_t start_addr = GC_unmap_start(start, bytes);
1371 ptr_t end_addr = GC_unmap_end(start, bytes);
1372 word len = end_addr - start_addr;
1373 if (0 == start_addr) return;
1374 if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1375 GC_unmapped_bytes += len;
1376 }
1377
1378
1379 void GC_remap(ptr_t start, word bytes)
1380 {
1381 static int zero_descr = -1;
1382 ptr_t start_addr = GC_unmap_start(start, bytes);
1383 ptr_t end_addr = GC_unmap_end(start, bytes);
1384 word len = end_addr - start_addr;
1385 ptr_t result;
1386
1387 if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1388 if (0 == start_addr) return;
1389 result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1390 MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1391 if (result != start_addr) {
1392 ABORT("mmap remapping failed");
1393 }
1394 GC_unmapped_bytes -= len;
1395 }
1396
1397 /* Two adjacent blocks have already been unmapped and are about to */
1398 /* be merged. Unmap the whole block. This typically requires */
1399 /* that we unmap a small section in the middle that was not previously */
1400 /* unmapped due to alignment constraints. */
1401 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1402 {
1403 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1404 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1405 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1406 ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1407 ptr_t start_addr = end1_addr;
1408 ptr_t end_addr = start2_addr;
1409 word len;
1410 GC_ASSERT(start1 + bytes1 == start2);
1411 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1412 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1413 if (0 == start_addr) return;
1414 len = end_addr - start_addr;
1415 if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1416 GC_unmapped_bytes += len;
1417 }
1418
1419 #endif /* USE_MUNMAP */
1420
1421 /* Routine for pushing any additional roots. In THREADS */
1422 /* environment, this is also responsible for marking from */
1423 /* thread stacks. In the SRC_M3 case, it also handles */
1424 /* global variables. */
1425 #ifndef THREADS
1426 void (*GC_push_other_roots)() = 0;
1427 #else /* THREADS */
1428
1429 # ifdef PCR
1430 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1431 {
1432 struct PCR_ThCtl_TInfoRep info;
1433 PCR_ERes result;
1434
1435 info.ti_stkLow = info.ti_stkHi = 0;
1436 result = PCR_ThCtl_GetInfo(t, &info);
1437 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1438 return(result);
1439 }
1440
1441 /* Push the contents of an old object. We treat this as stack */
1442 /* data only because that makes it robust against mark stack */
1443 /* overflow. */
1444 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1445 {
1446 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1447 return(PCR_ERes_okay);
1448 }
1449
1450
1451 void GC_default_push_other_roots()
1452 {
1453 /* Traverse data allocated by previous memory managers. */
1454 {
1455 extern struct PCR_MM_ProcsRep * GC_old_allocator;
1456
1457 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1458 GC_push_old_obj, 0)
1459 != PCR_ERes_okay) {
1460 ABORT("Old object enumeration failed");
1461 }
1462 }
1463 /* Traverse all thread stacks. */
1464 if (PCR_ERes_IsErr(
1465 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1466 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1467 ABORT("Thread stack marking failed\n");
1468 }
1469 }
1470
1471 # endif /* PCR */
1472
1473 # ifdef SRC_M3
1474
1475 # ifdef ALL_INTERIOR_POINTERS
1476 --> misconfigured
1477 # endif
1478
1479
1480 extern void ThreadF__ProcessStacks();
1481
1482 void GC_push_thread_stack(start, stop)
1483 word start, stop;
1484 {
1485 GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1486 }
1487
1488 /* Push routine with M3 specific calling convention. */
1489 GC_m3_push_root(dummy1, p, dummy2, dummy3)
1490 word *p;
1491 ptr_t dummy1, dummy2;
1492 int dummy3;
1493 {
1494 word q = *p;
1495
1496 if ((ptr_t)(q) >= GC_least_plausible_heap_addr
1497 && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
1498 GC_push_one_checked(q,FALSE);
1499 }
1500 }
1501
1502 /* M3 set equivalent to RTHeap.TracedRefTypes */
1503 typedef struct { int elts[1]; } RefTypeSet;
1504 RefTypeSet GC_TracedRefTypes = {{0x1}};
1505
1506 /* From finalize.c */
1507 extern void GC_push_finalizer_structures();
1508
1509 /* From stubborn.c: */
1510 # ifdef STUBBORN_ALLOC
1511 extern GC_PTR * GC_changing_list_start;
1512 # endif
1513
1514
1515 void GC_default_push_other_roots()
1516 {
1517 /* Use the M3 provided routine for finding static roots. */
1518 /* This is a bit dubious, since it presumes no C roots. */
1519 /* We handle the collector roots explicitly. */
1520 {
1521 # ifdef STUBBORN_ALLOC
1522 GC_push_one(GC_changing_list_start);
1523 # endif
1524 GC_push_finalizer_structures();
1525 RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1526 }
1527 if (GC_words_allocd > 0) {
1528 ThreadF__ProcessStacks(GC_push_thread_stack);
1529 }
1530 /* Otherwise this isn't absolutely necessary, and we have */
1531 /* startup ordering problems. */
1532 }
1533
1534 # endif /* SRC_M3 */
1535
1536 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1537 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1538 || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1539
1540 extern void GC_push_all_stacks();
1541
1542 void GC_default_push_other_roots()
1543 {
1544 GC_push_all_stacks();
1545 }
1546
1547 # endif /* SOLARIS_THREADS || ... */
1548
1549 void (*GC_push_other_roots)() = GC_default_push_other_roots;
1550
1551 #endif
1552
1553 /*
1554 * Routines for accessing dirty bits on virtual pages.
1555 * We plan to eventually implement four strategies for doing so:
1556 * DEFAULT_VDB: A simple dummy implementation that treats every page
1557 * as possibly dirty. This makes incremental collection
1558 * useless, but the implementation is still correct.
1559 * PCR_VDB: Use PPCR's virtual dirty bit facility.
1560 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1561 * works under some SVR4 variants. Even then, it may be
1562 * too slow to be entirely satisfactory. Requires reading
1563 * dirty bits for entire address space. Implementations tend
1564 * to assume that the client is a (slow) debugger.
1565 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1566 * dirtied pages. The implementation (and implementability)
1567 * is highly system dependent. This usually fails when system
1568 * calls write to a protected page. We prevent the read system
1569 * call from doing so. It is the client's responsibility to
1570 * make sure that other system calls are similarly protected
1571 * or write only to the stack.
1572 */
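
/* A minimal client-side sketch (not part of this file) of how one of   */
/* these mechanisms gets turned on through the public interface; it     */
/* assumes the usual gc.h declarations of GC_enable_incremental() and   */
/* GC_MALLOC().                                                          */
#if 0
#include "gc.h"

int main(void)
{
    int i;

    GC_enable_incremental();      /* enable dirty-bit based incremental GC */
    for (i = 0; i < 100000; i++) {
        (void)GC_MALLOC(64);      /* collection now proceeds in small steps */
    }
    return 0;
}
#endif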
1573
1574 GC_bool GC_dirty_maintained = FALSE;
1575
1576 # ifdef DEFAULT_VDB
1577
1578 /* All of the following assume the allocation lock is held, and */
1579 /* signals are disabled. */
1580
1581 /* The client asserts that unallocated pages in the heap are never */
1582 /* written. */
1583
1584 /* Initialize virtual dirty bit implementation. */
1585 void GC_dirty_init()
1586 {
1587 GC_dirty_maintained = TRUE;
1588 }
1589
1590 /* Retrieve system dirty bits for heap to a local buffer. */
1591 /* Restore the system's notion of which pages are dirty. */
1592 void GC_read_dirty()
1593 {}
1594
1595 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1596 /* If the actual page size is different, this returns TRUE if any */
1597 /* of the pages overlapping h are dirty. This routine may err on the */
1598 /* side of labelling pages as dirty (and this implementation does). */
1599 /*ARGSUSED*/
1600 GC_bool GC_page_was_dirty(h)
1601 struct hblk *h;
1602 {
1603 return(TRUE);
1604 }
1605
1606 /*
1607 * The following two routines are typically less crucial. They matter
1608 * most with large dynamic libraries, or if we can't accurately identify
1609 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1610 * versions are adequate.
1611 */
1612
1613 /* Could any valid GC heap pointer ever have been written to this page? */
1614 /*ARGSUSED*/
1615 GC_bool GC_page_was_ever_dirty(h)
1616 struct hblk *h;
1617 {
1618 return(TRUE);
1619 }
1620
1621 /* Reset the n pages starting at h to "was never dirty" status. */
1622 void GC_is_fresh(h, n)
1623 struct hblk *h;
1624 word n;
1625 {
1626 }
1627
1628 /* A call hints that h is about to be written. */
1629 /* May speed up some dirty bit implementations. */
1630 /*ARGSUSED*/
1631 void GC_write_hint(h)
1632 struct hblk *h;
1633 {
1634 }
1635
1636 # endif /* DEFAULT_VDB */
1637
1638
1639 # ifdef MPROTECT_VDB
1640
1641 /*
1642 * See DEFAULT_VDB for interface descriptions.
1643 */
1644
1645 /*
1646 * This implementation maintains dirty bits itself by catching write
1647 * faults and keeping track of them. We assume nobody else catches
1648 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1649 * except as a result of a read system call. This means clients must
1650 * either ensure that system calls do not touch the heap, or must
1651 * provide their own wrappers analogous to the one for read.
1652 * We assume the page size is a multiple of HBLKSIZE.
1653 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1654 * tried to use portable code where easily possible. It is known
1655 * not to work under a number of other systems.
1656 */
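
/* One possible client-side wrapper for a system call that writes into  */
/* the heap (a hypothetical sketch; client_safe_read is not the wrapper */
/* this file installs for read): touching each destination page first   */
/* takes any write fault in user mode, where GC_write_fault_handler     */
/* below can unprotect the page, before the kernel tries to write.      */
#if 0
#include <unistd.h>

ssize_t client_safe_read(int fd, char *buf, size_t nbytes)
{
    size_t i;
    size_t page = 4096;       /* or query the real page size with sysconf */

    for (i = 0; i < nbytes; i += page) buf[i] = buf[i];
                              /* force a user-mode write fault per page   */
    if (nbytes > 0) buf[nbytes - 1] = buf[nbytes - 1];
    return read(fd, buf, nbytes);
}
#endif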
1657
1658 # ifndef MSWIN32
1659
1660 # include <sys/mman.h>
1661 # include <signal.h>
1662 # include <sys/syscall.h>
1663
1664 # define PROTECT(addr, len) \
1665 if (mprotect((caddr_t)(addr), (size_t)(len), \
1666 PROT_READ | OPT_PROT_EXEC) < 0) { \
1667 ABORT("mprotect failed"); \
1668 }
1669 # define UNPROTECT(addr, len) \
1670 if (mprotect((caddr_t)(addr), (size_t)(len), \
1671 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1672 ABORT("un-mprotect failed"); \
1673 }
1674
1675 # else
1676
1677 # include <signal.h>
1678
1679 static DWORD protect_junk;
1680 # define PROTECT(addr, len) \
1681 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1682 &protect_junk)) { \
1683 DWORD last_error = GetLastError(); \
1684 GC_printf1("Last error code: %lx\n", last_error); \
1685 ABORT("VirtualProtect failed"); \
1686 }
1687 # define UNPROTECT(addr, len) \
1688 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1689 &protect_junk)) { \
1690 ABORT("un-VirtualProtect failed"); \
1691 }
1692
1693 # endif
1694
1695 #if defined(SUNOS4) || defined(FREEBSD)
1696 typedef void (* SIG_PF)();
1697 #endif
1698 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1699 # ifdef __STDC__
1700 typedef void (* SIG_PF)(int);
1701 # else
1702 typedef void (* SIG_PF)();
1703 # endif
1704 #endif
1705 #if defined(MSWIN32)
1706 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1707 # undef SIG_DFL
1708 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1709 #endif
1710
1711 #if defined(IRIX5) || defined(OSF1)
1712 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1713 #endif
1714 #if defined(SUNOS5SIGS)
1715 # ifdef HPUX
1716 # define SIGINFO __siginfo
1717 # else
1718 # define SIGINFO siginfo
1719 # endif
1720 # ifdef __STDC__
1721 typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1722 # else
1723 typedef void (* REAL_SIG_PF)();
1724 # endif
1725 #endif
1726 #if defined(LINUX)
1727 # include <linux/version.h>
1728 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1729 typedef struct sigcontext s_c;
1730 # else
1731 typedef struct sigcontext_struct s_c;
1732 # endif
1733 # if defined(ALPHA) || defined(M68K)
1734 typedef void (* REAL_SIG_PF)(int, int, s_c *);
1735 # else
1736 # if defined(IA64)
1737 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1738 # else
1739 typedef void (* REAL_SIG_PF)(int, s_c);
1740 # endif
1741 # endif
1742 # ifdef ALPHA
1743 /* Retrieve fault address from sigcontext structure by decoding */
1744 /* instruction. */
1745 char * get_fault_addr(s_c *sc) {
1746 unsigned instr;
1747 word faultaddr;
1748
1749 instr = *((unsigned *)(sc->sc_pc));
1750 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1751 faultaddr += (word) (((int)instr << 16) >> 16);
1752 return (char *)faultaddr;
1753 }
1754 # endif /* ALPHA */
1755 # endif
1756
1757 SIG_PF GC_old_bus_handler;
1758 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1759
1760 /*ARGSUSED*/
1761 # if defined (SUNOS4) || defined(FREEBSD)
1762 void GC_write_fault_handler(sig, code, scp, addr)
1763 int sig, code;
1764 struct sigcontext *scp;
1765 char * addr;
1766 # ifdef SUNOS4
1767 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1768 # define CODE_OK (FC_CODE(code) == FC_PROT \
1769 || (FC_CODE(code) == FC_OBJERR \
1770 && FC_ERRNO(code) == FC_PROT))
1771 # endif
1772 # ifdef FREEBSD
1773 # define SIG_OK (sig == SIGBUS)
1774 # define CODE_OK (code == BUS_PAGE_FAULT)
1775 # endif
1776 # endif
1777 # if defined(IRIX5) || defined(OSF1)
1778 # include <errno.h>
1779 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
1780 # define SIG_OK (sig == SIGSEGV)
1781 # ifdef OSF1
1782 # define CODE_OK (code == 2 /* experimentally determined */)
1783 # endif
1784 # ifdef IRIX5
1785 # define CODE_OK (code == EACCES)
1786 # endif
1787 # endif
1788 # if defined(LINUX)
1789 # if defined(ALPHA) || defined(M68K)
1790 void GC_write_fault_handler(int sig, int code, s_c * sc)
1791 # else
1792 # if defined(IA64)
1793 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
1794 # else
1795 void GC_write_fault_handler(int sig, s_c sc)
1796 # endif
1797 # endif
1798 # define SIG_OK (sig == SIGSEGV)
1799 # define CODE_OK TRUE
1800 /* Empirically c.trapno == 14, on IA32, but is that useful? */
1801 /* Should probably consider alignment issues on other */
1802 /* architectures. */
1803 # endif
1804 # if defined(SUNOS5SIGS)
1805 # ifdef __STDC__
1806 void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
1807 # else
1808 void GC_write_fault_handler(sig, scp, context)
1809 int sig;
1810 struct SIGINFO *scp;
1811 void * context;
1812 # endif
1813 # ifdef HPUX
1814 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1815 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
1816 || (scp -> si_code == BUS_ADRERR) \
1817 || (scp -> si_code == BUS_UNKNOWN) \
1818 || (scp -> si_code == SEGV_UNKNOWN) \
1819 || (scp -> si_code == BUS_OBJERR)
1820 # else
1821 # define SIG_OK (sig == SIGSEGV)
1822 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
1823 # endif
1824 # endif
1825 # if defined(MSWIN32)
1826 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
1827 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1828 EXCEPTION_ACCESS_VIOLATION)
1829 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1830 /* Write fault */
1831 # endif
1832 {
1833 register unsigned i;
1834 # ifdef IRIX5
1835 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
1836 # endif
1837 # if defined(OSF1) && defined(ALPHA)
1838 char * addr = (char *) (scp -> sc_traparg_a0);
1839 # endif
1840 # ifdef SUNOS5SIGS
1841 char * addr = (char *) (scp -> si_addr);
1842 # endif
1843 # ifdef LINUX
1844 # ifdef I386
1845 char * addr = (char *) (sc.cr2);
1846 # else
1847 # if defined(M68K)
1848 char * addr = NULL;
1849
1850 struct sigcontext *scp = (struct sigcontext *)(&sc);
1851
1852 int format = (scp->sc_formatvec >> 12) & 0xf;
1853 unsigned long *framedata = (unsigned long *)(scp + 1);
1854 unsigned long ea;
1855
1856 if (format == 0xa || format == 0xb) {
1857 /* 68020/030 */
1858 ea = framedata[2];
1859 } else if (format == 7) {
1860 /* 68040 */
1861 ea = framedata[3];
1862 } else if (format == 4) {
1863 /* 68060 */
1864 ea = framedata[0];
1865 if (framedata[1] & 0x08000000) {
1866 /* correct addr on misaligned access */
1867 ea = (ea+4095)&(~4095);
1868 }
1869 }
1870 addr = (char *)ea;
1871 # else
1872 # ifdef ALPHA
1873 char * addr = get_fault_addr(sc);
1874 # else
1875 # ifdef IA64
1876 char * addr = si -> si_addr;
1877 /* I believe this is claimed to work on all platforms for */
1878 /* Linux 2.3.47 and later. Hopefully we don't have to */
1879 /* worry about earlier kernels on IA64. */
1880 # else
1881 # if defined(POWERPC)
1882 char * addr = (char *) (sc.regs->dar);
1883 # else
1884 --> architecture not supported
1885 # endif
1886 # endif
1887 # endif
1888 # endif
1889 # endif
1890 # endif
1891 # if defined(MSWIN32)
1892 char * addr = (char *) (exc_info -> ExceptionRecord
1893 -> ExceptionInformation[1]);
1894 # define sig SIGSEGV
1895 # endif
1896
1897 if (SIG_OK && CODE_OK) {
1898 register struct hblk * h =
1899 (struct hblk *)((word)addr & ~(GC_page_size-1));
1900 GC_bool in_allocd_block;
1901
1902 # ifdef SUNOS5SIGS
1903 /* Address is only within the correct physical page. */
1904 in_allocd_block = FALSE;
1905 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1906 if (HDR(h+i) != 0) {
1907 in_allocd_block = TRUE;
1908 }
1909 }
1910 # else
1911 in_allocd_block = (HDR(addr) != 0);
1912 # endif
1913 if (!in_allocd_block) {
1914 /* Heap blocks now begin and end on page boundaries */
1915 SIG_PF old_handler;
1916
1917 if (sig == SIGSEGV) {
1918 old_handler = GC_old_segv_handler;
1919 } else {
1920 old_handler = GC_old_bus_handler;
1921 }
1922 if (old_handler == SIG_DFL) {
1923 # ifndef MSWIN32
1924 GC_err_printf1("Segfault at 0x%lx\n", addr);
1925 ABORT("Unexpected bus error or segmentation fault");
1926 # else
1927 return(EXCEPTION_CONTINUE_SEARCH);
1928 # endif
1929 } else {
1930 # if defined (SUNOS4) || defined(FREEBSD)
1931 (*old_handler) (sig, code, scp, addr);
1932 return;
1933 # endif
1934 # if defined (SUNOS5SIGS)
1935 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
1936 return;
1937 # endif
1938 # if defined (LINUX)
1939 # if defined(ALPHA) || defined(M68K)
1940 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
1941 # else
1942 # if defined(IA64)
1943 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
1944 # else
1945 (*(REAL_SIG_PF)old_handler) (sig, sc);
1946 # endif
1947 # endif
1948 return;
1949 # endif
1950 # if defined (IRIX5) || defined(OSF1)
1951 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
1952 return;
1953 # endif
1954 # ifdef MSWIN32
1955 return((*old_handler)(exc_info));
1956 # endif
1957 }
1958 }
1959 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1960 register int index = PHT_HASH(h+i);
1961
1962 set_pht_entry_from_index(GC_dirty_pages, index);
1963 }
1964 UNPROTECT(h, GC_page_size);
1965 # if defined(OSF1) || defined(LINUX)
1966 /* These reset the signal handler each time by default. */
1967 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
1968 # endif
1969 /* The write may not take place before dirty bits are read. */
1970 /* But then we'll fault again ... */
1971 # ifdef MSWIN32
1972 return(EXCEPTION_CONTINUE_EXECUTION);
1973 # else
1974 return;
1975 # endif
1976 }
1977 #ifdef MSWIN32
1978 return EXCEPTION_CONTINUE_SEARCH;
1979 #else
1980 GC_err_printf1("Segfault at 0x%lx\n", addr);
1981 ABORT("Unexpected bus error or segmentation fault");
1982 #endif
1983 }
1984
1985 /*
1986 * We hold the allocation lock. We expect block h to be written
1987 * shortly.
1988 */
1989 void GC_write_hint(h)
1990 struct hblk *h;
1991 {
1992 register struct hblk * h_trunc;
1993 register unsigned i;
1994 register GC_bool found_clean;
1995
1996 if (!GC_dirty_maintained) return;
1997 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1998 found_clean = FALSE;
1999 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2000 register int index = PHT_HASH(h_trunc+i);
2001
2002 if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
2003 found_clean = TRUE;
2004 set_pht_entry_from_index(GC_dirty_pages, index);
2005 }
2006 }
2007 if (found_clean) {
2008 UNPROTECT(h_trunc, GC_page_size);
2009 }
2010 }
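
/*
 * Illustrative sketch, not part of the collector: a client that is about
 * to write a heap block could hint first, so the write does not take a
 * protection fault.  GC_do_some_writing is hypothetical and not compiled.
 */
#if 0
void GC_do_some_writing(struct hblk *h)
{
    LOCK();			/* GC_write_hint expects the allocation lock */
    GC_write_hint(h);		/* pre-unprotect the page(s) containing h    */
    UNLOCK();
    /* ... write to *h without faulting ... */
}
#endif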
2011
2012 void GC_dirty_init()
2013 {
2014 #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2015 struct sigaction act, oldact;
2016 # ifdef IRIX5
2017 act.sa_flags = SA_RESTART;
2018 act.sa_handler = GC_write_fault_handler;
2019 # else
2020 act.sa_flags = SA_RESTART | SA_SIGINFO;
2021 act.sa_sigaction = GC_write_fault_handler;
2022 # endif
2023 (void)sigemptyset(&act.sa_mask);
2024 #endif
2025 # ifdef PRINTSTATS
2026 GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
2027 # endif
2028 GC_dirty_maintained = TRUE;
2029 if (GC_page_size % HBLKSIZE != 0) {
2030 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2031 ABORT("Page size not multiple of HBLKSIZE");
2032 }
2033 # if defined(SUNOS4) || defined(FREEBSD)
2034 GC_old_bus_handler = signal(SIGBUS, (SIG_PF) GC_write_fault_handler);
2035 if (GC_old_bus_handler == SIG_IGN) {
2036 GC_err_printf0("Previously ignored bus error!?");
2037 GC_old_bus_handler = SIG_DFL;
2038 }
2039 if (GC_old_bus_handler != SIG_DFL) {
2040 # ifdef PRINTSTATS
2041 GC_err_printf0("Replaced other SIGBUS handler\n");
2042 # endif
2043 }
2044 # endif
2045 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2046 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2047 if (GC_old_segv_handler == SIG_IGN) {
2048 GC_err_printf0("Previously ignored segmentation violation!?");
2049 GC_old_segv_handler = SIG_DFL;
2050 }
2051 if (GC_old_segv_handler != SIG_DFL) {
2052 # ifdef PRINTSTATS
2053 GC_err_printf0("Replaced other SIGSEGV handler\n");
2054 # endif
2055 }
2056 # endif
2057 # if defined(SUNOS5SIGS) || defined(IRIX5)
2058 # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
2059 sigaction(SIGSEGV, 0, &oldact);
2060 sigaction(SIGSEGV, &act, 0);
2061 # else
2062 sigaction(SIGSEGV, &act, &oldact);
2063 # endif
2064 # if defined(_sigargs)
2065 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2066 /* sa_sigaction. */
2067 GC_old_segv_handler = oldact.sa_handler;
2068 # else /* Irix 6.x or SUNOS5SIGS */
2069 if (oldact.sa_flags & SA_SIGINFO) {
2070 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2071 } else {
2072 GC_old_segv_handler = oldact.sa_handler;
2073 }
2074 # endif
2075 if (GC_old_segv_handler == SIG_IGN) {
2076 GC_err_printf0("Previously ignored segmentation violation!?");
2077 GC_old_segv_handler = SIG_DFL;
2078 }
2079 if (GC_old_segv_handler != SIG_DFL) {
2080 # ifdef PRINTSTATS
2081 GC_err_printf0("Replaced other SIGSEGV handler\n");
2082 # endif
2083 }
2084 # ifdef HPUX
2085 sigaction(SIGBUS, &act, &oldact);
2086 GC_old_bus_handler = oldact.sa_handler;
2087 if (GC_old_bus_handler != SIG_DFL) {
2088 # ifdef PRINTSTATS
2089 GC_err_printf0("Replaced other SIGBUS handler\n");
2090 # endif
2091 }
2092 # endif
2093 # endif
2094 # if defined(MSWIN32)
2095 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2096 if (GC_old_segv_handler != NULL) {
2097 # ifdef PRINTSTATS
2098 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2099 # endif
2100 } else {
2101 GC_old_segv_handler = SIG_DFL;
2102 }
2103 # endif
2104 }
2105
2106
2107
2108 void GC_protect_heap()
2109 {
2110 ptr_t start;
2111 word len;
2112 unsigned i;
2113
2114 for (i = 0; i < GC_n_heap_sects; i++) {
2115 start = GC_heap_sects[i].hs_start;
2116 len = GC_heap_sects[i].hs_bytes;
2117 PROTECT(start, len);
2118 }
2119 }
2120
2121 /* We assume that either the world is stopped or it's OK to lose dirty */
2122 /* bits while this is happening (as in GC_enable_incremental).         */
2123 void GC_read_dirty()
2124 {
2125 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2126 (sizeof GC_dirty_pages));
2127 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2128 GC_protect_heap();
2129 }
2130
2131 GC_bool GC_page_was_dirty(h)
2132 struct hblk * h;
2133 {
2134 register word index = PHT_HASH(h);
2135
2136 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2137 }
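
/*
 * Illustrative sketch, not actual collector code: the typical cycle is to
 * call GC_read_dirty() once, with the world stopped, and then to query
 * GC_page_was_dirty(h) for each block of interest before the heap is
 * written again.  GC_scan_dirty_blocks is hypothetical and not compiled.
 */
#if 0
void GC_scan_dirty_blocks(struct hblk **blocks, unsigned n)
{
    unsigned i;

    GC_read_dirty();		/* snapshot and clear the dirty bits */
    for (i = 0; i < n; i++) {
	if (GC_page_was_dirty(blocks[i])) {
	    /* ... rescan blocks[i] ... */
	}
    }
}
#endif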
2138
2139 /*
2140 * Acquiring the allocation lock here is dangerous, since this
2141 * can be called from within GC_call_with_alloc_lock, and the cord
2142 * package does so. On systems that allow nested lock acquisition, this
2143 * happens to work.
2144 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2145 */
2146
2147 void GC_begin_syscall()
2148 {
2149 if (!I_HOLD_LOCK()) LOCK();
2150 }
2151
2152 void GC_end_syscall()
2153 {
2154 if (!I_HOLD_LOCK()) UNLOCK();
2155 }
2156
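/* Remove write protection from, and mark as dirty, every page spanned by */
/* the object containing [addr, addr+len).  Called before a system call   */
/* writes into the heap on our behalf, since such a write would otherwise */
/* fail on a protected page instead of raising a catchable fault.         */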
2157 void GC_unprotect_range(addr, len)
2158 ptr_t addr;
2159 word len;
2160 {
2161 struct hblk * start_block;
2162 struct hblk * end_block;
2163 register struct hblk *h;
2164 ptr_t obj_start;
2165
2166 if (!GC_incremental) return;
2167 obj_start = GC_base(addr);
2168 if (obj_start == 0) return;
2169 if (GC_base(addr + len - 1) != obj_start) {
2170 ABORT("GC_unprotect_range(range bigger than object)");
2171 }
2172 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2173 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2174 end_block += GC_page_size/HBLKSIZE - 1;
2175 for (h = start_block; h <= end_block; h++) {
2176 register word index = PHT_HASH(h);
2177
2178 set_pht_entry_from_index(GC_dirty_pages, index);
2179 }
2180 UNPROTECT(start_block,
2181 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2182 }
2183
2184 #if !defined(MSWIN32) && !defined(LINUX_THREADS)
2185 /* Replacement for the UNIX read() system call.	 */
2186 /* Other calls that write to the heap */
2187 /* should be handled similarly. */
2188 # if defined(__STDC__) && !defined(SUNOS4)
2189 # include <unistd.h>
2190 # include <sys/uio.h>
2191 ssize_t read(int fd, void *buf, size_t nbyte)
2192 # else
2193 # ifndef LINT
2194 int read(fd, buf, nbyte)
2195 # else
2196 int GC_read(fd, buf, nbyte)
2197 # endif
2198 int fd;
2199 char *buf;
2200 int nbyte;
2201 # endif
2202 {
2203 int result;
2204
2205 GC_begin_syscall();
2206 GC_unprotect_range(buf, (word)nbyte);
2207 # if defined(IRIX5) || defined(LINUX_THREADS)
2208 /* Indirect system call may not always be easily available. */
2209 /* We could call _read, but that would interfere with the */
2210 /* libpthread interception of read. */
2211 /* On Linux, we have to be careful with the linuxthreads */
2212 /* read interception. */
2213 {
2214 struct iovec iov;
2215
2216 iov.iov_base = buf;
2217 iov.iov_len = nbyte;
2218 result = readv(fd, &iov, 1);
2219 }
2220 # else
2221 result = syscall(SYS_read, fd, buf, nbyte);
2222 # endif
2223 GC_end_syscall();
2224 return(result);
2225 }
2226 #endif /* !MSWIN32 && !LINUX_THREADS */
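
/*
 * Sketch only: another system call that writes to the heap (pread here,
 * assuming the platform provides both the call and SYS_pread) would be
 * wrapped just like read above.  Not compiled.
 */
#if 0
ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset)
{
    ssize_t result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
    result = syscall(SYS_pread, fd, buf, nbyte, offset);  /* assumed to exist */
    GC_end_syscall();
    return(result);
}
#endif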
2227
2228 #ifdef USE_LD_WRAP
2229 /* We use the GNU ld call wrapping facility. */
2230 /* This requires that the linker be invoked with "--wrap read". */
2231 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2232 /* I'm not sure that this actually wraps whatever version of read */
2233 /* is called by stdio. That code also mentions __read. */
2234 # include <unistd.h>
2235 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2236 {
2237 int result;
2238
2239 GC_begin_syscall();
2240 GC_unprotect_range(buf, (word)nbyte);
2241 result = __real_read(fd, buf, nbyte);
2242 GC_end_syscall();
2243 return(result);
2244 }
2245
2246 /* We should probably also do this for __read, or whatever stdio */
2247 /* actually calls. */
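/* Sketch only: if stdio enters the kernel through __read, that symbol    */
/* could be wrapped the same way with "--wrap __read".  The prototype     */
/* below is an assumption about the C library, not something verified.    */
#if 0
ssize_t __wrap___read(int fd, void *buf, size_t nbyte)
{
    ssize_t result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
    result = __real___read(fd, buf, nbyte);	/* supplied by ld --wrap */
    GC_end_syscall();
    return(result);
}
#endif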
2248 #endif
2249
2250 /*ARGSUSED*/
2251 GC_bool GC_page_was_ever_dirty(h)
2252 struct hblk *h;
2253 {
2254 return(TRUE);
2255 }
2256
2257 /* Reset the n pages starting at h to "was never dirty" status. */
2258 /*ARGSUSED*/
2259 void GC_is_fresh(h, n)
2260 struct hblk *h;
2261 word n;
2262 {
2263 }
2264
2265 # endif /* MPROTECT_VDB */
2266
2267 # ifdef PROC_VDB
2268
2269 /*
2270 * See DEFAULT_VDB for interface descriptions.
2271 */
2272
2273 /*
2274 * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2275 * from which we can read page modified bits. This facility is far from
2276 * optimal (e.g. we would like to get the info for only some of the
2277 * address space), but it avoids intercepting system calls.
2278 */
2279
2280 #include <errno.h>
2281 #include <sys/types.h>
2282 #include <sys/signal.h>
2283 #include <sys/fault.h>
2284 #include <sys/syscall.h>
2285 #include <sys/procfs.h>
2286 #include <sys/stat.h>
2287 #include <fcntl.h>
2288
2289 #define INITIAL_BUF_SZ 4096
2290 word GC_proc_buf_size = INITIAL_BUF_SZ;
2291 char *GC_proc_buf;
2292
2293 #ifdef SOLARIS_THREADS
2294 /* We don't have exact sp values for threads. So we count on */
2295 /* occasionally declaring stack pages to be fresh. Thus we */
2296 /* need a real implementation of GC_is_fresh. We can't clear */
2297 /* entries in GC_written_pages, since that would declare all */
2298 /* pages with the given hash address to be fresh. */
2299 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2300 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2301 /* Collisions are dropped. */
2302
2303 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2304 # define ADD_FRESH_PAGE(h) \
2305 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2306 # define PAGE_IS_FRESH(h) \
2307 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2308 #endif
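
/* Illustrative use of the cache above (hypothetical routine, not compiled, */
/* and only meaningful with SOLARIS_THREADS): record a known-clean stack    */
/* page, then check whether it is still fresh, i.e. not evicted by a        */
/* colliding entry.                                                         */
#if 0
static void GC_note_fresh_stack_page(struct hblk *h)
{
    ADD_FRESH_PAGE(h);		/* may silently evict a colliding entry */
    if (PAGE_IS_FRESH(h)) {
	/* h will be reported clean by GC_page_was_dirty until evicted. */
    }
}
#endif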
2309
2310 /* Add all pages in pht2 to pht1 */
2311 void GC_or_pages(pht1, pht2)
2312 page_hash_table pht1, pht2;
2313 {
2314 register int i;
2315
2316 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2317 }
2318
2319 int GC_proc_fd;
2320
2321 void GC_dirty_init()
2322 {
2323 int fd;
2324 char buf[30];
2325
2326 GC_dirty_maintained = TRUE;
2327 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2328 register int i;
2329
2330 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2331 # ifdef PRINTSTATS
2332 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2333 (unsigned long)
2334 (GC_words_allocd + GC_words_allocd_before_gc));
2335 # endif
2336 }
2337 sprintf(buf, "/proc/%d", getpid());
2338 fd = open(buf, O_RDONLY);
2339 if (fd < 0) {
2340 ABORT("/proc open failed");
2341 }
2342 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2343 close(fd);
2344 if (GC_proc_fd < 0) {
2345 ABORT("/proc ioctl failed");
2346 }
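/* GC_proc_fd now refers to the page-data file for this process; each     */
/* read from it yields a prpageheader followed by per-mapping prasmap     */
/* records, which GC_read_dirty below decodes.                            */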
2347 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2348 # ifdef SOLARIS_THREADS
2349 GC_fresh_pages = (struct hblk **)
2350 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2351 if (GC_fresh_pages == 0) {
2352 GC_err_printf0("No space for fresh pages\n");
2353 EXIT();
2354 }
2355 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2356 # endif
2357 }
2358
2359 /* Ignore write hints. They don't help us here. */
2360 /*ARGSUSED*/
2361 void GC_write_hint(h)
2362 struct hblk *h;
2363 {
2364 }
2365
2366 #ifdef SOLARIS_THREADS
2367 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2368 #else
2369 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2370 #endif
2371
2372 void GC_read_dirty()
2373 {
2374 unsigned long ps, np;
2375 int nmaps;
2376 ptr_t vaddr;
2377 struct prasmap * map;
2378 char * bufp;
2379 ptr_t current_addr, limit;
2380 int i;
2381 int dummy;
2382
2383 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2384
2385 bufp = GC_proc_buf;
2386 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2387 # ifdef PRINTSTATS
2388 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2389 GC_proc_buf_size);
2390 # endif
2391 {
2392 /* Retry with larger buffer. */
2393 word new_size = 2 * GC_proc_buf_size;
2394 char * new_buf = GC_scratch_alloc(new_size);
2395
2396 if (new_buf != 0) {
2397 GC_proc_buf = bufp = new_buf;
2398 GC_proc_buf_size = new_size;
2399 }
2400 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2401 WARN("Insufficient space for /proc read\n", 0);
2402 /* Punt: */
2403 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2404 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2405 # ifdef SOLARIS_THREADS
2406 BZERO(GC_fresh_pages,
2407 MAX_FRESH_PAGES * sizeof (struct hblk *));
2408 # endif
2409 return;
2410 }
2411 }
2412 }
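    /* The buffer now holds a prpageheader followed by pr_nmap entries, each */
    /* a prasmap immediately followed by one status byte per page of that    */
    /* mapping, padded to a long boundary; the loop below walks that layout. */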
2413 /* Copy dirty bits into GC_grungy_pages */
2414 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2415 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2416 nmaps, PG_REFERENCED, PG_MODIFIED); */
2417 bufp = bufp + sizeof(struct prpageheader);
2418 for (i = 0; i < nmaps; i++) {
2419 map = (struct prasmap *)bufp;
2420 vaddr = (ptr_t)(map -> pr_vaddr);
2421 ps = map -> pr_pagesize;
2422 np = map -> pr_npage;
2423 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2424 limit = vaddr + ps * np;
2425 bufp += sizeof (struct prasmap);
2426 for (current_addr = vaddr;
2427 current_addr < limit; current_addr += ps){
2428 if ((*bufp++) & PG_MODIFIED) {
2429 register struct hblk * h = (struct hblk *) current_addr;
2430
2431 while ((ptr_t)h < current_addr + ps) {
2432 register word index = PHT_HASH(h);
2433
2434 set_pht_entry_from_index(GC_grungy_pages, index);
2435 # ifdef SOLARIS_THREADS
2436 {
2437 register int slot = FRESH_PAGE_SLOT(h);
2438
2439 if (GC_fresh_pages[slot] == h) {
2440 GC_fresh_pages[slot] = 0;
2441 }
2442 }
2443 # endif
2444 h++;
2445 }
2446 }
2447 }
2448 bufp += sizeof(long) - 1;
2449 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2450 }
2451 /* Update GC_written_pages. */
2452 GC_or_pages(GC_written_pages, GC_grungy_pages);
2453 # ifdef SOLARIS_THREADS
2454 /* Make sure that old stacks are considered completely clean */
2455 /* unless written again. */
2456 GC_old_stacks_are_fresh();
2457 # endif
2458 }
2459
2460 #undef READ
2461
2462 GC_bool GC_page_was_dirty(h)
2463 struct hblk *h;
2464 {
2465 register word index = PHT_HASH(h);
2466 register GC_bool result;
2467
2468 result = get_pht_entry_from_index(GC_grungy_pages, index);
2469 # ifdef SOLARIS_THREADS
2470 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2471 /* This happens only if page was declared fresh since */
2472 /* the read_dirty call, e.g. because it's in an unused */
2473 /* thread stack. It's OK to treat it as clean, in */
2474 /* that case. And it's consistent with */
2475 /* GC_page_was_ever_dirty. */
2476 # endif
2477 return(result);
2478 }
2479
2480 GC_bool GC_page_was_ever_dirty(h)
2481 struct hblk *h;
2482 {
2483 register word index = PHT_HASH(h);
2484 register GC_bool result;
2485
2486 result = get_pht_entry_from_index(GC_written_pages, index);
2487 # ifdef SOLARIS_THREADS
2488 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2489 # endif
2490 return(result);
2491 }
2492
2493 /* Caller holds allocation lock. */
2494 void GC_is_fresh(h, n)
2495 struct hblk *h;
2496 word n;
2497 {
2498
2499 register word index;
2500
2501 # ifdef SOLARIS_THREADS
2502 register word i;
2503
2504 if (GC_fresh_pages != 0) {
2505 for (i = 0; i < n; i++) {
2506 ADD_FRESH_PAGE(h + i);
2507 }
2508 }
2509 # endif
2510 }
2511
2512 # endif /* PROC_VDB */
2513
2514
2515 # ifdef PCR_VDB
2516
2517 # include "vd/PCR_VD.h"
2518
2519 # define NPAGES (32*1024) /* 128 MB */
2520
2521 PCR_VD_DB GC_grungy_bits[NPAGES];
2522
2523 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2524 /* HBLKSIZE aligned. */
2525
2526 void GC_dirty_init()
2527 {
2528 GC_dirty_maintained = TRUE;
2529 /* For the time being, we assume the heap generally grows up */
2530 GC_vd_base = GC_heap_sects[0].hs_start;
2531 if (GC_vd_base == 0) {
2532 ABORT("Bad initial heap segment");
2533 }
2534 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2535 != PCR_ERes_okay) {
2536 ABORT("dirty bit initialization failed");
2537 }
2538 }
2539
2540 void GC_read_dirty()
2541 {
2542 /* lazily enable dirty bits on newly added heap sects */
2543 {
2544 static int onhs = 0;
2545 int nhs = GC_n_heap_sects;
2546 for( ; onhs < nhs; onhs++ ) {
2547 PCR_VD_WriteProtectEnable(
2548 GC_heap_sects[onhs].hs_start,
2549 GC_heap_sects[onhs].hs_bytes );
2550 }
2551 }
2552
2553
2554 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2555 != PCR_ERes_okay) {
2556 ABORT("dirty bit read failed");
2557 }
2558 }
2559
2560 GC_bool GC_page_was_dirty(h)
2561 struct hblk *h;
2562 {
2563 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2564 return(TRUE);
2565 }
2566 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2567 }
2568
2569 /*ARGSUSED*/
2570 void GC_write_hint(h)
2571 struct hblk *h;
2572 {
2573 PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2574 PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2575 }
2576
2577 # endif /* PCR_VDB */
2578
2579 /*
2580 * Call stack save code for debugging.
2581 * Should probably be in mach_dep.c, but that requires reorganization.
2582 */
2583 #if defined(SPARC) && !defined(LINUX)
2584 # if defined(SUNOS4)
2585 # include <machine/frame.h>
2586 # else
2587 # if defined (DRSNX)
2588 # include <sys/sparc/frame.h>
2589 # else
2590 # if defined(OPENBSD)
2591 # include <frame.h>
2592 # else
2593 # include <sys/frame.h>
2594 # endif
2595 # endif
2596 # endif
2597 # if NARGS > 6
2598 --> We only know how to get the first 6 arguments
2599 # endif
2600
2601 #ifdef SAVE_CALL_CHAIN
2602 /* Fill in the pc and argument information for up to NFRAMES of my */
2603 /* callers.  Ignore my frame and my caller's frame. */
2604
2605 #ifdef OPENBSD
2606 # define FR_SAVFP fr_fp
2607 # define FR_SAVPC fr_pc
2608 #else
2609 # define FR_SAVFP fr_savfp
2610 # define FR_SAVPC fr_savpc
2611 #endif
2612
2613 void GC_save_callers (info)
2614 struct callinfo info[NFRAMES];
2615 {
2616 struct frame *frame;
2617 struct frame *fp;
2618 int nframes = 0;
2619 word GC_save_regs_in_stack();
2620
2621 frame = (struct frame *) GC_save_regs_in_stack ();
2622
2623 for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
2624 fp = fp -> FR_SAVFP, nframes++) {
2625 register int i;
2626
2627 info[nframes].ci_pc = fp->FR_SAVPC;
2628 for (i = 0; i < NARGS; i++) {
2629 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
2630 }
2631 }
2632 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
2633 }
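
/*
 * Sketch of a consumer, not part of this file: walk the info array until a
 * zero ci_pc terminator or NFRAMES entries, undoing the complement applied
 * to the saved arguments above.  GC_dump_call_chain is hypothetical.
 */
#if 0
void GC_dump_call_chain(struct callinfo info[NFRAMES])
{
    int i, j;

    for (i = 0; i < NFRAMES && info[i].ci_pc != 0; i++) {
	GC_err_printf1("\tpc = 0x%lx, args:", (unsigned long)info[i].ci_pc);
	for (j = 0; j < NARGS; j++) {
	    GC_err_printf1(" 0x%lx", (unsigned long)(~(info[i].ci_arg[j])));
	}
	GC_err_printf0("\n");
    }
}
#endif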
2634
2635 #endif /* SAVE_CALL_CHAIN */
2636 #endif /* SPARC */
2637
2638
2639