1 /* Library support for -fsplit-stack. */
2 /* Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by Ian Lance Taylor <iant@google.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
#pragma GCC optimize ("no-isolate-erroneous-paths-dereference")

/* powerpc 32-bit not supported.  */
#if !defined __powerpc__ || defined __powerpc64__

#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"

/* If inhibit_libc is defined, we cannot compile this file.  The
   effect is that people will not be able to use -fsplit-stack.  That
   is much better than failing the build particularly since people
   will want to define inhibit_libc while building a compiler which
   will be used to build glibc.  */

#ifndef inhibit_libc

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "generic-morestack.h"
/* Some systems use LD_PRELOAD or similar tricks to add hooks to
   mmap/munmap.  That breaks this code, because when we call mmap
   there is enough stack space for the system call but there is not,
   in general, enough stack space to run a hook.  At least when using
   glibc on GNU/Linux we can avoid the problem by calling __mmap and
   __munmap.  */

#if defined(__gnu_linux__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 26))

extern void *__mmap (void *, size_t, int, int, int, off_t);
extern int __munmap (void *, size_t);

#define mmap __mmap
#define munmap __munmap

#endif /* defined(__gnu_linux__) */

/* An unsigned integer type as wide as a pointer, used below for the
   dont_block_signals flag and for casting context array entries.  */

typedef unsigned int uintptr_type __attribute__ ((mode (pointer)));
/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_resetcontext (void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_releasecontext (void *context[10])
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
			   void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));
/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
195 /* This structure holds the (approximate) initial stack pointer and
196 size for the system supplied stack for a thread. This is set when
197 the thread is created. We also store a sigset_t here to hold the
198 signal mask while splitting the stack, since we don't want to store
199 that on the stack. */
203 /* The initial stack pointer. */
205 /* The stack length. */
207 /* A signal mask, put here so that the thread can use it without
208 needing stack space. */
210 /* Non-zero if we should not block signals. This is a reversed flag
211 so that the default zero value is the safe value. The type is
212 uintptr_type because it replaced one of the void * pointers in
214 uintptr_type dont_block_signals
;
215 /* Some extra space for later extensibility. */
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory.  */
  void *block;
};
233 /* These thread local global variables must be shared by all split
234 stack code across shared library boundaries. Therefore, they have
235 default visibility. They have extensibility fields if needed for
236 new versions. If more radical changes are needed, new code can be
237 written using new variable names, while still using the existing
238 variables in a backward compatible manner. Symbol versioning is
239 also used, although, since these variables are only referenced by
240 code in this file and generic-morestack-thread.c, it is likely that
241 simply using new names will suffice. */
243 /* The first stack segment allocated for this thread. */
245 __thread
struct stack_segment
*__morestack_segments
246 __attribute__ ((visibility ("default")));
248 /* The stack segment that we think we are currently using. This will
249 be correct in normal usage, but will be incorrect if an exception
250 unwinds into a different stack segment or if longjmp jumps to a
251 different stack segment. */
253 __thread
struct stack_segment
*__morestack_current_segment
254 __attribute__ ((visibility ("default")));
256 /* The initial stack pointer and size for this thread. */
258 __thread
struct initial_sp __morestack_initial_sp
259 __attribute__ ((visibility ("default")));
261 /* A static signal mask, to avoid taking up stack space. */
263 static sigset_t __morestack_fullmask
;
265 /* Page size, as returned from getpagesize(). Set on startup. */
266 static unsigned int static_pagesize
;
268 /* Set on startup to non-zero value if SPLIT_STACK_GUARD env var is set. */
269 static int use_guard_page
;
/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  BUF/BUFLEN is the output buffer; *PRINT_LEN is set to the
   number of characters produced (digits plus any leading '-').  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  int is_negative;
  int i;
  unsigned int uval;

  uval = (unsigned int) val;
  if (val < 0)
    {
      is_negative = 1;
      uval = - uval;
    }
  else
    is_negative = 0;

  /* Build the digits from the least significant end of the buffer
     backward, so the result need not be reversed.  */
  i = buflen;
  do
    {
      --i;
      buf[i] = '0' + (uval % 10);
      uval /= 10;
    }
  while (uval != 0 && i > 0);

  if (is_negative)
    {
      if (i > 0)
	--i;
      buf[i] = '-';
    }

  *print_len = buflen - i;
  return buf + i;
}
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

static void __morestack_fail (const char *, size_t, int)
  __attribute__ ((noreturn));

static void
__morestack_fail (const char *msg, size_t len, int err)
{
  char buf[24];
  static const char nl[] = "\n";
  struct iovec iov[3];
  union { char *p; const char *cp; } const_cast;

  const_cast.cp = msg;
  iov[0].iov_base = const_cast.p;
  iov[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  iov[1].iov_base = print_int (err, buf, sizeof buf, &iov[1].iov_len);
  const_cast.cp = &nl[0];
  iov[2].iov_base = const_cast.p;
  iov[2].iov_len = sizeof nl - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, iov, 3);
  abort ();
}
342 /* Allocate a new stack segment. FRAME_SIZE is the required frame
345 static struct stack_segment
*
346 allocate_segment (size_t frame_size
)
348 unsigned int pagesize
;
349 unsigned int overhead
;
350 unsigned int allocate
;
352 struct stack_segment
*pss
;
354 pagesize
= static_pagesize
;
355 overhead
= sizeof (struct stack_segment
);
358 if (allocate
< MINSIGSTKSZ
)
359 allocate
= ((MINSIGSTKSZ
+ overhead
+ pagesize
- 1)
361 if (allocate
< frame_size
)
362 allocate
= ((frame_size
+ overhead
+ pagesize
- 1)
366 allocate
+= pagesize
;
368 /* FIXME: If this binary requires an executable stack, then we need
369 to set PROT_EXEC. Unfortunately figuring that out is complicated
370 and target dependent. We would need to use dl_iterate_phdr to
371 see if there is any object which does not have a PT_GNU_STACK
372 phdr, though only for architectures which use that mechanism. */
373 space
= mmap (NULL
, allocate
, PROT_READ
| PROT_WRITE
,
374 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
375 if (space
== MAP_FAILED
)
377 static const char msg
[] =
378 "unable to allocate additional stack space: errno ";
379 __morestack_fail (msg
, sizeof msg
- 1, errno
);
386 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
388 space
= (char *) space
+ pagesize
;
390 guard
= space
+ allocate
- pagesize
;
393 mprotect (guard
, pagesize
, PROT_NONE
);
394 allocate
-= pagesize
;
397 pss
= (struct stack_segment
*) space
;
401 pss
->size
= allocate
- overhead
;
402 pss
->dynamic_allocation
= NULL
;
403 pss
->free_dynamic_allocation
= NULL
;
409 /* Free a list of dynamic blocks. */
412 free_dynamic_blocks (struct dynamic_allocation_blocks
*p
)
416 struct dynamic_allocation_blocks
*next
;
425 /* Merge two lists of dynamic blocks. */
427 static struct dynamic_allocation_blocks
*
428 merge_dynamic_blocks (struct dynamic_allocation_blocks
*a
,
429 struct dynamic_allocation_blocks
*b
)
431 struct dynamic_allocation_blocks
**pp
;
437 for (pp
= &a
->next
; *pp
!= NULL
; pp
= &(*pp
)->next
)
443 /* Release stack segments. If FREE_DYNAMIC is non-zero, we also free
444 any dynamic blocks. Otherwise we return them. */
446 struct dynamic_allocation_blocks
*
447 __morestack_release_segments (struct stack_segment
**pp
, int free_dynamic
)
449 struct dynamic_allocation_blocks
*ret
;
450 struct stack_segment
*pss
;
456 struct stack_segment
*next
;
457 unsigned int allocate
;
461 if (pss
->dynamic_allocation
!= NULL
462 || pss
->free_dynamic_allocation
!= NULL
)
466 free_dynamic_blocks (pss
->dynamic_allocation
);
467 free_dynamic_blocks (pss
->free_dynamic_allocation
);
471 ret
= merge_dynamic_blocks (pss
->dynamic_allocation
, ret
);
472 ret
= merge_dynamic_blocks (pss
->free_dynamic_allocation
, ret
);
476 allocate
= pss
->size
+ sizeof (struct stack_segment
);
477 if (munmap (pss
, allocate
) < 0)
479 static const char msg
[] = "munmap of stack space failed: errno ";
480 __morestack_fail (msg
, sizeof msg
- 1, errno
);
490 /* This function is called by a processor specific function to set the
491 initial stack pointer for a thread. The operating system will
492 always create a stack for a thread. Here we record a stack pointer
493 near the base of that stack. The size argument lets the processor
494 specific code estimate how much stack space is available on this
498 __generic_morestack_set_initial_sp (void *sp
, size_t len
)
500 /* The stack pointer most likely starts on a page boundary. Adjust
501 to the nearest 512 byte boundary. It's not essential that we be
502 precise here; getting it wrong will just leave some stack space
504 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
505 sp
= (void *) ((((__UINTPTR_TYPE__
) sp
+ 511U) / 512U) * 512U);
507 sp
= (void *) ((((__UINTPTR_TYPE__
) sp
- 511U) / 512U) * 512U);
510 __morestack_initial_sp
.sp
= sp
;
511 __morestack_initial_sp
.len
= len
;
512 sigemptyset (&__morestack_initial_sp
.mask
);
514 sigfillset (&__morestack_fullmask
);
515 #if defined(__GLIBC__) && defined(__linux__)
516 /* In glibc, the first two real time signals are used by the NPTL
517 threading library. By taking them out of the set of signals, we
518 avoiding copying the signal mask in pthread_sigmask. More
519 importantly, pthread_sigmask uses less stack space on x86_64. */
520 sigdelset (&__morestack_fullmask
, __SIGRTMIN
);
521 sigdelset (&__morestack_fullmask
, __SIGRTMIN
+ 1);
525 /* This function is called by a processor specific function which is
526 run in the prologue when more stack is needed. The processor
527 specific function handles the details of saving registers and
528 frobbing the actual stack pointer. This function is responsible
529 for allocating a new stack segment and for copying a parameter
530 block from the old stack to the new one. On function entry
531 *PFRAME_SIZE is the size of the required stack frame--the returned
532 stack must be at least this large. On function exit *PFRAME_SIZE
533 is the amount of space remaining on the allocated stack. OLD_STACK
534 points at the parameters the old stack (really the current one
535 while this function is running). OLD_STACK is saved so that it can
536 be returned by a later call to __generic_releasestack. PARAM_SIZE
537 is the size in bytes of parameters to copy to the new stack. This
538 function returns a pointer to the new stack segment, pointing to
539 the memory after the parameters have been copied. The returned
540 value minus the returned *PFRAME_SIZE (or plus if the stack grows
541 upward) is the first address on the stack which should not be used.
543 This function is running on the old stack and has only a limited
544 amount of stack space available. */
547 __generic_morestack (size_t *pframe_size
, void *old_stack
, size_t param_size
)
549 size_t frame_size
= *pframe_size
;
550 struct stack_segment
*current
;
551 struct stack_segment
**pp
;
552 struct dynamic_allocation_blocks
*dynamic
;
559 current
= __morestack_current_segment
;
561 pp
= current
!= NULL
? ¤t
->next
: &__morestack_segments
;
562 if (*pp
!= NULL
&& (*pp
)->size
< frame_size
)
563 dynamic
= __morestack_release_segments (pp
, 0);
570 current
= allocate_segment (frame_size
+ param_size
);
571 current
->prev
= __morestack_current_segment
;
575 current
->old_stack
= old_stack
;
577 __morestack_current_segment
= current
;
581 /* Move the free blocks onto our list. We don't want to call
582 free here, as we are short on stack space. */
583 current
->free_dynamic_allocation
=
584 merge_dynamic_blocks (dynamic
, current
->free_dynamic_allocation
);
587 *pframe_size
= current
->size
- param_size
;
589 /* Align the returned stack to a 32-byte boundary. */
590 aligned
= (param_size
+ 31) & ~ (size_t) 31;
592 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
594 char *bottom
= (char *) (current
+ 1) + current
->size
;
595 to
= bottom
- aligned
;
596 ret
= bottom
- aligned
;
600 to
+= aligned
- param_size
;
601 ret
= (char *) (current
+ 1) + aligned
;
604 /* We don't call memcpy to avoid worrying about the dynamic linker
605 trying to resolve it. */
606 from
= (char *) old_stack
;
607 for (i
= 0; i
< param_size
; i
++)
613 /* This function is called by a processor specific function when it is
614 ready to release a stack segment. We don't actually release the
615 stack segment, we just move back to the previous one. The current
616 stack segment will still be available if we need it in
617 __generic_morestack. This returns a pointer to the new stack
618 segment to use, which is the one saved by a previous call to
619 __generic_morestack. The processor specific function is then
620 responsible for actually updating the stack pointer. This sets
621 *PAVAILABLE to the amount of stack space now available. */
624 __generic_releasestack (size_t *pavailable
)
626 struct stack_segment
*current
;
629 current
= __morestack_current_segment
;
630 old_stack
= current
->old_stack
;
631 current
= current
->prev
;
632 __morestack_current_segment
= current
;
636 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
637 *pavailable
= (char *) old_stack
- (char *) (current
+ 1);
639 *pavailable
= (char *) (current
+ 1) + current
->size
- (char *) old_stack
;
646 /* We have popped back to the original stack. */
647 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
648 if ((char *) old_stack
>= (char *) __morestack_initial_sp
.sp
)
651 used
= (char *) __morestack_initial_sp
.sp
- (char *) old_stack
;
653 if ((char *) old_stack
<= (char *) __morestack_initial_sp
.sp
)
656 used
= (char *) old_stack
- (char *) __morestack_initial_sp
.sp
;
659 if (used
> __morestack_initial_sp
.len
)
662 *pavailable
= __morestack_initial_sp
.len
- used
;
668 /* Block signals while splitting the stack. This avoids trouble if we
669 try to invoke a signal handler which itself wants to split the
672 extern int pthread_sigmask (int, const sigset_t
*, sigset_t
*)
673 __attribute__ ((weak
));
676 __morestack_block_signals (void)
678 if (__morestack_initial_sp
.dont_block_signals
)
680 else if (pthread_sigmask
)
681 pthread_sigmask (SIG_BLOCK
, &__morestack_fullmask
,
682 &__morestack_initial_sp
.mask
);
684 sigprocmask (SIG_BLOCK
, &__morestack_fullmask
,
685 &__morestack_initial_sp
.mask
);
688 /* Unblock signals while splitting the stack. */
691 __morestack_unblock_signals (void)
693 if (__morestack_initial_sp
.dont_block_signals
)
695 else if (pthread_sigmask
)
696 pthread_sigmask (SIG_SETMASK
, &__morestack_initial_sp
.mask
, NULL
);
698 sigprocmask (SIG_SETMASK
, &__morestack_initial_sp
.mask
, NULL
);
701 /* This function is called to allocate dynamic stack space, for alloca
702 or a variably sized array. This is a regular function with
703 sufficient stack space, so we just use malloc to allocate the
704 space. We attach the allocated blocks to the current stack
705 segment, so that they will eventually be reused or freed. */
708 __morestack_allocate_stack_space (size_t size
)
710 struct stack_segment
*seg
, *current
;
711 struct dynamic_allocation_blocks
*p
;
713 /* We have to block signals to avoid getting confused if we get
714 interrupted by a signal whose handler itself uses alloca or a
715 variably sized array. */
716 __morestack_block_signals ();
718 /* Since we don't want to call free while we are low on stack space,
719 we may have a list of already allocated blocks waiting to be
720 freed. Release them all, unless we find one that is large
721 enough. We don't look at every block to see if one is large
722 enough, just the first one, because we aren't trying to build a
723 memory allocator here, we're just trying to speed up common
726 current
= __morestack_current_segment
;
728 for (seg
= __morestack_segments
; seg
!= NULL
; seg
= seg
->next
)
730 p
= seg
->free_dynamic_allocation
;
735 seg
->free_dynamic_allocation
= p
->next
;
739 free_dynamic_blocks (p
);
740 seg
->free_dynamic_allocation
= NULL
;
747 /* We need to allocate additional memory. */
748 p
= malloc (sizeof (*p
));
752 p
->block
= malloc (size
);
753 if (p
->block
== NULL
)
757 /* If we are still on the initial stack, then we have a space leak.
761 p
->next
= current
->dynamic_allocation
;
762 current
->dynamic_allocation
= p
;
765 __morestack_unblock_signals ();
770 /* Find the stack segment for STACK and return the amount of space
771 available. This is used when unwinding the stack because of an
772 exception, in order to reset the stack guard correctly. */
775 __generic_findstack (void *stack
)
777 struct stack_segment
*pss
;
780 for (pss
= __morestack_current_segment
; pss
!= NULL
; pss
= pss
->prev
)
782 if ((char *) pss
< (char *) stack
783 && (char *) pss
+ pss
->size
> (char *) stack
)
785 __morestack_current_segment
= pss
;
786 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
787 return (char *) stack
- (char *) (pss
+ 1);
789 return (char *) (pss
+ 1) + pss
->size
- (char *) stack
;
794 /* We have popped back to the original stack. */
796 if (__morestack_initial_sp
.sp
== NULL
)
799 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
800 if ((char *) stack
>= (char *) __morestack_initial_sp
.sp
)
803 used
= (char *) __morestack_initial_sp
.sp
- (char *) stack
;
805 if ((char *) stack
<= (char *) __morestack_initial_sp
.sp
)
808 used
= (char *) stack
- (char *) __morestack_initial_sp
.sp
;
811 if (used
> __morestack_initial_sp
.len
)
814 return __morestack_initial_sp
.len
- used
;
817 /* This function is called at program startup time to make sure that
818 mmap, munmap, and getpagesize are resolved if linking dynamically.
819 We want to resolve them while we have enough stack for them, rather
820 than calling into the dynamic linker while low on stack space.
821 Similarly, invoke getenv here to check for split-stack related control
822 variables, since doing do as part of the __morestack path can result
823 in unwanted use of SSE/AVX registers (see GCC PR 86213). */
826 __morestack_load_mmap (void)
828 /* Call with bogus values to run faster. We don't care if the call
829 fails. Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
830 TLS accessor function is resolved. */
831 mmap (__morestack_current_segment
, 0, PROT_READ
, MAP_ANONYMOUS
, -1, 0);
832 mprotect (NULL
, 0, 0);
833 munmap (0, static_pagesize
);
835 /* Initialize these values here, so as to avoid dynamic linker
836 activity as part of a __morestack call. */
837 static_pagesize
= getpagesize();
838 use_guard_page
= getenv ("SPLIT_STACK_GUARD") != 0;
841 /* This function may be used to iterate over the stack segments.
842 This can be called like this.
843 void *next_segment = NULL;
844 void *next_sp = NULL;
845 void *initial_sp = NULL;
848 while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
849 &next_segment, &next_sp,
850 &initial_sp)) != NULL)
852 // Stack segment starts at stack and is stack_size bytes long.
855 There is no way to iterate over the stack segments of a different
856 thread. However, what is permitted is for one thread to call this
857 with the first two values NULL, to pass next_segment, next_sp, and
858 initial_sp to a different thread, and then to suspend one way or
859 another. A different thread may run the subsequent
860 __morestack_find iterations. Of course, this will only work if the
861 first thread is suspended during the __morestack_find iterations.
862 If not, the second thread will be looking at the stack while it is
863 changing, and anything could happen.
865 FIXME: This should be declared in some header file, but where? */
868 __splitstack_find (void *segment_arg
, void *sp
, size_t *len
,
869 void **next_segment
, void **next_sp
,
872 struct stack_segment
*segment
;
876 if (segment_arg
== (void *) (uintptr_type
) 1)
878 char *isp
= (char *) *initial_sp
;
883 *next_segment
= (void *) (uintptr_type
) 2;
885 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
886 if ((char *) sp
>= isp
)
888 *len
= (char *) isp
- (char *) sp
;
891 if ((char *) sp
<= (char *) isp
)
893 *len
= (char *) sp
- (char *) isp
;
897 else if (segment_arg
== (void *) (uintptr_type
) 2)
899 else if (segment_arg
!= NULL
)
900 segment
= (struct stack_segment
*) segment_arg
;
903 *initial_sp
= __morestack_initial_sp
.sp
;
904 segment
= __morestack_current_segment
;
905 sp
= (void *) &segment
;
909 return __splitstack_find ((void *) (uintptr_type
) 1, sp
, len
,
910 next_segment
, next_sp
, initial_sp
);
911 if ((char *) sp
>= (char *) (segment
+ 1)
912 && (char *) sp
<= (char *) (segment
+ 1) + segment
->size
)
914 segment
= segment
->prev
;
918 if (segment
->prev
== NULL
)
919 *next_segment
= (void *) (uintptr_type
) 1;
921 *next_segment
= segment
->prev
;
923 /* The old_stack value is the address of the function parameters of
924 the function which called __morestack. So if f1 called f2 which
925 called __morestack, the stack looks like this:
927 parameters <- old_stack
930 registers pushed by __morestack
932 The registers pushed by __morestack may not be visible on any
933 other stack, if we are being called by a signal handler
934 immediately after the call to __morestack_unblock_signals. We
935 want to adjust our return value to include those registers. This
936 is target dependent. */
938 nsp
= (char *) segment
->old_stack
;
942 /* We've reached the top of the stack. */
943 *next_segment
= (void *) (uintptr_type
) 2;
947 #if defined (__x86_64__)
948 nsp
-= 12 * sizeof (void *);
949 #elif defined (__i386__)
950 nsp
-= 6 * sizeof (void *);
951 #elif defined __powerpc64__
952 #elif defined __s390x__
954 #elif defined __s390__
957 #error "unrecognized target"
960 *next_sp
= (void *) nsp
;
963 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
964 *len
= (char *) (segment
+ 1) + segment
->size
- (char *) sp
;
967 *len
= (char *) sp
- (char *) (segment
+ 1);
968 ret
= (void *) (segment
+ 1);
974 /* Tell the split stack code whether it has to block signals while
975 manipulating the stack. This is for programs in which some threads
976 block all signals. If a thread already blocks signals, there is no
977 need for the split stack code to block them as well. If NEW is not
978 NULL, then if *NEW is non-zero signals will be blocked while
979 splitting the stack, otherwise they will not. If OLD is not NULL,
980 *OLD will be set to the old value. */
983 __splitstack_block_signals (int *new, int *old
)
986 *old
= __morestack_initial_sp
.dont_block_signals
? 0 : 1;
988 __morestack_initial_sp
.dont_block_signals
= *new ? 0 : 1;
/* The offsets into the arrays used by __splitstack_getcontext and
   __splitstack_setcontext.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,
  CURRENT_SEGMENT = 1,
  CURRENT_STACK = 2,
  STACK_GUARD = 3,
  INITIAL_SP = 4,
  INITIAL_SP_LEN = 5,
  BLOCK_SIGNALS = 6,

  NUMBER_OFFSETS = 10
};
1007 /* Get the current split stack context. This may be used for
1008 coroutine switching, similar to getcontext. The argument should
1009 have at least 10 void *pointers for extensibility, although we
1010 don't currently use all of them. This would normally be called
1011 immediately before a call to getcontext or swapcontext or
1015 __splitstack_getcontext (void *context
[NUMBER_OFFSETS
])
1017 memset (context
, 0, NUMBER_OFFSETS
* sizeof (void *));
1018 context
[MORESTACK_SEGMENTS
] = (void *) __morestack_segments
;
1019 context
[CURRENT_SEGMENT
] = (void *) __morestack_current_segment
;
1020 context
[CURRENT_STACK
] = (void *) &context
;
1021 context
[STACK_GUARD
] = __morestack_get_guard ();
1022 context
[INITIAL_SP
] = (void *) __morestack_initial_sp
.sp
;
1023 context
[INITIAL_SP_LEN
] = (void *) (uintptr_type
) __morestack_initial_sp
.len
;
1024 context
[BLOCK_SIGNALS
] = (void *) __morestack_initial_sp
.dont_block_signals
;
1027 /* Set the current split stack context. The argument should be a
1028 context previously passed to __splitstack_getcontext. This would
1029 normally be called immediately after a call to getcontext or
1030 swapcontext or setjmp if something jumped to it. */
1033 __splitstack_setcontext (void *context
[NUMBER_OFFSETS
])
1035 __morestack_segments
= (struct stack_segment
*) context
[MORESTACK_SEGMENTS
];
1036 __morestack_current_segment
=
1037 (struct stack_segment
*) context
[CURRENT_SEGMENT
];
1038 __morestack_set_guard (context
[STACK_GUARD
]);
1039 __morestack_initial_sp
.sp
= context
[INITIAL_SP
];
1040 __morestack_initial_sp
.len
= (size_t) context
[INITIAL_SP_LEN
];
1041 __morestack_initial_sp
.dont_block_signals
=
1042 (uintptr_type
) context
[BLOCK_SIGNALS
];
1045 /* Create a new split stack context. This will allocate a new stack
1046 segment which may be used by a coroutine. STACK_SIZE is the
1047 minimum size of the new stack. The caller is responsible for
1048 actually setting the stack pointer. This would normally be called
1049 before a call to makecontext, and the returned stack pointer and
1050 size would be used to set the uc_stack field. A function called
1051 via makecontext on a stack created by __splitstack_makecontext may
1052 not return. Note that the returned pointer points to the lowest
1053 address in the stack space, and thus may not be the value to which
1054 to set the stack pointer. */
1057 __splitstack_makecontext (size_t stack_size
, void *context
[NUMBER_OFFSETS
],
1060 struct stack_segment
*segment
;
1063 memset (context
, 0, NUMBER_OFFSETS
* sizeof (void *));
1064 segment
= allocate_segment (stack_size
);
1065 context
[MORESTACK_SEGMENTS
] = segment
;
1066 context
[CURRENT_SEGMENT
] = segment
;
1067 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1068 initial_sp
= (void *) ((char *) (segment
+ 1) + segment
->size
);
1070 initial_sp
= (void *) (segment
+ 1);
1072 context
[STACK_GUARD
] = __morestack_make_guard (initial_sp
, segment
->size
);
1073 context
[INITIAL_SP
] = NULL
;
1074 context
[INITIAL_SP_LEN
] = 0;
1075 *size
= segment
->size
;
1076 return (void *) (segment
+ 1);
1079 /* Given an existing split stack context, reset it back to the start
1080 of the stack. Return the stack pointer and size, appropriate for
1081 use with makecontext. This may be used if a coroutine exits, in
1082 order to reuse the stack segments for a new coroutine. */
1085 __splitstack_resetcontext (void *context
[10], size_t *size
)
1087 struct stack_segment
*segment
;
1089 size_t initial_size
;
1092 /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
1093 and INITIAL_SP_LEN are correct. */
1095 segment
= context
[MORESTACK_SEGMENTS
];
1096 context
[CURRENT_SEGMENT
] = segment
;
1097 context
[CURRENT_STACK
] = NULL
;
1098 if (segment
== NULL
)
1100 initial_sp
= context
[INITIAL_SP
];
1101 initial_size
= (uintptr_type
) context
[INITIAL_SP_LEN
];
1103 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1104 ret
= (void *) ((char *) ret
- initial_size
);
1109 #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1110 initial_sp
= (void *) ((char *) (segment
+ 1) + segment
->size
);
1112 initial_sp
= (void *) (segment
+ 1);
1114 initial_size
= segment
->size
;
1115 ret
= (void *) (segment
+ 1);
1117 context
[STACK_GUARD
] = __morestack_make_guard (initial_sp
, initial_size
);
1118 context
[BLOCK_SIGNALS
] = NULL
;
1119 *size
= initial_size
;
1123 /* Release all the memory associated with a splitstack context. This
1124 may be used if a coroutine exits and the associated stack should be
1128 __splitstack_releasecontext (void *context
[10])
1130 __morestack_release_segments (((struct stack_segment
**)
1131 &context
[MORESTACK_SEGMENTS
]),
1135 /* Like __splitstack_block_signals, but operating on CONTEXT, rather
1136 than on the current state. */
1139 __splitstack_block_signals_context (void *context
[NUMBER_OFFSETS
], int *new,
1143 *old
= ((uintptr_type
) context
[BLOCK_SIGNALS
]) != 0 ? 0 : 1;
1145 context
[BLOCK_SIGNALS
] = (void *) (uintptr_type
) (*new ? 0 : 1);
1148 /* Find the stack segments associated with a split stack context.
1149 This will return the address of the first stack segment and set
1150 *STACK_SIZE to its size. It will set next_segment, next_sp, and
1151 initial_sp which may be passed to __splitstack_find to find the
1152 remaining segments. */
1155 __splitstack_find_context (void *context
[NUMBER_OFFSETS
], size_t *stack_size
,
1156 void **next_segment
, void **next_sp
,
1160 struct stack_segment
*segment
;
1162 *initial_sp
= context
[INITIAL_SP
];
1164 sp
= context
[CURRENT_STACK
];
1167 /* Most likely this context was created but was never used. The
1168 value 2 is a code used by __splitstack_find to mean that we
1169 have reached the end of the list of stacks. */
1170 *next_segment
= (void *) (uintptr_type
) 2;
1176 segment
= context
[CURRENT_SEGMENT
];
1177 if (segment
== NULL
)
1179 /* Most likely this context was saved by a thread which was not
1180 created using __splistack_makecontext and which has never
1181 split the stack. The value 1 is a code used by
1182 __splitstack_find to look at the initial stack. */
1183 segment
= (struct stack_segment
*) (uintptr_type
) 1;
1186 return __splitstack_find (segment
, sp
, stack_size
, next_segment
, next_sp
,
1190 #endif /* !defined (inhibit_libc) */
1191 #endif /* not powerpc 32-bit */