/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <kernel-features.h>
#include <stack-aliasing.h>
#if !(defined(NEED_SEPARATE_REGISTER_STACK) || defined(NEED_STACK_SIZE_FOR_PTH_CREATE))

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
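
/* Illustrative usage sketch (not part of the original sources): a caller
   such as pthread_create is expected to expand these macros roughly as

     STACK_VARIABLES;
     int err = ALLOCATE_STACK (iattr, &pd);
     ...
     err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);

   so the single-stack and two-stack variants share one code path.  The
   names 'iattr', 'pd' and 'err' are placeholders only.  */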
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif
/* Unfortunately, under FreeBSD mmap fails with addr=NULL, flags=MAP_STACK.
   See http://www.freebsd.org/cgi/query-pr.cgi?pr=158755
   so do not use MAP_STACK at all.  */

/* Newer kernels have the MAP_STACK flag to indicate that a mapping is
   used for a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
/* Cache handling for not-yet free stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting this variable.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif
/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= KTID_TERMINATED)


static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
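
/* Illustrative note (not part of the original sources): the low bit of
   in_flight_stack encodes which operation was interrupted, i.e.

     stack_list_del:  in_flight_stack = (uintptr_t) elem;        bit 0 clear
     stack_list_add:  in_flight_stack = (uintptr_t) elem | 1;    bit 0 set

   __reclaim_stacks below decodes this with 'in_flight_stack & 1' and
   '& ~(uintptr_t) 1' to replay or undo the interrupted list operation
   after fork.  */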
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
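
/* Usage sketch (illustrative): allocate_stack below rounds the request
   and then tries the cache before falling back to mmap, roughly

     reqsize = size;
     pd = get_cached_stack (&size, &mem);
     if (pd == NULL)
       mem = mmap (NULL, size, prot,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

   On a cache hit SIZE and MEM describe the recycled block, which may be
   up to four times larger than the request.  */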
/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}
/* Add a stack frame which is not used anymore to the stack cache.  Must
   be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}
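
/* Example (illustrative): with the default stack_cache_maxsize of 40 MiB
   and 8 MiB thread stacks, at most five unused stacks stay cached;
   queuing a sixth pushes stack_cache_actsize to 48 MiB and __free_stacks
   then unmaps blocks from the tail of the list until the total is back
   at or below 40 MiB.  */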
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
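
/* Usage note (illustrative): this helper is called from allocate_stack
   below when a stack was mapped without PROT_EXEC but the process
   already requires executable stacks, and from __make_stacks_executable
   when that requirement appears only later (e.g. after loading an
   object marked as needing an executable stack).  */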
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  void *stacktop;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }
  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is required.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) attr->stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                    - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block thread-specific data array.
             This address will not change for the lifetime of this
             descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);


          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }


          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
          char *guard = mem;
#elif _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
#elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
#endif

#if defined(NEED_SEPARATE_REGISTER_STACK) || defined(NEED_STACK_SIZE_FOR_PTH_CREATE)
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
#elif _STACK_GROWS_DOWN
  *stack = stacktop;
#elif _STACK_GROWS_UP
  *stack = pd->stackblock;
#endif

  return 0;
}
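
/* Layout sketch (illustrative, assuming TLS_TCB_AT_TP and a downward
   growing stack):

     mem                                                    mem + size
     | guard (PROT_NONE) | usable stack ... | static TLS | TCB == pd |
                                            ^ stacktop

   The descriptor sits at the very top of the block and the reported
   stack top lies just below the static TLS area, as computed above.  */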
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this
             case we only need to check the beginning of these lists.  */
          int check_list (list_t *l)
          {
            if (l->next->prev != l)
              {
                assert (l->next->prev == elem);

                elem->next = l->next;
                elem->prev = l;
                l->next = elem;

                return 1;
              }

            return 0;
          }

          if (check_list (&stack_used) == 0)
            (void) check_list (&stack_cache);
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }

      in_flight_stack = 0;
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = KTID_TERMINATED;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}
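
/* Usage note (illustrative): __reclaim_stacks is meant to run in the
   child right after fork(), while the forking thread is the only one
   alive; that is why it can rebuild the lists and reset the locks
   without taking stack_cache_lock.  */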
#ifdef HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      lll_futex_wait (&t->setxid_futex, -2, LLL_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}
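
/* Illustrative summary (not part of the original sources) of the
   setxid_futex protocol used by the functions above and below:

     -1  thread not fully cloned yet; setxid callers must wait
     -2  a setxid caller is waiting for the clone to finish
      0  claimed; the thread must not exit before the handler runs
      1  released; any waiters have been woken

   setxid_unmark_thread below releases the futex the same way.  */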
static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
}
static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

#warning setxid fixup needed
  int val;
  pid_t pid = __getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL (tgkill, err, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or must
     already have exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
#warning setxid fixup needed
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permission to send SIGSETXID to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
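
/* Usage sketch (illustrative only, not part of the original sources):
   the setuid() family of functions is expected to funnel through this
   helper roughly as

     struct xid_command cmd;
     cmd.syscall_no = ...;              set to the setxid syscall number
     cmd.id[0] = (long int) uid;
     cmd.id[1] = cmd.id[2] = 0;
     int res = __nptl_setxid (&cmd);

   so every live thread applies the credential change via the SIGSETXID
   handler before the calling thread performs it itself.  The field names
   follow struct xid_command as used above; the '...' is deliberately
   left open since the syscall number is platform specific.  */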
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* We cannot delay the initialization of the Static TLS area, since
     it can be accessed with LE or IE, but since the DTV is only used
     by GD and LD, we can delay its update to avoid a race.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
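
/* Usage note (illustrative, not from the original sources): the dynamic
   linker invokes __wait_lookup_done through the THREAD_GSCOPE_WAIT hook
   before it frees link maps, so no thread is still inside a global-scope
   symbol lookup that could touch the memory being released.  */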