/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <version.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;

void __nptl_set_robust (struct pthread *);
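
/* Table of libpthread entry points that is handed to libc through
   __libc_pthread_init (see below); in the shared build libc uses it to
   forward pthread_* calls to libpthread.  */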
#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
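
/* Register SELF's robust mutex list head with the kernel, so that robust
   mutexes held at abnormal thread exit are handed over correctly.  */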
void
__nptl_set_robust (struct pthread *self)
{
  INTERNAL_SYSCALL_CALL (set_robust_list, &self->robust_head,
                         sizeof (struct robust_list_head));
}

/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}

struct xid_command *__xidcmd attribute_hidden;
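/* __xidcmd is filled in by __nptl_setxid with the set*id system call
   number and its arguments before SIGSETXID is sent to each thread; its
   cntr field counts the threads that still have to run the handler
   below.  */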

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result)))
    error = INTERNAL_SYSCALL_ERRNO (result);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}

/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));

/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
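  /* specific_1stblock is the descriptor's statically allocated first-level
     block of thread-specific data slots; pointing specific[0] at it lets
     the lowest pthread keys work without any allocation.  user_stack is
     true because the initial thread's stack was not allocated by
     libpthread.  */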

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
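    /* futex_offset is the distance from a robust mutex's list entry
       (__data.__list) to its futex word (__data.__lock); the kernel uses
       it to locate and release the lock word should this thread die while
       holding robust mutexes.  */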
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                     sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res))
      set_robust_list_not_avail ();
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Before initializing GL (dl_stack_user), the debugger could not
     find us and had to set __nptl_initial_report_events.  Propagate
     its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_UNBLOCK, &sa.sa_mask,
                         NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.internal.stacksize = limit.rlim_cur;
  __default_pthread_attr.internal.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
#endif

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
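  /* __libc_pthread_init also hands the pthread_functions table above to
     libc and, when the multiple-threads flag lives in libc rather than in
     the TCB, returns the address of that flag.  */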
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  __pthread_tunables_init ();
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)

/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}