/* nptl/nptl-init.c — initialization of the NPTL (Native POSIX Thread
   Library) implementation inside glibc.  */
04277e02 1/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
76a50749
UD
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
59ba27a6 16 License along with the GNU C Library; if not, see
5a82c748 17 <https://www.gnu.org/licenses/>. */
76a50749
UD
18
19#include <assert.h>
cbd8aeb8 20#include <errno.h>
76a50749
UD
21#include <limits.h>
22#include <signal.h>
23#include <stdlib.h>
24#include <unistd.h>
25#include <sys/param.h>
26#include <sys/resource.h>
27#include <pthreadP.h>
28#include <atomic.h>
29#include <ldsodefs.h>
30#include <tls.h>
7a775e6b 31#include <list.h>
76a50749
UD
32#include <fork.h>
33#include <version.h>
bf293afe 34#include <shlib-compat.h>
2c0b891a 35#include <smp.h>
2edb61e3 36#include <lowlevellock.h>
a2f0363f 37#include <futex-internal.h>
f8de5057 38#include <kernel-features.h>
9090848d 39#include <libc-pointer-arith.h>
7cea6212 40#include <pthread-pids.h>
6310e6be 41#include <pthread_mutex_conf.h>
76a50749 42
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding multiple-threads flag in libc; set up
   below via __libc_pthread_init so libc can see when more than one
   thread exists.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of the static TLS block, filled in from
   _dl_get_tls_static_info during initialization.  __static_tls_align_m1
   is the alignment minus one, ready for use as a mask.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call turned out not to be
   available; zero (default) while it is assumed usable.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
/* The kernel is known to have the syscall; recording unavailability is
   a no-op.  */
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


/* In the shared build the definition below is file-local; in the static
   build it is provided elsewhere.  */
#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);
71
#ifdef SHARED
/* Function table handed to libc via __libc_pthread_init so that libc
   (and ld.so) can call into libpthread without a direct link-time
   dependency.  The field layout is ABI between libc and libpthread;
   do not reorder or remove entries.  */
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    /* Compatibility entry points for the pre-2.3.2 condvar ABI.  */
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
/* Static build: libc calls libpthread directly; no table needed.  */
# define ptr_pthread_functions NULL
#endif
128
129
#ifdef SHARED
static
#endif
/* Register SELF's robust mutex list head with the kernel so that the
   kernel can wake waiters on robust mutexes the thread holds if it
   dies.  Called for each thread (e.g. after fork).  Errors are
   ignored: if the syscall is unavailable robust mutexes simply lack
   kernel support.  */
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
		    sizeof (struct robust_list_head));
#endif
}
cca50323
UD
142
143
#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.
   It atomically transitions the thread's cancelhandling word to the
   canceled state and, if asynchronous cancellation is enabled, unwinds
   the thread immediately.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  /* CAS retry loop: other threads may concurrently update
     cancelhandling, so keep retrying until our update lands or we
     observe the thread is already canceled/exiting.  */
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set, but if the signal is directly sent (internally
	 or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      /* Lost the race; retry with the value another thread stored.  */
      oldval = curval;
    }
}
#endif
76a50749
UD
191
192
#ifdef SIGSETXID
/* Shared command block describing the setxid syscall every thread must
   perform; written by the initiating thread in __nptl_setxid.  */
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  /* Perform the requested setxid syscall on this thread and report any
     error back to the initiating thread.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
				 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  CAS loop because cancelhandling may be
     updated concurrently by cancellation machinery.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
					  flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex the initiator may be blocked on.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  /* Last thread to finish wakes the initiator waiting on the counter.  */
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif
2edb61e3
UD
240
241
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete;
   its value is propagated into the initial thread descriptor below.  */
static bool __nptl_initial_report_events __attribute_used__;
0ecf9c10 249
76a50749 250void
4e9b5995 251__pthread_initialize_minimal_internal (void)
76a50749 252{
76a50749 253 /* Minimal initialization of the thread descriptor. */
75cddafe 254 struct pthread *pd = THREAD_SELF;
7cea6212 255 __pthread_initialize_pids (pd);
76a50749
UD
256 THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
257 THREAD_SETMEM (pd, user_stack, true);
76a50749 258
0f6699ea 259 /* Initialize the robust mutex data. */
992328e5 260 {
06be6368 261#if __PTHREAD_MUTEX_HAVE_PREV
992328e5 262 pd->robust_prev = &pd->robust_head;
0f6699ea 263#endif
992328e5 264 pd->robust_head.list = &pd->robust_head;
0f6699ea 265#ifdef __NR_set_robust_list
992328e5
RM
266 pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
267 - offsetof (pthread_mutex_t,
268 __data.__list.__next));
269 INTERNAL_SYSCALL_DECL (err);
270 int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
271 sizeof (struct robust_list_head));
272 if (INTERNAL_SYSCALL_ERROR_P (res, err))
0f6699ea 273#endif
992328e5
RM
274 set_robust_list_not_avail ();
275 }
0f6699ea 276
675620f7
UD
277 /* Set initial thread's stack block from 0 up to __libc_stack_end.
278 It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
279 purposes this is good enough. */
280 THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
281
fde89ad0 282 /* Initialize the list of all running threads with the main thread. */
a4548cea 283 INIT_LIST_HEAD (&__stack_user);
d4f64e1a 284 list_add (&pd->list, &__stack_user);
76a50749 285
0ecf9c10
RM
286 /* Before initializing __stack_user, the debugger could not find us and
287 had to set __nptl_initial_report_events. Propagate its setting. */
288 THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
76a50749 289
327ae257
RM
290#if defined SIGCANCEL || defined SIGSETXID
291 struct sigaction sa;
292 __sigemptyset (&sa.sa_mask);
293
294# ifdef SIGCANCEL
76a50749
UD
295 /* Install the cancellation signal handler. If for some reason we
296 cannot install the handler we do not abort. Maybe we should, but
297 it is only asynchronous cancellation which is affected. */
a1ed6b4c
UD
298 sa.sa_sigaction = sigcancel_handler;
299 sa.sa_flags = SA_SIGINFO;
76a50749 300 (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
327ae257 301# endif
76a50749 302
327ae257 303# ifdef SIGSETXID
2edb61e3
UD
304 /* Install the handle to change the threads' uid/gid. */
305 sa.sa_sigaction = sighandler_setxid;
306 sa.sa_flags = SA_SIGINFO | SA_RESTART;
2edb61e3 307 (void) __libc_sigaction (SIGSETXID, &sa, NULL);
327ae257 308# endif
2edb61e3 309
708bfb9a 310 /* The parent process might have left the signals blocked. Just in
f006d3a0
UD
311 case, unblock it. We reuse the signal mask in the sigaction
312 structure. It is already cleared. */
327ae257 313# ifdef SIGCANCEL
f006d3a0 314 __sigaddset (&sa.sa_mask, SIGCANCEL);
327ae257
RM
315# endif
316# ifdef SIGSETXID
708bfb9a 317 __sigaddset (&sa.sa_mask, SIGSETXID);
327ae257 318# endif
992328e5
RM
319 {
320 INTERNAL_SYSCALL_DECL (err);
321 (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
322 NULL, _NSIG / 8);
323 }
327ae257 324#endif
f006d3a0 325
b399707e
RM
326 /* Get the size of the static and alignment requirements for the TLS
327 block. */
328 size_t static_tls_align;
329 _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);
330
331 /* Make sure the size takes all the alignments into account. */
332 if (STACK_ALIGN > static_tls_align)
333 static_tls_align = STACK_ALIGN;
334 __static_tls_align_m1 = static_tls_align - 1;
335
336 __static_tls_size = roundup (__static_tls_size, static_tls_align);
76a50749 337
4e9b5995
CD
338 /* Determine the default allowed stack size. This is the size used
339 in case the user does not specify one. */
340 struct rlimit limit;
c5c2b7c3 341 if (__getrlimit (RLIMIT_STACK, &limit) != 0
4e9b5995
CD
342 || limit.rlim_cur == RLIM_INFINITY)
343 /* The system limit is not usable. Use an architecture-specific
344 default. */
345 limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
346 else if (limit.rlim_cur < PTHREAD_STACK_MIN)
347 /* The system limit is unusably small.
348 Use the minimal size acceptable. */
349 limit.rlim_cur = PTHREAD_STACK_MIN;
e23872c8 350
4e9b5995
CD
351 /* Make sure it meets the minimum size that allocate_stack
352 (allocatestack.c) will demand, which depends on the page size. */
353 const uintptr_t pagesz = GLRO(dl_pagesize);
354 const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
355 if (limit.rlim_cur < minstack)
356 limit.rlim_cur = minstack;
e23872c8 357
4e9b5995 358 /* Round the resource limit up to page size. */
6d03458e 359 limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
61dd6208 360 lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
e903a713
SP
361 __default_pthread_attr.stacksize = limit.rlim_cur;
362 __default_pthread_attr.guardsize = GLRO (dl_pagesize);
61dd6208 363 lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
76a50749 364
b1531183 365#ifdef SHARED
334fcf2a
UD
366 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
367 keep the lock count from the ld.so implementation. */
4d17e683
AS
368 GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
369 GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
334fcf2a
UD
370 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
371 GL(dl_load_lock).mutex.__data.__count = 0;
372 while (rtld_lock_count-- > 0)
4d17e683 373 __pthread_mutex_lock (&GL(dl_load_lock).mutex);
54ee14b3
UD
374
375 GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
b1531183
UD
376#endif
377
adc12574
UD
378 GL(dl_init_static_tls) = &__pthread_init_static_tls;
379
7adefea8
UD
380 GL(dl_wait_lookup_done) = &__wait_lookup_done;
381
76a50749 382 /* Register the fork generation counter with the libc. */
5a03acfe
UD
383#ifndef TLS_MULTIPLE_THREADS_IN_TCB
384 __libc_multiple_threads_ptr =
385#endif
386 __libc_pthread_init (&__fork_generation, __reclaim_stacks,
387 ptr_pthread_functions);
2c0b891a
UD
388
389 /* Determine whether the machine is SMP or not. */
390 __is_smp = is_smp_system ();
6310e6be
KW
391
392#if HAVE_TUNABLES
393 __pthread_tunables_init ();
394#endif
76a50749 395}
2ae920ed
UD
396strong_alias (__pthread_initialize_minimal_internal,
397 __pthread_initialize_minimal)
2c1094bd
UD
398
399
85188d82
FW
400/* This function is internal (it has a GLIBC_PRIVATE) version, but it
401 is widely used (either via weak symbol, or dlsym) to obtain the
402 __static_tls_size value. This value is then used to adjust the
403 value of the stack size attribute, so that applications receive the
404 full requested stack size, not diminished by the TCB and static TLS
405 allocation on the stack. Once the TCB is separately allocated,
406 this function should be removed or renamed (if it is still
407 necessary at that point). */
2c1094bd
UD
408size_t
409__pthread_get_minstack (const pthread_attr_t *attr)
410{
630f4cc3 411 return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
2c1094bd 412}