]> git.ipfire.org Git - thirdparty/glibc.git/blame - nptl/nptl-init.c
Assume LLL_LOCK_INITIALIZER is 0
[thirdparty/glibc.git] / nptl / nptl-init.c
CommitLineData
/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
18
19#include <assert.h>
cbd8aeb8 20#include <errno.h>
76a50749
UD
21#include <limits.h>
22#include <signal.h>
23#include <stdlib.h>
24#include <unistd.h>
25#include <sys/param.h>
26#include <sys/resource.h>
27#include <pthreadP.h>
28#include <atomic.h>
29#include <ldsodefs.h>
30#include <tls.h>
7a775e6b 31#include <list.h>
76a50749
UD
32#include <fork.h>
33#include <version.h>
bf293afe 34#include <shlib-compat.h>
2c0b891a 35#include <smp.h>
2edb61e3 36#include <lowlevellock.h>
a2f0363f 37#include <futex-internal.h>
f8de5057 38#include <kernel-features.h>
9090848d 39#include <libc-pointer-arith.h>
7cea6212 40#include <pthread-pids.h>
6310e6be 41#include <pthread_mutex_conf.h>
76a50749 42
c6aab2cb
RM
43#ifndef TLS_MULTIPLE_THREADS_IN_TCB
44/* Pointer to the corresponding variable in libc. */
45int *__libc_multiple_threads_ptr attribute_hidden;
46#endif
47
76a50749
UD
48/* Size and alignment of static TLS block. */
49size_t __static_tls_size;
923e02ea 50size_t __static_tls_align_m1;
76a50749 51
0f6699ea
UD
52#ifndef __ASSUME_SET_ROBUST_LIST
53/* Negative if we do not have the system call and we can use it. */
54int __set_robust_list_avail;
55# define set_robust_list_not_avail() \
56 __set_robust_list_avail = -1
57#else
58# define set_robust_list_not_avail() do { } while (0)
59#endif
60
76a50749 61/* Version of the library, used in libthread_db to detect mismatches. */
e3b22ad3 62static const char nptl_version[] __attribute_used__ = VERSION;
76a50749
UD
63
64
/* In the shared build __nptl_set_robust stays local; statically it is
   defined elsewhere and only declared here.  */
#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
/* Table of libpthread entry points handed to libc via
   __libc_pthread_init so libc can forward calls without a hard
   dependency on libpthread.  */
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
  /* Register SELF's robust mutex list head with the kernel so that
     robust mutexes held at thread death are handed over cleanly.
     Best effort: the return value is deliberately ignored.  */
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
		    sizeof (struct robust_list_head));
#endif
}
#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set but if the signal is directly send (internally or
	 from another process) is has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      /* CAS lost the race; retry with the value another thread stored.  */
      oldval = curval;
    }
}
#endif
#ifdef SIGSETXID
/* Command block describing the setxid syscall every thread must run;
   set up by the __nptl_setxid broadcast machinery before signaling.  */
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  /* Perform the id-changing syscall on this thread and report any
     error back through the shared command block.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
				 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
					  flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  /* Last thread to finish wakes the initiating thread.  */
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif
2edb61e3
UD
250
251
b1531183
UD
252/* When using __thread for this, we do it in libc so as not
253 to give libpthread its own TLS segment just for this. */
254extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
255
76a50749 256
0ecf9c10 257/* This can be set by the debugger before initialization is complete. */
e965d514 258static bool __nptl_initial_report_events __attribute_used__;
0ecf9c10 259
76a50749 260void
4e9b5995 261__pthread_initialize_minimal_internal (void)
76a50749 262{
76a50749 263 /* Minimal initialization of the thread descriptor. */
75cddafe 264 struct pthread *pd = THREAD_SELF;
7cea6212 265 __pthread_initialize_pids (pd);
76a50749
UD
266 THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
267 THREAD_SETMEM (pd, user_stack, true);
76a50749 268
0f6699ea 269 /* Initialize the robust mutex data. */
992328e5 270 {
06be6368 271#if __PTHREAD_MUTEX_HAVE_PREV
992328e5 272 pd->robust_prev = &pd->robust_head;
0f6699ea 273#endif
992328e5 274 pd->robust_head.list = &pd->robust_head;
0f6699ea 275#ifdef __NR_set_robust_list
992328e5
RM
276 pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
277 - offsetof (pthread_mutex_t,
278 __data.__list.__next));
279 INTERNAL_SYSCALL_DECL (err);
280 int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
281 sizeof (struct robust_list_head));
282 if (INTERNAL_SYSCALL_ERROR_P (res, err))
0f6699ea 283#endif
992328e5
RM
284 set_robust_list_not_avail ();
285 }
0f6699ea 286
675620f7
UD
287 /* Set initial thread's stack block from 0 up to __libc_stack_end.
288 It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
289 purposes this is good enough. */
290 THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
291
fde89ad0 292 /* Initialize the list of all running threads with the main thread. */
a4548cea 293 INIT_LIST_HEAD (&__stack_user);
d4f64e1a 294 list_add (&pd->list, &__stack_user);
76a50749 295
0ecf9c10
RM
296 /* Before initializing __stack_user, the debugger could not find us and
297 had to set __nptl_initial_report_events. Propagate its setting. */
298 THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
76a50749 299
327ae257
RM
300#if defined SIGCANCEL || defined SIGSETXID
301 struct sigaction sa;
302 __sigemptyset (&sa.sa_mask);
303
304# ifdef SIGCANCEL
76a50749
UD
305 /* Install the cancellation signal handler. If for some reason we
306 cannot install the handler we do not abort. Maybe we should, but
307 it is only asynchronous cancellation which is affected. */
a1ed6b4c
UD
308 sa.sa_sigaction = sigcancel_handler;
309 sa.sa_flags = SA_SIGINFO;
76a50749 310 (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
327ae257 311# endif
76a50749 312
327ae257 313# ifdef SIGSETXID
2edb61e3
UD
314 /* Install the handle to change the threads' uid/gid. */
315 sa.sa_sigaction = sighandler_setxid;
316 sa.sa_flags = SA_SIGINFO | SA_RESTART;
2edb61e3 317 (void) __libc_sigaction (SIGSETXID, &sa, NULL);
327ae257 318# endif
2edb61e3 319
708bfb9a 320 /* The parent process might have left the signals blocked. Just in
f006d3a0
UD
321 case, unblock it. We reuse the signal mask in the sigaction
322 structure. It is already cleared. */
327ae257 323# ifdef SIGCANCEL
f006d3a0 324 __sigaddset (&sa.sa_mask, SIGCANCEL);
327ae257
RM
325# endif
326# ifdef SIGSETXID
708bfb9a 327 __sigaddset (&sa.sa_mask, SIGSETXID);
327ae257 328# endif
992328e5
RM
329 {
330 INTERNAL_SYSCALL_DECL (err);
331 (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
332 NULL, _NSIG / 8);
333 }
327ae257 334#endif
f006d3a0 335
b399707e
RM
336 /* Get the size of the static and alignment requirements for the TLS
337 block. */
338 size_t static_tls_align;
339 _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);
340
341 /* Make sure the size takes all the alignments into account. */
342 if (STACK_ALIGN > static_tls_align)
343 static_tls_align = STACK_ALIGN;
344 __static_tls_align_m1 = static_tls_align - 1;
345
346 __static_tls_size = roundup (__static_tls_size, static_tls_align);
76a50749 347
4e9b5995
CD
348 /* Determine the default allowed stack size. This is the size used
349 in case the user does not specify one. */
350 struct rlimit limit;
c5c2b7c3 351 if (__getrlimit (RLIMIT_STACK, &limit) != 0
4e9b5995
CD
352 || limit.rlim_cur == RLIM_INFINITY)
353 /* The system limit is not usable. Use an architecture-specific
354 default. */
355 limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
356 else if (limit.rlim_cur < PTHREAD_STACK_MIN)
357 /* The system limit is unusably small.
358 Use the minimal size acceptable. */
359 limit.rlim_cur = PTHREAD_STACK_MIN;
e23872c8 360
4e9b5995
CD
361 /* Make sure it meets the minimum size that allocate_stack
362 (allocatestack.c) will demand, which depends on the page size. */
363 const uintptr_t pagesz = GLRO(dl_pagesize);
364 const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
365 if (limit.rlim_cur < minstack)
366 limit.rlim_cur = minstack;
e23872c8 367
4e9b5995 368 /* Round the resource limit up to page size. */
6d03458e 369 limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
61dd6208 370 lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
e903a713
SP
371 __default_pthread_attr.stacksize = limit.rlim_cur;
372 __default_pthread_attr.guardsize = GLRO (dl_pagesize);
61dd6208 373 lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
76a50749 374
b1531183 375#ifdef SHARED
334fcf2a
UD
376 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
377 keep the lock count from the ld.so implementation. */
4d17e683
AS
378 GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
379 GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
334fcf2a
UD
380 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
381 GL(dl_load_lock).mutex.__data.__count = 0;
382 while (rtld_lock_count-- > 0)
4d17e683 383 __pthread_mutex_lock (&GL(dl_load_lock).mutex);
54ee14b3
UD
384
385 GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
b1531183
UD
386#endif
387
adc12574
UD
388 GL(dl_init_static_tls) = &__pthread_init_static_tls;
389
7adefea8
UD
390 GL(dl_wait_lookup_done) = &__wait_lookup_done;
391
76a50749 392 /* Register the fork generation counter with the libc. */
5a03acfe
UD
393#ifndef TLS_MULTIPLE_THREADS_IN_TCB
394 __libc_multiple_threads_ptr =
395#endif
396 __libc_pthread_init (&__fork_generation, __reclaim_stacks,
397 ptr_pthread_functions);
2c0b891a
UD
398
399 /* Determine whether the machine is SMP or not. */
400 __is_smp = is_smp_system ();
6310e6be
KW
401
402#if HAVE_TUNABLES
403 __pthread_tunables_init ();
404#endif
76a50749 405}
2ae920ed
UD
406strong_alias (__pthread_initialize_minimal_internal,
407 __pthread_initialize_minimal)
2c1094bd
UD
408
409
410size_t
411__pthread_get_minstack (const pthread_attr_t *attr)
412{
630f4cc3 413 return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
2c1094bd 414}