/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
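/* Table of entry points that is handed to libc (via __libc_pthread_init
   below) so that libc can forward calls into libpthread once the library
   has been loaded.  */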
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


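/* Register SELF's robust mutex list head with the kernel, so that on
   thread death the kernel can wake waiters on any robust mutexes the
   thread still held.  Also used to re-register the list, e.g. in the
   child after fork.  */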
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}


#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally
         or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

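      /* The compare-and-swap lost against a concurrent update of
         cancelhandling; retry with the value we actually observed.  */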
      oldval = curval;
    }
}
#endif


#ifdef SIGSETXID
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

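  /* The last thread to finish processing the command wakes the thread
     that initiated the setxid operation, which is waiting on CNTR.  */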
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
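    /* Tell the kernel where, relative to each list entry, the futex
       word of the corresponding robust mutex lives.  */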
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                             NULL, _NSIG / 8);
  }
#endif

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO(dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
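  /* ld.so may currently hold dl_load_lock recursively; transfer that
     count to the pthread mutex by re-acquiring it the same number of
     times.  */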
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

#if HAVE_TUNABLES
  __pthread_tunables_init ();
#endif
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}
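
/* For example, a caller that wants its full requested stack to remain
   usable might enlarge the request by the per-thread overhead this
   function reports beyond PTHREAD_STACK_MIN.  A sketch, where
   REQUESTED_SIZE is a hypothetical caller variable:

     size_t size = requested_size
                   + __pthread_get_minstack (&attr) - PTHREAD_STACK_MIN;
     pthread_attr_setstacksize (&attr, size);  */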