/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
#include <pthread_mutex_conf.h>

#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the set_robust_list system call and
   therefore cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
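/* Table of libpthread entry points handed to libc via
   __libc_pthread_init (called near the end of
   __pthread_initialize_minimal_internal below); libc forwards
   pthread_* calls through this table once libpthread is loaded.  */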
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
# ifdef SIGSETXID
    .ptr__nptl_setxid = __nptl_setxid,
# endif
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


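/* Hand SELF's robust mutex list head to the kernel: on thread exit
   the kernel walks this list and marks every still-held robust mutex
   as owner-died, so other waiters see EOWNERDEAD.  */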
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
#ifdef __NR_set_robust_list
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
#endif
}


#ifdef SIGCANCEL
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  This handler could be invoked for other signals,
     and the signal could have been sent from another process.  That
     would not be correct and might even be a security problem.  Try
     to catch as many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid()
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally
         or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
#endif


#ifdef SIGSETXID
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  int result;

  /* Safety check.  This handler could be invoked for other signals,
     and the signal could have been sent from another process.  That
     would not be correct and might even be a security problem.  Try
     to catch as many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != __getpid ()
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

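  /* The thread that initiated the setxid operation waits for
     __xidcmd->cntr to drop to zero; the last handler to finish wakes
     it.  */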
  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
#endif


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

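/* Finish initializing the initial thread's descriptor and hook
   libpthread into libc and the dynamic linker; this runs from
   libpthread's initialization, before any pthread call can be made.  */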
void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
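    /* The kernel needs the constant offset from a robust list entry
       (__data.__list.__next) to the mutex's futex word (__data.__lock)
       so it can locate the futex of each enqueued mutex.  */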
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler used to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left these signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
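    /* The last argument of rt_sigprocmask is the size in bytes of the
       kernel's sigset_t, i.e. _NSIG / 8.  */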
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                             NULL, _NSIG / 8);
  }
#endif

  /* Get the size and alignment requirements of the static TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
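  /* dl_load_lock may already be held recursively at this point (for
     example if libpthread itself was brought in by dlopen), so replay
     its recursion count with __pthread_mutex_lock to keep lock and
     unlock operations balanced under the new lock functions.  */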
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

#if HAVE_TUNABLES
  __pthread_tunables_init ();
#endif
}
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol, or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
}