/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <kernel-features.h>


/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available and
   therefore cannot be used.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the FUTEX_CLOCK_REALTIME operation is available.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifndef SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif

#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
static void nptl_freeres (void);


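/* Table of libpthread entry points that is handed to libc through
   __libc_pthread_init below so that libc can call into libpthread.  */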
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}


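/* Register SELF's robust-mutex list head with the kernel via the
   set_robust_list system call.  */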
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
}


/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;

  /* Safety check.  This function could be invoked for other signals,
     or the signal could have been sent from another process.  That is
     not correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally
         or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}


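/* Command block describing the current setxid operation; the handler
   below reads the syscall number and its arguments from it.  */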
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;

  /* Safety check.  This function could be invoked for other signals,
     or the signal could have been sent from another process.  That is
     not correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case, the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and moving any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally), so doing
     the test once, this early, is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes were introduced earlier than the FUTEX_CLOCK_REALTIME
     flag.  We don't have to run the test if we know the former are not
     supported.  This also means we know the kernel will return ENOSYS
     for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
         bit mask.  But since we will not actually wait at all the value
         is irrelevant.  Given that passing six parameters is difficult
         on some architectures, we just pass to the kernel whatever value
         the calling convention happens to supply.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler, we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     and keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
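/* The public entry point __pthread_initialize_minimal is an alias for
   the internal routine above.  */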
strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)


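/* Compute the minimum stack size for a thread created with ATTR: one
   page plus the static TLS block, PTHREAD_STACK_MIN, and the requested
   guard size.  */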
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  return (GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN
          + iattr->guardsize);
}