/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-diag.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include "libioP.h"
#include <sys/single_threaded.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

#ifdef SHARED
/* This variable is used to access _rtld_global from libthread_db.  If
   GDB loads libpthread before ld.so, it is not possible to resolve
   _rtld_global directly during libpthread initialization.  */
static struct rtld_global *__nptl_rtld_global __attribute_used__
  = &_rtld_global;
#endif

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (refers to the value of the 'struct pthread *pd' function argument)
   is critically important in determining exactly which operations are
   allowed and which are not and when, particularly when it comes to the
   implementation of pthread_create, pthread_join, pthread_detach, and
   other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state,
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.

   It is important to point out that PD->lock is being used both
   similar to a one-shot semaphore and subsequently as a mutex.  The
   lock is taken in the parent to force the child to wait, and then the
   child releases the lock.  However, this semaphore-like effect is used
   only for synchronizing the parent and child.  After startup the lock
   is used like a mutex to create a critical section during which a
   single owner modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED),
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c), (d), or (e) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) and (e) below.

   (c) If the detached thread setup failed and THREAD_RAN is true, then
       the creating thread releases ownership to the new thread by
       sending a cancellation signal.  All threads set THREAD_RAN to
       true as quickly as possible after returning from the OS kernel's
       thread creation routine.

   (d) If the joinable thread setup failed and THREAD_RAN is true, then
       the creating thread retains ownership of PD and must clean up
       state.  Ownership cannot be released to the process via the
       return of pthread_create since a non-zero result entails PD is
       undefined and therefore cannot be joined to free the resources.
       We privately call pthread_join on the thread to finish handling
       the resource shutdown (or at least we should, see bug 19511).

   (e) If the thread creation failed and THREAD_RAN is false, then the
       creating thread retains ownership of PD and must clean up state.
       No waiting for the new thread is required because it never
       started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger as of (December 10th 2015) commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 no longer uses
   td_thr_event_getmsg and several other related nptl_db interfaces.  The
   principal reason for this is that nptl_db does not support non-stop
   mode where other threads can run concurrently and modify runtime
   structures currently in use by the debugger and the nptl_db
   interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how long after is unspecified, but it
     should be as short as possible.
*/

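/* The "one-shot semaphore" use of PD->lock described above can be
   illustrated outside of libc.  The following is a hypothetical
   user-space analogue (not part of this file, and not built): the
   creator holds a mutex across thread creation, applies attributes,
   then unlocks; the new thread's first action is to pass through that
   mutex before running user code.  */
#if 0 /* Illustrative sketch only.  */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t startup_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
start (void *arg)
{
  /* Wait until the creator has finished applying attributes,
     mirroring the acquire/release of PD->lock in START_THREAD_DEFN.  */
  pthread_mutex_lock (&startup_lock);
  pthread_mutex_unlock (&startup_lock);
  puts ("attributes are visible now");
  return arg;
}

int
main (void)
{
  pthread_t thr;
  pthread_mutex_lock (&startup_lock);   /* Force the child to wait.  */
  pthread_create (&thr, NULL, start, NULL);
  /* ... apply scheduling parameters etc. to the new thread here ...  */
  pthread_mutex_unlock (&startup_lock); /* Let it run free.  */
  return pthread_join (thr, NULL);
}
#endif
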
/* CREATE THREAD NOTES:

   createthread.c defines the create_thread function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with START_THREAD_DEFN in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they don't participate in startup synchronization, but
   given that the lock is present already and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   canceled before calling user code (*PD->start_routine).  */
static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>


struct pthread *
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  list_for_each (entry, &GL (dl_stack_used))
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &GL (dl_stack_user))
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  return result;
}

/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}

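/* The destructor loop above implements a documented POSIX behavior: if
   a key's destructor stores a new non-NULL value, the pass is repeated,
   but only up to PTHREAD_DESTRUCTOR_ITERATIONS times.  A hypothetical
   standalone demonstration (not part of this file, not built):  */
#if 0 /* Illustrative sketch only.  */
#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void
destr (void *data)
{
  /* Re-arm the slot; the deallocation loop will call us again, at
     most PTHREAD_DESTRUCTOR_ITERATIONS times in total.  */
  printf ("destructor pass, value %p\n", data);
  pthread_setspecific (key, data);
}

static void *
start (void *arg)
{
  pthread_setspecific (key, (void *) 0x1);
  return NULL;          /* TSD destructors run during thread exit.  */
}

int
main (void)
{
  pthread_t thr;
  pthread_key_create (&key, destr);
  pthread_create (&thr, NULL, start, NULL);
  return pthread_join (thr, NULL);
}
#endif
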

/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}

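/* Ownership case (3) from the CONCURRENCY NOTES is driven by
   pthread_detach: once detached, the exiting thread itself frees the
   TCB via __free_tcb above, so the creator must stop using the handle.
   A hypothetical sketch of the caller side (not part of this file, not
   built):  */
#if 0 /* Illustrative sketch only.  */
#include <pthread.h>
#include <unistd.h>

static void *
start (void *arg)
{
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  pthread_create (&thr, NULL, start, NULL);

  /* Release ownership of the thread's resources to the thread itself.  */
  pthread_detach (thr);

  /* From here on, THR must not be joined; after the thread exits, the
     descriptor may already have been freed and reused.  */
  sleep (1);
  return 0;
}
#endif
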
/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

#ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
#endif
    {
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* This call results in a -Wstringop-overflow warning because struct
     pthread_unwind_buf is smaller than jmp_buf.  setjmp and longjmp
     do not use anything beyond the common prefix (they never access
     the saved signal mask), so that is a false positive.  */
  DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overflow=");
#endif
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  DIAG_POP_NEEDS_COMMENT;

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  __libc_signal_restore_set (&pd->sigmask);

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      /* We are either in (a) or (b), and in either case we either own
         PD already (2) or are about to own PD (1), and so our only
         restriction would be that we can't free PD until we know we
         have ownership (see CONCURRENCY NOTES above).  */
      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);

          /* We have ownership of PD now.  */

          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the C11 thread start routine is cast
             to an incorrect type in the __pthread_create_2_1 call.
             However, it is cast back to the correct type here, so the
             behavior of the call is well-defined (it is assumed that
             pointers to void can represent all values of int).  */
          int (*start) (void *) = (int (*) (void *)) pd->start_routine;
          ret = (void *) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# if __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  if (!pd->user_stack)
    advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                        pd->guardsize);

  if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we have done so.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
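
/* The robust-list walk above (or the kernel's equivalent) is what makes
   a lock held by a dying thread recoverable.  A hypothetical standalone
   demonstration of the user-visible side (not part of this file, not
   built):  */
#if 0 /* Illustrative sketch only.  */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t m;

static void *
die_locked (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;                  /* Exit without unlocking.  */
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);

  pthread_t thr;
  pthread_create (&thr, NULL, die_locked, NULL);
  pthread_join (thr, NULL);

  /* The dead owner is reported exactly once.  */
  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    {
      puts ("previous owner died; state needs recovery");
      pthread_mutex_consistent (&m);
    }
  pthread_mutex_unlock (&m);
  return 0;
}
#endif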
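/* The pd->c11 branch in START_THREAD_DEFN round-trips a C11 thrd_start_t
   through the POSIX void *(*)(void *) slot.  A hypothetical sketch of
   the cast in isolation (not part of this file, not built):  */
#if 0 /* Illustrative sketch only.  */
#include <stdint.h>
#include <stdio.h>

static int
c11_start (void *arg)
{
  return 42;
}

int
main (void)
{
  /* Store the C11 start routine in a POSIX-shaped slot...  */
  void *(*slot) (void *) = (void *(*) (void *)) c11_start;

  /* ...and cast it back before calling, as START_THREAD_DEFN does.
     Calling through the original type keeps the behavior defined.  */
  int (*start) (void *) = (int (*) (void *)) slot;
  int status = start (NULL);

  /* The int result travels back as a pointer-sized value.  */
  void *ret = (void *) (uintptr_t) status;
  printf ("status %d, as pointer %p\n", status, ret);
  return 0;
}
#endif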

/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  /* Avoid a data race in the multi-threaded case.  */
  if (__libc_single_threaded)
    __libc_single_threaded = 0;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  union pthread_attr_transparent default_attr;
  bool destroy_default_attr = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      int ret = __pthread_getattr_default_np (&default_attr.external);
      if (ret != 0)
        return ret;
      destroy_default_attr = true;
      iattr = &default_attr.internal;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Set up the tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed
     at any time.  PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false; bool thread_ran = false;

  /* Block all signals, so that the new thread starts out with
     signals disabled.  This avoids race conditions in the thread
     startup.  */
  sigset_t original_sigmask;
  __libc_signal_block_all (&original_sigmask);

  if (iattr->extension != NULL && iattr->extension->sigmask_set)
    /* Use the signal mask in the attribute.  The internal signals
       have already been filtered by the public
       pthread_attr_setsigmask_np interface.  */
    pd->sigmask = iattr->extension->sigmask;
  else
    {
      /* Conceptually, the new thread needs to inherit the signal mask
         of this thread.  Therefore, it needs to restore the saved
         signal mask of this thread, so save it in the startup
         information.  */
      pd->sigmask = original_sigmask;

      /* Reset the cancellation signal mask in case this thread is
         running cancellation.  */
      __sigdelset (&pd->sigmask, SIGCANCEL);
    }

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start,
                              STACK_VARIABLES_ARGS, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start,
                            STACK_VARIABLES_ARGS, &thread_ran);

  /* Return to the previous signal mask, after creating the new
     thread.  */
  __libc_signal_restore_set (&original_sigmask);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) or (d) and we may not have PD ownership (see
           CONCURRENCY NOTES above).  We can assert that STOPPED_START
           must have been true because thread creation didn't fail, but
           thread attribute setting did.  */
        /* See bug 19511 which explains why doing nothing here is a
           resource leak for a joinable thread.  */
        assert (stopped_start);
      else
        {
          /* State (e) and we have ownership of PD (see CONCURRENCY
             NOTES above).  */

          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (destroy_default_attr)
    __pthread_attr_destroy (&default_attr.external);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
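
/* The iattr->extension->sigmask path above is what the public
   pthread_attr_setsigmask_np interface (added in glibc 2.32) feeds.  A
   hypothetical sketch of the caller side (not part of this file, not
   built): the new thread starts with SIGUSR1 already blocked, without
   touching the creator's own mask.  */
#if 0 /* Illustrative sketch only.  */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>

static void *
start (void *arg)
{
  /* SIGUSR1 is already blocked here; there is no startup window in
     which a signal could be delivered before the thread blocks it.  */
  return NULL;
}

int
main (void)
{
  pthread_attr_t attr;
  sigset_t set;
  pthread_attr_init (&attr);
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);
  pthread_attr_setsigmask_np (&attr, &set);

  pthread_t thr;
  pthread_create (&thr, &attr, start, NULL);
  pthread_attr_destroy (&attr);
  return pthread_join (thr, NULL);
}
#endif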


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.extension = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
\f
/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"
\f
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)