/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-diag.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include "libioP.h"
#include <sys/single_threaded.h>
#include <version.h>
#include <clone_internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Globally enabled events.  */
td_thr_events_t __nptl_threads_events;
libc_hidden_proto (__nptl_threads_events)
libc_hidden_data_def (__nptl_threads_events)

/* Pointer to descriptor with the last event.  */
struct pthread *__nptl_last_event;
libc_hidden_proto (__nptl_last_event)
libc_hidden_data_def (__nptl_last_event)

#ifdef SHARED
/* This variable is used to access _rtld_global from libthread_db.  If
   GDB loads libpthread before ld.so, it is not possible to resolve
   _rtld_global directly during libpthread initialization.  */
struct rtld_global *__nptl_rtld_global = &_rtld_global;
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
const char __nptl_version[] = VERSION;

/* This performs the initialization necessary when going from
   single-threaded to multi-threaded mode for the first time.  */
static void
late_init (void)
{
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the handler to change the threads' uid/gid.  Use
     SA_ONSTACK because the signal may be sent to threads that are
     running with custom stacks.  (This is less likely for
     SIGCANCEL.)  */
  sa.sa_sigaction = __nptl_setxid_sighandler;
  sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_UNBLOCK, &sa.sa_mask,
                         NULL, __NSIG_BYTES);
}

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (refers to the value of the 'struct pthread *pd' function argument)
   is critically important in determining exactly which operations are
   allowed and which are not and when, particularly when it comes to the
   implementation of pthread_create, pthread_join, pthread_detach, and
   other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.

   It is important to point out that PD->lock is used both
   as a one-shot semaphore and subsequently as a mutex.  The
   lock is taken in the parent to force the child to wait, and then the
   child releases the lock.  However, this semaphore-like effect is used
   only for synchronizing the parent and child.  After startup the lock
   is used like a mutex to create a critical section during which a
   single owner modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED)
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c) or (d) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) or (d) below.

   (c) If either a joinable or detached thread setup failed and THREAD_RAN
       is true, then the creating thread releases ownership to the new
       thread; the created thread sees the failed setup through the
       PD->setup_failed member, releases the PD ownership, and exits.
       The creating thread is then responsible for cleaning up the
       allocated resources.  THREAD_RAN is local to the creating thread
       and indicates whether the thread actually started, which
       distinguishes a setup failure from a creation failure.

   (d) If the thread creation failed and THREAD_RAN is false (meaning
       ARCH_CLONE has failed), then the creating thread retains ownership
       of PD and must clean up the allocated resources.  No waiting for the
       new thread is required because it never started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger, as of commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 (December 10th 2015), no
   longer uses td_thr_event_getmsg and several other related nptl_db
   interfaces.  The principal reason for this is that nptl_db does not
   support non-stop mode, where other threads can run concurrently and
   modify runtime structures currently in use by the debugger and the
   nptl_db interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how long after is unspecified, but it
     should be as soon as possible.

*/

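/* The following compiled-out sketch illustrates the one-shot semaphore
   use of PD->lock described above for the stopped-start case.  The two
   helper functions are hypothetical and exist only for illustration;
   the real code paths are create_thread and start_thread below.  */
#if 0
static void
creator_side (struct pthread *pd)
{
  lll_lock (pd->lock, LLL_PRIVATE);    /* Taken before cloning the child.  */
  /* ... create the thread, then apply scheduling parameters, affinity,
     and TD_CREATE event reporting while the child is blocked ...  */
  lll_unlock (pd->lock, LLL_PRIVATE);  /* Release the child.  */
}

static void
child_side (struct pthread *pd)
{
  if (pd->stopped_start)
    {
      lll_lock (pd->lock, LLL_PRIVATE);    /* Wait for the creator.  */
      lll_unlock (pd->lock, LLL_PRIVATE);  /* Semaphore effect ends here;
                                              afterwards PD->lock acts as
                                              an ordinary mutex.  */
    }
  /* ... call the user-provided start routine ...  */
}
#endif
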
/* CREATE THREAD NOTES:

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock, which synchronizes-with create_thread in the
   child thread, which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they don't participate in startup synchronization, but
   given that the lock is present already and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up, even though it
   has not yet called the user code (*PD->start_routine).  */

static int _Noreturn start_thread (void *arg);

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, void *stackaddr,
                          size_t stacksize, bool *thread_ran)
{
  /* Determine whether the newly created thread has to be started
     stopped since we have to set the scheduling parameters or set the
     affinity.  */
  bool need_setaffinity = (attr != NULL && attr->extension != NULL
                           && attr->extension->cpuset != 0);
  if (attr != NULL
      && (__glibc_unlikely (need_setaffinity)
          || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)))
    *stopped_start = true;

  pd->stopped_start = *stopped_start;
  if (__glibc_unlikely (*stopped_start))
    lll_lock (pd->lock, LLL_PRIVATE);

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
        These flags select semantics with shared address space and
        file descriptors according to what POSIX requires.

     CLONE_SIGHAND, CLONE_THREAD
        These flags select the POSIX signal semantics and various
        other kinds of sharing (itimers, POSIX timers, etc.).

     CLONE_SETTLS
        The sixth parameter to CLONE determines the TLS area for the
        new thread.

     CLONE_PARENT_SETTID
        The kernel writes the thread ID of the newly created thread
        into the location pointed to by the fifth parameter to CLONE.

        Note that it would be semantically equivalent to use
        CLONE_CHILD_SETTID but it would be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
        The kernel clears the thread ID of a thread that has called
        sys_exit() in the location pointed to by the seventh parameter
        to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.  */
  const int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM
                           | CLONE_SIGHAND | CLONE_THREAD
                           | CLONE_SETTLS | CLONE_PARENT_SETTID
                           | CLONE_CHILD_CLEARTID
                           | 0);

  TLS_DEFINE_INIT_TP (tp, pd);

  struct clone_args args =
    {
      .flags = clone_flags,
      .pidfd = (uintptr_t) &pd->tid,
      .parent_tid = (uintptr_t) &pd->tid,
      .child_tid = (uintptr_t) &pd->tid,
      .stack = (uintptr_t) stackaddr,
      .stack_size = stacksize,
      .tls = (uintptr_t) tp,
    };
  int ret = __clone_internal (&args, &start_thread, pd);
  if (__glibc_unlikely (ret == -1))
    return errno;

  /* It's started now, so if we fail below, we'll have to let it clean itself
     up.  */
  *thread_ran = true;

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (attr != NULL)
    {
      /* Set the affinity mask if necessary.  */
      if (need_setaffinity)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setaffinity, pd->tid,
                                           attr->extension->cpusetsize,
                                           attr->extension->cpuset);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setscheduler, pd->tid,
                                           pd->schedpolicy, &pd->schedparam);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }
    }

  return 0;
}
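
/* A compiled-out sketch of the caller contract for create_thread above,
   assuming a simplified caller; the real driver is __pthread_create_2_1
   below, which additionally handles event reporting and signal masks.  */
#if 0
  bool stopped_start = false, thread_ran = false;
  int err = create_thread (pd, iattr, &stopped_start, stackaddr,
                           stacksize, &thread_ran);
  if (err != 0)
    {
      if (thread_ran)
        {
          /* State (c): the new thread owns PD; tell it to bail out.  */
          pd->setup_failed = 1;
          lll_unlock (pd->lock, LLL_PRIVATE);
        }
      /* State (d) (or after joining the failed thread in (c)): clean up
         and translate the error code as the notes require.  */
      err = err == ENOMEM ? EAGAIN : err;
    }
#endif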

/* Local function to start the thread and handle cleanup.  */
static int _Noreturn
start_thread (void *arg)
{
  struct pthread *pd = arg;

  /* We are either in (a) or (b), and in either case we either own PD already
     (2) or are about to own PD (1), and so our only restriction would be that
     we can't free PD until we know we have ownership (see CONCURRENCY NOTES
     above).  */
  if (pd->stopped_start)
    {
      bool setup_failed = false;

      /* Get the lock the parent locked to force synchronization.  */
      lll_lock (pd->lock, LLL_PRIVATE);

      /* We have ownership of PD now.  For detached threads with setup
         failure we set it joinable so the creating thread can synchronously
         join and free any resources prior to returning to the
         pthread_create caller.  */
      setup_failed = pd->setup_failed == 1;
      if (setup_failed)
        pd->joinid = NULL;

      /* And give it up right away.  */
      lll_unlock (pd->lock, LLL_PRIVATE);

      if (setup_failed)
        goto out;
    }

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

#ifndef __ASSUME_SET_ROBUST_LIST
  if (__nptl_set_robust_list_avail)
#endif
    {
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* This call results in a -Wstringop-overflow warning because struct
     pthread_unwind_buf is smaller than jmp_buf.  setjmp and longjmp
     do not use anything beyond the common prefix (they never access
     the saved signal mask), so that is a false positive.  */
  DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overflow=");
#endif
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  DIAG_POP_NEEDS_COMMENT;

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  __libc_signal_restore_set (&pd->sigmask);

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the c11 thread start is cast to an
             incorrect type on the __pthread_create_2_1 call, however it
             is cast back to the correct one so the call behavior is
             well-defined (it is assumed that pointers to void are able
             to represent all values of int).  */
          int (*start) (void *) = (int (*) (void *)) pd->start_routine;
          ret = (void *) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }

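  /* Compiled-out sketch of the C11 cast round-trip described above: a
     thrd_create-style entry point returning int is stored through the
     void *-returning start_routine slot and cast back before the call,
     which is well-defined because the call happens through the original
     function type.  The names here are illustrative only.  */
#if 0
  int c11_entry (void *);                               /* Hypothetical.  */
  void *(*stored) (void *) = (void *(*) (void *)) &c11_entry;
  int (*restored) (void *) = (int (*) (void *)) stored;
  int status = restored (arg);                  /* Valid indirect call.  */
#endif
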
  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved
     since the kernel support for them is even more recent.  */
  if (!__nptl_set_robust_list_avail
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# if __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  if (!pd->user_stack)
    advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                        pd->guardsize);

  if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* If the thread is detached, free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __nptl_free_tcb (pd);

out:
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  while (1)
    INTERNAL_SYSCALL_CALL (exit, 0);

  /* NOTREACHED */
}
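
/* Compiled-out sketch: because create_thread passes CLONE_CHILD_CLEARTID,
   the kernel stores 0 to pd->tid and wakes futex waiters once the exit
   above completes, so a joiner only needs to wait for that store.  This
   mirrors the failure-path join in __pthread_create_2_1 below.  */
#if 0
  pid_t tid;
  while ((tid = atomic_load_acquire (&pd->tid)) != 0)
    __futex_abstimed_wait_cancelable64 ((unsigned int *) &pd->tid,
                                        tid, 0, NULL, LLL_SHARED);
#endif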


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  void *stackaddr = NULL;
  size_t stacksize = 0;

  /* Avoid a data race in the multi-threaded case, and call the
     deferred initialization only once.  */
  if (__libc_single_threaded)
    {
      late_init ();
      __libc_single_threaded = 0;
    }

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  union pthread_attr_transparent default_attr;
  bool destroy_default_attr = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      int ret = __pthread_getattr_default_np (&default_attr.external);
      if (ret != 0)
        return ret;
      destroy_default_attr = true;
      iattr = &default_attr.internal;
    }

  struct pthread *pd = NULL;
  int err = allocate_stack (iattr, &pd, &stackaddr, &stacksize);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }

  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;

  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Set up tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  The PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false; bool thread_ran = false;

  /* Block all signals, so that the new thread starts out with
     signals disabled.  This avoids race conditions in the thread
     startup.  */
  sigset_t original_sigmask;
  __libc_signal_block_all (&original_sigmask);

  if (iattr->extension != NULL && iattr->extension->sigmask_set)
    /* Use the signal mask in the attribute.  The internal signals
       have already been filtered by the public
       pthread_attr_setsigmask_np interface.  */
    pd->sigmask = iattr->extension->sigmask;
  else
    {
      /* Conceptually, the new thread needs to inherit the signal mask
         of this thread.  Therefore, it needs to restore the saved
         signal mask of this thread, so save it in the startup
         information.  */
      pd->sigmask = original_sigmask;

      /* Reset the cancellation signal mask in case this thread is
         running cancellation.  */
      __sigdelset (&pd->sigmask, SIGCANCEL);
    }

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                              stacksize, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                            stacksize, &thread_ran);

  /* Return to the previous signal mask, after creating the new
     thread.  */
  __libc_signal_restore_set (&original_sigmask);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) and we do not have PD ownership (see CONCURRENCY
           NOTES above).  We can assert that STOPPED_START must have
           been true because thread creation didn't fail, but thread
           attribute setting did.  */
        {
          assert (stopped_start);
          /* Signal the created thread to release PD ownership and exit
             early so it can be joined.  */
          pd->setup_failed = 1;
          lll_unlock (pd->lock, LLL_PRIVATE);

          /* Similar to pthread_join, but since thread creation has failed at
             startup there is no need to handle all the steps.  */
          pid_t tid;
          while ((tid = atomic_load_acquire (&pd->tid)) != 0)
            __futex_abstimed_wait_cancelable64 ((unsigned int *) &pd->tid,
                                                tid, 0, NULL, LLL_SHARED);
        }

      /* State (c) or (d) and we have ownership of PD (see CONCURRENCY
         NOTES above).  */

      /* Oops, we lied for a second.  */
      atomic_decrement (&__nptl_nthreads);

      /* Free the resources.  */
      __nptl_deallocate_stack (pd);

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (destroy_default_attr)
    __pthread_attr_destroy (&default_attr.external);

  return retval;
}
versioned_symbol (libc, __pthread_create_2_1, pthread_create, GLIBC_2_34);
libc_hidden_ver (__pthread_create_2_1, __pthread_create)
#ifndef SHARED
strong_alias (__pthread_create_2_1, __pthread_create)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_1, GLIBC_2_34)
compat_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.extension = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
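
/* Compiled-out usage example (not part of the library): a plain
   application-level call that binds to the versioned pthread_create
   symbol above; pthread_join then acquires PD ownership, case (4) of
   the CONCURRENCY NOTES.  */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  printf ("hello from %s\n", (const char *) arg);
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  if (pthread_create (&thr, NULL, worker, (void *) "child") != 0)
    return 1;
  return pthread_join (thr, NULL);
}
#endif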

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)