/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* createthread.c defines this function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread is obliged to initialize PD->stopped_start.  It
   should be true if the STOPPED_START parameter is true, or if
   create_thread needs the new thread to synchronize at startup for
   some other implementation reason.  If PD->stopped_start will be
   true, then create_thread is obliged to perform the operation
   "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   cancelled before calling user code (*PD->start_routine), in which
   case it is responsible for doing its own cleanup.  */

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>
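
/* A minimal sketch of the contract documented above, assuming a
   clone-based port.  The real create_thread is architecture specific
   and lives in the sysdeps createthread.c; this disabled block only
   illustrates the stopped_start/locking obligation.  */
#if 0
static int
create_thread (struct pthread *pd, const struct pthread_attr *attr,
               bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran)
{
  /* Record whether the new thread must synchronize at startup, and if
     so take its lock before it can run, so that start_thread (below)
     blocks on it until the parent releases it.  */
  pd->stopped_start = stopped_start;
  if (stopped_start)
    lll_lock (pd->lock, LLL_PRIVATE);

  /* The architecture-specific thread start (e.g. a clone call) would
     go here.  On failure, return an errno code and set *THREAD_RAN to
     true iff the thread actually began executing.  */
  *thread_ran = true;
  return 0;
}
#endif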


struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}

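/* A user-level illustration (hypothetical test program, not part of
   this file) of what the loop above implements: destructors registered
   with pthread_key_create run at thread exit, and the seq check means
   a key deleted in the meantime gets no callback.  */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destr (void *data)
{
  puts ("destructor ran");
  free (data);
}

static void *
tf (void *arg)
{
  pthread_setspecific (key, malloc (16));
  return NULL;               /* Thread exit runs __nptl_deallocate_tsd.  */
}

int
main (void)
{
  pthread_t th;
  pthread_key_create (&key, destr);
  pthread_create (&th, NULL, tf, NULL);
  pthread_join (th, NULL);   /* Prints "destructor ran".  */
  return 0;
}
#endif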

/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

#ifdef SIGCANCEL
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
#endif

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

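  /* A user-level illustration (hypothetical test program, not part of
     this file) of the unwind buffer installed above: it is what lets a
     cancelled thread run its pthread_cleanup_push handlers before the
     code below tears the thread down.  */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void
cleanup (void *arg)
{
  printf ("cleanup: %s\n", (const char *) arg);
}

static void *
tf (void *arg)
{
  pthread_cleanup_push (cleanup, (void *) "ran on cancellation");
  for (;;)
    sleep (1);                 /* sleep is a cancellation point.  */
  pthread_cleanup_pop (0);
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_create (&th, NULL, tf, NULL);
  sleep (1);
  pthread_cancel (th);         /* Triggers the unwind machinery.  */
  pthread_join (th, NULL);     /* Prints "cleanup: ran on cancellation".  */
  return 0;
}
#endif
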
  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, since that might just irritate it if
     there is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

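  /* A user-level illustration (hypothetical test program, not part of
     this file) of what the walk above, or the kernel's robust-list
     handling, buys the application: the next locker of a robust mutex
     whose owner died gets EOWNERDEAD instead of deadlocking.  */
#if 0
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static void *
tf (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;                 /* Exit while holding M.  */
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);

  pthread_t th;
  pthread_create (&th, NULL, tf, NULL);
  pthread_join (th, NULL);

  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    {
      puts ("previous owner died");
      pthread_mutex_consistent (&m);   /* Mark the state recovered.  */
    }
  pthread_mutex_unlock (&m);
  return 0;
}
#endif
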
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

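  /* A user-level illustration (hypothetical, not part of this file) of
     the MADV_DONTNEED trick used above: the pages are given back to
     the kernel but the mapping itself stays valid for reuse.  */
#if 0
#include <sys/mman.h>
#include <string.h>

int
main (void)
{
  size_t len = 1 << 20;
  char *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  memset (p, 'x', len);              /* Pages become resident.  */
  madvise (p, len, MADV_DONTNEED);   /* Contents discarded...  */
  /* ...but P is still mapped; reads now return zero-filled pages.  */
  munmap (p, len);
  return 0;
}
#endif
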
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we have done so.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the thread
     is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

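  /* A user-level illustration (hypothetical, not part of this file) of
     where the default attribute consulted above comes from: it can be
     changed with pthread_setattr_default_np (available in glibc 2.18
     and later).  */
#if 0
#define _GNU_SOURCE
#include <pthread.h>

int
set_default_stack_size (size_t sz)
{
  pthread_attr_t a;
  pthread_attr_init (&a);
  pthread_attr_setstacksize (&a, sz);
  /* Subsequent pthread_create (..., NULL, ...) calls pick this up
     via __default_pthread_attr.  */
  int err = pthread_setattr_default_np (&a);
  pthread_attr_destroy (&a);
  return err;
}
#endif
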
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  bool thread_ran = false;

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      /* Create the thread.  We always create the thread stopped
         so that it does not get far before we tell the debugger.  */
      retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS,
                              &thread_ran);
      if (retval == 0)
        {
          /* create_thread should have set this so that the logic below can
             test it.  */
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS,
                            &thread_ran);

  if (__glibc_unlikely (retval != 0))
    {
      /* If thread creation "failed", that might mean that the thread got
         created and ran a little--short of running user code--but then
         create_thread cancelled it.  In that case, the thread will do all
         its own cleanup just like a normal thread exit after a successful
         creation would do.  */

      if (thread_ran)
        assert (pd->stopped_start);
      else
        {
          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      if (pd->stopped_start)
        /* The thread blocked on this lock either because we're doing TD_CREATE
           event reporting, or for some other reason that create_thread chose.
           Now let it run free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
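
/* A user-level illustration (hypothetical test program, not part of
   this file) of the interface defined above.  Note the error-code
   translation in __pthread_create_2_1: a stack-allocation failure is
   reported to the caller as EAGAIN, not ENOMEM.  */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *
tf (void *arg)
{
  puts ("hello from the new thread");
  return NULL;
}

int
main (void)
{
  pthread_t th;
  int err = pthread_create (&th, NULL, tf, NULL);
  if (err != 0)                /* Returns an errno value, not -1.  */
    {
      fprintf (stderr, "pthread_create failed: %d\n", err);
      return 1;
    }
  pthread_join (th, NULL);
  return 0;
}
#endif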


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size, and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
\f
/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"
\f
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)