/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include "libioP.h"

#include <shlib-compat.h>

#include <stap-probe.h>


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (PD refers to the value of the 'struct pthread *pd' function
   argument) is critically important in determining exactly which
   operations are allowed, which are not, and when, particularly for
   the implementation of pthread_create, pthread_join, pthread_detach,
   and other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.

   It is important to point out that PD->lock is used first like a
   one-shot semaphore and subsequently as a mutex.  The lock is taken
   in the parent to force the child to wait, and then the child
   releases the lock.  However, this semaphore-like effect is used
   only for synchronizing the parent and child.  After startup the lock
   is used like a mutex to create a critical section during which a
   single owner modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED),
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c), (d), or (e) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) and (e) below.

   (c) If the detached thread setup failed and THREAD_RAN is true, then
       the creating thread releases ownership to the new thread by
       sending a cancellation signal.  All threads set THREAD_RAN to
       true as quickly as possible after returning from the OS kernel's
       thread creation routine.

   (d) If the joinable thread setup failed and THREAD_RAN is true, then
       the creating thread retains ownership of PD and must clean up its
       state.  Ownership cannot be released to the process via the
       return of pthread_create since a non-zero result entails PD is
       undefined and therefore cannot be joined to free the resources.
       We privately call pthread_join on the thread to finish handling
       the resource shutdown (or at least we should, see bug 19511).

   (e) If the thread creation failed and THREAD_RAN is false, then the
       creating thread retains ownership of PD and must clean up its
       state.  No waiting for the new thread is required because it
       never started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger, as of commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 (December 10th 2015), no
   longer uses td_thr_event_getmsg and several other related nptl_db
   interfaces.  The principal reason for this is that nptl_db does not
   support non-stop mode, where other threads can run concurrently and
   modify runtime structures currently in use by the debugger and the
   nptl_db interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how much time after the thread is created
     is unspecified, but it should be as short as possible.

*/

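/* Illustrative sketch (editorial addition, not from the original source):
   the one-shot-semaphore use of PD->lock described above amounts to the
   following handshake between the creating thread and the new thread.
   Names are simplified; the real code is in pthread_create and
   START_THREAD_DEFN below.

       creating thread                     new thread
       ---------------                     ----------
       lll_lock (pd->lock);                (not yet running)
       create_thread (...);                starts, sees stopped_start
       apply attributes to PD;             lll_lock (pd->lock);  blocks...
       lll_unlock (pd->lock);              ...acquires the lock
                                           lll_unlock (pd->lock);
                                           pd->start_routine (pd->arg);

   After startup the same lock reverts to an ordinary mutex protecting
   updates to the thread's scheduling parameters.  */
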
/* CREATE THREAD NOTES:

   createthread.c defines the create_thread function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with START_THREAD_DEFN in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they don't participate in startup synchronization, but
   given that the lock is already present and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   canceled before calling user code (*PD->start_routine).  */
static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>

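/* Illustrative sketch (editorial addition, not from the original source):
   a create_thread conforming to the contract above might look roughly
   like the following on a clone-based target; start_os_thread is a
   hypothetical stand-in for the OS-specific thread creation call (the
   real Linux version lives in sysdeps/unix/sysv/linux/createthread.c).

       static int
       create_thread (struct pthread *pd, const struct pthread_attr *attr,
                      bool *stopped_start, STACK_VARIABLES_PARMS,
                      bool *thread_ran)
       {
         if (*stopped_start)
           lll_lock (pd->lock, LLL_PRIVATE);  [lock before starting]
         pd->stopped_start = *stopped_start;
         int rc = start_os_thread (pd);       [hypothetical OS call]
         if (rc == 0)
           *thread_ran = true;                [set as soon as possible]
         return rc;                           [zero or an errno code]
       }
*/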

struct pthread *
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}

/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
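
/* Illustrative sketch (editorial addition, not from the original source):
   the iteration above is what runs user destructors registered with
   pthread_key_create.  If a destructor stores a fresh value via
   pthread_setspecific, another round is made, up to
   PTHREAD_DESTRUCTOR_ITERATIONS rounds in total.  For example:

       static pthread_key_t key;
       static void destr (void *data) { free (data); }
       ...
       pthread_key_create (&key, destr);
       pthread_setspecific (key, malloc (16));

   At thread exit, destr is called with the stored pointer.  */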

/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}

/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

#ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
#endif
    {
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  __libc_signal_restore_set (&pd->sigmask);

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      /* We are either in (a) or (b), and in either case we either own
         PD already (2) or are about to own PD (1), and so our only
         restriction would be that we can't free PD until we know we
         have ownership (see CONCURRENCY NOTES above).  */
      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);

          /* We have ownership of PD now.  */

          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the c11 thread start is cast to an
             incorrect type on the __pthread_create_2_1 call; however, it
             is cast back to the correct one so the call behavior is
             well-defined (it is assumed that pointers to void are able
             to represent all values of int).  */
          int (*start) (void *) = (int (*) (void *)) pd->start_routine;
          ret = (void *) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }
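
  /* Illustrative sketch (editorial addition, not from the original
     source): the pd->c11 branch above is what lets thrd_create funnel an
     int-returning C11 entry point through the void *-based pthread
     machinery.  Conceptually:

         int c11_main (void *arg);
         pthread_create (..., (void *(*) (void *)) c11_main, arg);
         [...later, here...]
         int rc = ((int (*) (void *)) pd->start_routine) (pd->arg);
         void *result = (void *) (uintptr_t) rc;   [widen int to void *]
  */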

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# if __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                      pd->guardsize);

  if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
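
/* Illustrative sketch (editorial addition, not from the original source):
   the CLONE_CHILD_CLEARTID behavior described above is what a joiner
   relies on.  When this thread dies, the kernel writes zero to pd->tid
   and wakes futex waiters on it, so pthread_join can, conceptually, do:

       while ((tid = atomic_load (&pd->tid)) != 0)
         futex_wait (&pd->tid, tid);   [wait until the kernel clears it]

   (The real wait loop is in the pthread_join implementation.)  */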


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool destroy_default_attr = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      int ret = __pthread_getattr_default_np ((pthread_attr_t *) &default_attr);
      if (ret != 0)
        return ret;
      destroy_default_attr = true;
      iattr = &default_attr;
    }
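
  /* Illustrative sketch (editorial addition, not from the original
     source): because the NULL-attribute path above goes through
     __pthread_getattr_default_np, defaults installed with
     pthread_setattr_default_np apply to plain pthread_create calls:

         pthread_attr_t a;
         pthread_attr_init (&a);
         pthread_attr_setstacksize (&a, 1 << 20);
         pthread_setattr_default_np (&a);
         pthread_create (&tid, NULL, fn, arg);   [uses the 1 MiB default]
  */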

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Setup tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false;
  bool thread_ran = false;

  /* Block all signals, so that the new thread starts out with
     signals disabled.  This avoids race conditions in the thread
     startup.  */
  sigset_t original_sigmask;
  __libc_signal_block_all (&original_sigmask);

  /* Conceptually, the new thread needs to inherit the signal mask of
     this thread.  Therefore, it needs to restore the saved signal
     mask of this thread, so save it in the startup information.  */
  pd->sigmask = original_sigmask;

  /* Reset the cancellation signal mask in case this thread is running
     cancellation.  */
  __sigdelset (&pd->sigmask, SIGCANCEL);

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start,
                              STACK_VARIABLES_ARGS, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start,
                            STACK_VARIABLES_ARGS, &thread_ran);

  /* Return to the previous signal mask, after creating the new
     thread.  */
  __libc_signal_restore_set (&original_sigmask);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) or (d) and we may not have PD ownership (see
           CONCURRENCY NOTES above).  We can assert that STOPPED_START
           must have been true because thread creation didn't fail, but
           thread attribute setting did.  */
        /* See bug 19511 which explains why doing nothing here is a
           resource leak for a joinable thread.  */
        assert (stopped_start);
      else
        {
          /* State (e) and we have ownership of PD (see CONCURRENCY
             NOTES above).  */

          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (destroy_default_attr)
    __pthread_attr_destroy ((pthread_attr_t *) &default_attr);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
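
/* Illustrative sketch (editorial addition, not from the original source):
   the symbol exported above is the ordinary POSIX entry point, e.g.:

       static void *
       worker (void *arg)
       {
         return arg;
       }

       pthread_t tid;
       int rc = pthread_create (&tid, NULL, worker, NULL);
       if (rc == 0)
         pthread_join (tid, NULL);   [cases (1)/(4) of the ownership notes]
*/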

#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)