/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>

#include <shlib-compat.h>

#include <stap-probe.h>

/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>

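/* Search the lists of stacks in use (stack_used for stacks the
   library allocated, __stack_user for user-supplied stacks) for the
   descriptor PD and return it if it is still registered there,
   otherwise NULL.  Both lists are protected by stack_cache_lock.  */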
struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}

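/* Thread-specific data lives in a two-level table: the thread
   descriptor holds an array of pointers (specific) to second-level
   blocks of PTHREAD_KEY_2NDLEVEL_SIZE entries, the first of which
   (specific_1stblock) is embedded in the descriptor itself.  Each
   entry stores the value together with the sequence number of the
   key it belongs to, so values left over from deleted keys can be
   recognized and skipped.  */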
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}

/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}

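/* Entry point of every thread started by create_thread via clone.
   ARG is the new thread's struct pthread.  It runs the user-supplied
   start routine and then performs all per-thread teardown before
   terminating the kernel thread.  */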
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

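  /* Register this thread's robust mutex list with the kernel so that,
     if the thread dies while holding a robust mutex, the kernel can
     mark the mutex as owner-dead and wake up a waiter.  */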
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp instead.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
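  /* A nonzero return means we got here a second time: the thread was
     cancelled or called pthread_exit and the unwind machinery jumped
     back to this buffer.  In that case skip the start routine and go
     straight to the cleanup code below.  */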
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructors for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread, we terminate the process now.  We
     do not notify the debugger; it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached, free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we have done so.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
  return 0;
}

int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
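  /* If no attributes were passed, use a snapshot of the process-wide
     default attributes taken under __default_pthread_attr_lock.  The
     cpuset needs a private copy: the structure assignment only copies
     the pointer, and the global default may change (e.g. via
     pthread_setattr_default_np) once the lock is dropped.  */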
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }

  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;

  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);

          retval = EINVAL;
          goto out;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* Start the thread.  */
  retval = create_thread (pd, iattr, STACK_VARIABLES_ARGS);

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);

#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)