/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
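                /* This second-level block was never allocated; skip the
                   key slots it would have covered.  */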
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times; a destructor
         may itself store new thread-specific values, which would require
         yet another round.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
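  /* setjmp returns a second time, with a nonzero value, when the thread is
     unwound by pthread_exit or by acting on a cancellation request; in that
     case the start routine is skipped and only the cleanup below runs.  */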
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
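  /* In the static case __call_tls_dtors is only a weak reference; skip the
     call if the symbol was not linked in.  */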
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
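  /* Pages released with MADV_DONTNEED can be reclaimed by the kernel; they
     read back as zero-filled if this part of the stack is touched again.  */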
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In that case wait until we have done so.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
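      /* The global default attribute owns its affinity mask, and another
         thread may replace or free it once the lock is dropped, so duplicate
         it while the lock is still held.  */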
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);

          retval = EINVAL;
          goto out;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* Start the thread.  */
  retval = create_thread (pd, iattr, STACK_VARIABLES_ARGS);

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)