/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>

#include <shlib-compat.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
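/* The count starts at 1 for the initial thread, is incremented as part
   of creating every further thread, and is decremented in start_thread;
   the thread that drops it back to zero terminates the whole process
   (see start_thread below).  */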


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock);

  return result;
}
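
/* Note that both the list of stacks in use and the list of
   user-supplied stacks are searched, so a descriptor is found no
   matter how its stack was obtained.  __free_tcb relies on this as a
   sanity check in debugging mode.  */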


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip the key indices covered by the missing
                   second-level block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));
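
      /* More than one round can be needed because a destructor may
         itself store new thread-specific data, e.g. roughly

             static pthread_key_t key;
             static void destr (void *p) { pthread_setspecific (key, p); }

         which sets specific_used again.  POSIX permits giving up after
         PTHREAD_DESTRUCTOR_ITERATIONS rounds.  */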

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
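
/* Note that __free_tcb can be reached both from the exiting thread
   itself (for detached threads, see start_thread) and from a joiner
   via pthread_join; the atomic test-and-set of TERMINATED_BIT above
   makes sure the stack is queued for reuse exactly once.  */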


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
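
  /* set_robust_list registers PD's robust-mutex list head with the
     kernel: should this thread die while holding robust mutexes, the
     kernel walks the list, marks each futex word with FUTEX_OWNER_DIED
     and wakes a waiter.  A userland fallback for kernels without the
     syscall is further down in this function.  */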

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
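
  /* The second return from the setjmp above happens, roughly speaking,
     when the thread is cancelled or calls pthread_exit: the unwind
     machinery unwinds to the frame recorded in cleanup_jmp_buf and
     longjmps back here with a nonzero value, so execution falls
     through to the common exit path below.  */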
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock);
          /* And give it up right away.  */
          lll_unlock (pd->lock);

          CANCEL_RESET (oldtype);
        }

76a50749 294 /* Run the code the user provided. */
42c8f44c
UD
295#ifdef CALL_THREAD_FCT
296 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
297#else
cc775edf 298 THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
42c8f44c 299#endif
76a50749
UD
300 }

  /* Run the destructors for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_mutex_dead (this->__lock);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
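
  /* The robust list is circular, with &pd->robust_head as the
     sentinel, hence the loop condition above.  Each link points at
     the __list.__next field embedded in a mutex, so the enclosing
     object is recovered with the usual container-of arithmetic,
     schematically

         mutex = (struct __pthread_mutex_s *)
           ((char *) link - offsetof (struct __pthread_mutex_s,
                                      __list.__next));

     and lll_robust_mutex_dead then marks its futex as owner-died and
     wakes a waiter.  */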

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we have done so.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
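
/* allocate_stack rounds the guard size up to whole pages, so the
   value 1 above requests the smallest possible guard, one page, which
   turns stack overflows into faults instead of silent corruption.  */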


int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }
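
  /* The branch above is only taken for explicitly scheduled threads,
     i.e. when the caller did something along the lines of

         pthread_attr_t a;
         pthread_attr_init (&a);
         pthread_attr_setinheritsched (&a, PTHREAD_EXPLICIT_SCHED);
         pthread_attr_setschedpolicy (&a, SCHED_FIFO);

     which sets ATTR_FLAG_NOTINHERITSCHED and ATTR_FLAG_POLICY_SET in
     the attribute flags.  */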

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

76a50749 535 /* Start the thread. */
80f536db 536 err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
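
/* For reference, a minimal caller of the interface defined above:

       static void *task (void *arg) { return arg; }

       pthread_t t;
       void *res;
       if (pthread_create (&t, NULL, task, NULL) == 0)
         pthread_join (t, &res);

   With a NULL attribute pointer the default_attr above is used.  */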


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)