nptl/pthread_create.c
/* Copyright (C) 2002-2007,2008,2009,2010,2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>

#include <shlib-compat.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };


int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    return err == ENOMEM ? EAGAIN : err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);

          return EINVAL;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Start the thread.  */
  return create_thread (pd, iattr, STACK_VARIABLES_ARGS);
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
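
For reference, the following is a minimal, self-contained sketch of the public API implemented above, seen from the application side: pthread_create stores start_routine and arg in the new thread's descriptor (pd->start_routine and pd->arg, invoked from start_thread), and pthread_join later hands back the value the thread returned.  It is illustrative only, not part of the glibc sources; build it as a separate program with -lpthread.  The worker function and the doubling computation are invented for the example.

/* Illustrative user-side example (not part of glibc): create a thread,
   pass it an argument through pthread_create's ARG parameter, and
   collect its return value with pthread_join.  */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *
worker (void *arg)
{
  int *input = arg;
  /* The value returned here ends up in the thread descriptor's result
     field (see start_thread above) and is delivered by pthread_join.  */
  int *result = malloc (sizeof *result);
  if (result != NULL)
    *result = *input * 2;
  return result;
}

int
main (void)
{
  pthread_t thr;
  int input = 21;

  /* A NULL attribute pointer selects the library defaults
     (cf. default_attr above).  */
  int err = pthread_create (&thr, NULL, worker, &input);
  if (err != 0)
    {
      fprintf (stderr, "pthread_create failed: %d\n", err);
      return EXIT_FAILURE;
    }

  void *res;
  err = pthread_join (thr, &res);
  if (err != 0 || res == NULL)
    return EXIT_FAILURE;

  printf ("worker returned %d\n", *(int *) res);
  free (res);
  return EXIT_SUCCESS;
}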