]> git.ipfire.org Git - thirdparty/glibc.git/blame - nptl/pthread_create.c
Update.
[thirdparty/glibc.git] / nptl / pthread_create.c
CommitLineData
14ffbc83 1/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
76a50749
UD
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20#include <errno.h>
21#include <stdbool.h>
22#include <stdlib.h>
23#include <string.h>
24#include "pthreadP.h"
25#include <hp-timing.h>
26#include <ldsodefs.h>
3e4fc359 27#include <atomic.h>
12d7ca07 28#include <libc-internal.h>
0e9d6240 29#include <resolv.h>
76a50749
UD
30
31#include <shlib-compat.h>
32
33
/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event;

/* Number of threads running.  Starts at 1 to account for the initial
   thread; start_thread exits the process when a decrement reaches
   zero.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include "createthread.c"


/* Table of the key information.  Indexed by key number; each slot
   holds the sequence counter and destructor consulted by
   __nptl_deallocate_tsd below.  */
struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]
  __attribute__ ((nocommon));
hidden_data_def (__pthread_keys)
76a50749 62
76a50749 63struct pthread *
90491dc4 64internal_function
76a50749
UD
65__find_in_stack_list (pd)
66 struct pthread *pd;
67{
68 list_t *entry;
69 struct pthread *result = NULL;
70
71 lll_lock (stack_cache_lock);
72
73 list_for_each (entry, &stack_used)
74 {
75 struct pthread *curp;
76
d4f64e1a 77 curp = list_entry (entry, struct pthread, list);
76a50749
UD
78 if (curp == pd)
79 {
80 result = curp;
81 break;
82 }
83 }
84
85 if (result == NULL)
86 list_for_each (entry, &__stack_user)
87 {
88 struct pthread *curp;
89
d4f64e1a 90 curp = list_entry (entry, struct pthread, list);
76a50749
UD
91 if (curp == pd)
92 {
93 result = curp;
94 break;
95 }
96 }
97
98 lll_unlock (stack_cache_lock);
99
100 return result;
101}
102
103
104/* Deallocate POSIX thread-local-storage. */
3fa21fd8
UD
105void
106attribute_hidden
107__nptl_deallocate_tsd (void)
76a50749 108{
877e51b2
UD
109 struct pthread *self = THREAD_SELF;
110
76a50749
UD
111 /* Maybe no data was ever allocated. This happens often so we have
112 a flag for this. */
877e51b2 113 if (THREAD_GETMEM (self, specific_used))
76a50749
UD
114 {
115 size_t round;
6b4686a5 116 size_t cnt;
76a50749 117
6b4686a5
UD
118 round = 0;
119 do
76a50749 120 {
76a50749
UD
121 size_t idx;
122
c5acd3d7 123 /* So far no new nonzero data entry. */
877e51b2 124 THREAD_SETMEM (self, specific_used, false);
c5acd3d7 125
76a50749 126 for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
6b4686a5
UD
127 {
128 struct pthread_key_data *level2;
129
877e51b2 130 level2 = THREAD_GETMEM_NC (self, specific, cnt);
6b4686a5
UD
131
132 if (level2 != NULL)
133 {
134 size_t inner;
135
136 for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
137 ++inner, ++idx)
138 {
139 void *data = level2[inner].data;
140
877e51b2
UD
141 if (data != NULL)
142 {
143 /* Always clear the data. */
144 level2[inner].data = NULL;
145
6b4686a5
UD
146 /* Make sure the data corresponds to a valid
147 key. This test fails if the key was
148 deallocated and also if it was
149 re-allocated. It is the user's
150 responsibility to free the memory in this
151 case. */
877e51b2
UD
152 if (level2[inner].seq
153 == __pthread_keys[idx].seq
154 /* It is not necessary to register a destructor
155 function. */
156 && __pthread_keys[idx].destr != NULL)
157 /* Call the user-provided destructor. */
158 __pthread_keys[idx].destr (data);
6b4686a5
UD
159 }
160 }
161 }
162 else
163 idx += PTHREAD_KEY_1STLEVEL_SIZE;
164 }
877e51b2
UD
165
166 if (THREAD_GETMEM (self, specific_used) == 0)
167 /* No data has been modified. */
168 goto just_free;
76a50749 169 }
877e51b2
UD
170 /* We only repeat the process a fixed number of times. */
171 while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));
76a50749 172
877e51b2
UD
173 /* Just clear the memory of the first block for reuse. */
174 memset (&THREAD_SELF->specific_1stblock, '\0',
175 sizeof (self->specific_1stblock));
6b4686a5 176
877e51b2 177 just_free:
6b4686a5
UD
178 /* Free the memory for the other blocks. */
179 for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
180 {
181 struct pthread_key_data *level2;
182
877e51b2 183 level2 = THREAD_GETMEM_NC (self, specific, cnt);
6b4686a5
UD
184 if (level2 != NULL)
185 {
186 /* The first block is allocated as part of the thread
187 descriptor. */
188 free (level2);
877e51b2 189 THREAD_SETMEM_NC (self, specific, cnt, NULL);
6b4686a5
UD
190 }
191 }
192
877e51b2 193 THREAD_SETMEM (self, specific_used, false);
76a50749
UD
194 }
195}
196
197
198/* Deallocate a thread's stack after optionally making sure the thread
199 descriptor is still valid. */
200void
90491dc4 201internal_function
76a50749
UD
202__free_tcb (struct pthread *pd)
203{
204 /* The thread is exiting now. */
ba25bb0f
UD
205 if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
206 TERMINATED_BIT) == 0, 1))
76a50749
UD
207 {
208 /* Remove the descriptor from the list. */
209 if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
210 /* Something is really wrong. The descriptor for a still
211 running thread is gone. */
212 abort ();
213
76a50749
UD
214 /* Queue the stack memory block for reuse and exit the process. The
215 kernel will signal via writing to the address returned by
216 QUEUE-STACK when the stack is available. */
217 __deallocate_stack (pd);
218 }
219}
220
221
/* Entry point of every thread created by create_thread: runs the
   user routine, then performs all thread-exit bookkeeping.  ARG is
   the thread's own struct pthread.  The exact ordering below
   (TSD destructors, libc freeres, thread-count decrement, debugger
   event, EXITING_BIT, TCB free) is load-bearing — do not reorder.  */
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started, used as the base
     for this thread's CPU-clock readings.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp: cancellation or
     pthread_exit unwinds back here with NOT_FIRST_CALL nonzero and
     falls through to the cleanup code below.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      /* If the parent requested a stopped start (e.g. to apply
	 scheduling settings first), wait until it releases us.  */
      if (__builtin_expect (pd->stopped_start, 0))
	{
	  /* Enable asynchronous cancellation so a cancel arriving
	     while we block on the lock takes effect immediately.  */
	  int oldtype = CANCEL_ASYNC ();

	  /* Get the lock the parent locked to force synchronization.  */
	  lll_lock (pd->lock);
	  /* And give it up right away.  */
	  lll_unlock (pd->lock);

	  CANCEL_RESET (oldtype);
	}

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the mask.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
		   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
	{
	  /* Yep, we have to signal the death.  Add the descriptor to
	     the list but only if it is not already on it.  */
	  if (pd->nextevent == NULL)
	    {
	      pd->eventbuf.eventnum = TD_DEATH;
	      pd->eventbuf.eventdata = pd;

	      /* CAS loop: push PD onto the global event list.  */
	      do
		pd->nextevent = __nptl_last_event;
	      while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
							   pd, pd->nextevent));
	    }

	  /* Now call the function to signal the event.  */
	  __nptl_death_event ();
	}
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
336
337
76a50749
UD
/* Default thread attributes for the case when the user does not
   provide any.  Used by __pthread_create_2_1 when ATTR is NULL.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
346
/* Create a new thread running START_ROUTINE (ARG) with the attributes
   in ATTR (or defaults if ATTR is NULL) and store its handle in
   *NEWTHREAD.  Returns 0 on success or an errno value.  Exported as
   pthread_create@@GLIBC_2.1 below.  */
int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;
  const struct pthread_attr *iattr;
  struct pthread *pd;
  int err;

  iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  The SCHED/POLICY bits are taken
     from the creating thread, the rest from the attributes.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
	       | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* NOTE(review): on targets where INTERNAL_SYSCALL_DECL expands
	 to a variable declaration this shadows the function-level ERR,
	 so the "err = EINVAL" below may assign the inner variable and
	 the outer ERR reaches "return err" unset — confirm per-arch
	 expansion.  */
      INTERNAL_SYSCALL_DECL (err);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
	pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
	{
	  pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, err, 1, 0);
	  pd->flags |= ATTR_FLAG_POLICY_SET;
	}

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
	memcpy (&pd->schedparam, &iattr->schedparam,
		sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
	{
	  INTERNAL_SYSCALL (sched_getparam, err, 2, 0, &pd->schedparam);
	  pd->flags |= ATTR_FLAG_SCHED_SET;
	}

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, err, 1,
				      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, err, 1,
				      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
	  || pd->schedparam.sched_priority > maxprio)
	{
	  err = EINVAL;
	  goto errout;
	}
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
	{
	errout:
	  __deallocate_stack (pd);
	}
      return err;
    }

  return 0;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
471
472
473#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
474int
475__pthread_create_2_0 (newthread, attr, start_routine, arg)
476 pthread_t *newthread;
477 const pthread_attr_t *attr;
478 void *(*start_routine) (void *);
479 void *arg;
480{
481 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
482 the old size and access to the new members might crash the program.
483 We convert the struct now. */
484 struct pthread_attr new_attr;
485
486 if (attr != NULL)
487 {
488 struct pthread_attr *iattr = (struct pthread_attr *) attr;
489 size_t ps = __getpagesize ();
490
491 /* Copy values from the user-provided attributes. */
492 new_attr.schedparam = iattr->schedparam;
493 new_attr.schedpolicy = iattr->schedpolicy;
494 new_attr.flags = iattr->flags;
495
496 /* Fill in default values for the fields not present in the old
497 implementation. */
498 new_attr.guardsize = ps;
499 new_attr.stackaddr = NULL;
500 new_attr.stacksize = 0;
ca85ede0 501 new_attr.cpuset = NULL;
76a50749
UD
502
503 /* We will pass this value on to the real implementation. */
504 attr = (pthread_attr_t *) &new_attr;
505 }
506
507 return __pthread_create_2_1 (newthread, attr, start_routine, arg);
508}
509compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
510 GLIBC_2_0);
511#endif
7f08f55a
RM
512\f
513/* Information for libthread_db. */
514
515#include "../nptl_db/db_info.c"