/* nptl/pthread_create.c (glibc) — NOTE(review): this copy was extracted
   from a git-blame web view; blame annotations (commit hashes, author
   initials, per-line counters) remain interleaved with the code below.  */
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
19
20#include <errno.h>
21#include <stdbool.h>
22#include <stdlib.h>
23#include <string.h>
24#include "pthreadP.h"
25#include <hp-timing.h>
26#include <ldsodefs.h>
3e4fc359 27#include <atomic.h>
12d7ca07 28#include <libc-internal.h>
76a50749
UD
29
30#include <shlib-compat.h>
31
32
33/* Local function to start thread and handle cleanup. */
34static int start_thread (void *arg);
35/* Similar version used when debugging. */
36static int start_thread_debug (void *arg);
37
38
39/* Nozero if debugging mode is enabled. */
40int __pthread_debug;
41
42/* Globally enabled events. */
fa9a4ff0 43static td_thr_events_t __nptl_threads_events;
76a50749
UD
44
45/* Pointer to descriptor with the last event. */
fa9a4ff0 46static struct pthread *__nptl_last_event;
76a50749 47
47202270
UD
48/* Number of threads running. */
49unsigned int __nptl_nthreads = 1;
50
76a50749
UD
51
52/* Code to allocate and deallocate a stack. */
76a50749
UD
53#include "allocatestack.c"
54
55/* Code to create the thread. */
56#include "createthread.c"
57
58
59/* Table of the key information. */
fa9a4ff0 60struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]
518b5308 61 __attribute__ ((nocommon));
415ef7d8 62hidden_data_def (__pthread_keys)
76a50749 63
fa9a4ff0
RM
64/* This is for libthread_db only. */
65const int __pthread_pthread_sizeof_descr = sizeof (struct pthread);
76a50749
UD
66
67struct pthread *
90491dc4 68internal_function
76a50749
UD
69__find_in_stack_list (pd)
70 struct pthread *pd;
71{
72 list_t *entry;
73 struct pthread *result = NULL;
74
75 lll_lock (stack_cache_lock);
76
77 list_for_each (entry, &stack_used)
78 {
79 struct pthread *curp;
80
d4f64e1a 81 curp = list_entry (entry, struct pthread, list);
76a50749
UD
82 if (curp == pd)
83 {
84 result = curp;
85 break;
86 }
87 }
88
89 if (result == NULL)
90 list_for_each (entry, &__stack_user)
91 {
92 struct pthread *curp;
93
d4f64e1a 94 curp = list_entry (entry, struct pthread, list);
76a50749
UD
95 if (curp == pd)
96 {
97 result = curp;
98 break;
99 }
100 }
101
102 lll_unlock (stack_cache_lock);
103
104 return result;
105}
106
107
108/* Deallocate POSIX thread-local-storage. */
109static void
ba25bb0f 110internal_function
76a50749
UD
111deallocate_tsd (struct pthread *pd)
112{
113 /* Maybe no data was ever allocated. This happens often so we have
114 a flag for this. */
6b4686a5 115 if (THREAD_GETMEM (pd, specific_used))
76a50749
UD
116 {
117 size_t round;
6b4686a5 118 size_t cnt;
76a50749 119
6b4686a5
UD
120 round = 0;
121 do
76a50749 122 {
76a50749
UD
123 size_t idx;
124
c5acd3d7 125 /* So far no new nonzero data entry. */
6b4686a5 126 THREAD_SETMEM (pd, specific_used, false);
c5acd3d7 127
76a50749 128 for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
6b4686a5
UD
129 {
130 struct pthread_key_data *level2;
131
132 level2 = THREAD_GETMEM_NC (pd, specific, cnt);
133
134 if (level2 != NULL)
135 {
136 size_t inner;
137
138 for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
139 ++inner, ++idx)
140 {
141 void *data = level2[inner].data;
142
143 if (data != NULL
144 /* Make sure the data corresponds to a valid
145 key. This test fails if the key was
146 deallocated and also if it was
147 re-allocated. It is the user's
148 responsibility to free the memory in this
149 case. */
150 && (level2[inner].seq
151 == __pthread_keys[idx].seq)
152 /* It is not necessary to register a destructor
153 function. */
154 && __pthread_keys[idx].destr != NULL)
155 {
156 level2[inner].data = NULL;
157 __pthread_keys[idx].destr (data);
158 }
159 }
160 }
161 else
162 idx += PTHREAD_KEY_1STLEVEL_SIZE;
163 }
76a50749 164 }
6b4686a5
UD
165 while (THREAD_GETMEM (pd, specific_used)
166 && ++round < PTHREAD_DESTRUCTOR_ITERATIONS);
76a50749 167
6b4686a5
UD
168 /* Clear the memory of the first block for reuse. */
169 memset (&pd->specific_1stblock, '\0', sizeof (pd->specific_1stblock));
170
171 /* Free the memory for the other blocks. */
172 for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
173 {
174 struct pthread_key_data *level2;
175
176 level2 = THREAD_GETMEM_NC (pd, specific, cnt);
177 if (level2 != NULL)
178 {
179 /* The first block is allocated as part of the thread
180 descriptor. */
181 free (level2);
182 THREAD_SETMEM_NC (pd, specific, cnt, NULL);
183 }
184 }
185
186 THREAD_SETMEM (pd, specific_used, false);
76a50749
UD
187 }
188}
189
190
191/* Deallocate a thread's stack after optionally making sure the thread
192 descriptor is still valid. */
193void
90491dc4 194internal_function
76a50749
UD
195__free_tcb (struct pthread *pd)
196{
197 /* The thread is exiting now. */
ba25bb0f
UD
198 if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
199 TERMINATED_BIT) == 0, 1))
76a50749
UD
200 {
201 /* Remove the descriptor from the list. */
202 if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
203 /* Something is really wrong. The descriptor for a still
204 running thread is gone. */
205 abort ();
206
76a50749
UD
207 /* Queue the stack memory block for reuse and exit the process. The
208 kernel will signal via writing to the address returned by
209 QUEUE-STACK when the stack is available. */
210 __deallocate_stack (pd);
211 }
212}
213
214
215static int
216start_thread (void *arg)
217{
47202270
UD
218 /* One more thread. */
219 atomic_increment (&__nptl_nthreads);
220
76a50749
UD
221 struct pthread *pd = (struct pthread *) arg;
222
223#if HP_TIMING_AVAIL
224 /* Remember the time when the thread was started. */
225 hp_timing_t now;
226 HP_TIMING_NOW (now);
227 THREAD_SETMEM (pd, cpuclock_offset, now);
228#endif
229
230 /* This is where the try/finally block should be created. For
231 compilers without that support we do use setjmp. */
18d009ca
UD
232 int not_first_call = setjmp (pd->cancelbuf);
233 if (__builtin_expect (! not_first_call, 1))
76a50749
UD
234 {
235 /* Run the code the user provided. */
42c8f44c
UD
236#ifdef CALL_THREAD_FCT
237 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
238#else
cc775edf 239 THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
42c8f44c 240#endif
76a50749
UD
241 }
242
6b4686a5
UD
243 /* Run the destructor for the thread-local data. */
244 deallocate_tsd (pd);
245
12d7ca07
RM
246 /* Clean up any state libc stored in thread-local variables. */
247 __libc_thread_freeres ();
76a50749 248
47202270
UD
249 /* If this is the last thread we terminate the process now. We
250 do not notify the debugger, it might just irritate it if there
251 is no thread left. */
729924a0 252 if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
47202270
UD
253 /* This was the last thread. */
254 exit (0);
255
76a50749
UD
256 /* Report the death of the thread if this is wanted. */
257 if (__builtin_expect (pd->report_events, 0))
258 {
259 /* See whether TD_DEATH is in any of the mask. */
260 const int idx = __td_eventword (TD_DEATH);
261 const uint32_t mask = __td_eventmask (TD_DEATH);
262
263 if ((mask & (__nptl_threads_events.event_bits[idx]
264 | pd->eventbuf.eventmask.event_bits[idx])) != 0)
265 {
266 /* Yep, we have to signal the death. Add the descriptor to
267 the list but only if it is not already on it. */
268 if (pd->nextevent == NULL)
269 {
270 pd->eventbuf.eventnum = TD_DEATH;
271 pd->eventbuf.eventdata = pd;
272
273 do
274 pd->nextevent = __nptl_last_event;
5a3ab2fc
UD
275 while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
276 pd, pd->nextevent));
76a50749
UD
277 }
278
279 /* Now call the function to signal the event. */
280 __nptl_death_event ();
281 }
282 }
283
6461e577
RM
284 /* The thread is exiting now. Don't set this bit until after we've hit
285 the event-reporting breakpoint, so that td_thr_get_info on us while at
286 the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
287 atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
76a50749 288
76a50749
UD
289 /* If the thread is detached free the TCB. */
290 if (IS_DETACHED (pd))
291 /* Free the TCB. */
292 __free_tcb (pd);
293
294 /* We cannot call '_exit' here. '_exit' will terminate the process.
295
296 The 'exit' implementation in the kernel will signal when the
297 process is really dead since 'clone' got passed the CLONE_CLEARTID
298 flag. The 'tid' field in the TCB will be set to zero.
299
300 The exit code is zero since in case all threads exit by calling
301 'pthread_exit' the exit status must be 0 (zero). */
302 __exit_thread_inline (0);
303
304 /* NOTREACHED */
305 return 0;
306}
307
308
309/* Just list start_thread but we do some more things needed for a run
310 with a debugger attached. */
311static int
312start_thread_debug (void *arg)
313{
314 struct pthread *pd = (struct pthread *) arg;
315
316 /* Get the lock the parent locked to force synchronization. */
317 lll_lock (pd->lock);
318 /* And give it up right away. */
319 lll_unlock (pd->lock);
320
321 /* Now do the actual startup. */
322 return start_thread (arg);
323}
324
325
326/* Default thread attributes for the case when the user does not
327 provide any. */
328static const struct pthread_attr default_attr =
329 {
330 /* Just some value > 0 which gets rounded to the nearest page size. */
331 .guardsize = 1,
332 };
333
334
335int
336__pthread_create_2_1 (newthread, attr, start_routine, arg)
337 pthread_t *newthread;
338 const pthread_attr_t *attr;
339 void *(*start_routine) (void *);
340 void *arg;
341{
342 STACK_VARIABLES;
343 const struct pthread_attr *iattr;
344 struct pthread *pd;
345 int err;
346
347 iattr = (struct pthread_attr *) attr;
348 if (iattr == NULL)
349 /* Is this the best idea? On NUMA machines this could mean
350 accessing far-away memory. */
351 iattr = &default_attr;
352
353 err = ALLOCATE_STACK (iattr, &pd);
729924a0 354 if (__builtin_expect (err != 0, 0))
76a50749
UD
355 /* Something went wrong. Maybe a parameter of the attributes is
356 invalid or we could not allocate memory. */
357 return err;
358
359
360 /* Initialize the TCB. All initializations with zero should be
361 performed in 'get_cached_stack'. This way we avoid doing this if
362 the stack freshly allocated with 'mmap'. */
363
5d5d5969 364#ifdef TLS_TCB_AT_TP
76a50749 365 /* Reference to the TCB itself. */
55c11fbd 366 pd->header.self = pd;
76a50749 367
d4f64e1a 368 /* Self-reference for TLS. */
55c11fbd 369 pd->header.tcb = pd;
76a50749
UD
370#endif
371
372 /* Store the address of the start routine and the parameter. Since
373 we do not start the function directly the stillborn thread will
374 get the information from its thread descriptor. */
375 pd->start_routine = start_routine;
376 pd->arg = arg;
377
378 /* Copy the thread attribute flags. */
379 pd->flags = iattr->flags;
380
381 /* Initialize the field for the ID of the thread which is waiting
382 for us. This is a self-reference in case the thread is created
383 detached. */
384 pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;
385
386 /* The debug events are inherited from the parent. */
387 pd->eventbuf = THREAD_SELF->eventbuf;
388
389
390 /* Determine scheduling parameters for the thread.
391 XXX How to determine whether scheduling handling is needed? */
392 if (0 && attr != NULL)
393 {
394 if (iattr->flags & ATTR_FLAG_NOTINHERITSCHED)
395 {
396 /* Use the scheduling parameters the user provided. */
397 pd->schedpolicy = iattr->schedpolicy;
398 memcpy (&pd->schedparam, &iattr->schedparam,
399 sizeof (struct sched_param));
400 }
401 else
402 {
403 /* Just store the scheduling attributes of the parent. */
404 pd->schedpolicy = __sched_getscheduler (0);
405 __sched_getparam (0, &pd->schedparam);
406 }
407 }
408
409 /* Pass the descriptor to the caller. */
410 *newthread = (pthread_t) pd;
411
412 /* Start the thread. */
413 err = create_thread (pd, STACK_VARIABLES_ARGS);
414 if (err != 0)
415 {
416 /* Something went wrong. Free the resources. */
417 __deallocate_stack (pd);
418 return err;
419 }
420
421 return 0;
422}
423versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
424
425
426#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
427int
428__pthread_create_2_0 (newthread, attr, start_routine, arg)
429 pthread_t *newthread;
430 const pthread_attr_t *attr;
431 void *(*start_routine) (void *);
432 void *arg;
433{
434 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
435 the old size and access to the new members might crash the program.
436 We convert the struct now. */
437 struct pthread_attr new_attr;
438
439 if (attr != NULL)
440 {
441 struct pthread_attr *iattr = (struct pthread_attr *) attr;
442 size_t ps = __getpagesize ();
443
444 /* Copy values from the user-provided attributes. */
445 new_attr.schedparam = iattr->schedparam;
446 new_attr.schedpolicy = iattr->schedpolicy;
447 new_attr.flags = iattr->flags;
448
449 /* Fill in default values for the fields not present in the old
450 implementation. */
451 new_attr.guardsize = ps;
452 new_attr.stackaddr = NULL;
453 new_attr.stacksize = 0;
454
455 /* We will pass this value on to the real implementation. */
456 attr = (pthread_attr_t *) &new_attr;
457 }
458
459 return __pthread_create_2_1 (newthread, attr, start_routine, arg);
460}
461compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
462 GLIBC_2_0);
463#endif