/* nptl/pthread_create.c -- Thread creation for NPTL (glibc).  */
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
20#include <errno.h>
21#include <stdbool.h>
22#include <stdlib.h>
23#include <string.h>
24#include "pthreadP.h"
25#include <hp-timing.h>
26#include <ldsodefs.h>
3e4fc359 27#include <atomic.h>
12d7ca07 28#include <libc-internal.h>
76a50749
UD
29
30#include <shlib-compat.h>
31
32
33/* Local function to start thread and handle cleanup. */
34static int start_thread (void *arg);
35/* Similar version used when debugging. */
36static int start_thread_debug (void *arg);
37
38
39/* Nozero if debugging mode is enabled. */
40int __pthread_debug;
41
42/* Globally enabled events. */
fa9a4ff0 43static td_thr_events_t __nptl_threads_events;
76a50749
UD
44
45/* Pointer to descriptor with the last event. */
fa9a4ff0 46static struct pthread *__nptl_last_event;
76a50749 47
47202270
UD
48/* Number of threads running. */
49unsigned int __nptl_nthreads = 1;
50
76a50749
UD
51
52/* Code to allocate and deallocate a stack. */
76a50749
UD
53#include "allocatestack.c"
54
55/* Code to create the thread. */
56#include "createthread.c"
57
58
59/* Table of the key information. */
fa9a4ff0 60struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]
518b5308 61 __attribute__ ((nocommon));
415ef7d8 62hidden_data_def (__pthread_keys)
76a50749 63
fa9a4ff0
RM
64/* This is for libthread_db only. */
65const int __pthread_pthread_sizeof_descr = sizeof (struct pthread);
76a50749
UD
66
67struct pthread *
90491dc4 68internal_function
76a50749
UD
69__find_in_stack_list (pd)
70 struct pthread *pd;
71{
72 list_t *entry;
73 struct pthread *result = NULL;
74
75 lll_lock (stack_cache_lock);
76
77 list_for_each (entry, &stack_used)
78 {
79 struct pthread *curp;
80
d4f64e1a 81 curp = list_entry (entry, struct pthread, list);
76a50749
UD
82 if (curp == pd)
83 {
84 result = curp;
85 break;
86 }
87 }
88
89 if (result == NULL)
90 list_for_each (entry, &__stack_user)
91 {
92 struct pthread *curp;
93
d4f64e1a 94 curp = list_entry (entry, struct pthread, list);
76a50749
UD
95 if (curp == pd)
96 {
97 result = curp;
98 break;
99 }
100 }
101
102 lll_unlock (stack_cache_lock);
103
104 return result;
105}
106
107
108/* Deallocate POSIX thread-local-storage. */
109static void
ba25bb0f 110internal_function
76a50749
UD
111deallocate_tsd (struct pthread *pd)
112{
113 /* Maybe no data was ever allocated. This happens often so we have
114 a flag for this. */
115 if (pd->specific_used)
116 {
117 size_t round;
118 bool found_nonzero;
119
120 for (round = 0, found_nonzero = true;
121 found_nonzero && round < PTHREAD_DESTRUCTOR_ITERATIONS;
122 ++round)
123 {
124 size_t cnt;
125 size_t idx;
126
c5acd3d7
UD
127 /* So far no new nonzero data entry. */
128 found_nonzero = false;
129
76a50749
UD
130 for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
131 if (pd->specific[cnt] != NULL)
132 {
133 size_t inner;
134
135 for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
136 ++inner, ++idx)
137 {
138 void *data = pd->specific[cnt][inner].data;
139
140 if (data != NULL
141 /* Make sure the data corresponds to a valid
142 key. This test fails if the key was
143 deallocated and also if it was
144 re-allocated. It is the user's
145 responsibility to free the memory in this
146 case. */
147 && (pd->specific[cnt][inner].seq
148 == __pthread_keys[idx].seq)
149 /* It is not necessary to register a destructor
150 function. */
151 && __pthread_keys[idx].destr != NULL)
152 {
153 pd->specific[cnt][inner].data = NULL;
154 __pthread_keys[idx].destr (data);
155 found_nonzero = true;
156 }
157 }
158
159 if (cnt != 0)
160 {
161 /* The first block is allocated as part of the thread
162 descriptor. */
163 free (pd->specific[cnt]);
164 pd->specific[cnt] = NULL;
165 }
166 else
167 /* Clear the memory of the first block for reuse. */
df5803bf
UD
168 memset (&pd->specific_1stblock, '\0',
169 sizeof (pd->specific_1stblock));
76a50749
UD
170 }
171 else
172 idx += PTHREAD_KEY_1STLEVEL_SIZE;
173 }
174
175 pd->specific_used = false;
176 }
177}
178
179
180/* Deallocate a thread's stack after optionally making sure the thread
181 descriptor is still valid. */
182void
90491dc4 183internal_function
76a50749
UD
184__free_tcb (struct pthread *pd)
185{
186 /* The thread is exiting now. */
ba25bb0f
UD
187 if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
188 TERMINATED_BIT) == 0, 1))
76a50749
UD
189 {
190 /* Remove the descriptor from the list. */
191 if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
192 /* Something is really wrong. The descriptor for a still
193 running thread is gone. */
194 abort ();
195
196 /* Run the destructor for the thread-local data. */
197 deallocate_tsd (pd);
198
199 /* Queue the stack memory block for reuse and exit the process. The
200 kernel will signal via writing to the address returned by
201 QUEUE-STACK when the stack is available. */
202 __deallocate_stack (pd);
203 }
204}
205
206
207static int
208start_thread (void *arg)
209{
47202270
UD
210 /* One more thread. */
211 atomic_increment (&__nptl_nthreads);
212
76a50749
UD
213 struct pthread *pd = (struct pthread *) arg;
214
215#if HP_TIMING_AVAIL
216 /* Remember the time when the thread was started. */
217 hp_timing_t now;
218 HP_TIMING_NOW (now);
219 THREAD_SETMEM (pd, cpuclock_offset, now);
220#endif
221
222 /* This is where the try/finally block should be created. For
223 compilers without that support we do use setjmp. */
18d009ca
UD
224 int not_first_call = setjmp (pd->cancelbuf);
225 if (__builtin_expect (! not_first_call, 1))
76a50749
UD
226 {
227 /* Run the code the user provided. */
42c8f44c
UD
228#ifdef CALL_THREAD_FCT
229 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
230#else
cc775edf 231 THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
42c8f44c 232#endif
76a50749
UD
233 }
234
12d7ca07
RM
235 /* Clean up any state libc stored in thread-local variables. */
236 __libc_thread_freeres ();
76a50749 237
47202270
UD
238 /* If this is the last thread we terminate the process now. We
239 do not notify the debugger, it might just irritate it if there
240 is no thread left. */
729924a0 241 if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
47202270
UD
242 /* This was the last thread. */
243 exit (0);
244
76a50749
UD
245 /* Report the death of the thread if this is wanted. */
246 if (__builtin_expect (pd->report_events, 0))
247 {
248 /* See whether TD_DEATH is in any of the mask. */
249 const int idx = __td_eventword (TD_DEATH);
250 const uint32_t mask = __td_eventmask (TD_DEATH);
251
252 if ((mask & (__nptl_threads_events.event_bits[idx]
253 | pd->eventbuf.eventmask.event_bits[idx])) != 0)
254 {
255 /* Yep, we have to signal the death. Add the descriptor to
256 the list but only if it is not already on it. */
257 if (pd->nextevent == NULL)
258 {
259 pd->eventbuf.eventnum = TD_DEATH;
260 pd->eventbuf.eventdata = pd;
261
262 do
263 pd->nextevent = __nptl_last_event;
5a3ab2fc
UD
264 while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
265 pd, pd->nextevent));
76a50749
UD
266 }
267
268 /* Now call the function to signal the event. */
269 __nptl_death_event ();
270 }
271 }
272
6461e577
RM
273 /* The thread is exiting now. Don't set this bit until after we've hit
274 the event-reporting breakpoint, so that td_thr_get_info on us while at
275 the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
276 atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
76a50749 277
76a50749
UD
278 /* If the thread is detached free the TCB. */
279 if (IS_DETACHED (pd))
280 /* Free the TCB. */
281 __free_tcb (pd);
282
283 /* We cannot call '_exit' here. '_exit' will terminate the process.
284
285 The 'exit' implementation in the kernel will signal when the
286 process is really dead since 'clone' got passed the CLONE_CLEARTID
287 flag. The 'tid' field in the TCB will be set to zero.
288
289 The exit code is zero since in case all threads exit by calling
290 'pthread_exit' the exit status must be 0 (zero). */
291 __exit_thread_inline (0);
292
293 /* NOTREACHED */
294 return 0;
295}
296
297
298/* Just list start_thread but we do some more things needed for a run
299 with a debugger attached. */
300static int
301start_thread_debug (void *arg)
302{
303 struct pthread *pd = (struct pthread *) arg;
304
305 /* Get the lock the parent locked to force synchronization. */
306 lll_lock (pd->lock);
307 /* And give it up right away. */
308 lll_unlock (pd->lock);
309
310 /* Now do the actual startup. */
311 return start_thread (arg);
312}
313
314
315/* Default thread attributes for the case when the user does not
316 provide any. */
317static const struct pthread_attr default_attr =
318 {
319 /* Just some value > 0 which gets rounded to the nearest page size. */
320 .guardsize = 1,
321 };
322
323
324int
325__pthread_create_2_1 (newthread, attr, start_routine, arg)
326 pthread_t *newthread;
327 const pthread_attr_t *attr;
328 void *(*start_routine) (void *);
329 void *arg;
330{
331 STACK_VARIABLES;
332 const struct pthread_attr *iattr;
333 struct pthread *pd;
334 int err;
335
336 iattr = (struct pthread_attr *) attr;
337 if (iattr == NULL)
338 /* Is this the best idea? On NUMA machines this could mean
339 accessing far-away memory. */
340 iattr = &default_attr;
341
342 err = ALLOCATE_STACK (iattr, &pd);
729924a0 343 if (__builtin_expect (err != 0, 0))
76a50749
UD
344 /* Something went wrong. Maybe a parameter of the attributes is
345 invalid or we could not allocate memory. */
346 return err;
347
348
349 /* Initialize the TCB. All initializations with zero should be
350 performed in 'get_cached_stack'. This way we avoid doing this if
351 the stack freshly allocated with 'mmap'. */
352
5d5d5969 353#ifdef TLS_TCB_AT_TP
76a50749 354 /* Reference to the TCB itself. */
55c11fbd 355 pd->header.self = pd;
76a50749 356
d4f64e1a 357 /* Self-reference for TLS. */
55c11fbd 358 pd->header.tcb = pd;
76a50749
UD
359#endif
360
361 /* Store the address of the start routine and the parameter. Since
362 we do not start the function directly the stillborn thread will
363 get the information from its thread descriptor. */
364 pd->start_routine = start_routine;
365 pd->arg = arg;
366
367 /* Copy the thread attribute flags. */
368 pd->flags = iattr->flags;
369
370 /* Initialize the field for the ID of the thread which is waiting
371 for us. This is a self-reference in case the thread is created
372 detached. */
373 pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;
374
375 /* The debug events are inherited from the parent. */
376 pd->eventbuf = THREAD_SELF->eventbuf;
377
378
379 /* Determine scheduling parameters for the thread.
380 XXX How to determine whether scheduling handling is needed? */
381 if (0 && attr != NULL)
382 {
383 if (iattr->flags & ATTR_FLAG_NOTINHERITSCHED)
384 {
385 /* Use the scheduling parameters the user provided. */
386 pd->schedpolicy = iattr->schedpolicy;
387 memcpy (&pd->schedparam, &iattr->schedparam,
388 sizeof (struct sched_param));
389 }
390 else
391 {
392 /* Just store the scheduling attributes of the parent. */
393 pd->schedpolicy = __sched_getscheduler (0);
394 __sched_getparam (0, &pd->schedparam);
395 }
396 }
397
398 /* Pass the descriptor to the caller. */
399 *newthread = (pthread_t) pd;
400
401 /* Start the thread. */
402 err = create_thread (pd, STACK_VARIABLES_ARGS);
403 if (err != 0)
404 {
405 /* Something went wrong. Free the resources. */
406 __deallocate_stack (pd);
407 return err;
408 }
409
410 return 0;
411}
412versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
413
414
415#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
416int
417__pthread_create_2_0 (newthread, attr, start_routine, arg)
418 pthread_t *newthread;
419 const pthread_attr_t *attr;
420 void *(*start_routine) (void *);
421 void *arg;
422{
423 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
424 the old size and access to the new members might crash the program.
425 We convert the struct now. */
426 struct pthread_attr new_attr;
427
428 if (attr != NULL)
429 {
430 struct pthread_attr *iattr = (struct pthread_attr *) attr;
431 size_t ps = __getpagesize ();
432
433 /* Copy values from the user-provided attributes. */
434 new_attr.schedparam = iattr->schedparam;
435 new_attr.schedpolicy = iattr->schedpolicy;
436 new_attr.flags = iattr->flags;
437
438 /* Fill in default values for the fields not present in the old
439 implementation. */
440 new_attr.guardsize = ps;
441 new_attr.stackaddr = NULL;
442 new_attr.stacksize = 0;
443
444 /* We will pass this value on to the real implementation. */
445 attr = (pthread_attr_t *) &new_attr;
446 }
447
448 return __pthread_create_2_1 (newthread, attr, start_routine, arg);
449}
450compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
451 GLIBC_2_0);
452#endif