/* Copyright (C) 2002-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-diag.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <default-sched.h>
#include <futex-internal.h>
#include <tls-setup.h>
#include <rseq-internal.h>
#include "libioP.h"
#include <sys/single_threaded.h>
#include <version.h>
#include <clone_internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Globally enabled events.  */
extern td_thr_events_t __nptl_threads_events;
libc_hidden_proto (__nptl_threads_events)
td_thr_events_t __nptl_threads_events;
libc_hidden_data_def (__nptl_threads_events)

/* Pointer to descriptor with the last event.  */
extern struct pthread *__nptl_last_event;
libc_hidden_proto (__nptl_last_event)
struct pthread *__nptl_last_event;
libc_hidden_data_def (__nptl_last_event)

#ifdef SHARED
/* This variable is used to access _rtld_global from libthread_db.  If
   GDB loads libpthread before ld.so, it is not possible to resolve
   _rtld_global directly during libpthread initialization.  */
struct rtld_global *__nptl_rtld_global = &_rtld_global;
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
const char __nptl_version[] = VERSION;

/* This performs the initialization necessary when going from
   single-threaded to multi-threaded mode for the first time.  */
static void
late_init (void)
{
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

  /* Install the handler to change the threads' uid/gid.  Use
     SA_ONSTACK because the signal may be sent to threads that are
     running with custom stacks.  (This is less likely for
     SIGCANCEL.)  */
  sa.sa_sigaction = __nptl_setxid_sighandler;
  sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left these signals blocked.  Just
     in case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_UNBLOCK, &sa.sa_mask,
                         NULL, __NSIG_BYTES);
}

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* CONCURRENCY NOTES:

   Understanding who is the owner of the 'struct pthread' or 'PD'
   (refers to the value of the 'struct pthread *pd' function argument)
   is critically important in determining exactly which operations are
   allowed and which are not and when, particularly when it comes to the
   implementation of pthread_create, pthread_join, pthread_detach, and
   other functions which all operate on PD.

   The owner of PD is responsible for freeing the final resources
   associated with PD, and may examine the memory underlying PD at any
   point in time until it frees it back to the OS or releases it for
   reuse by the runtime.

   The thread which calls pthread_create is called the creating thread.
   The creating thread begins as the owner of PD.

   During startup the new thread may examine PD in coordination with the
   owner thread (which may be itself).

   The four cases of ownership transfer are:

   (1) Ownership of PD is released to the process (all threads may use it)
       after the new thread starts in a joinable state
       i.e. pthread_create returns a usable pthread_t.

   (2) Ownership of PD is released to the new thread starting in a detached
       state.

   (3) Ownership of PD is dynamically released to a running thread via
       pthread_detach.

   (4) Ownership of PD is acquired by the thread which calls pthread_join.

   Implementation notes:

   The PD->stopped_start and thread_ran variables are used to determine
   exactly which of the four ownership states we are in and therefore
   what actions can be taken.  For example after (2) we cannot read from
   or write to PD anymore since the thread may no longer exist and the
   memory may be unmapped.

   It is important to point out that PD->lock is used both like a
   one-shot semaphore and subsequently as a mutex.  The lock is taken
   in the parent to force the child to wait, and then the child
   releases the lock.  However, this semaphore-like effect is used
   only for synchronizing the parent and child.  After startup the lock
   is used like a mutex to create a critical section during which a
   single owner modifies the thread parameters.

   The most complicated cases happen during thread startup:

   (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED),
       or joinable (default PTHREAD_CREATE_JOINABLE) state and
       STOPPED_START is true, then the creating thread has ownership of
       PD until the PD->lock is released by pthread_create.  If any
       errors occur we are in states (c) or (d) below.

   (b) If the created thread is in a detached state
       (PTHREAD_CREATE_DETACHED), and STOPPED_START is false, then the
       creating thread has ownership of PD until it invokes the OS
       kernel's thread creation routine.  If this routine returns
       without error, then the created thread owns PD; otherwise, see
       (c) or (d) below.

   (c) If either a joinable or detached thread setup failed and THREAD_RAN
       is true, then the creating thread releases ownership to the new
       thread, the created thread sees the failed setup through the
       PD->setup_failed member, releases the PD ownership, and exits.
       The creating thread will be responsible for cleaning up the
       allocated resources.  THREAD_RAN is local to the creating thread
       and indicates whether thread creation or setup failed.

   (d) If the thread creation failed and THREAD_RAN is false (meaning
       ARCH_CLONE has failed), then the creating thread retains ownership
       of PD and must clean up the allocated resources.  No waiting for
       the new thread is required because it never started.

   The nptl_db interface:

   The interface with nptl_db requires that we enqueue PD into a linked
   list and then call a function which the debugger will trap.  The PD
   will then be dequeued and control returned to the thread.  The caller
   at the time must have ownership of PD and such ownership remains
   after control returns to the thread.  The enqueued PD is removed from
   the linked list by the nptl_db callback td_thr_event_getmsg.  The
   debugger must ensure that the thread does not resume execution,
   otherwise ownership of PD may be lost and examining PD will not be
   possible.

   Note that the GNU Debugger as of (December 10th 2015) commit
   c2c2a31fdb228d41ce3db62b268efea04bd39c18 no longer uses
   td_thr_event_getmsg and several other related nptl_db interfaces.  The
   principal reason for this is that nptl_db does not support non-stop
   mode where other threads can run concurrently and modify runtime
   structures currently in use by the debugger and the nptl_db
   interface.

   Axioms:

   * The create_thread function can never set stopped_start to false.
   * The created thread can read stopped_start but never write to it.
   * The variable thread_ran is set some time after the OS thread
     creation routine returns; how much time after the thread is created
     is unspecified, but it should be as quick as possible.

*/

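/* For reference, an illustrative sketch (not part of this file) of how
   the four ownership transfers above look from application code.  All
   interfaces used here are standard POSIX.  */
#if 0
#include <pthread.h>

static void *
task (void *arg)
{
  return arg;
}

int
main (void)
{
  pthread_t t;
  void *result;

  /* (1) Joinable create: PD is usable by the whole process via the
     returned pthread_t.  */
  pthread_create (&t, NULL, task, (void *) 1L);
  /* (4) pthread_join acquires ownership of PD and frees it.  */
  pthread_join (t, &result);

  /* (2) Detached create: the new thread owns PD and frees it when it
     exits.  */
  pthread_attr_t attr;
  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  pthread_create (&t, &attr, task, NULL);
  pthread_attr_destroy (&attr);

  /* (3) pthread_detach dynamically releases ownership of a joinable
     thread's PD to that thread.  */
  pthread_create (&t, NULL, task, NULL);
  pthread_detach (t);
  return 0;
}
#endif
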
/* CREATE THREAD NOTES:

   create_thread must initialize PD->stopped_start.  It should be true
   if the STOPPED_START parameter is true, or if create_thread needs the
   new thread to synchronize at startup for some other implementation
   reason.  If STOPPED_START will be true, then create_thread is obliged
   to lock PD->lock before starting the thread.  Then pthread_create
   unlocks PD->lock which synchronizes-with create_thread in the
   child thread which does an acquire/release of PD->lock as the last
   action before calling the user entry point.  The goal of all of this
   is to ensure that the required initial thread attributes are applied
   (by the creating thread) before the new thread runs user code.  Note
   that the functions pthread_getschedparam, pthread_setschedparam,
   pthread_setschedprio, __pthread_tpp_change_priority, and
   __pthread_current_priority reuse the same lock, PD->lock, for a
   similar purpose, e.g. synchronizing the setting of similar thread
   attributes.  These functions are never called before the thread is
   created, so they do not participate in startup synchronization, but
   given that the lock is already present and in the unlocked state,
   reusing it saves space.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up, even if it failed
   before calling the user code (*PD->start_routine).  */

static int _Noreturn start_thread (void *arg);

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool *stopped_start, void *stackaddr,
                          size_t stacksize, bool *thread_ran)
{
  /* Determine whether the newly created thread has to be started
     stopped since we have to set the scheduling parameters or set the
     affinity.  */
  bool need_setaffinity = (attr != NULL && attr->extension != NULL
                           && attr->extension->cpuset != 0);
  if (attr != NULL
      && (__glibc_unlikely (need_setaffinity)
          || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)))
    *stopped_start = true;

  pd->stopped_start = *stopped_start;
  if (__glibc_unlikely (*stopped_start))
    lll_lock (pd->lock, LLL_PRIVATE);

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
        These flags select semantics with shared address space and
        file descriptors according to what POSIX requires.

     CLONE_SIGHAND, CLONE_THREAD
        This flag selects the POSIX signal semantics and various
        other kinds of sharing (itimers, POSIX timers, etc.).

     CLONE_SETTLS
        The sixth parameter to CLONE determines the TLS area for the
        new thread.

     CLONE_PARENT_SETTID
        The kernel writes the thread ID of the newly created thread
        into the location pointed to by the fifth parameter to CLONE.

        Note that it would be semantically equivalent to use
        CLONE_CHILD_SETTID but it would be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
        The kernel clears the thread ID of a thread that has called
        sys_exit() in the location pointed to by the seventh parameter
        to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.  */
  const int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM
                           | CLONE_SIGHAND | CLONE_THREAD
                           | CLONE_SETTLS | CLONE_PARENT_SETTID
                           | CLONE_CHILD_CLEARTID
                           | 0);

  TLS_DEFINE_INIT_TP (tp, pd);

  struct clone_args args =
    {
      .flags = clone_flags,
      .pidfd = (uintptr_t) &pd->tid,
      .parent_tid = (uintptr_t) &pd->tid,
      .child_tid = (uintptr_t) &pd->tid,
      .stack = (uintptr_t) stackaddr,
      .stack_size = stacksize,
      .tls = (uintptr_t) tp,
    };
  int ret = __clone_internal (&args, &start_thread, pd);
  if (__glibc_unlikely (ret == -1))
    return errno;

  /* It's started now, so if we fail below, we'll have to let it clean
     itself up.  */
  *thread_ran = true;

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (attr != NULL)
    {
      /* Set the affinity mask if necessary.  */
      if (need_setaffinity)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setaffinity, pd->tid,
                                           attr->extension->cpusetsize,
                                           attr->extension->cpuset);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          assert (*stopped_start);

          int res = INTERNAL_SYSCALL_CALL (sched_setscheduler, pd->tid,
                                           pd->schedpolicy, &pd->schedparam);
          if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
            return INTERNAL_SYSCALL_ERRNO (res);
        }
    }

  return 0;
}
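
/* create_thread funnels the flags above into the clone3 system call via
   __clone_internal.  An illustrative, hedged sketch (not part of this
   file) of raw clone3 in its fork-like mode follows; thread creation
   proper (CLONE_VM plus a fresh stack, as above) additionally requires
   the architecture-specific entry trampoline that __clone_internal
   supplies.  Assumes Linux >= 5.3 with <linux/sched.h> defining
   struct clone_args.  */
#if 0
#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args.  */
#include <sys/syscall.h>
#include <sys/wait.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  struct clone_args args;
  memset (&args, 0, sizeof (args));
  args.exit_signal = SIGCHLD;   /* Fork-like: no CLONE_VM, no new stack.  */

  long tid = syscall (SYS_clone3, &args, sizeof (args));
  if (tid == 0)
    _exit (0);                  /* Child.  */
  if (tid > 0)
    waitpid ((pid_t) tid, NULL, 0);
  return tid < 0;
}
#endif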

/* Local function to start thread and handle cleanup.  */
static int _Noreturn
start_thread (void *arg)
{
  struct pthread *pd = arg;

  /* We are either in (a) or (b), and in either case we either own PD
     already (2) or are about to own PD (1), and so our only restriction
     would be that we can't free PD until we know we have ownership (see
     CONCURRENCY NOTES above).  */
  if (pd->stopped_start)
    {
      bool setup_failed = false;

      /* Get the lock the parent locked to force synchronization.  */
      lll_lock (pd->lock, LLL_PRIVATE);

      /* We have ownership of PD now.  For detached threads with a setup
         failure we set it joinable so the creating thread can
         synchronously join and free any resources before returning to
         the pthread_create caller.  */
      setup_failed = pd->setup_failed == 1;
      if (setup_failed)
        pd->joinid = NULL;

      /* And give it up right away.  */
      lll_unlock (pd->lock, LLL_PRIVATE);

      if (setup_failed)
        goto out;
    }

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Name the thread stack if kernel supports it.  */
  name_stack_maps (pd, true);

  /* Register rseq TLS to the kernel.  */
  {
    bool do_rseq = THREAD_GETMEM (pd, flags) & ATTR_FLAG_DO_RSEQ;
    if (!rseq_register_current_thread (pd, do_rseq) && do_rseq)
      __libc_fatal ("Fatal glibc error: rseq registration failed\n");
  }

#ifndef __ASSUME_SET_ROBUST_LIST
  if (__nptl_set_robust_list_avail)
#endif
    {
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  int not_first_call;
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* This call results in a -Wstringop-overflow warning because struct
     pthread_unwind_buf is smaller than jmp_buf.  setjmp and longjmp
     do not use anything beyond the common prefix (they never access
     the saved signal mask), so that is a false positive.  */
  DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overflow=");
#endif
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  DIAG_POP_NEEDS_COMMENT;

  /* No previous handlers.  NB: This must be done after setjmp since the
     private space in the unwind jump buffer may overlap space used by
     setjmp to store extra architecture-specific information which is
     never used by the cancellation-specific __libc_unwind_longjmp.

     The private space is allowed to overlap because the unwinder never
     has to return through any of the jumped-to call frames, and thus
     only a minimum amount of saved data need be stored, and for example,
     need not include the process signal mask information.  This is all
     an optimization to reduce stack usage when pushing cancellation
     handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acquire (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      internal_signal_restore_set (&pd->sigmask);

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
      void *ret;
      if (pd->c11)
        {
          /* The function pointer of the c11 thread start is cast to an
             incorrect type on the __pthread_create_2_1 call; however, it
             is cast back to the correct one so the call behavior is
             well-defined (it is assumed that pointers to void are able
             to represent all values of int).  */
          int (*start)(void*) = (int (*) (void*)) pd->start_routine;
          ret = (void*) (uintptr_t) start (pd->arg);
        }
      else
        ret = pd->start_routine (pd->arg);
      THREAD_SETMEM (pd, result, ret);
    }

  /* Call destructors for the thread_local TLS variables.  */
  call_function_static_weak (__call_tls_dtors);

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_fetch_or_relaxed (&pd->cancelhandling, EXITING_BITMASK);

  if (__glibc_unlikely (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) == 1))
    /* This was the last thread.  */
    exit (0);

  /* This prevents sending a signal from this thread to itself during
     its final stages.  This must come after the exit call above
     because atexit handlers must not run with signals blocked.

     Do not block SIGSETXID.  The setxid handshake below expects the
     signal to be delivered.  (SIGSETXID cannot run application code,
     nor does it use pthread_kill.)  Reuse the pd->sigmask space for
     computing the signal mask, to save stack space.  */
  internal_sigfillset (&pd->sigmask);
  internal_sigdelset (&pd->sigmask, SIGSETXID);
  INTERNAL_SYSCALL_CALL (rt_sigprocmask, SIG_BLOCK, &pd->sigmask, NULL,
                         __NSIG_BYTES);

  /* Tell __pthread_kill_internal that this thread is about to exit.
     If there is a __pthread_kill_internal in progress, this delays
     the thread exit until the signal has been queued by the kernel
     (so that the TID used to send it remains valid).  */
  __libc_lock_lock (pd->exit_lock);
  pd->exiting = true;
  __libc_lock_unlock (pd->exit_lock);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (!__nptl_set_robust_list_avail
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# if __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_fetch_or_acquire (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  if (!pd->user_stack)
    advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
                        pd->guardsize);

  if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we did that.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in
           that the futex_wait condition (setxid_futex) is different from
           the condition used in the surrounding loop (cancelhandling).
           We need to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __nptl_free_tcb (pd);

  /* Remove the associated name from the thread stack.  */
  name_stack_maps (pd, false);

out:
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     rseq TLS is still registered at this point.  Rely on implicit
     unregistration performed by the kernel on thread teardown.  This is
     not a problem because the rseq TLS lives on the stack, and the stack
     outlives the thread.  If TCB allocation is ever changed, additional
     steps may be required, such as performing explicit rseq
     unregistration before reclaiming the rseq TLS area memory.  It is
     NOT sufficient to block signals because the kernel may write to the
     rseq area even without signals.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  while (1)
    INTERNAL_SYSCALL_CALL (exit, 0);

  /* NOTREACHED */
}
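
/* The FUTEX_OWNER_DIED handling above implements the application-visible
   robust-mutex guarantee.  An illustrative sketch (not part of this
   file) of that contract, using only standard POSIX interfaces: the
   next locker of a robust mutex whose owner died gets EOWNERDEAD and
   may repair the protected state.  */
#if 0
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

static void *
dies_with_lock (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;                  /* Thread exits without unlocking.  */
}

int
main (void)
{
  pthread_mutexattr_t ma;
  pthread_mutexattr_init (&ma);
  pthread_mutexattr_setrobust (&ma, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &ma);
  pthread_mutexattr_destroy (&ma);

  pthread_t t;
  pthread_create (&t, NULL, dies_with_lock, NULL);
  pthread_join (t, NULL);

  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    /* Repair the protected state, then mark the mutex usable again.  */
    pthread_mutex_consistent (&m);
  pthread_mutex_unlock (&m);
  return 0;
}
#endif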


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  void *stackaddr = NULL;
  size_t stacksize = 0;

  /* Avoid a data race in the multi-threaded case, and call the
     deferred initialization only once.  */
  if (__libc_single_threaded_internal)
    {
      late_init ();
      __libc_single_threaded_internal = 0;
      /* __libc_single_threaded can be accessed through copy relocations,
         so the external copy has to be updated as well.  */
      __libc_single_threaded = 0;
    }

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  union pthread_attr_transparent default_attr;
  bool destroy_default_attr = false;
  bool c11 = (attr == ATTR_C11_THREAD);
  if (iattr == NULL || c11)
    {
      int ret = __pthread_getattr_default_np (&default_attr.external);
      if (ret != 0)
        return ret;
      destroy_default_attr = true;
      iattr = &default_attr.internal;
    }

  struct pthread *pd = NULL;
  int err = allocate_stack (iattr, &pd, &stackaddr, &stacksize);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  pd->c11 = c11;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Inherit rseq registration state.  Without seccomp filters, rseq
     registration will either always fail or always succeed.  */
  if ((int) THREAD_GETMEM_VOLATILE (self, rseq_area.cpu_id) >= 0)
    pd->flags |= ATTR_FLAG_DO_RSEQ;

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Setup tcbhead.  */
  tls_setup_tcbhead (pd);

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  if (__glibc_unlikely (__nptl_nthreads == 1))
    _IO_enable_locks ();

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_fetch_add_relaxed (&__nptl_nthreads, 1);

  /* Our local values of stopped_start and thread_ran can be accessed at
     any time.  The PD->stopped_start may only be accessed if we have
     ownership of PD (see CONCURRENCY NOTES above).  */
  bool stopped_start = false; bool thread_ran = false;

  /* Block all signals, so that the new thread starts out with
     signals disabled.  This avoids race conditions in the thread
     startup.  */
  internal_sigset_t original_sigmask;
  internal_signal_block_all (&original_sigmask);

  if (iattr->extension != NULL && iattr->extension->sigmask_set)
    /* Use the signal mask in the attribute.  The internal signals
       have already been filtered by the public
       pthread_attr_setsigmask_np interface.  */
    internal_sigset_from_sigset (&pd->sigmask, &iattr->extension->sigmask);
  else
    {
      /* Conceptually, the new thread needs to inherit the signal mask
         of this thread.  Therefore, it needs to restore the saved
         signal mask of this thread, so save it in the startup
         information.  */
      pd->sigmask = original_sigmask;
      /* Reset the cancellation signal mask in case this thread is
         running cancellation.  */
      internal_sigdelset (&pd->sigmask, SIGCANCEL);
    }

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      stopped_start = true;

      /* We always create the thread stopped at startup so we can
         notify the debugger.  */
      retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                              stacksize, &thread_ran);
      if (retval == 0)
        {
          /* We retain ownership of PD until (a) (see CONCURRENCY NOTES
             above).  */

          /* Assert stopped_start is true in both our local copy and the
             PD copy.  */
          assert (stopped_start);
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  See
             CONCURRENCY NOTES for the nptl_db interface comments.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, &stopped_start, stackaddr,
                            stacksize, &thread_ran);

  /* Return to the previous signal mask, after creating the new
     thread.  */
  internal_signal_restore_set (&original_sigmask);

  if (__glibc_unlikely (retval != 0))
    {
      if (thread_ran)
        /* State (c) and we do not have PD ownership (see CONCURRENCY
           NOTES above).  We can assert that STOPPED_START must have
           been true because thread creation didn't fail, but thread
           attribute setting did.  */
        {
          assert (stopped_start);
          /* Signal the created thread to release PD ownership and exit
             early so it can be joined.  */
          pd->setup_failed = 1;
          lll_unlock (pd->lock, LLL_PRIVATE);

          /* Similar to pthread_join, but since thread creation has failed
             at startup there is no need to handle all the steps.  */
          pid_t tid;
          while ((tid = atomic_load_acquire (&pd->tid)) != 0)
            __futex_abstimed_wait_cancelable64 ((unsigned int *) &pd->tid,
                                                tid, 0, NULL, LLL_SHARED);
        }

      /* State (c) or (d) and we have ownership of PD (see CONCURRENCY
         NOTES above).  */

      /* Oops, we lied for a second.  */
      atomic_fetch_add_relaxed (&__nptl_nthreads, -1);

      /* Free the resources.  */
      __nptl_deallocate_stack (pd);

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      /* We don't know if we have PD ownership.  Once we check the local
         stopped_start we'll know if we're in state (a) or (b) (see
         CONCURRENCY NOTES above).  */
      if (stopped_start)
        /* State (a), we own PD.  The thread blocked on this lock either
           because we're doing TD_CREATE event reporting, or for some
           other reason that create_thread chose.  Now let it run
           free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (destroy_default_attr)
    __pthread_attr_destroy (&default_attr.external);

  return retval;
}
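
/* The ATTR_C11_THREAD sentinel checked above is how thrd_create reaches
   this function; start_thread then casts the int-returning start routine
   back (see the c11 branch there).  An illustrative sketch (not part of
   this file), assuming a C11 <threads.h> implementation:  */
#if 0
#include <threads.h>

static int
run (void *arg)
{
  return 42;                    /* Becomes the result seen by thrd_join.  */
}

int
main (void)
{
  thrd_t t;
  int res = 0;
  if (thrd_create (&t, run, NULL) != thrd_success)
    return 1;
  thrd_join (t, &res);
  return res == 42 ? 0 : 1;
}
#endif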
versioned_symbol (libc, __pthread_create_2_1, pthread_create, GLIBC_2_34);
libc_hidden_ver (__pthread_create_2_1, __pthread_create)
#ifndef SHARED
strong_alias (__pthread_create_2_1, __pthread_create)
#endif
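
/* The iattr->extension->sigmask handling in __pthread_create_2_1 above
   backs the GNU extension pthread_attr_setsigmask_np (glibc 2.32 and
   later).  An illustrative sketch (not part of this file): the new
   thread starts with SIGUSR1 blocked instead of inheriting the
   creator's mask.  */
#if 0
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>

static void *
task (void *arg)
{
  return NULL;                  /* Runs with SIGUSR1 blocked.  */
}

int
main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);

  pthread_attr_t attr;
  pthread_attr_init (&attr);
  pthread_attr_setsigmask_np (&attr, &set);

  pthread_t t;
  pthread_create (&t, &attr, task, NULL);
  pthread_attr_destroy (&attr);
  pthread_join (t, NULL);
  return 0;
}
#endif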

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_1, GLIBC_2_34)
compat_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size, and accessing the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.extension = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (__pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (__pthread_once)
PTHREAD_STATIC_FN_REQUIRE (__pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (__pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (__pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (__pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (__pthread_getspecific)