/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif

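/* Illustrative sketch (added commentary, not part of the original file):
   how a caller such as pthread_create is expected to combine the macros
   above on a single-stack architecture.  The real caller lives elsewhere
   in NPTL; this only shows the intended expansion pattern.

     int hypothetical_caller (const struct pthread_attr *iattr)
     {
       struct pthread *pd;
       STACK_VARIABLES;                      // void *stackaddr = NULL;
       int err = ALLOCATE_STACK (iattr, &pd);
       if (err != 0)
         return err;
       // stackaddr now holds the usable stack top computed by
       // allocate_stack and is forwarded via STACK_VARIABLES_ARGS to
       // the 'create_thread' function.
       return 0;
     }
*/
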
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif

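/* Added commentary: TLS_TPADJ converts a 'struct pthread *' into the
   pointer expected by the _dl_allocate_tls* and _dl_deallocate_tls
   interfaces.  With TLS_TCB_AT_TP the thread descriptor itself serves as
   the TCB, so no adjustment is needed; with TLS_DTV_AT_TP the TCB sits
   TLS_PRE_TCB_SIZE bytes above the descriptor, which is what the addition
   above accounts for.  */
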
/* Cache handling for not-yet free stacks.  */

/* Maximum size of the stack cache, in bytes.  40 MiB by default.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024;
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)


/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)

static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

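/* Added commentary: the two wrappers above record the pending operation in
   IN_FLIGHT_STACK.  A plain element pointer means a delete is in flight;
   the same pointer with the low bit set (list nodes are more than byte
   aligned, so the bit is free) means an add is in flight.  __reclaim_stacks
   below inspects this value in the fork child to replay or complete the
   interrupted list operation.  */
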
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}

/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}

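/* Added commentary: __free_stacks walks the cache from its tail, so the
   least recently queued stacks are released first.  Calling it with a
   limit of zero, e.g.

     __free_stacks (0);   // drop every cached stack that is no longer used

   empties the cache entirely.  queue_stack below calls it with
   stack_cache_maxsize to trim the cache back under the configured
   maximum.  */
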
/* Add a stack frame which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}


static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}

/* Return the guard page position on the allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}

/* Given a stack allocated with PROT_NONE, set up the required portions
   with 'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN
  /* As defined at guard_position, for architectures with a downward
     growing stack the guard page is always at the start of the
     allocated area.  */
  if (mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}

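/* Added commentary (a sketch): resulting layout of an NPTL-allocated stack
   block on a _STACK_GROWS_DOWN architecture; other configurations shift the
   guard and descriptor as computed by guard_position above.

     mem                                                      mem + size
     | guard (PROT_NONE) | usable stack ... | static TLS | struct pthread |

   The thread descriptor is placed at the highest addresses and the static
   TLS area directly beneath it; guard_position puts the guard at the low
   end, and setup_stack_prot re-protects everything above the guard with
   the requested 'prot' flags.  */
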
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }

  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          /* If a guard page is required, avoid committing memory by first
             allocating with PROT_NONE and then reserving with the required
             permission excluding the guard page.  */
          mem = mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  munmap (mem, size);
                  return errno;
                }
            }

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Record the guard size of the newly allocated stack to avoid
             an extra mprotect in the guard resize code below.  */
          pd->guardsize = guardsize;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);


          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }


          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }

      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}

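/* Added commentary: on success allocate_stack returns 0, leaves the new
   descriptor in *PDP, writes the usable stack top (plus the stack size on
   NEED_SEPARATE_REGISTER_STACK targets) into the ALLOCATE_STACK_PARMS
   output arguments, and has already linked the descriptor into stack_used
   or __stack_user.  On failure it returns an errno value, e.g. EINVAL for
   a too-small stack or the errno from a failed mmap/mprotect, and releases
   anything it allocated itself.  */
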
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}


/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}

#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif

#ifdef SIGSETXID
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}


static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}


static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = __getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or else it
     has already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }
  else
    return 0;
}

/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
attribute_hidden
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;
      if (olderror != -1)
        /* Mismatch between current and previous results.  */
        abort ();
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}

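/* Added commentary: __nptl_setxid below drives the set*id broadcast.  Every
   live thread is first marked with SETXID_BITMASK, then signalled with
   SIGSETXID until no more signals can be delivered.  The per-thread signal
   handler (defined elsewhere in NPTL) is expected to perform the same
   syscall and report its result through __nptl_setxid_error above, which
   aborts on any disagreement between threads; finally the calling thread
   performs the syscall itself.  */
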
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send the SIGSETXID signal to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
#endif  /* SIGSETXID.  */


static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

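/* Added commentary: init_one_static_tls writes MAP's TLS initialization
   image into one thread's static TLS area.  The initialized part is copied
   with __mempcpy and the remainder of the block is zero-filled, matching
   the l_tls_initimage_size / l_tls_blocksize split recorded in the link
   map.  */
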
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}