/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _DESCR_H
#define _DESCR_H 1

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#include <list_t.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include "../nptl_db/thread_db.h"
#include <tls.h>
#include <unwind.h>
#include <bits/types/res_state.h>
#include <kernel-features.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT  sizeof (double)
#endif


/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  So we can implement a
   sparse array.  Each dynamic second-level array has
        PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE       32

/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)

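/* Illustrative sketch (not part of the original header): mapping a key
   to its slot in the two-level table is plain division and modulo, so
   with PTHREAD_KEY_2NDLEVEL_SIZE == 32, key 70 lands in first-level
   slot 2, second-level slot 6.  */
#if 0 /* Example only; never compiled.  */
static inline void
example_key_indices (unsigned int key, unsigned int *idx1st,
                     unsigned int *idx2nd)
{
  *idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;  /* Index into specific[].  */
  *idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;  /* Slot within the subarray.  */
}
#endif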


/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
         handler at the time of the previous new-style cleanup handler
         installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};

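/* Illustrative note (not part of the original header): these buffers
   form a per-thread stack linked through priv.data.prev, with the
   newest buffer reachable from the descriptor's cleanup_jmp_buf member
   declared below.  Conceptually a push is

     unwind_buf.priv.data.prev = THREAD_SELF->cleanup_jmp_buf;
     THREAD_SELF->cleanup_jmp_buf = &unwind_buf;

   where THREAD_SELF comes from <tls.h>.  */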

/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  long int id[3];
  volatile int cntr;
  volatile int error;  /* -1: no call yet, 0: success seen, >0: error seen.  */
};

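/* Illustrative sketch (an assumption, not part of this header): for a
   setuid (uid) call the command would be filled in roughly as

     cmd.syscall_no = __NR_setuid;
     cmd.id[0] = (long int) uid;
     cmd.error = -1;

   before being broadcast to all running threads via the internal setxid
   signal; cntr then tracks how many threads still have to execute the
   system call.  */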

/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};

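/* Illustrative note (an assumption, mirroring what nptl does at thread
   start): each thread registers its copy of this structure with the
   kernel, roughly

     INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                       sizeof (struct robust_list_head));

   so that on thread exit the kernel can walk the list and mark any held
   robust mutexes with FUTEX_OWNER_DIED.  */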

/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};

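/* Illustrative sketch (an assumption about how the priority-protection
   code uses this): priomap counts, per priority ceiling, how many
   PTHREAD_PRIO_PROTECT mutexes the thread holds, and priomax caches the
   highest ceiling in use.  */
#if 0 /* Example only; never compiled.  */
static inline void
example_tpp_acquire (struct priority_protection_data *tpp, int ceiling)
{
  ++tpp->priomap[ceiling];
  if (ceiling > tpp->priomax)
    tpp->priomax = ceiling;
}
#endif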


/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
         least one thread or when a single-threaded process cancels itself.
         This enables additional code to introduce locking before doing some
         compare_and_exchange operations and also enables cancellation
         points.  The concepts of multiple threads and cancellation points
         ideally should be separate, since it is not necessary for multiple
         threads to have been created for cancellation points to be enabled,
         as is the case when a single-threaded process cancels itself.

         Since enabling multiple_threads enables additional code in
         cancellation points and compare_and_exchange operations, there is a
         potential for an unneeded performance hit when it is enabled in a
         single-threaded, self-canceling process.  This is OK though, since a
         single-threaded process will enable async cancellation only when it
         looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
# ifndef __ASSUME_PRIVATE_FUTEX
      int private_futex;
# endif
    } header;
#endif

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the `stack_used' or `__stack_user' list.  */
  list_t list;

  /* Thread ID - which is also an 'is this thread descriptor (and
     therefore stack) used' flag.  */
  pid_t tid;

  /* Unused.  */
  pid_t pid_ununsed;

  /* List of robust mutexes the thread is holding.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly-linked list
     but the pointer to the next/previous element of the list points
     into the middle of the object, at the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.
     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (i.e., the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val)                                       \
  do {                                                                        \
    __pthread_list_t *next = (__pthread_list_t *)                            \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul)  \
       - QUEUE_PTR_ADJUST);                                                  \
    next->__prev = (void *) &mutex->__data.__list.__next;                    \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF,                \
                                                 robust_head.list);          \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head;        \
    /* Ensure that the new list entry is ready before we insert it.  */      \
    __asm ("" ::: "memory");                                                 \
    THREAD_SETMEM (THREAD_SELF, robust_head.list,                            \
                   (void *) (((uintptr_t) &mutex->__data.__list.__next)      \
                             | val));                                        \
  } while (0)
# define DEQUEUE_MUTEX(mutex)                                                 \
  do {                                                                        \
    __pthread_list_t *next = (__pthread_list_t *)                            \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul)           \
       - QUEUE_PTR_ADJUST);                                                  \
    next->__prev = mutex->__data.__list.__prev;                              \
    __pthread_list_t *prev = (__pthread_list_t *)                            \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul)           \
       - QUEUE_PTR_ADJUST);                                                  \
    prev->__next = mutex->__data.__list.__next;                              \
    /* Ensure that we remove the entry from the list before we change the    \
       __next pointer of the entry, which is read by the kernel.  */         \
    __asm ("" ::: "memory");                                                 \
    mutex->__data.__list.__prev = NULL;                                      \
    mutex->__data.__list.__next = NULL;                                      \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val)                                       \
  do {                                                                        \
    mutex->__data.__list.__next                                              \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next);                     \
    /* Ensure that the new list entry is ready before we insert it.  */      \
    __asm ("" ::: "memory");                                                 \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next,                          \
                   (void *) (((uintptr_t) &mutex->__data.__list) | val));    \
  } while (0)
# define DEQUEUE_MUTEX(mutex)                                                 \
  do {                                                                        \
    __pthread_slist_t *runp = (__pthread_slist_t *)                          \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul);\
    if (runp == &mutex->__data.__list)                                       \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next);         \
    else                                                                     \
      {                                                                      \
        __pthread_slist_t *next = (__pthread_slist_t *)                      \
          (((uintptr_t) runp->__next) & ~1ul);                               \
        while (next != &mutex->__data.__list)                                \
          {                                                                  \
            runp = next;                                                     \
            next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul);\
          }                                                                  \
                                                                             \
        runp->__next = next->__next;                                         \
        /* Ensure that we remove the entry from the list before we change    \
           the __next pointer of the entry, which is read by the kernel.  */ \
        __asm ("" ::: "memory");                                             \
        mutex->__data.__list.__next = NULL;                                  \
      }                                                                      \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)

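/* Illustrative sketch (an assumption, mirroring pthread_mutex_lock.c):
   taking a robust mutex records it in list_op_pending before it is
   enqueued, so a crash between the two steps remains recoverable.  */
#if 0 /* Example only; never compiled.  */
  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                 (void *) &mutex->__data.__list.__next);
  /* ... acquire the futex ...  */
  ENQUEUE_MUTEX (mutex);
  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
#endif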
  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT         0
#define CANCELSTATE_BITMASK     (0x01 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT          1
#define CANCELTYPE_BITMASK      (0x01 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT           2
#define CANCELING_BITMASK       (0x01 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT            3
#define CANCELED_BITMASK        (0x01 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT             4
#define EXITING_BITMASK         (0x01 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT          5
#define TERMINATED_BITMASK      (0x01 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT              6
#define SETXID_BITMASK          (0x01 << SETXID_BIT)
  /* Mask for the rest.  Helps the compiler to optimize.  */
#define CANCEL_RESTMASK         0xffffff80

#define CANCEL_ENABLED_AND_CANCELED(value) \
  (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK \
               | CANCEL_RESTMASK | TERMINATED_BITMASK)) == CANCELED_BITMASK)
#define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \
  (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK \
               | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \
   == (CANCELTYPE_BITMASK | CANCELED_BITMASK))

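/* Illustrative note (not part of the original header): with only
   CANCELED_BITMASK set (cancellation enabled, deferred type, canceled,
   not exiting), CANCEL_ENABLED_AND_CANCELED (0x08) yields 1, while
   adding CANCELSTATE_BITMASK (value 0x09) yields 0, because a set
   CANCELSTATE_BIT means cancellation is disabled.  */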
  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* The parent's cancel handling at the time of the pthread_create
     call.  This might be needed to undo the effects of a cancellation.  */
  int parent_cancelhandling;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

#if HP_TIMING_AVAIL
  /* Offset of the CPU clock at thread start time.  */
  hp_timing_t cpuclock_offset;
#endif

  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))

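/* Illustrative sketch (an assumption, mirroring pthread_detach):
   detaching marks the descriptor by pointing joinid at itself,

     atomic_compare_and_exchange_bool_acq (&pd->joinid, pd, NULL)

   which fails if some other thread is already waiting in pthread_join.  */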
  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If not NULL, pointer to the area allocated for the stack, and the
     stack's size.  */
  void *stackblock;
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* This member must be last.  */
  char end_padding[];

#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, end_padding))
} __attribute ((aligned (TCB_ALIGNMENT)));


#endif  /* descr.h */