]>
Commit | Line | Data |
---|---|---|
d614a753 | 1 | /* Copyright (C) 2002-2020 Free Software Foundation, Inc. |
76a50749 UD |
2 | This file is part of the GNU C Library. |
3 | Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. | |
4 | ||
5 | The GNU C Library is free software; you can redistribute it and/or | |
6 | modify it under the terms of the GNU Lesser General Public | |
7 | License as published by the Free Software Foundation; either | |
8 | version 2.1 of the License, or (at your option) any later version. | |
9 | ||
10 | The GNU C Library is distributed in the hope that it will be useful, | |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | Lesser General Public License for more details. | |
14 | ||
15 | You should have received a copy of the GNU Lesser General Public | |
59ba27a6 | 16 | License along with the GNU C Library; if not, see |
5a82c748 | 17 | <https://www.gnu.org/licenses/>. */ |
76a50749 UD |
18 | |
19 | #ifndef _DESCR_H | |
20 | #define _DESCR_H 1 | |
21 | ||
22 | #include <limits.h> | |
23 | #include <sched.h> | |
24 | #include <setjmp.h> | |
25 | #include <stdbool.h> | |
26 | #include <sys/types.h> | |
27 | #include <hp-timing.h> | |
500b3a49 | 28 | #include <list_t.h> |
76a50749 UD |
29 | #include <lowlevellock.h> |
30 | #include <pthreaddef.h> | |
afb2e954 | 31 | #include <dl-sysdep.h> |
a198d37a | 32 | #include <thread_db.h> |
d4f64e1a | 33 | #include <tls.h> |
3e2ee6f0 | 34 | #include <unwind.h> |
500b3a49 | 35 | #include <bits/types/res_state.h> |
6df7ffad | 36 | #include <kernel-features.h> |
76a50749 UD |
37 | |
38 | #ifndef TCB_ALIGNMENT | |
39 | # define TCB_ALIGNMENT sizeof (double) | |
40 | #endif | |
41 | ||
42 | ||
43 | /* We keep thread specific data in a special data structure, a two-level | |
44 | array. The top-level array contains pointers to dynamically allocated | |
45 | arrays of a certain number of data pointers. So we can implement a | |
46 | sparse array. Each dynamic second-level array has | |
47 | PTHREAD_KEY_2NDLEVEL_SIZE | |
48 | entries. This value shouldn't be too large. */ | |
49 | #define PTHREAD_KEY_2NDLEVEL_SIZE 32 | |
50 | ||
51 | /* We need to address PTHREAD_KEYS_MAX key with PTHREAD_KEY_2NDLEVEL_SIZE | |
52 | keys in each subarray. */ | |
53 | #define PTHREAD_KEY_1STLEVEL_SIZE \ | |
54 | ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \ | |
55 | / PTHREAD_KEY_2NDLEVEL_SIZE) | |
56 | ||
57 | ||
6efd4814 UD |
58 | |
59 | ||
60 | /* Internal version of the buffer to store cancellation handler | |
61 | information. */ | |
62 | struct pthread_unwind_buf | |
63 | { | |
68107ec0 UD |
64 | struct |
65 | { | |
66 | __jmp_buf jmp_buf; | |
67 | int mask_was_saved; | |
68 | } cancel_jmp_buf[1]; | |
69 | ||
6efd4814 UD |
70 | union |
71 | { | |
72 | /* This is the placeholder of the public version. */ | |
68107ec0 | 73 | void *pad[4]; |
6efd4814 UD |
74 | |
75 | struct | |
76 | { | |
6efd4814 | 77 | /* Pointer to the previous cleanup buffer. */ |
49b65043 | 78 | struct pthread_unwind_buf *prev; |
6efd4814 UD |
79 | |
80 | /* Backward compatibility: state of the old-style cleanup | |
81 | handler at the time of the previous new-style cleanup handler | |
82 | installment. */ | |
83 | struct _pthread_cleanup_buffer *cleanup; | |
84 | ||
85 | /* Cancellation type before the push call. */ | |
86 | int canceltype; | |
87 | } data; | |
88 | } priv; | |
6efd4814 UD |
89 | }; |
90 | ||
91 | ||
2edb61e3 UD |
92 | /* Opcodes and data types for communication with the signal handler to |
93 | change user/group IDs. */ | |
94 | struct xid_command | |
95 | { | |
96 | int syscall_no; | |
b007ce7c | 97 | long int id[3]; |
2edb61e3 | 98 | volatile int cntr; |
771eb141 | 99 | volatile int error; /* -1: no call yet, 0: success seen, >0: error seen. */ |
2edb61e3 UD |
100 | }; |
101 | ||
102 | ||
0f6699ea UD |
103 | /* Data structure used by the kernel to find robust futexes. */ |
104 | struct robust_list_head | |
105 | { | |
106 | void *list; | |
107 | long int futex_offset; | |
108 | void *list_op_pending; | |
109 | }; | |
110 | ||
111 | ||
f17efcb4 UD |
112 | /* Data strcture used to handle thread priority protection. */ |
113 | struct priority_protection_data | |
114 | { | |
115 | int priomax; | |
116 | unsigned int priomap[]; | |
117 | }; | |
118 | ||
119 | ||
76a50749 UD |
120 | /* Thread descriptor data structure. */ |
121 | struct pthread | |
122 | { | |
76a50749 UD |
123 | union |
124 | { | |
55c11fbd RM |
125 | #if !TLS_DTV_AT_TP |
126 | /* This overlaps the TCB as used for TLS without threads (see tls.h). */ | |
127 | tcbhead_t header; | |
bbde8527 | 128 | #else |
468777e1 UD |
129 | struct |
130 | { | |
439bf404 SP |
131 | /* multiple_threads is enabled either when the process has spawned at |
132 | least one thread or when a single-threaded process cancels itself. | |
133 | This enables additional code to introduce locking before doing some | |
134 | compare_and_exchange operations and also enable cancellation points. | |
135 | The concepts of multiple threads and cancellation points ideally | |
136 | should be separate, since it is not necessary for multiple threads to | |
137 | have been created for cancellation points to be enabled, as is the | |
138 | case is when single-threaded process cancels itself. | |
139 | ||
140 | Since enabling multiple_threads enables additional code in | |
141 | cancellation points and compare_and_exchange operations, there is a | |
142 | potential for an unneeded performance hit when it is enabled in a | |
143 | single-threaded, self-canceling process. This is OK though, since a | |
144 | single-threaded process will enable async cancellation only when it | |
145 | looks to cancel itself and is hence going to end anyway. */ | |
468777e1 | 146 | int multiple_threads; |
991fa82b | 147 | int gscope_flag; |
468777e1 | 148 | } header; |
d4f64e1a RM |
149 | #endif |
150 | ||
55c11fbd RM |
151 | /* This extra padding has no special purpose, and this structure layout |
152 | is private and subject to change without affecting the official ABI. | |
153 | We just have it here in case it might be convenient for some | |
154 | implementation-specific instrumentation hack or suchlike. */ | |
9e9b8cb8 | 155 | void *__padding[24]; |
55c11fbd RM |
156 | }; |
157 | ||
d4f64e1a RM |
158 | /* This descriptor's link on the `stack_used' or `__stack_user' list. */ |
159 | list_t list; | |
76a50749 | 160 | |
bd8bb78b UD |
161 | /* Thread ID - which is also a 'is this thread descriptor (and |
162 | therefore stack) used' flag. */ | |
163 | pid_t tid; | |
164 | ||
c579f48e AZ |
165 | /* Ununsed. */ |
166 | pid_t pid_ununsed; | |
cb5b9388 | 167 | |
1bcfb5a5 | 168 | /* List of robust mutexes the thread is holding. */ |
06be6368 | 169 | #if __PTHREAD_MUTEX_HAVE_PREV |
0f6699ea UD |
170 | void *robust_prev; |
171 | struct robust_list_head robust_head; | |
172 | ||
173 | /* The list above is strange. It is basically a double linked list | |
174 | but the pointer to the next/previous element of the list points | |
175 | in the middle of the object, the __next element. Whenever | |
176 | casting to __pthread_list_t we need to adjust the pointer | |
8f9450a0 TR |
177 | first. |
178 | These operations are effectively concurrent code in that the thread | |
179 | can get killed at any point in time and the kernel takes over. Thus, | |
180 | the __next elements are a kind of concurrent list and we need to | |
181 | enforce using compiler barriers that the individual operations happen | |
182 | in such a way that the kernel always sees a consistent list. The | |
183 | backward links (ie, the __prev elements) are not used by the kernel. | |
184 | FIXME We should use relaxed MO atomic operations here and signal fences | |
185 | because this kind of concurrency is similar to synchronizing with a | |
186 | signal handler. */ | |
0f6699ea | 187 | # define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next)) |
b007ce7c | 188 | |
df47504c | 189 | # define ENQUEUE_MUTEX_BOTH(mutex, val) \ |
1bcfb5a5 | 190 | do { \ |
df47504c UD |
191 | __pthread_list_t *next = (__pthread_list_t *) \ |
192 | ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \ | |
193 | - QUEUE_PTR_ADJUST); \ | |
0f6699ea | 194 | next->__prev = (void *) &mutex->__data.__list.__next; \ |
df47504c UD |
195 | mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \ |
196 | robust_head.list); \ | |
0f6699ea | 197 | mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \ |
8f9450a0 TR |
198 | /* Ensure that the new list entry is ready before we insert it. */ \ |
199 | __asm ("" ::: "memory"); \ | |
0f6699ea | 200 | THREAD_SETMEM (THREAD_SELF, robust_head.list, \ |
df47504c UD |
201 | (void *) (((uintptr_t) &mutex->__data.__list.__next) \ |
202 | | val)); \ | |
1bcfb5a5 UD |
203 | } while (0) |
204 | # define DEQUEUE_MUTEX(mutex) \ | |
205 | do { \ | |
0f6699ea | 206 | __pthread_list_t *next = (__pthread_list_t *) \ |
df47504c UD |
207 | ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \ |
208 | - QUEUE_PTR_ADJUST); \ | |
0f6699ea UD |
209 | next->__prev = mutex->__data.__list.__prev; \ |
210 | __pthread_list_t *prev = (__pthread_list_t *) \ | |
df47504c UD |
211 | ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \ |
212 | - QUEUE_PTR_ADJUST); \ | |
0f6699ea | 213 | prev->__next = mutex->__data.__list.__next; \ |
8f9450a0 TR |
214 | /* Ensure that we remove the entry from the list before we change the \ |
215 | __next pointer of the entry, which is read by the kernel. */ \ | |
216 | __asm ("" ::: "memory"); \ | |
b007ce7c UD |
217 | mutex->__data.__list.__prev = NULL; \ |
218 | mutex->__data.__list.__next = NULL; \ | |
1bcfb5a5 UD |
219 | } while (0) |
220 | #else | |
0f6699ea UD |
221 | union |
222 | { | |
223 | __pthread_slist_t robust_list; | |
224 | struct robust_list_head robust_head; | |
225 | }; | |
b007ce7c | 226 | |
df47504c | 227 | # define ENQUEUE_MUTEX_BOTH(mutex, val) \ |
1bcfb5a5 | 228 | do { \ |
b007ce7c UD |
229 | mutex->__data.__list.__next \ |
230 | = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \ | |
8f9450a0 TR |
231 | /* Ensure that the new list entry is ready before we insert it. */ \ |
232 | __asm ("" ::: "memory"); \ | |
df47504c | 233 | THREAD_SETMEM (THREAD_SELF, robust_list.__next, \ |
457b559e | 234 | (void *) (((uintptr_t) &mutex->__data.__list) | val)); \ |
1bcfb5a5 UD |
235 | } while (0) |
236 | # define DEQUEUE_MUTEX(mutex) \ | |
237 | do { \ | |
df47504c UD |
238 | __pthread_slist_t *runp = (__pthread_slist_t *) \ |
239 | (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \ | |
b007ce7c | 240 | if (runp == &mutex->__data.__list) \ |
683040c3 | 241 | THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \ |
1bcfb5a5 UD |
242 | else \ |
243 | { \ | |
df47504c UD |
244 | __pthread_slist_t *next = (__pthread_slist_t *) \ |
245 | (((uintptr_t) runp->__next) & ~1ul); \ | |
246 | while (next != &mutex->__data.__list) \ | |
247 | { \ | |
248 | runp = next; \ | |
249 | next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \ | |
250 | } \ | |
1bcfb5a5 | 251 | \ |
df47504c | 252 | runp->__next = next->__next; \ |
8f9450a0 TR |
253 | /* Ensure that we remove the entry from the list before we change the \ |
254 | __next pointer of the entry, which is read by the kernel. */ \ | |
255 | __asm ("" ::: "memory"); \ | |
b007ce7c | 256 | mutex->__data.__list.__next = NULL; \ |
1bcfb5a5 UD |
257 | } \ |
258 | } while (0) | |
259 | #endif | |
df47504c UD |
260 | #define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0) |
261 | #define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1) | |
1bcfb5a5 | 262 | |
86bfff4d UD |
263 | /* List of cleanup buffers. */ |
264 | struct _pthread_cleanup_buffer *cleanup; | |
265 | ||
6efd4814 | 266 | /* Unwind information. */ |
49b65043 | 267 | struct pthread_unwind_buf *cleanup_jmp_buf; |
6efd4814 UD |
268 | #define HAVE_CLEANUP_JMP_BUF |
269 | ||
c70ad7d7 UD |
270 | /* Flags determining processing of cancellation. */ |
271 | int cancelhandling; | |
272 | /* Bit set if cancellation is disabled. */ | |
273 | #define CANCELSTATE_BIT 0 | |
bd03a1af | 274 | #define CANCELSTATE_BITMASK (0x01 << CANCELSTATE_BIT) |
c70ad7d7 UD |
275 | /* Bit set if asynchronous cancellation mode is selected. */ |
276 | #define CANCELTYPE_BIT 1 | |
bd03a1af | 277 | #define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT) |
c70ad7d7 UD |
278 | /* Bit set if canceling has been initiated. */ |
279 | #define CANCELING_BIT 2 | |
bd03a1af | 280 | #define CANCELING_BITMASK (0x01 << CANCELING_BIT) |
c70ad7d7 UD |
281 | /* Bit set if canceled. */ |
282 | #define CANCELED_BIT 3 | |
bd03a1af | 283 | #define CANCELED_BITMASK (0x01 << CANCELED_BIT) |
c70ad7d7 UD |
284 | /* Bit set if thread is exiting. */ |
285 | #define EXITING_BIT 4 | |
bd03a1af | 286 | #define EXITING_BITMASK (0x01 << EXITING_BIT) |
c70ad7d7 UD |
287 | /* Bit set if thread terminated and TCB is freed. */ |
288 | #define TERMINATED_BIT 5 | |
bd03a1af | 289 | #define TERMINATED_BITMASK (0x01 << TERMINATED_BIT) |
dff9a7a1 UD |
290 | /* Bit set if thread is supposed to change XID. */ |
291 | #define SETXID_BIT 6 | |
bd03a1af | 292 | #define SETXID_BITMASK (0x01 << SETXID_BIT) |
c70ad7d7 | 293 | /* Mask for the rest. Helps the compiler to optimize. */ |
dff9a7a1 | 294 | #define CANCEL_RESTMASK 0xffffff80 |
c70ad7d7 UD |
295 | |
296 | #define CANCEL_ENABLED_AND_CANCELED(value) \ | |
297 | (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK \ | |
298 | | CANCEL_RESTMASK | TERMINATED_BITMASK)) == CANCELED_BITMASK) | |
299 | #define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \ | |
300 | (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK \ | |
301 | | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \ | |
302 | == (CANCELTYPE_BITMASK | CANCELED_BITMASK)) | |
5869d4ec | 303 | |
341c566f UD |
304 | /* Flags. Including those copied from the thread attribute. */ |
305 | int flags; | |
306 | ||
5869d4ec UD |
307 | /* We allocate one block of references here. This should be enough |
308 | to avoid allocating any memory dynamically for most applications. */ | |
76a50749 UD |
309 | struct pthread_key_data |
310 | { | |
311 | /* Sequence number. We use uintptr_t to not require padding on | |
312 | 32- and 64-bit machines. On 64-bit machines it helps to avoid | |
313 | wrapping, too. */ | |
314 | uintptr_t seq; | |
315 | ||
316 | /* Data pointer. */ | |
317 | void *data; | |
5869d4ec UD |
318 | } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE]; |
319 | ||
a334319f UD |
320 | /* Two-level array for the thread-specific data. */ |
321 | struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE]; | |
322 | ||
dff9a7a1 UD |
323 | /* Flag which is set when specific data is set. */ |
324 | bool specific_used; | |
325 | ||
c70ad7d7 UD |
326 | /* True if events must be reported. */ |
327 | bool report_events; | |
328 | ||
76a50749 UD |
329 | /* True if the user provided the stack. */ |
330 | bool user_stack; | |
331 | ||
5f66b766 UD |
332 | /* True if thread must stop at startup time. */ |
333 | bool stopped_start; | |
334 | ||
b051fc44 UD |
335 | /* The parent's cancel handling at the time of the pthread_create |
336 | call. This might be needed to undo the effects of a cancellation. */ | |
337 | int parent_cancelhandling; | |
338 | ||
5869d4ec | 339 | /* Lock to synchronize access to the descriptor. */ |
e51deae7 | 340 | int lock; |
76a50749 | 341 | |
dff9a7a1 | 342 | /* Lock for synchronizing setxid calls. */ |
45a8f0e6 | 343 | unsigned int setxid_futex; |
dff9a7a1 | 344 | |
1e372ded | 345 | #if HP_TIMING_INLINE |
38cc11da | 346 | hp_timing_t cpuclock_offset_ununsed; |
76a50749 UD |
347 | #endif |
348 | ||
349 | /* If the thread waits to join another one the ID of the latter is | |
350 | stored here. | |
351 | ||
352 | In case a thread is detached this field contains a pointer of the | |
353 | TCB if the thread itself. This is something which cannot happen | |
354 | in normal operation. */ | |
355 | struct pthread *joinid; | |
356 | /* Check whether a thread is detached. */ | |
357 | #define IS_DETACHED(pd) ((pd)->joinid == (pd)) | |
358 | ||
76a50749 UD |
359 | /* The result of the thread function. */ |
360 | void *result; | |
361 | ||
362 | /* Scheduling parameters for the new thread. */ | |
363 | struct sched_param schedparam; | |
364 | int schedpolicy; | |
365 | ||
366 | /* Start position of the code to be executed and the argument passed | |
367 | to the function. */ | |
368 | void *(*start_routine) (void *); | |
369 | void *arg; | |
370 | ||
371 | /* Debug state. */ | |
372 | td_eventbuf_t eventbuf; | |
373 | /* Next descriptor with a pending event. */ | |
374 | struct pthread *nextevent; | |
375 | ||
68107ec0 UD |
376 | /* Machine-specific unwind info. */ |
377 | struct _Unwind_Exception exc; | |
68107ec0 | 378 | |
630f4cc3 | 379 | /* If nonzero, pointer to the area allocated for the stack and guard. */ |
76a50749 | 380 | void *stackblock; |
630f4cc3 | 381 | /* Size of the stackblock area including the guard. */ |
76a50749 UD |
382 | size_t stackblock_size; |
383 | /* Size of the included guard area. */ | |
384 | size_t guardsize; | |
5adac0e4 UD |
385 | /* This is what the user specified and what we will report. */ |
386 | size_t reported_guardsize; | |
0e9d6240 | 387 | |
f17efcb4 UD |
388 | /* Thread Priority Protection data. */ |
389 | struct priority_protection_data *tpp; | |
390 | ||
0e9d6240 UD |
391 | /* Resolver state. */ |
392 | struct __res_state res; | |
253eb3a0 | 393 | |
ce7528f6 AZ |
394 | /* Indicates whether is a C11 thread created by thrd_creat. */ |
395 | bool c11; | |
396 | ||
c5132ca1 RM |
397 | /* This member must be last. */ |
398 | char end_padding[]; | |
253eb3a0 | 399 | |
c5132ca1 RM |
400 | #define PTHREAD_STRUCT_END_PADDING \ |
401 | (sizeof (struct pthread) - offsetof (struct pthread, end_padding)) | |
76a50749 UD |
402 | } __attribute ((aligned (TCB_ALIGNMENT))); |
403 | ||
404 | ||
405 | #endif /* descr.h */ |