/* Copyright (C) 2002-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _DESCR_H
#define _DESCR_H 1

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#include <list_t.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include <thread_db.h>
#include <tls.h>
#include <unwind.h>
#include <bits/types/res_state.h>
#include <kernel-features.h>
#include <tls-internal-struct.h>
#include <internal-sigset.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT 32
#elif TCB_ALIGNMENT < 32
# error TCB_ALIGNMENT must be at least 32
#endif


/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  So we can implement a
   sparse array.  Each dynamic second-level array has
	PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE 32

/* We need to address PTHREAD_KEYS_MAX keys, with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
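
/* As an illustration of the two-level indexing (the helpers below are
   hypothetical sketches; the real lookup code lives in
   pthread_getspecific and friends): a key number splits into a
   first-level slot and an offset within the second-level block, with
   the first PTHREAD_KEY_2NDLEVEL_SIZE keys served from the
   preallocated specific_1stblock member below.  */
static inline unsigned int
__pthread_key_1stlevel_idx_sketch (unsigned int key)
{
  return key / PTHREAD_KEY_2NDLEVEL_SIZE;
}

static inline unsigned int
__pthread_key_2ndlevel_idx_sketch (unsigned int key)
{
  return key % PTHREAD_KEY_2NDLEVEL_SIZE;
}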


/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
         handler at the time of the previous new-style cleanup handler
         installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};
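
/* A hedged consistency check (illustrative, assuming the layout
   contract with the public __pthread_unwind_buf_t): the private data
   is expected to fit within the public placeholder pad, since the
   public type reserves exactly that much space.  */
_Static_assert (sizeof (((struct pthread_unwind_buf *) 0)->priv.data)
                <= sizeof (((struct pthread_unwind_buf *) 0)->priv.pad),
                "private unwind data must fit in the public padding");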


/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  /* Enforce zero-extension for the pointer argument in

       int setgroups (size_t size, const gid_t *list);

     The kernel XID arguments are unsigned and do not require sign
     extension.  */
  unsigned long int id[3];
  volatile int cntr;
  volatile int error; /* -1: no call yet, 0: success seen, >0: error seen.  */
};
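
/* A minimal sketch of filling the command block (hypothetical helper,
   not the code the setxid machinery actually uses): all three argument
   slots are unsigned long int, so pointer and size arguments are
   zero-extended as the comment above requires.  */
static inline void
__xid_command_fill_sketch (struct xid_command *cmd, int syscall_no,
                           unsigned long int arg1, unsigned long int arg2,
                           unsigned long int arg3)
{
  cmd->syscall_no = syscall_no;
  cmd->id[0] = arg1;
  cmd->id[1] = arg2;
  cmd->id[2] = arg3;
  cmd->cntr = 0;
  cmd->error = -1; /* No call has been made yet.  */
}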


/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};
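
/* Sketch of the registration handshake (assumed; the actual call is
   made during thread setup elsewhere in nptl): each thread hands the
   address of its robust_head to the kernel via the set_robust_list
   system call, e.g.

     INTERNAL_SYSCALL_CALL (set_robust_list, &self->robust_head,
                            sizeof (struct robust_list_head));

   so that on thread death the kernel can walk the list and mark held
   robust mutexes with FUTEX_OWNER_DIED.  */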


/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};


/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
         least one thread or when a single-threaded process cancels itself.
         This enables additional code to introduce locking before doing some
         compare_and_exchange operations and also enables cancellation points.
         The concepts of multiple threads and cancellation points ideally
         should be separate, since it is not necessary for multiple threads
         to have been created for cancellation points to be enabled, as is
         the case when a single-threaded process cancels itself.

         Since enabling multiple_threads enables additional code in
         cancellation points and compare_and_exchange operations, there is a
         potential for an unneeded performance hit when it is enabled in a
         single-threaded, self-canceling process.  This is OK though, since a
         single-threaded process will enable async cancellation only when it
         looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
    } header;
#endif

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the GL (dl_stack_used) or
     GL (dl_stack_user) list.  */
  list_t list;

  /* Thread ID - which is also a 'is this thread descriptor (and
     therefore stack) used' flag.  */
  pid_t tid;

  /* List of robust mutexes the thread is holding.  */
#if __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.
     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (i.e., the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = (void *) &mutex->__data.__list.__next; \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
                                                 robust_head.list); \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_head.list, \
                   (void *) (((uintptr_t) &mutex->__data.__list.__next) \
                             | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = mutex->__data.__list.__prev; \
    __pthread_list_t *prev = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    prev->__next = mutex->__data.__list.__next; \
    /* Ensure that we remove the entry from the list before we change the \
       __next pointer of the entry, which is read by the kernel.  */ \
    __asm ("" ::: "memory"); \
    mutex->__data.__list.__prev = NULL; \
    mutex->__data.__list.__next = NULL; \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    mutex->__data.__list.__next \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
                   (void *) (((uintptr_t) &mutex->__data.__list) | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_slist_t *runp = (__pthread_slist_t *) \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list) \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
    else \
      { \
        __pthread_slist_t *next = (__pthread_slist_t *) \
          (((uintptr_t) runp->__next) & ~1ul); \
        while (next != &mutex->__data.__list) \
          { \
            runp = next; \
            next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
          } \
\
        runp->__next = next->__next; \
        /* Ensure that we remove the entry from the list before we change \
           the __next pointer of the entry, which is read by the kernel.  */ \
        __asm ("" ::: "memory"); \
        mutex->__data.__list.__next = NULL; \
      } \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)

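  /* Usage sketch (assumed; the real callers are the robust paths of
     pthread_mutex_lock and pthread_mutex_unlock): a robust mutex is
     enqueued right after it is acquired and dequeued right before it
     is released, so the kernel always sees an accurate list of the
     mutexes the thread would abandon if it died:

       ENQUEUE_MUTEX (mutex);     after acquiring a robust mutex
       ...
       DEQUEUE_MUTEX (mutex);     before releasing it

     ENQUEUE_MUTEX_PI is the variant for priority-inheritance mutexes;
     as the definitions above show, the low bit of the list pointer
     tags PI entries.  */
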
  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT		0
#define CANCELSTATE_BITMASK	(1 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT		1
#define CANCELTYPE_BITMASK	(1 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT		2
#define CANCELING_BITMASK	(1 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT		3
#define CANCELED_BITMASK	(1 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT		4
#define EXITING_BITMASK		(1 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT		5
#define TERMINATED_BITMASK	(1 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT		6
#define SETXID_BITMASK		(1 << SETXID_BIT)

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* Indicates that the thread creation setup has failed (for instance
     setting the scheduler or affinity failed).  */
  int setup_failed;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached, this field contains a pointer to
     the TCB of the thread itself.  This is something which cannot
     happen in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))

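  /* A sketch of the three joinid states (as assumed from the uses in
     pthread_join and pthread_detach):

       pd->joinid == NULL     joinable, nobody waiting to join
       pd->joinid == pd       detached (see IS_DETACHED above)
       pd->joinid == other    OTHER is blocked in pthread_join on PD  */
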
  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If nonzero, pointer to the area allocated for the stack and guard.  */
  void *stackblock;
  /* Size of the stackblock area including the guard.  */
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* Signal mask for the new thread.  Used during thread startup to
     restore the signal mask.  (Threads are launched with all signals
     masked.)  */
  internal_sigset_t sigmask;

  /* Used by the exception handling implementation in the dynamic loader.  */
  struct rtld_catch *rtld_catch;

  /* Indicates whether this is a C11 thread created by thrd_create.  */
  bool c11;

  /* Used in __pthread_kill_internal to detect a thread that has
     exited or is about to exit.  exit_lock must only be acquired
     after blocking signals.  */
  bool exiting;
  int exit_lock; /* A low-level lock (for use with __libc_lock_init etc.).  */

  /* Used by strsignal.  */
  struct tls_internal_t tls_state;

  /* rseq area registered with the kernel.  Use a custom definition
     here to isolate from kernel struct rseq changes.  The
     implementation of sched_getcpu needs access to the cpu_id field;
     the other fields are unused and not included here.  */
  union
  {
    struct
    {
      uint32_t cpu_id_start;
      uint32_t cpu_id;
    };
    char pad[32]; /* Original rseq area size.  */
  } rseq_area __attribute__ ((aligned (32)));

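  /* A minimal sketch of the fast path this layout serves (assumed; the
     real code lives in the Linux sched_getcpu implementation):

       int cpu = THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id);

     with a fallback to the getcpu system call while cpu_id is still
     negative, i.e. before rseq registration has happened.  */
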
  /* Amount of end padding, if any, in this structure.
     This definition relies on rseq_area being last.  */
#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, rseq_area) \
   + sizeof ((struct pthread) {}.rseq_area))
} __attribute ((aligned (TCB_ALIGNMENT)));

static inline bool
cancel_enabled_and_canceled (int value)
{
  return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
		   | TERMINATED_BITMASK))
    == CANCELED_BITMASK;
}

static inline bool
cancel_enabled_and_canceled_and_async (int value)
{
  return ((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK
		     | CANCELED_BITMASK | EXITING_BITMASK
		     | TERMINATED_BITMASK))
    == (CANCELTYPE_BITMASK | CANCELED_BITMASK);
}

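/* Usage sketch (assumed; the real cancellation points live in the
   cancellation wrappers): a cancellation point reads cancelhandling
   and acts on the combined state, e.g.

     int value = atomic_load_relaxed (&self->cancelhandling);
     if (cancel_enabled_and_canceled (value))
       __do_cancel ();  */
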
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif

#endif	/* descr.h */