/* Definition for thread-local data handling.  nptl/x86_64 version.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _TLS_H
#define _TLS_H 1

#ifndef __ASSEMBLER__
# include <asm/prctl.h>            /* For ARCH_SET_FS.  */
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
# include <stdlib.h>
# include <sysdep.h>
# include <libc-pointer-arith.h>   /* For cast_to_integer.  */
# include <kernel-features.h>
# include <dl-dtv.h>

/* Replacement type for __m128 since this file is included by ld.so,
   which is compiled with -mno-sse.  It must not change the alignment
   of rtld_savespace_sse.  */
typedef struct
{
  int i[4];
} __128bits;


typedef struct
{
  void *tcb;            /* Pointer to the TCB.  Not necessarily the
                           thread descriptor used by libpthread.  */
  dtv_t *dtv;
  void *self;           /* Pointer to the thread descriptor.  */
  int multiple_threads;
  int gscope_flag;
  uintptr_t sysinfo;
  uintptr_t stack_guard;
  uintptr_t pointer_guard;
  unsigned long int unused_vgetcpu_cache[2];
  /* Bit 0: X86_FEATURE_1_IBT.
     Bit 1: X86_FEATURE_1_SHSTK.  */
  unsigned int feature_1;
  int __glibc_unused1;
  /* Reservation of some values for the TM ABI.  */
  void *__private_tm[4];
  /* GCC split stack support.  */
  void *__private_ss;
  /* The lowest address of the shadow stack.  */
  unsigned long long int ssp_base;
  /* Must be kept even if it is no longer used by glibc since programs,
     like AddressSanitizer, depend on the size of tcbhead_t.  */
  __128bits __glibc_unused2[8][4] __attribute__ ((aligned (32)));

  void *__padding[8];
} tcbhead_t;

# ifdef __ILP32__
/* morestack.S in libgcc uses offset 0x40 to access __private_ss.  */
_Static_assert (offsetof (tcbhead_t, __private_ss) == 0x40,
                "offset of __private_ss != 0x40");
/* NB: ssp_base used to be "long int __glibc_reserved2", which was
   changed from 32 bits to 64 bits.  Make sure that the offset of the
   next field, __glibc_unused2, is unchanged.  */
_Static_assert (offsetof (tcbhead_t, __glibc_unused2) == 0x60,
                "offset of __glibc_unused2 != 0x60");
# else
/* morestack.S in libgcc uses offset 0x70 to access __private_ss.  */
_Static_assert (offsetof (tcbhead_t, __private_ss) == 0x70,
                "offset of __private_ss != 0x70");
_Static_assert (offsetof (tcbhead_t, __glibc_unused2) == 0x80,
                "offset of __glibc_unused2 != 0x80");
# endif

#else /* __ASSEMBLER__ */
# include <tcb-offsets.h>
#endif


/* Alignment requirement for the stack.  */
#define STACK_ALIGN 16


#ifndef __ASSEMBLER__
/* Get system call information.  */
# include <sysdep.h>

#define LOCK_PREFIX "lock;"

/* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
   because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
   struct pthread even when not linked with -lpthread.  */
# define TLS_INIT_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the initial TCB.  */
# define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)

/* This is the size of the TCB.  */
# define TLS_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the TCB.  */
# define TLS_TCB_ALIGN __alignof__ (struct pthread)

/* The TCB can have any size and the memory following the address the
   thread pointer points to is unspecified.  Allocate the TCB there.  */
# define TLS_TCB_AT_TP 1
# define TLS_DTV_AT_TP 0
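
/* Illustrative sketch (assumes the local-exec TLS model): with
   TLS_TCB_AT_TP the static TLS blocks sit below the thread pointer and
   the TCB is at the thread pointer itself, so an access to a __thread
   variable compiles to a single %fs-relative load at a negative offset:

     __thread int counter;

     int
     get_counter (void)
     {
       return counter;	// typically: movl %fs:counter@tpoff, %eax
     }
*/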

/* Get the thread descriptor definition.  */
# include <nptl/descr.h>


/* Install the dtv pointer.  The pointer passed is to the element with
   index -1 which contains the length.  */
# define INSTALL_DTV(descr, dtvp) \
  ((tcbhead_t *) (descr))->dtv = (dtvp) + 1
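
/* Illustrative sketch (hypothetical sizes and descriptor pointer pd;
   dtv_t comes from <dl-dtv.h>): the array handed to INSTALL_DTV carries
   its length in the element just before the one the TCB ends up
   pointing at:

     dtv_t dtv[18];		// hypothetical allocation
     dtv[0].counter = 16;	// length, seen as element -1 via the TCB
     INSTALL_DTV (pd, dtv);	// ((tcbhead_t *) pd)->dtv == &dtv[1]
*/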

/* Install new dtv for current thread.  */
# define INSTALL_NEW_DTV(dtvp) \
  ({ struct pthread *__pd; \
     THREAD_SETMEM (__pd, header.dtv, (dtvp)); })

/* Return dtv of given thread descriptor.  */
# define GET_DTV(descr) \
  (((tcbhead_t *) (descr))->dtv)


/* Code to initially set up the thread pointer.  This might need special
   attention since 'errno' is not yet available and, if the operation can
   cause a failure, 'errno' must not be touched.

   We have to make the syscall for both uses of the macro since the
   address might be (and probably is) different.  */
# define TLS_INIT_TP(thrdescr) \
  ({ void *_thrdescr = (thrdescr); \
     tcbhead_t *_head = _thrdescr; \
     int _result; \
\
     _head->tcb = _thrdescr; \
     /* For now the thread descriptor is at the same address.  */ \
     _head->self = _thrdescr; \
\
     /* It is a simple syscall to set the %fs value for the thread.  */ \
     asm volatile ("syscall" \
                   : "=a" (_result) \
                   : "0" ((unsigned long int) __NR_arch_prctl), \
                     "D" ((unsigned long int) ARCH_SET_FS), \
                     "S" (_thrdescr) \
                   : "memory", "cc", "r11", "cx"); \
\
     _result ? "cannot set %fs base address for thread-local storage" : 0; \
  })
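
/* Illustrative sketch: outside this early-startup path the same effect
   could be had via the generic syscall(2) wrapper; the raw inline syscall
   above is used precisely because 'errno' must not be touched yet:

     #include <asm/prctl.h>
     #include <sys/syscall.h>
     #include <unistd.h>

     void *tcb = ...;				// hypothetical TCB pointer
     if (syscall (SYS_arch_prctl, ARCH_SET_FS, tcb) != 0)
       abort ();				// could not switch %fs base
*/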

# define TLS_DEFINE_INIT_TP(tp, pd) void *tp = (pd)


/* Return the address of the dtv for the current thread.  */
# define THREAD_DTV() \
  ({ struct pthread *__pd; \
     THREAD_GETMEM (__pd, header.dtv); })


/* Return the thread descriptor for the current thread.

   The contained asm must *not* be marked volatile since otherwise
   assignments like
	pthread_descr self = thread_self();
   do not get optimized away.  */
# if __GNUC_PREREQ (6, 0)
#  define THREAD_SELF \
  (*(struct pthread *__seg_fs *) offsetof (struct pthread, header.self))
# else
#  define THREAD_SELF \
  ({ struct pthread *__self; \
     asm ("mov %%fs:%c1,%0" : "=r" (__self) \
          : "i" (offsetof (struct pthread, header.self))); \
     __self; })
# endif
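
/* Illustrative sketch (assumes GCC >= 6 and its x86 named address space
   support): dereferencing a __seg_fs-qualified pointer whose value is just
   an offset turns into an %fs-relative load, which is why the first
   THREAD_SELF form above needs no asm and can be CSE'd by the compiler:

     static inline void *
     read_fs_slot (unsigned long int offset)	// hypothetical helper
     {
       return *(void * __seg_fs *) offset;	// an %fs-relative movq
     }
*/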

/* Magic for libthread_db to know how to do THREAD_SELF.  */
# define DB_THREAD_SELF_INCLUDE <sys/reg.h> /* For the FS constant.  */
# define DB_THREAD_SELF CONST_THREAD_AREA (64, FS)

/* Read member of the thread descriptor directly.  */
# define THREAD_GETMEM(descr, member) \
  ({ __typeof (descr->member) __value; \
     _Static_assert (sizeof (__value) == 1 \
                     || sizeof (__value) == 4 \
                     || sizeof (__value) == 8, \
                     "size of per-thread data"); \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%fs:%P2,%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (struct pthread, member))); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%fs:%P1,%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (struct pthread, member))); \
     else /* 8 */ \
       { \
         asm volatile ("movq %%fs:%P1,%q0" \
                       : "=r" (__value) \
                       : "i" (offsetof (struct pthread, member))); \
       } \
     __value; })
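
/* Illustrative usage sketch: every supported size is read with a single
   %fs-relative move, e.g. fetching the multiple_threads flag of the
   current descriptor:

     int mt = THREAD_GETMEM (THREAD_SELF, header.multiple_threads);
     // roughly: movl %fs:<offset of header.multiple_threads>, mt
*/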


/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
# define THREAD_GETMEM_NC(descr, member, idx) \
  ({ __typeof (descr->member[0]) __value; \
     _Static_assert (sizeof (__value) == 1 \
                     || sizeof (__value) == 4 \
                     || sizeof (__value) == 8, \
                     "size of per-thread data"); \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%fs:%P2(%q3),%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (struct pthread, member[0])), \
                       "r" (idx)); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%fs:%P1(,%q2,4),%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (struct pthread, member[0])), "r" (idx)); \
     else /* 8 */ \
       { \
         asm volatile ("movq %%fs:%P1(,%q2,8),%q0" \
                       : "=r" (__value) \
                       : "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
       } \
     __value; })


/* Loading addresses of objects on x86-64 needs special treatment when
   generating PIC code.  */
#ifdef __pic__
# define IMM_MODE "nr"
#else
# define IMM_MODE "ir"
#endif


/* Set member of the thread descriptor directly.  */
# define THREAD_SETMEM(descr, member, value) \
  ({ \
     _Static_assert (sizeof (descr->member) == 1 \
                     || sizeof (descr->member) == 4 \
                     || sizeof (descr->member) == 8, \
                     "size of per-thread data"); \
     if (sizeof (descr->member) == 1) \
       asm volatile ("movb %b0,%%fs:%P1" : \
                     : "iq" (value), \
                       "i" (offsetof (struct pthread, member))); \
     else if (sizeof (descr->member) == 4) \
       asm volatile ("movl %0,%%fs:%P1" : \
                     : IMM_MODE (value), \
                       "i" (offsetof (struct pthread, member))); \
     else /* 8 */ \
       { \
         asm volatile ("movq %q0,%%fs:%P1" : \
                       : IMM_MODE ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (struct pthread, member))); \
       }})
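
/* Illustrative usage sketch: stores likewise go straight to %fs-relative
   memory, e.g. marking the current descriptor as belonging to a
   multi-threaded process:

     THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
     // roughly: movl $1, %fs:<offset of header.multiple_threads>
*/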


/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ \
     _Static_assert (sizeof (descr->member[0]) == 1 \
                     || sizeof (descr->member[0]) == 4 \
                     || sizeof (descr->member[0]) == 8, \
                     "size of per-thread data"); \
     if (sizeof (descr->member[0]) == 1) \
       asm volatile ("movb %b0,%%fs:%P1(%q2)" : \
                     : "iq" (value), \
                       "i" (offsetof (struct pthread, member[0])), \
                       "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
       asm volatile ("movl %0,%%fs:%P1(,%q2,4)" : \
                     : IMM_MODE (value), \
                       "i" (offsetof (struct pthread, member[0])), \
                       "r" (idx)); \
     else /* 8 */ \
       { \
         asm volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
                       : IMM_MODE ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
       }})


/* Set the stack guard field in the TCB head.  */
# define THREAD_SET_STACK_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
# define THREAD_COPY_STACK_GUARD(descr) \
  ((descr)->header.stack_guard \
   = THREAD_GETMEM (THREAD_SELF, header.stack_guard))
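
/* Illustrative note (LP64 layout as declared above): header.stack_guard is
   the canary read by -fstack-protector, and its offset works out to 0x28
   (0x8 tcb + 0x8 dtv + 0x8 self + 0x4 multiple_threads + 0x4 gscope_flag
   + 0x8 sysinfo), so a protected prologue typically contains:

     movq %fs:0x28, %rax		// load the stack guard value
*/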


/* Set the pointer guard field in the TCB head.  */
# define THREAD_SET_POINTER_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
# define THREAD_COPY_POINTER_GUARD(descr) \
  ((descr)->header.pointer_guard \
   = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))


/* Get and set the global scope generation counter in the TCB head.  */
# define THREAD_GSCOPE_IN_TCB 1
# define THREAD_GSCOPE_FLAG_UNUSED 0
# define THREAD_GSCOPE_FLAG_USED   1
# define THREAD_GSCOPE_FLAG_WAIT   2
# define THREAD_GSCOPE_RESET_FLAG() \
  do \
    { int __res; \
      asm volatile ("xchgl %0, %%fs:%P1" \
                    : "=r" (__res) \
                    : "i" (offsetof (struct pthread, header.gscope_flag)), \
                      "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
      if (__res == THREAD_GSCOPE_FLAG_WAIT) \
        lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
    } \
  while (0)
# define THREAD_GSCOPE_SET_FLAG() \
  THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
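
/* Illustrative plain-C sketch of THREAD_GSCOPE_RESET_FLAG above (the xchgl
   acts as an atomic exchange; atomic_exchange_acq is used here only as a
   reading aid):

     int old = atomic_exchange_acq (&THREAD_SELF->header.gscope_flag,
                                    THREAD_GSCOPE_FLAG_UNUSED);
     if (old == THREAD_GSCOPE_FLAG_WAIT)
       lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);
*/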

#endif /* __ASSEMBLER__ */

#endif /* tls.h */