1 /* Definition for thread-local data handling. nptl/x86_64 version.
2 Copyright (C) 2002-2009, 2011-2012 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
23 # include <asm/prctl.h> /* For ARCH_SET_FS. */
29 # include <kernel-features.h>
31 /* Replacement type for __m128 since this file is included by ld.so,
32 which is compiled with -mno-sse. It must not change the alignment
33 of rtld_savespace_sse. */
40 /* Type for the dtv. */
54 void *tcb
; /* Pointer to the TCB. Not necessarily the
55 thread descriptor used by libpthread. */
57 void *self
; /* Pointer to the thread descriptor. */
/* Stack-protector canary; installed via THREAD_SET_STACK_GUARD below. */
61 uintptr_t stack_guard
;
/* Pointer-mangling value; installed via THREAD_SET_POINTER_GUARD below. */
62 uintptr_t pointer_guard
;
/* NOTE(review): presumably a cache for the vgetcpu vsyscall -- not
established by the visible code, confirm against the full file. */
63 unsigned long int vgetcpu_cache
[2];
64 # ifndef __ASSUME_PRIVATE_FUTEX
/* NOTE(review): the member guarded by the #ifndef above and its matching
#endif are not visible in this chunk -- confirm against the full file. */
/* Nonzero when ld.so must preserve the SSE registers around calls into
libc; read and written by the RTLD_*_FOREIGN_CALL macros below. */
69 int rtld_must_xmm_save
;
70 /* Reservation of some values for the TM ABI. */
71 void *__private_tm
[5];
73 /* Have space for the post-AVX register size. */
74 __128bits rtld_savespace_sse
[8][4] __attribute__ ((aligned (32)));
79 #else /* __ASSEMBLER__ */
80 # include <tcb-offsets.h>
84 /* Alignment requirement for the stack. */
85 #define STACK_ALIGN 16
89 /* Get system call information. */
93 /* Get the thread descriptor definition. */
94 # include <nptl/descr.h>
/* NOTE(review): the #if/#else that selects between the two LOCK_PREFIX
definitions below (empty prefix when atomicity is not needed, "lock;"
otherwise) is not visible in this chunk -- confirm against the full
file before editing. */
98 # define LOCK_PREFIX /* nothing */
100 # define LOCK_PREFIX "lock;"
104 /* This is the size of the initial TCB. Can't be just sizeof (tcbhead_t),
105 because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
106 struct pthread even when not linked with -lpthread. */
107 # define TLS_INIT_TCB_SIZE sizeof (struct pthread)
109 /* Alignment requirements for the initial TCB. */
110 # define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)
112 /* This is the size of the TCB. */
113 # define TLS_TCB_SIZE sizeof (struct pthread)
115 /* Alignment requirements for the TCB. */
116 # define TLS_TCB_ALIGN __alignof__ (struct pthread)
118 /* The TCB can have any size and the memory following the address the
119 thread pointer points to is unspecified. Allocate the TCB there. */
120 # define TLS_TCB_AT_TP 1
123 /* Install the dtv pointer. The pointer passed is to the element with
124 index -1 which contains the length. */
125 # define INSTALL_DTV(descr, dtvp) \
126 ((tcbhead_t *) (descr))->dtv = (dtvp) + 1
128 /* Install new dtv for current thread. */
/* __pd is intentionally left uninitialized: THREAD_SETMEM uses its
operand only for sizeof/offsetof and never reads its value (see the
THREAD_SETMEM definition below). */
129 # define INSTALL_NEW_DTV(dtvp) \
130 ({ struct pthread *__pd; \
131 THREAD_SETMEM (__pd, header.dtv, (dtvp)); })
133 /* Return dtv of given thread descriptor. */
134 # define GET_DTV(descr) \
135 (((tcbhead_t *) (descr))->dtv)
138 /* Code to initially initialize the thread pointer. This might need
139 special attention since 'errno' is not yet available and if the
140 operation can cause a failure 'errno' must not be touched.
142 We have to make the syscall for both uses of the macro since the
143 address might be (and probably is) different. */
/* NOTE(review): several continuation lines of this macro are not visible
in this chunk (the declaration of _result, the asm output-operand line
it is written through, the "S" input operand for _thrdescr, and the
closing of the statement expression). Do not edit the body without
consulting the full file. */
144 # define TLS_INIT_TP(thrdescr, secondcall) \
145 ({ void *_thrdescr = (thrdescr); \
146 tcbhead_t *_head = _thrdescr; \
149 _head->tcb = _thrdescr; \
150 /* For now the thread descriptor is at the same address. */ \
151 _head->self = _thrdescr; \
153 /* It is a simple syscall to set the %fs value for the thread. */ \
154 asm volatile ("syscall" \
156 : "0" ((unsigned long int) __NR_arch_prctl), \
157 "D" ((unsigned long int) ARCH_SET_FS), \
159 : "memory", "cc", "r11", "cx"); \
161 _result ? "cannot set %fs base address for thread-local storage" : 0; \
165 /* Return the address of the dtv for the current thread. */
166 # define THREAD_DTV() \
167 ({ struct pthread *__pd; \
168 THREAD_GETMEM (__pd, header.dtv); })
171 /* Return the thread descriptor for the current thread.
173 The contained asm must *not* be marked volatile since otherwise
assignments like
175 pthread_descr self = thread_self();
176 do not get optimized away. */
/* NOTE(review): the final line of this statement expression (yielding
__self and closing the braces) is not visible in this chunk. */
177 # define THREAD_SELF \
178 ({ struct pthread *__self; \
179 asm ("mov %%fs:%c1,%0" : "=r" (__self) \
180 : "i" (offsetof (struct pthread, header.self))); \
183 /* Magic for libthread_db to know how to do THREAD_SELF. */
184 # define DB_THREAD_SELF_INCLUDE <sys/reg.h> /* For the FS constant. */
185 # define DB_THREAD_SELF CONST_THREAD_AREA (64, FS)
187 /* Read member of the thread descriptor directly. */
/* NOTE(review): in both macros below the asm output-operand lines, the
else-branch surrounding the size check, and the closing of the
statement expressions are not visible in this chunk; the size-check
comments are also truncated mid-sentence. Do not edit the bodies
without consulting the full file. */
188 # define THREAD_GETMEM(descr, member) \
189 ({ __typeof (descr->member) __value; \
190 if (sizeof (__value) == 1) \
191 asm volatile ("movb %%fs:%P2,%b0" \
193 : "0" (0), "i" (offsetof (struct pthread, member))); \
194 else if (sizeof (__value) == 4) \
195 asm volatile ("movl %%fs:%P1,%0" \
197 : "i" (offsetof (struct pthread, member))); \
200 if (sizeof (__value) != 8) \
201 /* There should not be any value with a size other than 1, \
205 asm volatile ("movq %%fs:%P1,%q0" \
207 : "i" (offsetof (struct pthread, member))); \
212 /* Same as THREAD_GETMEM, but the member offset can be non-constant. */
213 # define THREAD_GETMEM_NC(descr, member, idx) \
214 ({ __typeof (descr->member[0]) __value; \
215 if (sizeof (__value) == 1) \
216 asm volatile ("movb %%fs:%P2(%q3),%b0" \
218 : "0" (0), "i" (offsetof (struct pthread, member[0])), \
220 else if (sizeof (__value) == 4) \
221 asm volatile ("movl %%fs:%P1(,%q2,4),%0" \
223 : "i" (offsetof (struct pthread, member[0])), "r" (idx));\
226 if (sizeof (__value) != 8) \
227 /* There should not be any value with a size other than 1, \
231 asm volatile ("movq %%fs:%P1(,%q2,8),%q0" \
233 : "i" (offsetof (struct pthread, member[0])), \
239 /* Loading addresses of objects on x86-64 needs to be treated special
240 when generating PIC code. */
/* NOTE(review): the #ifdef __pic__ / #else / #endif selecting between
the two IMM_MODE definitions below is not visible in this chunk. */
242 # define IMM_MODE "nr"
244 # define IMM_MODE "ir"
248 /* Set member of the thread descriptor directly. */
/* NOTE(review): the header comment above was wrong in the original
("Same as THREAD_SETMEM, but the member offset can be non-constant")
-- that text belongs to THREAD_SETMEM_NC; this macro requires a
compile-time-constant member offset ("i" constraint on offsetof).
Several continuation lines (input-operand lines and the closing of the
statement expression) are not visible in this chunk. */
249 # define THREAD_SETMEM(descr, member, value) \
250 ({ if (sizeof (descr->member) == 1) \
251 asm volatile ("movb %b0,%%fs:%P1" : \
253 "i" (offsetof (struct pthread, member))); \
254 else if (sizeof (descr->member) == 4) \
255 asm volatile ("movl %0,%%fs:%P1" : \
256 : IMM_MODE (value), \
257 "i" (offsetof (struct pthread, member))); \
260 if (sizeof (descr->member) != 8) \
261 /* There should not be any value with a size other than 1, \
265 asm volatile ("movq %q0,%%fs:%P1" : \
266 : IMM_MODE ((uint64_t) (value)), \
267 "i" (offsetof (struct pthread, member))); \
271 /* Same as THREAD_SETMEM, but the member offset can be non-constant. */
/* NOTE(review): the header comment above was wrong in the original
("Set member of the thread descriptor directly") -- that text belongs
to THREAD_SETMEM; this is the variant indexed by the run-time idx
operand. Several continuation lines are not visible in this chunk. */
272 # define THREAD_SETMEM_NC(descr, member, idx, value) \
273 ({ if (sizeof (descr->member[0]) == 1) \
274 asm volatile ("movb %b0,%%fs:%P1(%q2)" : \
276 "i" (offsetof (struct pthread, member[0])), \
278 else if (sizeof (descr->member[0]) == 4) \
279 asm volatile ("movl %0,%%fs:%P1(,%q2,4)" : \
280 : IMM_MODE (value), \
281 "i" (offsetof (struct pthread, member[0])), \
285 if (sizeof (descr->member[0]) != 8) \
286 /* There should not be any value with a size other than 1, \
290 asm volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
291 : IMM_MODE ((uint64_t) (value)), \
292 "i" (offsetof (struct pthread, member[0])), \
297 /* Atomic compare and exchange on TLS, returning old value. */
/* NOTE(review): in the macros below, output-operand lines, else
branches, and the closings of the statement expressions are not
visible in this chunk -- do not edit the bodies without consulting
the full file. */
298 # define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
299 ({ __typeof (descr->member) __ret; \
300 __typeof (oldval) __old = (oldval); \
301 if (sizeof (descr->member) == 4) \
302 asm volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
304 : "0" (__old), "r" (newval), \
305 "i" (offsetof (struct pthread, member))); \
307 /* Not necessary for other sizes in the moment. */ \
312 /* Atomic logical and. */
313 # define THREAD_ATOMIC_AND(descr, member, val) \
314 (void) ({ if (sizeof ((descr)->member) == 4) \
315 asm volatile (LOCK_PREFIX "andl %1, %%fs:%P0" \
316 :: "i" (offsetof (struct pthread, member)), \
319 /* Not necessary for other sizes in the moment. */ \
323 /* Atomic set bit. */
324 # define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
325 (void) ({ if (sizeof ((descr)->member) == 4) \
326 asm volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
327 :: "i" (offsetof (struct pthread, member)), \
328 "ir" (1 << (bit))); \
330 /* Not necessary for other sizes in the moment. */ \
/* Call the thread's start routine with its argument, both loaded
directly from the thread descriptor via %fs. */
334 # define CALL_THREAD_FCT(descr) \
336 asm volatile ("movq %%fs:%P2, %%rdi\n\t" \
339 : "i" (offsetof (struct pthread, start_routine)), \
340 "i" (offsetof (struct pthread, arg)) \
341 : "di", "si", "cx", "dx", "r8", "r9", "r10", "r11", \
346 /* Set the stack guard field in TCB head. */
347 # define THREAD_SET_STACK_GUARD(value) \
348 THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
/* Copy the stack guard from the current thread into DESCR. */
349 # define THREAD_COPY_STACK_GUARD(descr) \
350 ((descr)->header.stack_guard \
351 = THREAD_GETMEM (THREAD_SELF, header.stack_guard))
354 /* Set the pointer guard field in the TCB head. */
355 # define THREAD_SET_POINTER_GUARD(value) \
356 THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
/* Copy the pointer guard from the current thread into DESCR. */
357 # define THREAD_COPY_POINTER_GUARD(descr) \
358 ((descr)->header.pointer_guard \
359 = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))
362 /* Get and set the global scope generation counter in the TCB head. */
363 # define THREAD_GSCOPE_FLAG_UNUSED 0
364 # define THREAD_GSCOPE_FLAG_USED 1
365 # define THREAD_GSCOPE_FLAG_WAIT 2
/* Atomically exchange the flag to UNUSED and wake a waiter if one was
observed. NOTE(review): the opening of the macro body (the do { ... }
wrapper and the declaration of __res, the asm output operand) is not
visible in this chunk. */
366 # define THREAD_GSCOPE_RESET_FLAG() \
369 asm volatile ("xchgl %0, %%fs:%P1" \
371 : "i" (offsetof (struct pthread, header.gscope_flag)), \
372 "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
373 if (__res == THREAD_GSCOPE_FLAG_WAIT) \
374 lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
377 # define THREAD_GSCOPE_SET_FLAG() \
378 THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
379 # define THREAD_GSCOPE_WAIT() \
380 GL(dl_wait_lookup_done) ()
384 /* Defined in dl-trampoline.S. */
385 extern void _dl_x86_64_save_sse (void);
386 extern void _dl_x86_64_restore_sse (void);
/* True when the SSE registers must be saved before calling out of
ld.so; see the rtld_must_xmm_save TCB field. */
388 # define RTLD_CHECK_FOREIGN_CALL \
389 (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) != 0)
391 /* NB: Don't use the xchg operation because that would imply a lock
392 prefix which is expensive and unnecessary. The cache line is also
393 not contested at all. */
/* Declares old_rtld_must_xmm_save in the enclosing scope; it is read
back by RTLD_FINALIZE_FOREIGN_CALL below, so the two macros must be
used within the same block. */
394 # define RTLD_ENABLE_FOREIGN_CALL \
395 int old_rtld_must_xmm_save = THREAD_GETMEM (THREAD_SELF, \
396 header.rtld_must_xmm_save); \
397 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 1)
/* Save the SSE registers once (then clear the flag so nested calls do
not save again). NOTE(review): continuation lines of the two macros
below, including the do/while closings, are not visible in this
chunk. */
399 # define RTLD_PREPARE_FOREIGN_CALL \
400 do if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save)) \
402 _dl_x86_64_save_sse (); \
403 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 0); \
/* Restore the SSE registers if they were saved (flag cleared by
RTLD_PREPARE_FOREIGN_CALL) and put back the flag value captured by
RTLD_ENABLE_FOREIGN_CALL. */
407 # define RTLD_FINALIZE_FOREIGN_CALL \
409 if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) == 0) \
410 _dl_x86_64_restore_sse (); \
411 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, \
412 old_rtld_must_xmm_save); \
417 #endif /* __ASSEMBLER__ */