1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
25 #include <sys/param.h>
32 #define TUNABLE_NAMESPACE rtld
33 #include <dl-tunables.h>
35 /* Surplus static TLS, GLRO(dl_tls_static_surplus), is used for
37 - IE TLS in libc.so for all dlmopen namespaces except in the initial
38 one where libc.so is not loaded dynamically but at startup time,
   - IE TLS in other libraries which may be dynamically loaded even in the
     initial namespace,
   - and optionally for optimizing dynamic TLS access.
43 The maximum number of namespaces is DL_NNS, but to support that many
44 namespaces correctly the static TLS allocation should be significantly
45 increased, which may cause problems with small thread stacks due to the
46 way static TLS is accounted (bug 11787).
48 So there is a rtld.nns tunable limit on the number of supported namespaces
49 that affects the size of the static TLS and by default it's small enough
50 not to cause problems with existing applications. The limit is not
51 enforced or checked: it is the user's responsibility to increase rtld.nns
52 if more dlmopen namespaces are used.
54 Audit modules use their own namespaces, they are not included in rtld.nns,
55 but come on top when computing the number of namespaces. */
57 /* Size of initial-exec TLS in libc.so. */
58 #define LIBC_IE_TLS 192
59 /* Size of initial-exec TLS in libraries other than libc.so.
60 This should be large enough to cover runtime libraries of the
61 compiler such as libgomp and libraries in libc other than libc.so. */
62 #define OTHER_IE_TLS 144
64 /* Calculate the size of the static TLS surplus, when the given
65 number of audit modules are loaded. Must be called after the
66 number of audit modules is known and before static TLS allocation. */
68 _dl_tls_static_surplus_init (size_t naudit
)
73 nns
= TUNABLE_GET (nns
, size_t, NULL
);
74 opt_tls
= TUNABLE_GET (optional_static_tls
, size_t, NULL
);
76 /* Default values of the tunables. */
82 if (DL_NNS
- nns
< naudit
)
83 _dl_fatal_printf ("Failed loading %lu audit modules, %lu are supported.\n",
84 (unsigned long) naudit
, (unsigned long) (DL_NNS
- nns
));
87 GL(dl_tls_static_optional
) = opt_tls
;
88 GLRO(dl_tls_static_surplus
) = ((nns
- 1) * LIBC_IE_TLS
93 /* Out-of-memory handler. */
95 __attribute__ ((__noreturn__
))
98 _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
103 _dl_next_tls_modid (void)
107 if (__builtin_expect (GL(dl_tls_dtv_gaps
), false))
110 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
112 /* Note that this branch will never be executed during program
113 start since there are no gaps at that time. Therefore it
114 does not matter that the dl_tls_dtv_slotinfo is not allocated
115 yet when the function is called for the first times.
117 NB: the offset +1 is due to the fact that DTV[0] is used
118 for something else. */
119 result
= GL(dl_tls_static_nelem
) + 1;
120 if (result
<= GL(dl_tls_max_dtv_idx
))
123 while (result
- disp
< runp
->len
)
125 if (runp
->slotinfo
[result
- disp
].map
== NULL
)
129 assert (result
<= GL(dl_tls_max_dtv_idx
) + 1);
132 if (result
- disp
< runp
->len
)
137 while ((runp
= runp
->next
) != NULL
);
139 if (result
> GL(dl_tls_max_dtv_idx
))
141 /* The new index must indeed be exactly one higher than the
143 assert (result
== GL(dl_tls_max_dtv_idx
) + 1);
144 /* There is no gap anymore. */
145 GL(dl_tls_dtv_gaps
) = false;
152 /* No gaps, allocate a new entry. */
155 result
= ++GL(dl_tls_max_dtv_idx
);
163 _dl_count_modids (void)
165 /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
166 we fail to load a module and unload it leaving a gap. If we don't
167 have gaps then the number of modids is the current maximum so
169 if (__glibc_likely (!GL(dl_tls_dtv_gaps
)))
170 return GL(dl_tls_max_dtv_idx
);
172 /* We have gaps and are forced to count the non-NULL entries. */
174 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
177 for (size_t i
= 0; i
< runp
->len
; ++i
)
178 if (runp
->slotinfo
[i
].map
!= NULL
)
190 _dl_determine_tlsoffset (void)
192 size_t max_align
= TLS_TCB_ALIGN
;
193 /* libc.so with rseq has TLS with 32-byte alignment. Since TLS is
194 initialized before audit modules are loaded and slotinfo
195 information is available, this is not taken into account below in
197 max_align
= MAX (max_align
, 32U);
200 size_t freebottom
= 0;
202 /* The first element of the dtv slot info list is allocated. */
203 assert (GL(dl_tls_dtv_slotinfo_list
) != NULL
);
204 /* There is at this point only one element in the
205 dl_tls_dtv_slotinfo_list list. */
206 assert (GL(dl_tls_dtv_slotinfo_list
)->next
== NULL
);
208 struct dtv_slotinfo
*slotinfo
= GL(dl_tls_dtv_slotinfo_list
)->slotinfo
;
210 /* Determining the offset of the various parts of the static TLS
211 block has several dependencies. In addition we have to work
212 around bugs in some toolchains.
214 Each TLS block from the objects available at link time has a size
215 and an alignment requirement. The GNU ld computes the alignment
216 requirements for the data at the positions *in the file*, though.
217 I.e, it is not simply possible to allocate a block with the size
218 of the TLS program header entry. The data is layed out assuming
219 that the first byte of the TLS block fulfills
221 p_vaddr mod p_align == &TLS_BLOCK mod p_align
223 This means we have to add artificial padding at the beginning of
224 the TLS block. These bytes are never used for the TLS data in
225 this module but the first byte allocated must be aligned
226 according to mod p_align == 0 so that the first byte of the TLS
227 block is aligned according to p_vaddr mod p_align. This is ugly
228 and the linker can help by computing the offsets in the TLS block
229 assuming the first byte of the TLS block is aligned according to
232 The extra space which might be allocated before the first byte of
233 the TLS block need not go unused. The code below tries to use
234 that memory for the next TLS block. This can work if the total
235 memory requirement for the next TLS block is smaller than the
239 /* We simply start with zero. */
242 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
244 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
246 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
247 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
249 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
251 if (freebottom
- freetop
>= slotinfo
[cnt
].map
->l_tls_blocksize
)
253 off
= roundup (freetop
+ slotinfo
[cnt
].map
->l_tls_blocksize
254 - firstbyte
, slotinfo
[cnt
].map
->l_tls_align
)
256 if (off
<= freebottom
)
260 /* XXX For some architectures we perhaps should store the
262 slotinfo
[cnt
].map
->l_tls_offset
= off
;
267 off
= roundup (offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
,
268 slotinfo
[cnt
].map
->l_tls_align
) + firstbyte
;
269 if (off
> offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
270 + (freebottom
- freetop
))
273 freebottom
= off
- slotinfo
[cnt
].map
->l_tls_blocksize
;
277 /* XXX For some architectures we perhaps should store the
279 slotinfo
[cnt
].map
->l_tls_offset
= off
;
282 GL(dl_tls_static_used
) = offset
;
283 GL(dl_tls_static_size
) = (roundup (offset
+ GLRO(dl_tls_static_surplus
),
287 /* The TLS blocks start right after the TCB. */
288 size_t offset
= TLS_TCB_SIZE
;
290 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
292 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
294 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
295 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
297 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
299 if (slotinfo
[cnt
].map
->l_tls_blocksize
<= freetop
- freebottom
)
301 off
= roundup (freebottom
, slotinfo
[cnt
].map
->l_tls_align
);
302 if (off
- freebottom
< firstbyte
)
303 off
+= slotinfo
[cnt
].map
->l_tls_align
;
304 if (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
<= freetop
)
306 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
307 freebottom
= (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
313 off
= roundup (offset
, slotinfo
[cnt
].map
->l_tls_align
);
314 if (off
- offset
< firstbyte
)
315 off
+= slotinfo
[cnt
].map
->l_tls_align
;
317 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
318 if (off
- firstbyte
- offset
> freetop
- freebottom
)
321 freetop
= off
- firstbyte
;
324 offset
= off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
;
327 GL(dl_tls_static_used
) = offset
;
328 GL(dl_tls_static_size
) = roundup (offset
+ GLRO(dl_tls_static_surplus
),
331 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
334 /* The alignment requirement for the static TLS block. */
335 GL(dl_tls_static_align
) = max_align
;
340 allocate_dtv (void *result
)
345 /* We allocate a few more elements in the dtv than are needed for the
346 initial set of modules. This should avoid in most cases expansions
348 dtv_length
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
349 dtv
= calloc (dtv_length
+ 2, sizeof (dtv_t
));
352 /* This is the initial length of the dtv. */
353 dtv
[0].counter
= dtv_length
;
355 /* The rest of the dtv (including the generation counter) is
356 Initialize with zero to indicate nothing there. */
358 /* Add the dtv to the thread data structures. */
359 INSTALL_DTV (result
, dtv
);
368 /* Get size and alignment requirements of the static TLS block. */
370 _dl_get_tls_static_info (size_t *sizep
, size_t *alignp
)
372 *sizep
= GL(dl_tls_static_size
);
373 *alignp
= GL(dl_tls_static_align
);
376 /* Derive the location of the pointer to the start of the original
377 allocation (before alignment) from the pointer to the TCB. */
378 static inline void **
379 tcb_to_pointer_to_free_location (void *tcb
)
382 /* The TCB follows the TLS blocks, and the pointer to the front
384 void **original_pointer_location
= tcb
+ TLS_TCB_SIZE
;
386 /* The TCB comes first, preceded by the pre-TCB, and the pointer is
388 void **original_pointer_location
= tcb
- TLS_PRE_TCB_SIZE
- sizeof (void *);
390 return original_pointer_location
;
394 _dl_allocate_tls_storage (void)
397 size_t size
= GL(dl_tls_static_size
);
401 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
402 ^ This should be returned. */
403 size
+= TLS_PRE_TCB_SIZE
;
406 /* Perform the allocation. Reserve space for the required alignment
407 and the pointer to the original allocation. */
408 size_t alignment
= GL(dl_tls_static_align
);
409 void *allocated
= malloc (size
+ alignment
+ sizeof (void *));
410 if (__glibc_unlikely (allocated
== NULL
))
413 /* Perform alignment and allocate the DTV. */
415 /* The TCB follows the TLS blocks, which determine the alignment.
416 (TCB alignment requirements have been taken into account when
417 calculating GL(dl_tls_static_align).) */
418 void *aligned
= (void *) roundup ((uintptr_t) allocated
, alignment
);
419 result
= aligned
+ size
- TLS_TCB_SIZE
;
421 /* Clear the TCB data structure. We can't ask the caller (i.e.
422 libpthread) to do it, because we will initialize the DTV et al. */
423 memset (result
, '\0', TLS_TCB_SIZE
);
425 /* Pre-TCB and TCB come before the TLS blocks. The layout computed
426 in _dl_determine_tlsoffset assumes that the TCB is aligned to the
427 TLS block alignment, and not just the TLS blocks after it. This
428 can leave an unused alignment gap between the TCB and the TLS
430 result
= (void *) roundup
431 (sizeof (void *) + TLS_PRE_TCB_SIZE
+ (uintptr_t) allocated
,
434 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before
435 it. We can't ask the caller (i.e. libpthread) to do it, because
436 we will initialize the DTV et al. */
437 memset (result
- TLS_PRE_TCB_SIZE
, '\0', TLS_PRE_TCB_SIZE
+ TLS_TCB_SIZE
);
440 /* Record the value of the original pointer for later
442 *tcb_to_pointer_to_free_location (result
) = allocated
;
444 result
= allocate_dtv (result
);
452 extern dtv_t _dl_static_dtv
[];
453 # define _dl_initial_dtv (&_dl_static_dtv[1])
457 _dl_resize_dtv (dtv_t
*dtv
)
459 /* Resize the dtv. */
461 /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
462 other threads concurrently. */
464 = atomic_load_acquire (&GL(dl_tls_max_dtv_idx
)) + DTV_SURPLUS
;
465 size_t oldsize
= dtv
[-1].counter
;
467 if (dtv
== GL(dl_initial_dtv
))
469 /* This is the initial dtv that was either statically allocated in
470 __libc_setup_tls or allocated during rtld startup using the
471 dl-minimal.c malloc instead of the real malloc. We can't free
472 it, we have to abandon the old storage. */
474 newp
= malloc ((2 + newsize
) * sizeof (dtv_t
));
477 memcpy (newp
, &dtv
[-1], (2 + oldsize
) * sizeof (dtv_t
));
481 newp
= realloc (&dtv
[-1],
482 (2 + newsize
) * sizeof (dtv_t
));
487 newp
[0].counter
= newsize
;
489 /* Clear the newly allocated part. */
490 memset (newp
+ 2 + oldsize
, '\0',
491 (newsize
- oldsize
) * sizeof (dtv_t
));
493 /* Return the generation counter. */
499 _dl_allocate_tls_init (void *result
)
502 /* The memory allocation failed. */
505 dtv_t
*dtv
= GET_DTV (result
);
506 struct dtv_slotinfo_list
*listp
;
510 /* Check if the current dtv is big enough. */
511 if (dtv
[-1].counter
< GL(dl_tls_max_dtv_idx
))
513 /* Resize the dtv. */
514 dtv
= _dl_resize_dtv (dtv
);
516 /* Install this new dtv in the thread data structures. */
517 INSTALL_DTV (result
, &dtv
[-1]);
520 /* We have to prepare the dtv for all currently loaded modules using
521 TLS. For those which are dynamically loaded we add the values
522 indicating deferred allocation. */
523 listp
= GL(dl_tls_dtv_slotinfo_list
);
528 for (cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
530 struct link_map
*map
;
533 /* Check for the total number of used slots. */
534 if (total
+ cnt
> GL(dl_tls_max_dtv_idx
))
537 map
= listp
->slotinfo
[cnt
].map
;
542 /* Keep track of the maximum generation number. This might
543 not be the generation counter. */
544 assert (listp
->slotinfo
[cnt
].gen
<= GL(dl_tls_generation
));
545 maxgen
= MAX (maxgen
, listp
->slotinfo
[cnt
].gen
);
547 dtv
[map
->l_tls_modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
548 dtv
[map
->l_tls_modid
].pointer
.to_free
= NULL
;
550 if (map
->l_tls_offset
== NO_TLS_OFFSET
551 || map
->l_tls_offset
== FORCED_DYNAMIC_TLS_OFFSET
)
554 assert (map
->l_tls_modid
== total
+ cnt
);
555 assert (map
->l_tls_blocksize
>= map
->l_tls_initimage_size
);
557 assert ((size_t) map
->l_tls_offset
>= map
->l_tls_blocksize
);
558 dest
= (char *) result
- map
->l_tls_offset
;
560 dest
= (char *) result
+ map
->l_tls_offset
;
562 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
565 /* Set up the DTV entry. The simplified __tls_get_addr that
566 some platforms use in static programs requires it. */
567 dtv
[map
->l_tls_modid
].pointer
.val
= dest
;
569 /* Copy the initialization image and clear the BSS part. */
570 memset (__mempcpy (dest
, map
->l_tls_initimage
,
571 map
->l_tls_initimage_size
), '\0',
572 map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
576 if (total
>= GL(dl_tls_max_dtv_idx
))
580 assert (listp
!= NULL
);
583 /* The DTV version is up-to-date now. */
584 dtv
[0].counter
= maxgen
;
588 rtld_hidden_def (_dl_allocate_tls_init
)
591 _dl_allocate_tls (void *mem
)
593 return _dl_allocate_tls_init (mem
== NULL
594 ? _dl_allocate_tls_storage ()
595 : allocate_dtv (mem
));
597 rtld_hidden_def (_dl_allocate_tls
)
601 _dl_deallocate_tls (void *tcb
, bool dealloc_tcb
)
603 dtv_t
*dtv
= GET_DTV (tcb
);
605 /* We need to free the memory allocated for non-static TLS. */
606 for (size_t cnt
= 0; cnt
< dtv
[-1].counter
; ++cnt
)
607 free (dtv
[1 + cnt
].pointer
.to_free
);
609 /* The array starts with dtv[-1]. */
610 if (dtv
!= GL(dl_initial_dtv
))
614 free (*tcb_to_pointer_to_free_location (tcb
));
616 rtld_hidden_def (_dl_deallocate_tls
)
620 /* The __tls_get_addr function has two basic forms which differ in the
621 arguments. The IA-64 form takes two parameters, the module ID and
622 offset. The form used, among others, on IA-32 takes a reference to
623 a special structure which contain the same information. The second
624 form seems to be more often used (in the moment) so we default to
625 it. Users of the IA-64 form have to provide adequate definitions
626 of the following macros. */
627 # ifndef GET_ADDR_ARGS
628 # define GET_ADDR_ARGS tls_index *ti
629 # define GET_ADDR_PARAM ti
631 # ifndef GET_ADDR_MODULE
632 # define GET_ADDR_MODULE ti->ti_module
634 # ifndef GET_ADDR_OFFSET
635 # define GET_ADDR_OFFSET ti->ti_offset
638 /* Allocate one DTV entry. */
639 static struct dtv_pointer
640 allocate_dtv_entry (size_t alignment
, size_t size
)
642 if (powerof2 (alignment
) && alignment
<= _Alignof (max_align_t
))
644 /* The alignment is supported by malloc. */
645 void *ptr
= malloc (size
);
646 return (struct dtv_pointer
) { ptr
, ptr
};
649 /* Emulate memalign to by manually aligning a pointer returned by
650 malloc. First compute the size with an overflow check. */
651 size_t alloc_size
= size
+ alignment
;
652 if (alloc_size
< size
)
653 return (struct dtv_pointer
) {};
655 /* Perform the allocation. This is the pointer we need to free
657 void *start
= malloc (alloc_size
);
659 return (struct dtv_pointer
) {};
661 /* Find the aligned position within the larger allocation. */
662 void *aligned
= (void *) roundup ((uintptr_t) start
, alignment
);
664 return (struct dtv_pointer
) { .val
= aligned
, .to_free
= start
};
667 static struct dtv_pointer
668 allocate_and_init (struct link_map
*map
)
670 struct dtv_pointer result
= allocate_dtv_entry
671 (map
->l_tls_align
, map
->l_tls_blocksize
);
672 if (result
.val
== NULL
)
675 /* Initialize the memory. */
676 memset (__mempcpy (result
.val
, map
->l_tls_initimage
,
677 map
->l_tls_initimage_size
),
678 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
685 _dl_update_slotinfo (unsigned long int req_modid
)
687 struct link_map
*the_map
= NULL
;
688 dtv_t
*dtv
= THREAD_DTV ();
690 /* The global dl_tls_dtv_slotinfo array contains for each module
691 index the generation counter current when the entry was created.
692 This array never shrinks so that all module indices which were
693 valid at some time can be used to access it. Before the first
694 use of a new module index in this function the array was extended
695 appropriately. Access also does not have to be guarded against
696 modifications of the array. It is assumed that pointer-size
697 values can be read atomically even in SMP environments. It is
698 possible that other threads at the same time dynamically load
699 code and therefore add to the slotinfo list. This is a problem
700 since we must not pick up any information about incomplete work.
701 The solution to this is to ignore all dtv slots which were
702 created after the one we are currently interested. We know that
703 dynamic loading for this module is completed and this is the last
704 load operation we know finished. */
705 unsigned long int idx
= req_modid
;
706 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
708 while (idx
>= listp
->len
)
714 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
716 /* The generation counter for the slot is higher than what the
717 current dtv implements. We have to update the whole dtv but
718 only those entries with a generation counter <= the one for
719 the entry we need. */
720 size_t new_gen
= listp
->slotinfo
[idx
].gen
;
723 /* We have to look through the entire dtv slotinfo list. */
724 listp
= GL(dl_tls_dtv_slotinfo_list
);
727 for (size_t cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
729 size_t gen
= listp
->slotinfo
[cnt
].gen
;
732 /* This is a slot for a generation younger than the
733 one we are handling now. It might be incompletely
734 set up so ignore it. */
737 /* If the entry is older than the current dtv layout we
738 know we don't have to handle it. */
739 if (gen
<= dtv
[0].counter
)
742 /* If there is no map this means the entry is empty. */
743 struct link_map
*map
= listp
->slotinfo
[cnt
].map
;
746 if (dtv
[-1].counter
>= total
+ cnt
)
748 /* If this modid was used at some point the memory
749 might still be allocated. */
750 free (dtv
[total
+ cnt
].pointer
.to_free
);
751 dtv
[total
+ cnt
].pointer
.val
= TLS_DTV_UNALLOCATED
;
752 dtv
[total
+ cnt
].pointer
.to_free
= NULL
;
758 /* Check whether the current dtv array is large enough. */
759 size_t modid
= map
->l_tls_modid
;
760 assert (total
+ cnt
== modid
);
761 if (dtv
[-1].counter
< modid
)
763 /* Resize the dtv. */
764 dtv
= _dl_resize_dtv (dtv
);
766 assert (modid
<= dtv
[-1].counter
);
768 /* Install this new dtv in the thread data
770 INSTALL_NEW_DTV (dtv
);
773 /* If there is currently memory allocate for this
774 dtv entry free it. */
775 /* XXX Ideally we will at some point create a memory
777 free (dtv
[modid
].pointer
.to_free
);
778 dtv
[modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
779 dtv
[modid
].pointer
.to_free
= NULL
;
781 if (modid
== req_modid
)
787 while ((listp
= listp
->next
) != NULL
);
789 /* This will be the new maximum generation counter. */
790 dtv
[0].counter
= new_gen
;
798 __attribute_noinline__
799 tls_get_addr_tail (GET_ADDR_ARGS
, dtv_t
*dtv
, struct link_map
*the_map
)
801 /* The allocation was deferred. Do it now. */
804 /* Find the link map for this module. */
805 size_t idx
= GET_ADDR_MODULE
;
806 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
808 while (idx
>= listp
->len
)
814 the_map
= listp
->slotinfo
[idx
].map
;
817 /* Make sure that, if a dlopen running in parallel forces the
818 variable into static storage, we'll wait until the address in the
819 static TLS block is set up, and use that. If we're undecided
820 yet, make sure we make the decision holding the lock as well. */
821 if (__glibc_unlikely (the_map
->l_tls_offset
822 != FORCED_DYNAMIC_TLS_OFFSET
))
824 __rtld_lock_lock_recursive (GL(dl_load_lock
));
825 if (__glibc_likely (the_map
->l_tls_offset
== NO_TLS_OFFSET
))
827 the_map
->l_tls_offset
= FORCED_DYNAMIC_TLS_OFFSET
;
828 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
830 else if (__glibc_likely (the_map
->l_tls_offset
831 != FORCED_DYNAMIC_TLS_OFFSET
))
834 void *p
= (char *) THREAD_SELF
- the_map
->l_tls_offset
;
836 void *p
= (char *) THREAD_SELF
+ the_map
->l_tls_offset
+ TLS_PRE_TCB_SIZE
;
838 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
840 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
842 dtv
[GET_ADDR_MODULE
].pointer
.to_free
= NULL
;
843 dtv
[GET_ADDR_MODULE
].pointer
.val
= p
;
845 return (char *) p
+ GET_ADDR_OFFSET
;
848 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
850 struct dtv_pointer result
= allocate_and_init (the_map
);
851 dtv
[GET_ADDR_MODULE
].pointer
= result
;
852 assert (result
.to_free
!= NULL
);
854 return (char *) result
.val
+ GET_ADDR_OFFSET
;
858 static struct link_map
*
859 __attribute_noinline__
860 update_get_addr (GET_ADDR_ARGS
)
862 struct link_map
*the_map
= _dl_update_slotinfo (GET_ADDR_MODULE
);
863 dtv_t
*dtv
= THREAD_DTV ();
865 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
867 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
868 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, the_map
);
870 return (void *) p
+ GET_ADDR_OFFSET
;
873 /* For all machines that have a non-macro version of __tls_get_addr, we
874 want to use rtld_hidden_proto/rtld_hidden_def in order to call the
875 internal alias for __tls_get_addr from ld.so. This avoids a PLT entry
876 in ld.so for __tls_get_addr. */
878 #ifndef __tls_get_addr
879 extern void * __tls_get_addr (GET_ADDR_ARGS
);
880 rtld_hidden_proto (__tls_get_addr
)
881 rtld_hidden_def (__tls_get_addr
)
884 /* The generic dynamic and local dynamic model cannot be used in
885 statically linked applications. */
887 __tls_get_addr (GET_ADDR_ARGS
)
889 dtv_t
*dtv
= THREAD_DTV ();
891 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
892 return update_get_addr (GET_ADDR_PARAM
);
894 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
896 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
897 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, NULL
);
899 return (char *) p
+ GET_ADDR_OFFSET
;
904 /* Look up the module's TLS block as for __tls_get_addr,
905 but never touch anything. Return null if it's not allocated yet. */
907 _dl_tls_get_addr_soft (struct link_map
*l
)
909 if (__glibc_unlikely (l
->l_tls_modid
== 0))
910 /* This module has no TLS segment. */
913 dtv_t
*dtv
= THREAD_DTV ();
914 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
916 /* This thread's DTV is not completely current,
917 but it might already cover this module. */
919 if (l
->l_tls_modid
>= dtv
[-1].counter
)
923 size_t idx
= l
->l_tls_modid
;
924 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
925 while (idx
>= listp
->len
)
931 /* We've reached the slot for this module.
932 If its generation counter is higher than the DTV's,
933 this thread does not know about this module yet. */
934 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
938 void *data
= dtv
[l
->l_tls_modid
].pointer
.val
;
939 if (__glibc_unlikely (data
== TLS_DTV_UNALLOCATED
))
940 /* The DTV is current, but this thread has not yet needed
941 to allocate this module's segment. */
949 _dl_add_to_slotinfo (struct link_map
*l
, bool do_add
)
951 /* Now that we know the object is loaded successfully add
952 modules containing TLS data to the dtv info table. We
953 might have to increase its size. */
954 struct dtv_slotinfo_list
*listp
;
955 struct dtv_slotinfo_list
*prevp
;
956 size_t idx
= l
->l_tls_modid
;
958 /* Find the place in the dtv slotinfo list. */
959 listp
= GL(dl_tls_dtv_slotinfo_list
);
960 prevp
= NULL
; /* Needed to shut up gcc. */
963 /* Does it fit in the array of this list element? */
964 if (idx
< listp
->len
)
970 while (listp
!= NULL
);
974 /* When we come here it means we have to add a new element
975 to the slotinfo list. And the new module must be in
979 listp
= prevp
->next
= (struct dtv_slotinfo_list
*)
980 malloc (sizeof (struct dtv_slotinfo_list
)
981 + TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
984 /* We ran out of memory. We will simply fail this
985 call but don't undo anything we did so far. The
986 application will crash or be terminated anyway very
989 /* We have to do this since some entries in the dtv
990 slotinfo array might already point to this
992 ++GL(dl_tls_generation
);
994 _dl_signal_error (ENOMEM
, "dlopen", NULL
, N_("\
995 cannot create TLS data structures"));
998 listp
->len
= TLS_SLOTINFO_SURPLUS
;
1000 memset (listp
->slotinfo
, '\0',
1001 TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
1004 /* Add the information into the slotinfo data structure. */
1007 listp
->slotinfo
[idx
].map
= l
;
1008 listp
->slotinfo
[idx
].gen
= GL(dl_tls_generation
) + 1;