]> git.ipfire.org Git - thirdparty/glibc.git/blame - elf/dl-tls.c
Update copyright dates with scripts/update-copyrights.
[thirdparty/glibc.git] / elf / dl-tls.c
CommitLineData
b6ab06ce 1/* Thread-local storage handling in the ELF dynamic linker. Generic version.
688903eb 2 Copyright (C) 2002-2018 Free Software Foundation, Inc.
b6ab06ce
UD
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
59ba27a6
PE
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
b6ab06ce
UD
18
19#include <assert.h>
20#include <errno.h>
21#include <libintl.h>
22#include <signal.h>
23#include <stdlib.h>
24#include <unistd.h>
25#include <sys/param.h>
d8dd0080 26#include <atomic.h>
b6ab06ce
UD
27
28#include <tls.h>
11bf311e
UD
29#include <dl-tls.h>
30#include <ldsodefs.h>
b6ab06ce
UD
31
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.

   Parenthesized so the macro expands safely in any expression
   context; the historical unparenthesized form (64 + DL_NNS * 100)
   only worked because every use site happened to add it on the
   right-hand side of a '+'.  */
#define TLS_STATIC_SURPLUS (64 + DL_NNS * 100)
b6ab06ce 35
b6ab06ce
UD
36
/* Out-of-memory handler.  Terminates the process with a fatal
   diagnostic via _dl_fatal_printf; never returns, hence the
   noreturn attribute.  Used by the DTV/TLS allocation paths below
   where failure cannot be reported to the application.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
b6ab06ce
UD
44
45
/* Return the next TLS module ID to assign.  If earlier module
   unloads left gaps in the slotinfo table (GL(dl_tls_dtv_gaps)),
   reuse the lowest free slot; otherwise hand out a fresh ID one
   past the current maximum.  */
size_t
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first times.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            /* DISP is the number of slots in all earlier list
               elements, so RESULT - DISP indexes the current one.
               Scan it for a slot whose map is NULL (a gap).  */
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            /* Stop if a free slot was found inside this element.  */
            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
104
105
d0503676 106size_t
d0503676
CD
107_dl_count_modids (void)
108{
109 /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
110 we fail to load a module and unload it leaving a gap. If we don't
111 have gaps then the number of modids is the current maximum so
112 return that. */
113 if (__glibc_likely (!GL(dl_tls_dtv_gaps)))
114 return GL(dl_tls_max_dtv_idx);
115
116 /* We have gaps and are forced to count the non-NULL entries. */
117 size_t n = 0;
118 struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
119 while (runp != NULL)
120 {
121 for (size_t i = 0; i < runp->len; ++i)
122 if (runp->slotinfo[i].map != NULL)
123 ++n;
124
125 runp = runp->next;
126 }
127
128 return n;
129}
130
131
#ifdef SHARED
/* Compute the static TLS layout: assign an offset within the static
   TLS block to every module currently in the (single) slotinfo list
   element and record the block's total size, used size and alignment
   in GL(dl_tls_static_size)/_used/_align.  Runs once during startup,
   before additional slotinfo elements exist (see the asserts).  */
void
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  /* FREETOP/FREEBOTTOM delimit a run of bytes left unused by the
     alignment padding of an earlier block; later, smaller blocks may
     be placed into that gap.  */
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e, it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is layed out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      /* Padding needed so the block's first byte gets the
         file-relative alignment described above.  */
      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* First try to place this block inside the recorded gap.  */
      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      /* If alignment padding opened a larger gap than the one we were
         tracking, remember the new gap for subsequent blocks.  */
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* First try to place this block inside the recorded gap.  */
      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      /* Track the largest gap produced by alignment padding.  */
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}
#endif /* SHARED */
b6ab06ce
UD
275
/* Allocate a zero-filled DTV for the thread whose TCB is RESULT and
   install it there via INSTALL_DTV.  Returns RESULT on success and
   NULL if the DTV could not be allocated (RESULT's storage is left
   for the caller to release).  */
static void *
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  /* Two extra slots beyond DTV_LENGTH: one for the length bookkeeping
     slot and one for the generation counter.  */
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}
303
304
305/* Get size and alignment requirements of the static TLS block. */
306void
b6ab06ce
UD
307_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
308{
309 *sizep = GL(dl_tls_static_size);
310 *alignp = GL(dl_tls_static_align);
311}
312
6c444ad6
FW
/* Derive the location of the pointer to the start of the original
   allocation (before alignment) from the pointer to the TCB.  */
static inline void **
tcb_to_pointer_to_free_location (void *tcb)
{
#if TLS_TCB_AT_TP
  /* The TCB follows the TLS blocks, and the pointer to the front
     follows the TCB.  */
  return (void **) (tcb + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TCB comes first, preceded by the pre-TCB, and the pointer is
     before that.  */
  return (void **) (tcb - TLS_PRE_TCB_SIZE - sizeof (void *));
#endif
}
b6ab06ce
UD
329
/* Allocate the backing storage for a new thread's static TLS block,
   TCB (and pre-TCB where applicable) plus its DTV, and wire the DTV
   into the TCB.  Returns the TCB pointer, or NULL if any allocation
   fails (nothing is leaked in that case).  */
void *
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += TLS_PRE_TCB_SIZE;
#endif

  /* Perform the allocation.  Reserve space for the required alignment
     and the pointer to the original allocation.  */
  size_t alignment = GL(dl_tls_static_align);
  void *allocated = malloc (size + alignment + sizeof (void *));
  if (__glibc_unlikely (allocated == NULL))
    return NULL;

  /* Perform alignment and allocate the DTV.  */
#if TLS_TCB_AT_TP
  /* The TCB follows the TLS blocks, which determine the alignment.
     (TCB alignment requirements have been taken into account when
     calculating GL(dl_tls_static_align).)  */
  void *aligned = (void *) roundup ((uintptr_t) allocated, alignment);
  result = aligned + size - TLS_TCB_SIZE;

  /* Clear the TCB data structure.  We can't ask the caller (i.e.
     libpthread) to do it, because we will initialize the DTV et al.  */
  memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* Pre-TCB and TCB come before the TLS blocks.  The layout computed
     in _dl_determine_tlsoffset assumes that the TCB is aligned to the
     TLS block alignment, and not just the TLS blocks after it.  This
     can leave an unused alignment gap between the TCB and the TLS
     blocks.  */
  result = (void *) roundup
    (sizeof (void *) + TLS_PRE_TCB_SIZE + (uintptr_t) allocated,
     alignment);

  /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before
     it.  We can't ask the caller (i.e. libpthread) to do it, because
     we will initialize the DTV et al.  */
  memset (result - TLS_PRE_TCB_SIZE, '\0', TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

  /* Record the value of the original pointer for later
     deallocation.  */
  *tcb_to_pointer_to_free_location (result) = allocated;

  result = allocate_dtv (result);
  if (result == NULL)
    free (allocated);
  return result;
}
386
387
d8dd0080
L
388#ifndef SHARED
389extern dtv_t _dl_static_dtv[];
390# define _dl_initial_dtv (&_dl_static_dtv[1])
391#endif
392
/* Grow DTV so it can hold entries for all module IDs up to the
   current GL(dl_tls_max_dtv_idx), plus DTV_SURPLUS spare slots.
   Returns the new dtv pointer (positioned at the generation-counter
   element, like the input); the caller must install it.  Aborts via
   oom() if memory cannot be obtained.  */
static dtv_t *
_dl_resize_dtv (dtv_t *dtv)
{
  /* Resize the dtv.  */
  dtv_t *newp;
  /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
     other threads concurrently.  */
  size_t newsize
    = atomic_load_acquire (&GL(dl_tls_max_dtv_idx)) + DTV_SURPLUS;
  size_t oldsize = dtv[-1].counter;

  if (dtv == GL(dl_initial_dtv))
    {
      /* This is the initial dtv that was either statically allocated in
         __libc_setup_tls or allocated during rtld startup using the
         dl-minimal.c malloc instead of the real malloc.  We can't free
         it, we have to abandon the old storage.  */

      newp = malloc ((2 + newsize) * sizeof (dtv_t));
      if (newp == NULL)
        oom ();
      /* Copy from the length slot (dtv[-1]) onwards: length,
         generation counter and OLDSIZE module entries.  */
      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
    }
  else
    {
      newp = realloc (&dtv[-1],
                      (2 + newsize) * sizeof (dtv_t));
      if (newp == NULL)
        oom ();
    }

  /* NEWP[0] is the length slot, i.e. dtv[-1] of the returned
     pointer.  */
  newp[0].counter = newsize;

  /* Clear the newly allocated part.  */
  memset (newp + 2 + oldsize, '\0',
          (newsize - oldsize) * sizeof (dtv_t));

  /* Return the generation counter.  */
  return &newp[1];
}
433
434
/* Initialize the DTV and static TLS data of the thread whose TCB is
   RESULT: grow the DTV if needed, mark every in-use module entry
   TLS_DTV_UNALLOCATED, and for modules with a fixed static TLS
   offset copy their initialization image into this thread's block.
   RESULT may be NULL (upstream allocation failure), in which case
   NULL is returned.  */
void *
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* Check if the current dtv is big enough.   */
  if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
    {
      /* Resize the dtv.  */
      dtv = _dl_resize_dtv (dtv);

      /* Install this new dtv in the thread data structures.  */
      INSTALL_DTV (result, &dtv[-1]);
    }

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      /* Slot 0 of the first list element is unused; module IDs start
         at 1.  */
      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          assert (listp->slotinfo[cnt].gen <= GL(dl_tls_generation));
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
          dtv[map->l_tls_modid].pointer.to_free = NULL;

          /* Modules without a static TLS offset get their block
             allocated lazily on first access.  */
          if (map->l_tls_offset == NO_TLS_OFFSET
              || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
            continue;

          assert (map->l_tls_modid == total + cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

          /* Set up the DTV entry.  The simplified __tls_get_addr that
             some platforms use in static programs requires it.  */
          dtv[map->l_tls_modid].pointer.val = dest;

          /* Copy the initialization image and clear the BSS part.  */
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)
526
527void *
b6ab06ce
UD
528_dl_allocate_tls (void *mem)
529{
530 return _dl_allocate_tls_init (mem == NULL
531 ? _dl_allocate_tls_storage ()
532 : allocate_dtv (mem));
533}
534rtld_hidden_def (_dl_allocate_tls)
535
536
537void
b6ab06ce
UD
538_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
539{
540 dtv_t *dtv = GET_DTV (tcb);
541
542 /* We need to free the memory allocated for non-static TLS. */
543 for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
a2ff21f8 544 free (dtv[1 + cnt].pointer.to_free);
b6ab06ce
UD
545
546 /* The array starts with dtv[-1]. */
04570aaa 547 if (dtv != GL(dl_initial_dtv))
dd654bf9 548 free (dtv - 1);
b6ab06ce
UD
549
550 if (dealloc_tcb)
6c444ad6 551 free (*tcb_to_pointer_to_free_location (tcb));
b6ab06ce
UD
552}
553rtld_hidden_def (_dl_deallocate_tls)
554
555
11bf311e 556#ifdef SHARED
b6ab06ce
UD
557/* The __tls_get_addr function has two basic forms which differ in the
558 arguments. The IA-64 form takes two parameters, the module ID and
559 offset. The form used, among others, on IA-32 takes a reference to
560 a special structure which contain the same information. The second
561 form seems to be more often used (in the moment) so we default to
562 it. Users of the IA-64 form have to provide adequate definitions
563 of the following macros. */
11bf311e
UD
564# ifndef GET_ADDR_ARGS
565# define GET_ADDR_ARGS tls_index *ti
27a25b6e 566# define GET_ADDR_PARAM ti
11bf311e
UD
567# endif
568# ifndef GET_ADDR_MODULE
569# define GET_ADDR_MODULE ti->ti_module
570# endif
571# ifndef GET_ADDR_OFFSET
572# define GET_ADDR_OFFSET ti->ti_offset
573# endif
b6ab06ce 574
a2ff21f8
FW
575/* Allocate one DTV entry. */
576static struct dtv_pointer
577allocate_dtv_entry (size_t alignment, size_t size)
578{
579 if (powerof2 (alignment) && alignment <= _Alignof (max_align_t))
580 {
581 /* The alignment is supported by malloc. */
582 void *ptr = malloc (size);
583 return (struct dtv_pointer) { ptr, ptr };
584 }
b6ab06ce 585
a2ff21f8
FW
586 /* Emulate memalign to by manually aligning a pointer returned by
587 malloc. First compute the size with an overflow check. */
588 size_t alloc_size = size + alignment;
589 if (alloc_size < size)
590 return (struct dtv_pointer) {};
591
592 /* Perform the allocation. This is the pointer we need to free
593 later. */
594 void *start = malloc (alloc_size);
595 if (start == NULL)
596 return (struct dtv_pointer) {};
597
598 /* Find the aligned position within the larger allocation. */
599 void *aligned = (void *) roundup ((uintptr_t) start, alignment);
600
601 return (struct dtv_pointer) { .val = aligned, .to_free = start };
602}
603
604static struct dtv_pointer
73d61e4f 605allocate_and_init (struct link_map *map)
b6ab06ce 606{
a2ff21f8
FW
607 struct dtv_pointer result = allocate_dtv_entry
608 (map->l_tls_align, map->l_tls_blocksize);
609 if (result.val == NULL)
b6ab06ce
UD
610 oom ();
611
73d61e4f 612 /* Initialize the memory. */
a2ff21f8
FW
613 memset (__mempcpy (result.val, map->l_tls_initimage,
614 map->l_tls_initimage_size),
b6ab06ce
UD
615 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
616
a2ff21f8 617 return result;
b6ab06ce
UD
618}
619
620
/* Bring the calling thread's DTV up to date for all slotinfo entries
   whose generation does not exceed the one recorded for REQ_MODID:
   free stale per-module memory, enlarge the DTV if necessary, and
   mark affected entries TLS_DTV_UNALLOCATED.  Returns the link map
   of REQ_MODID if its entry was reset in the process, else NULL.  */
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  /* Walk to the list element containing REQ_MODID's slot.  */
  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          /* Slot 0 of the first element is unused (module IDs start
             at 1).  */
          for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  if (dtv[-1].counter >= total + cnt)
                    {
                      /* If this modid was used at some point the memory
                         might still be allocated.  */
                      free (dtv[total + cnt].pointer.to_free);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                      dtv[total + cnt].pointer.to_free = NULL;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Resize the dtv.  */
                  dtv = _dl_resize_dtv (dtv);

                  assert (modid <= dtv[-1].counter);

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocate for this
                 dtv entry free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              free (dtv[modid].pointer.to_free);
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[modid].pointer.to_free = NULL;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
732
733
a3636e8b
UD
/* Slow path of __tls_get_addr: the DTV entry for GET_ADDR_MODULE is
   unallocated.  Decide (under GL(dl_load_lock)) whether the module's
   TLS lives in static or dynamic storage, fill in the DTV entry, and
   return the address of the requested variable.  THE_MAP may be
   NULL, in which case the link map is looked up in the slotinfo
   list.  */
static void *
__attribute_noinline__
tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = GET_ADDR_MODULE;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      the_map = listp->slotinfo[idx].map;
    }

  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__glibc_unlikely (the_map->l_tls_offset
                        != FORCED_DYNAMIC_TLS_OFFSET))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
        {
          /* Undecided: commit this module to dynamic TLS while
             holding the lock, so no concurrent dlopen can assign it
             a static offset afterwards.  */
          the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
        }
      else if (__glibc_likely (the_map->l_tls_offset
                               != FORCED_DYNAMIC_TLS_OFFSET))
        {
          /* The module has a static TLS offset: point the DTV entry
             at the already-initialized block inside this thread's
             static TLS area.  */
#if TLS_TCB_AT_TP
          void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
#elif TLS_DTV_AT_TP
          void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          /* Static TLS storage is never freed per-module, so there is
             nothing to record in to_free.  */
          dtv[GET_ADDR_MODULE].pointer.to_free = NULL;
          dtv[GET_ADDR_MODULE].pointer.val = p;

          return (char *) p + GET_ADDR_OFFSET;
        }
      else
        __rtld_lock_unlock_recursive (GL(dl_load_lock));
    }
  /* Dynamic TLS: allocate and initialize the module's block now.  */
  struct dtv_pointer result = allocate_and_init (the_map);
  dtv[GET_ADDR_MODULE].pointer = result;
  assert (result.to_free != NULL);

  return (char *) result.val + GET_ADDR_OFFSET;
}
793
794
795static struct link_map *
796__attribute_noinline__
797update_get_addr (GET_ADDR_ARGS)
798{
799 struct link_map *the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
800 dtv_t *dtv = THREAD_DTV ();
801
802 void *p = dtv[GET_ADDR_MODULE].pointer.val;
803
a1ffb40e 804 if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
27a25b6e
UD
805 return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);
806
57b957eb 807 return (void *) p + GET_ADDR_OFFSET;
a3636e8b
UD
808}
809
050f7298
L
810/* For all machines that have a non-macro version of __tls_get_addr, we
811 want to use rtld_hidden_proto/rtld_hidden_def in order to call the
812 internal alias for __tls_get_addr from ld.so. This avoids a PLT entry
813 in ld.so for __tls_get_addr. */
814
815#ifndef __tls_get_addr
816extern void * __tls_get_addr (GET_ADDR_ARGS);
817rtld_hidden_proto (__tls_get_addr)
818rtld_hidden_def (__tls_get_addr)
819#endif
a3636e8b 820
b6ab06ce
UD
821/* The generic dynamic and local dynamic model cannot be used in
822 statically linked applications. */
823void *
824__tls_get_addr (GET_ADDR_ARGS)
825{
826 dtv_t *dtv = THREAD_DTV ();
b6ab06ce 827
a1ffb40e 828 if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
27a25b6e 829 return update_get_addr (GET_ADDR_PARAM);
b6ab06ce 830
27a25b6e 831 void *p = dtv[GET_ADDR_MODULE].pointer.val;
b6ab06ce 832
a1ffb40e 833 if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
27a25b6e 834 return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);
b6ab06ce
UD
835
836 return (char *) p + GET_ADDR_OFFSET;
837}
11bf311e 838#endif
b6ab06ce
UD
839
840
d78efd9f
RM
/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__glibc_unlikely (l->l_tls_modid == 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
    {
      /* This thread's DTV is not completely current,
         but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
        /* Nope.  */
        return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      /* Walk to the list element containing this module's slot.  */
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      /* We've reached the slot for this module.
         If its generation counter is higher than the DTV's,
         this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
        return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__glibc_unlikely (data == TLS_DTV_UNALLOCATED))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}
883
b6ab06ce
UD
884
/* Record the newly loaded module L in the global dtv slotinfo table,
   appending a fresh list element if L's module ID lies beyond the
   existing ones.  On allocation failure the generation counter is
   still advanced (earlier slots may already reference the new
   generation) and _dl_signal_error is raised.  */
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;                 /* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
              TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  /* The entry becomes valid with the *next* generation, which the
     caller publishes once the load completes.  */
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}