/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002,2003,2004,2005,2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
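/* For instance, if DL_NNS is defined as 16 in ldsodefs.h (a common
   value), this reserves 64 + 16 * 100 = 1664 bytes of static TLS
   beyond what the initially loaded modules require; the exact figure
   depends on how DL_NNS is configured.  */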

/* Value used for dtv entries for which the allocation is delayed.  */
#define TLS_DTV_UNALLOCATED	((void *) -1l)


/* Out-of-memory handler.  */
#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
#endif


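/* Return the next unused TLS module ID.  Gaps left behind by unloaded
   modules are reused when possible; otherwise the maximum index is
   bumped.  */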
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that dl_tls_dtv_slotinfo_list is not allocated
         yet when the function is called for the first time.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}


#ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* At this point there is only one element in
     dl_tls_dtv_slotinfo_list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

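  /* A rough worked example of the "firstbyte" computation used below
     (illustrative numbers only): assume a module whose TLS segment has
     p_align == 16 and p_vaddr mod p_align == 4, so
     l_tls_firstbyte_offset is 4.  Then
       firstbyte = (-4) & (16 - 1) = 12,
     i.e. the block is placed 12 bytes before a 16-byte boundary, which
     puts its first byte at an address congruent to 4 mod 16, mirroring
     the position the data had in the file.  */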
#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}


/* This is called only when the data structure setup was skipped at
   startup because there was no need for it then.  Now we have
   dynamically loaded something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
#endif

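/* Allocate the dtv for a new thread whose TCB is at RESULT.  Once
   installed, the dtv as seen through GET_DTV has dtv[-1].counter
   holding the number of usable module slots and dtv[0].counter holding
   the generation count; the per-module entries start at dtv[1],
   indexed by module ID.  */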
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}


/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}


void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
              TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}


void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

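      /* Module IDs start at 1; slot 0 of the very first list element is
         unused, so the loop below starts at index 1 only on the first
         iteration (while TOTAL is still 0).  */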
      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          if (map->l_tls_offset == NO_TLS_OFFSET)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[map->l_tls_modid].pointer.is_static = false;
              continue;
            }

          assert (map->l_tls_modid == total + cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer.val = dest;
          dtv[map->l_tls_modid].pointer.is_static = true;
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)

void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)


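/* Free the TLS resources of a thread: every dynamically allocated
   module block recorded in the dtv, the dtv itself, and, if
   DEALLOC_TCB is true, the static TLS block/TCB storage as well.  */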
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)


#ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif


static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}


struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo_list array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know
     that dynamic loading for this module is completed and this is
     the last load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  /* If this modid was used at some point the memory
                     might still be allocated.  */
                  if (! dtv[total + cnt].pointer.is_static
                      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
                    {
                      free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Reallocate the dtv.  */
                  dtv_t *newp;
                  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                  size_t oldsize = dtv[-1].counter;

                  assert (map->l_tls_modid <= newsize);

                  if (dtv == GL(dl_initial_dtv))
                    {
                      /* This is the initial dtv that was allocated
                         during rtld startup using the dl-minimal.c
                         malloc instead of the real malloc.  We can't
                         free it, we have to abandon the old storage.  */

                      newp = malloc ((2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
                    }
                  else
                    {
                      newp = realloc (&dtv[-1],
                                      (2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                    }

                  newp[0].counter = newsize;

                  /* Clear the newly allocated part.  */
                  memset (newp + 2 + oldsize, '\0',
                          (newsize - oldsize) * sizeof (dtv_t));

                  /* Point dtv to the generation counter.  */
                  dtv = &newp[1];

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              /* This module is loaded dynamically.  We defer memory
                 allocation.  */
              dtv[modid].pointer.is_static = false;
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}


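/* Return the address, in the calling thread, of the TLS variable
   described by the argument (module ID and offset).  The thread's dtv
   is brought up to date first if needed, and the module's TLS block is
   allocated on first use.  */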
/* The generic dynamic and local dynamic models cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    the_map = _dl_update_slotinfo (GET_ADDR_MODULE);

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now.  */
      if (the_map == NULL)
        {
          /* Find the link map for this module.  */
          size_t idx = GET_ADDR_MODULE;
          struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

          while (idx >= listp->len)
            {
              idx -= listp->len;
              listp = listp->next;
            }

          the_map = listp->slotinfo[idx].map;
        }

      p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
      dtv[GET_ADDR_MODULE].pointer.is_static = false;
    }

  return (char *) p + GET_ADDR_OFFSET;
}
#endif


/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
internal_function
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__builtin_expect (l->l_tls_modid == 0, 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      /* This thread's DTV is not completely current,
         but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
        /* Nope.  */
        return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      /* We've reached the slot for this module.
         If its generation counter is higher than the DTV's,
         this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
        return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}


void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully, add modules
     containing TLS data to the dtv info table.  We might have to
     increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
              TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
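  /* The entry is stamped with dl_tls_generation + 1 on the assumption
     that the caller (the dlopen machinery in dl-open.c) bumps the
     global generation counter once after all new modules have been
     registered.  */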
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}