From: Brooks Moses Date: Mon, 24 Feb 2014 19:04:49 +0000 (-0800) Subject: Revert upstream removal of async-safe TLS patches. X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3e9a530aaeb891ed13b8b2c44df7b0d040f1096a;p=thirdparty%2Fglibc.git Revert upstream removal of async-safe TLS patches. --- diff --git a/elf/Versions b/elf/Versions index 80cf1c47aae..05eba2ab58a 100644 --- a/elf/Versions +++ b/elf/Versions @@ -61,6 +61,7 @@ ld { _dl_argv; _dl_find_dso_for_object; _dl_get_tls_static_info; _dl_deallocate_tls; _dl_make_stack_executable; _dl_rtld_di_serinfo; _dl_starting_up; + _dl_clear_dtv; _rtld_global; _rtld_global_ro; # Only here for gdb while a better method is developed. diff --git a/elf/dl-misc.c b/elf/dl-misc.c index b7174994cd8..b3c56ab6b04 100644 --- a/elf/dl-misc.c +++ b/elf/dl-misc.c @@ -440,3 +440,144 @@ _dl_strtoul (const char *nptr, char **endptr) return result; } + +/* To support accessing TLS variables from signal handlers, we need an + async signal safe memory allocator. These routines are never + themselves invoked reentrantly (all calls to them are surrounded by + signal masks) but may be invoked concurrently from many threads. + The current implementation is not particularly performant nor space + efficient, but it will be used rarely (and only in binaries that use + dlopen.) The API matches that of malloc() and friends. */ + +struct __signal_safe_allocator_header +{ + size_t size; + void *start; +}; + +static inline struct __signal_safe_allocator_header * +ptr_to_signal_safe_allocator_header (void *ptr) +{ + return (struct __signal_safe_allocator_header *) + ((char *) (ptr) - sizeof (struct __signal_safe_allocator_header)); +} + +void *weak_function +__signal_safe_memalign (size_t boundary, size_t size) +{ + struct __signal_safe_allocator_header *header; + + if (boundary < sizeof (*header)) + boundary = sizeof (*header); + + /* Boundary must be a power of two. */ + if (!powerof2 (boundary)) + return NULL; + + size_t pg = GLRO (dl_pagesize); + size_t padded_size; + if (boundary <= pg) + { + /* We'll get a pointer certainly aligned to boundary, so just + add one more boundary-sized chunk to hold the header. */ + padded_size = roundup (size, boundary) + boundary; + } + else + { + /* If we want K pages aligned to a J-page boundary, K+J+1 pages + contains at least one such region that isn't directly at the start + (so we can place the header.) This is wasteful, but you're the one + who wanted 64K-aligned TLS. 
*/ + padded_size = roundup (size, pg) + boundary + pg; + } + + + size_t actual_size = roundup (padded_size, pg); + void *actual = mmap (NULL, actual_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (actual == MAP_FAILED) + return NULL; + + if (boundary <= pg) + { + header = actual + boundary - sizeof (*header); + } + else + { + intptr_t actual_pg = ((intptr_t) actual) / pg; + intptr_t boundary_pg = boundary / pg; + intptr_t start_pg = actual_pg + boundary_pg; + start_pg -= start_pg % boundary_pg; + if (start_pg > (actual_pg + 1)) + { + int ret = munmap (actual, (start_pg - actual_pg - 1) * pg); + assert (ret == 0); + actual = (void *) ((start_pg - 1) * pg); + } + char *start = (void *) (start_pg * pg); + header = ptr_to_signal_safe_allocator_header (start); + } + + header->size = actual_size; + header->start = actual; + void *ptr = header; + ptr += sizeof (*header); + if (((intptr_t) ptr) % boundary != 0) + _dl_fatal_printf ("__signal_safe_memalign produced incorrect alignment\n"); + return ptr; +} + +void * weak_function +__signal_safe_malloc (size_t size) +{ + return __signal_safe_memalign (1, size); +} + +void weak_function +__signal_safe_free (void *ptr) +{ + if (ptr == NULL) + return; + + struct __signal_safe_allocator_header *header + = ptr_to_signal_safe_allocator_header (ptr); + int ret = munmap (header->start, header->size); + + assert (ret == 0); +} + +void * weak_function +__signal_safe_realloc (void *ptr, size_t size) +{ + if (size == 0) + { + __signal_safe_free (ptr); + return NULL; + } + if (ptr == NULL) + return __signal_safe_malloc (size); + + struct __signal_safe_allocator_header *header + = ptr_to_signal_safe_allocator_header (ptr); + size_t old_size = header->size; + if (old_size - sizeof (*header) >= size) + return ptr; + + void *new_ptr = __signal_safe_malloc (size); + if (new_ptr == NULL) + return NULL; + + memcpy (new_ptr, ptr, old_size); + __signal_safe_free (ptr); + + return new_ptr; +} + +void * weak_function +__signal_safe_calloc (size_t nmemb, size_t size) +{ + void *ptr = __signal_safe_malloc (nmemb * size); + if (ptr == NULL) + return NULL; + return memset (ptr, 0, nmemb * size); +} diff --git a/elf/dl-open.c b/elf/dl-open.c index 0cf786b5467..178270ca155 100644 --- a/elf/dl-open.c +++ b/elf/dl-open.c @@ -495,7 +495,10 @@ TLS generation counter wrapped! Please report this.")); generation of the DSO we are allocating data for. */ _dl_update_slotinfo (imap->l_tls_modid); #endif - + /* We do this iteration under a signal mask in dl-reloc; why not + here? Because these symbols are new and dlopen hasn't + returned yet. So we can't possibly be racing with a TLS + access to them from another thread. */ GL(dl_init_static_tls) (imap); assert (imap->l_need_tls_init == 0); } diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c index 0ed9398ad05..86964535664 100644 --- a/elf/dl-reloc.c +++ b/elf/dl-reloc.c @@ -16,8 +16,10 @@ License along with the GNU C Library; if not, see . */ +#include #include #include +#include #include #include #include @@ -70,8 +72,6 @@ _dl_try_allocate_static_tls (struct link_map *map) size_t offset = GL(dl_tls_static_used) + (freebytes - n * map->l_tls_align - map->l_tls_firstbyte_offset); - - map->l_tls_offset = GL(dl_tls_static_used) = offset; #elif TLS_DTV_AT_TP /* dl_tls_static_used includes the TCB at the beginning. 
*/ size_t offset = (ALIGN_UP(GL(dl_tls_static_used) @@ -83,7 +83,36 @@ _dl_try_allocate_static_tls (struct link_map *map) if (used > GL(dl_tls_static_size)) goto fail; - map->l_tls_offset = offset; +#else +# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" +#endif + /* We've computed the new value we want, now try to install it. */ + ptrdiff_t val; + if ((val = map->l_tls_offset) == NO_TLS_OFFSET) + { + /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to + change it go from NO_TLS_OFFSET to some other value. We use + compare_and_exchange to ensure only one attempt succeeds. We + don't actually need any memory ordering here, but _acq is the + weakest available. */ + (void ) atomic_compare_and_exchange_bool_acq (&map->l_tls_offset, + offset, + NO_TLS_OFFSET); + val = map->l_tls_offset; + assert (val != NO_TLS_OFFSET); + } + if (val != offset) + { + /* We'd like to set a static offset for this section, but another + thread has already used a dynamic TLS block for it. Since we can + only use static offsets if everyone does (and it's not practical + to move that thread's dynamic block), we have to fail. */ + goto fail; + } + /* We installed the value; now update the globals. */ +#if TLS_TCB_AT_TP + GL(dl_tls_static_used) = offset; +#elif TLS_DTV_AT_TP map->l_tls_firstbyte_offset = GL(dl_tls_static_used); GL(dl_tls_static_used) = used; #else @@ -114,8 +143,17 @@ void __attribute_noinline__ _dl_allocate_static_tls (struct link_map *map) { - if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET - || _dl_try_allocate_static_tls (map)) + /* We wrap this in a signal mask because it has to iterate all threads + (including this one) and update this map's TLS entry. A signal handler + accessing TLS would try to do the same update and break. */ + sigset_t old; + _dl_mask_all_signals (&old); + int err = -1; + if (map->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET) + err = _dl_try_allocate_static_tls (map); + + _dl_unmask_signals (&old); + if (err != 0) { _dl_signal_error (0, map->l_name, NULL, N_("\ cannot allocate memory in static TLS block")); diff --git a/elf/dl-tls.c b/elf/dl-tls.c index c87caf13d6a..7770ec3be51 100644 --- a/elf/dl-tls.c +++ b/elf/dl-tls.c @@ -17,6 +17,7 @@ . */ #include +#include #include #include #include @@ -283,7 +284,7 @@ allocate_dtv (void *result) initial set of modules. This should avoid in most cases expansions of the dtv. */ dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS; - dtv = calloc (dtv_length + 2, sizeof (dtv_t)); + dtv = __signal_safe_calloc (dtv_length + 2, sizeof (dtv_t)); if (dtv != NULL) { /* This is the initial length of the dtv. */ @@ -533,6 +534,18 @@ _dl_allocate_tls (void *mem) } rtld_hidden_def (_dl_allocate_tls) +void +internal_function +_dl_clear_dtv (dtv_t *dtv) +{ + for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt) + if (! dtv[1 + cnt].pointer.is_static + && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED) + __signal_safe_free (dtv[1 + cnt].pointer.val); + memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t)); +} + +rtld_hidden_def (_dl_clear_dtv) void _dl_deallocate_tls (void *tcb, bool dealloc_tcb) @@ -541,11 +554,11 @@ _dl_deallocate_tls (void *tcb, bool dealloc_tcb) /* We need to free the memory allocated for non-static TLS. */ for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt) - free (dtv[1 + cnt].pointer.to_free); + __signal_safe_free (dtv[1 + cnt].pointer.to_free); /* The array starts with dtv[-1]. 
*/ if (dtv != GL(dl_initial_dtv)) - free (dtv - 1); + __signal_safe_free (dtv - 1); if (dealloc_tcb) free (*tcb_to_pointer_to_free_location (tcb)); @@ -572,52 +585,24 @@ rtld_hidden_def (_dl_deallocate_tls) # define GET_ADDR_OFFSET ti->ti_offset # endif -/* Allocate one DTV entry. */ -static struct dtv_pointer -allocate_dtv_entry (size_t alignment, size_t size) -{ - if (powerof2 (alignment) && alignment <= _Alignof (max_align_t)) - { - /* The alignment is supported by malloc. */ - void *ptr = malloc (size); - return (struct dtv_pointer) { ptr, ptr }; - } - - /* Emulate memalign to by manually aligning a pointer returned by - malloc. First compute the size with an overflow check. */ - size_t alloc_size = size + alignment; - if (alloc_size < size) - return (struct dtv_pointer) {}; - - /* Perform the allocation. This is the pointer we need to free - later. */ - void *start = malloc (alloc_size); - if (start == NULL) - return (struct dtv_pointer) {}; - - /* Find the aligned position within the larger allocation. */ - void *aligned = (void *) roundup ((uintptr_t) start, alignment); - - return (struct dtv_pointer) { .val = aligned, .to_free = start }; -} - -static struct dtv_pointer -allocate_and_init (struct link_map *map) +static void +allocate_and_init (dtv_t *dtv, struct link_map *map) { - struct dtv_pointer result = allocate_dtv_entry - (map->l_tls_align, map->l_tls_blocksize); - if (result.val == NULL) + void *newp; + newp = __signal_safe_memalign (map->l_tls_align, map->l_tls_blocksize); + if (newp == NULL) oom (); - /* Initialize the memory. */ - memset (__mempcpy (result.val, map->l_tls_initimage, + /* Initialize the memory. Since this is our thread's space, we are + under a signal mask, and no one has touched this section before, + we can safely just overwrite whatever's there. */ + memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size), '\0', map->l_tls_blocksize - map->l_tls_initimage_size); - return result; + dtv->pointer.val = newp; } - struct link_map * _dl_update_slotinfo (unsigned long int req_modid) { @@ -656,7 +641,15 @@ _dl_update_slotinfo (unsigned long int req_modid) the entry we need. */ size_t new_gen = listp->slotinfo[idx].gen; size_t total = 0; - + sigset_t old; + + _dl_mask_all_signals (&old); + /* We use the signal mask as a lock against reentrancy here. + Check that a signal taken before the lock didn't already + update us. */ + dtv = THREAD_DTV (); + if (dtv[0].counter >= listp->slotinfo[idx].gen) + goto out; /* We have to look through the entire dtv slotinfo list. */ listp = GL(dl_tls_dtv_slotinfo_list); do @@ -676,6 +669,8 @@ _dl_update_slotinfo (unsigned long int req_modid) if (gen <= dtv[0].counter) continue; + size_t modid = total + cnt; + /* If there is no map this means the entry is empty. */ struct link_map *map = listp->slotinfo[cnt].map; if (map == NULL) @@ -684,7 +679,7 @@ _dl_update_slotinfo (unsigned long int req_modid) { /* If this modid was used at some point the memory might still be allocated. */ - free (dtv[total + cnt].pointer.to_free); + __signal_safe_free (dtv[total + cnt].pointer.to_free); dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED; dtv[total + cnt].pointer.to_free = NULL; } @@ -692,9 +687,8 @@ _dl_update_slotinfo (unsigned long int req_modid) continue; } + assert (modid == map->l_tls_modid); /* Check whether the current dtv array is large enough. */ - size_t modid = map->l_tls_modid; - assert (total + cnt == modid); if (dtv[-1].counter < modid) { /* Resize the dtv. 
*/ @@ -711,7 +705,7 @@ _dl_update_slotinfo (unsigned long int req_modid) dtv entry free it. */ /* XXX Ideally we will at some point create a memory pool. */ - free (dtv[modid].pointer.to_free); + __signal_safe_free (dtv[modid].pointer.to_free); dtv[modid].pointer.val = TLS_DTV_UNALLOCATED; dtv[modid].pointer.to_free = NULL; @@ -725,6 +719,8 @@ _dl_update_slotinfo (unsigned long int req_modid) /* This will be the new maximum generation counter. */ dtv[0].counter = new_gen; + out: + _dl_unmask_signals (&old); } return the_map; @@ -750,6 +746,39 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map) the_map = listp->slotinfo[idx].map; } +#if 0 + sigset_t old; + _dl_mask_all_signals (&old); + + /* As with update_slotinfo, we use the sigmask as a check against + reentrancy. */ + if (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED) + { + assert (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED); + _dl_unmask_signals (&old); + + return (char *) dtv[GET_ADDR_MODULE].pointer.val + GET_ADDR_OFFSET; + } + + /* Synchronize against a parallel dlopen() forcing this variable + into static storage. If that happens, we have to be more careful + about initializing the area, as that dlopen() will be iterating + the threads to do so itself. */ + ptrdiff_t offset; + if ((offset = the_map->l_tls_offset) == NO_TLS_OFFSET) + { + /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to + change it go from NO_TLS_OFFSET to some other value. We use + compare_and_exchange to ensure only one attempt succeeds. We + don't actually need any memory ordering here, but _acq is the + weakest available. */ + (void) atomic_compare_and_exchange_bool_acq (&the_map->l_tls_offset, + FORCED_DYNAMIC_TLS_OFFSET, + NO_TLS_OFFSET); + offset = the_map->l_tls_offset; + assert (offset != NO_TLS_OFFSET); + } + #endif /* Make sure that, if a dlopen running in parallel forces the variable into static storage, we'll wait until the address in the @@ -789,6 +818,36 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map) assert (result.to_free != NULL); return (char *) result.val + GET_ADDR_OFFSET; + +#if 0 + if (offset == FORCED_DYNAMIC_TLS_OFFSET) + { + allocate_and_init (&dtv[GET_ADDR_MODULE], the_map); + } + else + { + void **pp = &dtv[GET_ADDR_MODULE].pointer.val; + while (atomic_forced_read (*pp) == TLS_DTV_UNALLOCATED) + { + /* for lack of a better (safe) thing to do, just spin. + Someone else (not us; it's done under a signal mask) set + this map to a static TLS offset, and they'll iterate all + threads to initialize it. They'll eventually write + to pointer.val, at which point we know they've fully + completed initialization. */ + atomic_delay (); + } + /* Make sure we've picked up their initialization of the actual + block; this pairs against the write barrier in + init_one_static_tls, guaranteeing that we see their write of + the tls_initimage into the static region. 
*/ + atomic_read_barrier (); + } + assert (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED); + _dl_unmask_signals (&old); + + return (char *) dtv[GET_ADDR_MODULE].pointer.val + GET_ADDR_OFFSET; +#endif } diff --git a/nptl/Makefile b/nptl/Makefile index 40680e854e0..093df4fa1fd 100644 --- a/nptl/Makefile +++ b/nptl/Makefile @@ -374,6 +374,7 @@ tests += tst-cancelx2 tst-cancelx3 tst-cancelx4 tst-cancelx5 \ tst-oncex3 tst-oncex4 ifeq ($(build-shared),yes) tests += tst-atfork2 tst-tls4 tst-_res1 tst-fini1 tst-compat-forwarder +tests += tst-tls7 tests-internal += tst-tls3 tst-tls3-malloc tst-tls5 tst-stackguard1 tests-nolibpthread += tst-fini1 ifeq ($(have-z-execstack),yes) @@ -389,6 +390,7 @@ modules-names = tst-atfork2mod tst-tls3mod tst-tls4moda tst-tls4modb \ tst-tls5modd tst-tls5mode tst-tls5modf tst-stack4mod \ tst-_res1mod1 tst-_res1mod2 tst-fini1mod \ tst-join7mod tst-compat-forwarder-mod +modules-names += tst-tls7mod ifneq ($(with-clang),yes) modules-names += tst-execstack-mod endif @@ -406,6 +408,7 @@ tst-tls5modc.so-no-z-defs = yes tst-tls5modd.so-no-z-defs = yes tst-tls5mode.so-no-z-defs = yes tst-tls5modf.so-no-z-defs = yes +tst-tls7mod.so-no-z-defs = yes ifeq ($(build-shared),yes) # Build all the modules even when not actually running test programs. @@ -600,6 +603,12 @@ $(objpfx)tst-tls5: $(objpfx)tst-tls5mod.so $(shared-thread-library) LDFLAGS-tst-tls5 = $(no-as-needed) LDFLAGS-tst-tls5mod.so = -Wl,-soname,tst-tls5mod.so +# ensure free(malloc()) isn't optimized out +CFLAGS-tst-tls7.c = -fno-builtin-malloc -fno-builtin-free +$(objpfx)tst-tls7: $(libdl) $(shared-thread-library) +$(objpfx)tst-tls7.out: $(objpfx)tst-tls7mod.so +$(objpfx)tst-tls7mod.so: $(shared-thread-library) + ifeq ($(build-shared),yes) $(objpfx)tst-tls6.out: tst-tls6.sh $(objpfx)tst-tls5 \ $(objpfx)tst-tls5moda.so $(objpfx)tst-tls5modb.so \ diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c index b374f4794d0..37576562f8e 100644 --- a/nptl/allocatestack.c +++ b/nptl/allocatestack.c @@ -239,9 +239,7 @@ get_cached_stack (size_t *sizep, void **memp) /* Clear the DTV. */ dtv_t *dtv = GET_DTV (TLS_TPADJ (result)); - for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt) - free (dtv[1 + cnt].pointer.to_free); - memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t)); + _dl_clear_dtv (dtv); /* Re-initialize the TLS. */ _dl_allocate_tls_init (TLS_TPADJ (result)); @@ -1244,6 +1242,15 @@ init_one_static_tls (struct pthread *curp, struct link_map *map) /* Initialize the memory. */ memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), '\0', map->l_tls_blocksize - map->l_tls_initimage_size); + + /* Fill in the DTV slot so that a later LD/GD access will find it. */ + dtv[map->l_tls_modid].pointer.is_static = true; + /* Pairs against the read barrier in tls_get_attr_tail, guaranteeing + any thread waiting for an update to pointer.val sees the + initimage write. */ + atomic_write_barrier (); + dtv[map->l_tls_modid].pointer.val = dest; + } void diff --git a/nptl/tst-tls7.c b/nptl/tst-tls7.c new file mode 100644 index 00000000000..3e85a6ec85c --- /dev/null +++ b/nptl/tst-tls7.c @@ -0,0 +1,143 @@ +/* Copyright (C) 2013 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + + +/* This test checks that TLS in a dlopened object works when first accessed + from a signal handler. */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void * +spin (void *ignored) +{ + while (1) + { + /* busywork */ + free (malloc (128)); + } + + /* never reached */ + return NULL; +} + +static void (*tls7mod_action) (int, siginfo_t *, void *); + +static void +action (int signo, siginfo_t *info, void *ignored) +{ + sem_t *sem = info->si_value.sival_ptr; + + atomic_read_barrier (); + assert (tls7mod_action != NULL); + (*tls7mod_action) (signo, info, ignored); + + /* This sem_post may trigger dlclose, which will invalidate tls7mod_action. + It is important to do that only after tls7mod_action is no longer + active. */ + sem_post (sem); +} + +int +do_test (void) +{ + pthread_t th[10]; + + for (int i = 0; i < 10; ++i) + { + if (pthread_create (&th[i], NULL, spin, NULL)) + { + puts ("pthread_create failed"); + exit (1); + } + } +#define NITERS 75 + + for (int i = 0; i < NITERS; ++i) + { + void *h = dlopen ("tst-tls7mod.so", RTLD_LAZY); + if (h == NULL) + { + puts ("dlopen failed"); + exit (1); + } + + tls7mod_action = dlsym (h, "action"); + if (tls7mod_action == NULL) + { + puts ("dlsym for action failed"); + exit (1); + } + atomic_write_barrier (); + + struct sigaction sa; + sa.sa_sigaction = action; + sigemptyset (&sa.sa_mask); + sa.sa_flags = SA_SIGINFO; + if (sigaction (SIGUSR1, &sa, NULL)) + { + puts ("sigaction failed"); + exit (1); + } + + sem_t sem; + if (sem_init (&sem, 0, 0)) + { + puts ("sem_init failed"); + } + + sigval_t val; + val.sival_ptr = &sem; + for (int i = 0; i < 10; ++i) + { + if (pthread_sigqueue (th[i], SIGUSR1, val)) + { + puts ("pthread_sigqueue failed"); + } + } + + + for (int i = 0; i < 10; ++i) + { + if (sem_wait (&sem)) + { + puts ("sem_wait failed"); + } + } + + /* Paranoia. */ + tls7mod_action = NULL; + + if (dlclose (h)) + { + puts ("dlclose failed"); + exit (1); + } + } + return 0; +} + +#define TIMEOUT 8 + +#define TEST_FUNCTION do_test () +#include "../test-skeleton.c" diff --git a/nptl/tst-tls7mod.c b/nptl/tst-tls7mod.c new file mode 100644 index 00000000000..da5af56370b --- /dev/null +++ b/nptl/tst-tls7mod.c @@ -0,0 +1,40 @@ +/* Copyright (C) 2013 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* Dynamic module with TLS to be accessed by a signal handler to check safety + of that mode. 
*/ + +#include +#include +#include + +/* This is an unlikely value to see in incorrectly initialized TLS + block -- make sure we're initialized properly. */ +static __thread intptr_t tls_data = 0xdeadbeef; + +void +action (int signo, siginfo_t *info, void *ignored) +{ + if (tls_data != 0xdeadbeef) + { + write (STDOUT_FILENO, "wrong TLS value\n", 17); + _exit (1); + } + + /* arbitrary choice, just write something unique-ish. */ + tls_data = (intptr_t) info; +} diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h index 5e7224bb499..7326e118449 100644 --- a/sysdeps/generic/ldsodefs.h +++ b/sysdeps/generic/ldsodefs.h @@ -257,6 +257,11 @@ extern unsigned long int _dl_higher_prime_number (unsigned long int n) /* A stripped down strtoul-like implementation. */ uint64_t _dl_strtoul (const char *, char **) attribute_hidden; +/* Mask every signal, returning the previous sigmask in OLD. */ +extern void _dl_mask_all_signals (sigset_t *old) internal_function; +/* Undo _dl_mask_all_signals. */ +extern void _dl_unmask_signals (sigset_t *old) internal_function; + /* Function used as argument for `_dl_receive_error' function. The arguments are the error code, error string, and the objname the error occurred in. */ @@ -1096,6 +1101,17 @@ extern void *_dl_allocate_tls_storage (void) attribute_hidden; extern void *_dl_allocate_tls_init (void *); rtld_hidden_proto (_dl_allocate_tls_init) +/* Remove all allocated dynamic TLS regions from a DTV + for reuse by new thread. */ +extern void _dl_clear_dtv (dtv_t *dtv) internal_function; +rtld_hidden_proto (_dl_clear_dtv) + +extern void *__signal_safe_memalign (size_t boundary, size_t size); +extern void *__signal_safe_malloc (size_t size); +extern void __signal_safe_free (void *ptr); +extern void *__signal_safe_realloc (void *ptr, size_t size); +extern void *__signal_safe_calloc (size_t nmemb, size_t size); + /* Deallocate memory allocated with _dl_allocate_tls. */ extern void _dl_deallocate_tls (void *tcb, bool dealloc_tcb); rtld_hidden_proto (_dl_deallocate_tls) diff --git a/sysdeps/mach/hurd/dl-sysdep.h b/sysdeps/mach/hurd/dl-sysdep.h index 9a1f353a8d4..dff8b9be16f 100644 --- a/sysdeps/mach/hurd/dl-sysdep.h +++ b/sysdeps/mach/hurd/dl-sysdep.h @@ -29,3 +29,10 @@ # define DL_ARGV_NOT_RELRO 1 # define LIBC_STACK_END_NOT_RELRO 1 #endif + +#include +inline void _dl_mask_all_signals (sigset_t *) internal_function; +inline void _dl_mask_all_signals (sigset_t *) { } + +inline void _dl_unmask_all_signals (sigset_t *) internal_function; +inline void _dl_unmask_all_signals (sigset_t *) { } diff --git a/sysdeps/unix/sysv/linux/dl-sysdep.c b/sysdeps/unix/sysv/linux/dl-sysdep.c index b4cda3486a0..f8d1d5d0583 100644 --- a/sysdeps/unix/sysv/linux/dl-sysdep.c +++ b/sysdeps/unix/sysv/linux/dl-sysdep.c @@ -19,6 +19,7 @@ /* Linux needs some special initialization, but otherwise uses the generic dynamic linker system interface code. */ +#include #include #include #include @@ -130,3 +131,48 @@ _dl_discover_osversion (void) return version; } + +/* Mask every signal, returning the previous sigmask in OLD. */ +void +internal_function +_dl_mask_all_signals (sigset_t *old) +{ + int ret; + sigset_t new; + + sigfillset (&new); + + /* This function serves as a replacement to pthread_sigmask, which + isn't available from within the dynamic linker since it would require + linking with libpthread. We duplicate some of the functionality here + to avoid requiring libpthread. 
This isn't quite identical to + pthread_sigmask in that we do not mask internal signals used for + cancellation and setxid handling. This disables asynchronous + cancellation for the duration the signals are disabled, but it's a + small window, and prevents any problems with the use of TLS variables + in the signal handlers that would have executed. */ + + /* It's very important we don't touch errno here, as that's TLS; since this + gets called from get_tls_addr we might end up recursing. */ + + INTERNAL_SYSCALL_DECL (err); + + ret = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &new, old, + _NSIG / 8); + + assert (ret == 0); +} + +/* Return sigmask to what it was before a call to _dl_mask_all_signals. */ +void +internal_function +_dl_unmask_signals (sigset_t *old) +{ + int ret; + INTERNAL_SYSCALL_DECL (err); + + ret = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, old, NULL, + _NSIG / 8); + + assert (ret == 0); +} diff --git a/sysdeps/unix/sysv/linux/dl-sysdep.h b/sysdeps/unix/sysv/linux/dl-sysdep.h index 9750145404d..0c64545ed36 100644 --- a/sysdeps/unix/sysv/linux/dl-sysdep.h +++ b/sysdeps/unix/sysv/linux/dl-sysdep.h @@ -30,4 +30,8 @@ /* Get version of the OS. */ extern int _dl_discover_osversion (void) attribute_hidden; # define HAVE_DL_DISCOVER_OSVERSION 1 + +#include +void _dl_mask_all_signals (sigset_t *) internal_function; +void _dl_unmask_all_signals (sigset_t *) internal_function; #endif
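
The allocator added to elf/dl-misc.c above keeps a small header (the mmap'ed start address and total length) immediately in front of the pointer it hands out, so __signal_safe_free can recover and unmap the whole region with no global bookkeeping. A rough standalone model of that layout, restricted to boundaries no larger than one page, might look like the sketch below; the demo_* names are placeholders for illustration only, and the real code uses GLRO (dl_pagesize) and handles larger boundaries by over-mapping.

/* Illustrative model of the header-before-pointer mmap scheme used by
   __signal_safe_memalign/__signal_safe_free.  Not the ld.so code: only
   boundaries up to a page are handled, and sysconf stands in for
   GLRO (dl_pagesize).  */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct header
{
  size_t size;   /* Length of the whole mapping, for munmap.  */
  void *start;   /* Address returned by mmap.  */
};

static void *
demo_memalign (size_t boundary, size_t size)
{
  size_t pg = (size_t) sysconf (_SC_PAGESIZE);
  if (boundary < sizeof (struct header))
    boundary = sizeof (struct header);
  if ((boundary & (boundary - 1)) != 0 || boundary > pg)
    return NULL;                /* Power of two, at most a page, for brevity.  */

  /* Round the payload up to the boundary and add one boundary-sized chunk
     so the header fits immediately before the aligned payload.  */
  size_t padded = ((size + boundary - 1) & ~(boundary - 1)) + boundary;
  size_t total = (padded + pg - 1) & ~(pg - 1);

  void *map = mmap (NULL, total, PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (map == MAP_FAILED)
    return NULL;

  struct header *h = (struct header *) ((char *) map + boundary
                                        - sizeof (struct header));
  h->size = total;
  h->start = map;
  return (char *) h + sizeof (struct header);   /* Aligned to BOUNDARY.  */
}

static void
demo_free (void *ptr)
{
  if (ptr == NULL)
    return;
  struct header *h = (struct header *) ((char *) ptr - sizeof (struct header));
  int ret = munmap (h->start, h->size);
  assert (ret == 0);
}

int
main (void)
{
  void *p = demo_memalign (64, 1000);
  assert (p != NULL && ((uintptr_t) p % 64) == 0);
  memset (p, 0xab, 1000);
  demo_free (p);
  return 0;
}

Because every allocation is its own anonymous mapping, the routines share no user-space state and can run concurrently from many threads without a lock, at the cost the comment in dl-misc.c already concedes: they are neither fast nor space-efficient, which is acceptable since they are reached only through dlopen'd TLS.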
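
The dl-reloc.c hunk depends on l_tls_offset making at most one transition away from NO_TLS_OFFSET: the static-TLS path in dlopen and the forced-dynamic path in __tls_get_addr each attempt a single compare-and-exchange, and the loser simply reads back whatever value won. A minimal standalone sketch of that handshake, using C11 atomics and pthreads instead of glibc's internal atomic_compare_and_exchange_bool_acq; the 0 and -1 constants stand in for NO_TLS_OFFSET and FORCED_DYNAMIC_TLS_OFFSET, which are per-architecture in the real tree:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define NO_TLS_OFFSET                0   /* Stand-in value.  */
#define FORCED_DYNAMIC_TLS_OFFSET   -1   /* Stand-in value.  */

static _Atomic ptrdiff_t l_tls_offset = NO_TLS_OFFSET;

/* dlopen path: try to install a static offset (0x80 is arbitrary).  */
static void *
try_static (void *arg)
{
  ptrdiff_t expected = NO_TLS_OFFSET;
  atomic_compare_exchange_strong (&l_tls_offset, &expected, (ptrdiff_t) 0x80);
  return NULL;
}

/* __tls_get_addr path: force the module to stay dynamic.  */
static void *
force_dynamic (void *arg)
{
  ptrdiff_t expected = NO_TLS_OFFSET;
  atomic_compare_exchange_strong (&l_tls_offset, &expected,
                                  (ptrdiff_t) FORCED_DYNAMIC_TLS_OFFSET);
  return NULL;
}

int
main (void)
{
  pthread_t a, b;
  pthread_create (&a, NULL, try_static, NULL);
  pthread_create (&b, NULL, force_dynamic, NULL);
  pthread_join (a, NULL);
  pthread_join (b, NULL);

  /* Exactly one of the two transitions can have happened.  */
  ptrdiff_t val = atomic_load (&l_tls_offset);
  assert (val == 0x80 || val == FORCED_DYNAMIC_TLS_OFFSET);
  printf ("l_tls_offset settled on %td\n", val);
  return 0;
}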