/* Close a shared object opened by `_dl_open'.
- Copyright (C) 1996-2014 Free Software Foundation, Inc.
+ Copyright (C) 1996-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
+ <https://www.gnu.org/licenses/>. */
#include <assert.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#include <bits/libc-lock.h>
+#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
void
-_dl_close_worker (struct link_map *map)
+_dl_close_worker (struct link_map *map, bool force)
{
/* One less direct use. */
--map->l_direct_opencount;
char done[nloaded];
struct link_map *maps[nloaded];
+ /* Clear DF_1_NODELETE to force object deletion. We don't need to touch
+ l_tls_dtor_count because forced object deletion only happens when an
+ error occurs during object load. Destructor registration for TLS
+ non-POD objects should not have happened till then for this
+ object. */
+ if (force)
+ map->l_flags_1 &= ~DF_1_NODELETE;
+
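/* Rough sketch of the kind of caller that passes FORCE == true: the
   dlopen error path unwinding a partially loaded object.  The function
   and variable names below are illustrative assumptions, not the exact
   dl-open.c code.  */
static void
load_failed_cleanup (struct link_map *partial)
{
  /* The object never became visible to user code, so clearing
     DF_1_NODELETE and unmapping it right away is safe.  */
  _dl_close_worker (partial, true);
}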
/* Run over the list and assign indexes to the link maps and enter
them into the MAPS array. */
int idx = 0;
l->l_idx = idx;
maps[idx] = l;
++idx;
+
}
assert (idx == nloaded);
if (l->l_type == lt_loaded
&& l->l_direct_opencount == 0
&& (l->l_flags_1 & DF_1_NODELETE) == 0
+ /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
+ acquire is sufficient and correct. */
+ && atomic_load_acquire (&l->l_tls_dtor_count) == 0
&& !used[done_index])
continue;
}
}
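/* Self-contained C11 sketch of the acquire/release pairing the
   l_tls_dtor_count test above relies on; a generic illustration, not the
   glibc implementation.  A thread running a TLS destructor decrements the
   counter with release semantics once it is done touching the DSO, so a
   closer that observes zero with an acquire load also observes all of the
   destructor's writes and may unmap safely.  */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int dtor_count;   /* stand-in for l_tls_dtor_count */

static void
tls_destructor_finished (void)
{
  /* Release: pairs with the acquire load in may_unmap.  */
  atomic_fetch_sub_explicit (&dtor_count, 1, memory_order_release);
}

static bool
may_unmap (void)
{
  /* Acquire: reading zero here means no destructor for the object is
     still in flight, and its effects are visible to this thread.  */
  return atomic_load_explicit (&dtor_count, memory_order_acquire) == 0;
}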
- /* Sort the entries. */
- _dl_sort_fini (maps, nloaded, used, nsid);
+ /* Sort the entries. We can skip looking for the binary itself, which is
+ at the front of the search list for the main namespace. */
+ _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
+ used + (nsid == LM_ID_BASE), true);
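/* Note: (nsid == LM_ID_BASE) evaluates to 1 for the base namespace, so the
   main executable at maps[0] is excluded from the sort; for any other
   namespace the whole array is passed.  */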
/* Call all termination functions at once. */
#ifdef SHARED
}
}
+ /* Reset unique symbols if forced. */
+ if (force)
+ {
+ struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
+ __rtld_lock_lock_recursive (tab->lock);
+ struct unique_sym *entries = tab->entries;
+ if (entries != NULL)
+ {
+ size_t idx, size = tab->size;
+ for (idx = 0; idx < size; ++idx)
+ {
+ /* Clear unique symbol entries that belong to this
+ object. */
+ if (entries[idx].name != NULL
+ && entries[idx].map == imap)
+ {
+ entries[idx].name = NULL;
+ entries[idx].hashval = 0;
+ tab->n_elements--;
+ }
+ }
+ }
+ __rtld_lock_unlock_recursive (tab->lock);
+ }
+
/* We can unmap all the maps at once. We determined the
start address and length when we loaded the object and
the `munmap' call does the rest. */
DL_UNMAP (imap);
/* Finally, unlink the data structure and free it. */
- if (imap->l_prev != NULL)
- imap->l_prev->l_next = imap->l_next;
- else
+#if DL_NNS == 1
+ /* The assert in the (imap->l_prev == NULL) case gives
+ the compiler license to warn that NS points outside
+ the dl_ns array bounds in that case (as nsid != LM_ID_BASE
+ is tantamount to nsid >= DL_NNS). That should be impossible
+ in this configuration, so just assert about it instead. */
+ assert (nsid == LM_ID_BASE);
+ assert (imap->l_prev != NULL);
+#else
+ if (imap->l_prev == NULL)
{
assert (nsid != LM_ID_BASE);
ns->_ns_loaded = imap->l_next;
we leave for debuggers to examine. */
r->r_map = (void *) ns->_ns_loaded;
}
+ else
+#endif
+ imap->l_prev->l_next = imap->l_next;
--ns->_ns_nloaded;
if (imap->l_next != NULL)
{
struct link_map *map = _map;
- /* First see whether we can remove the object at all. */
+ /* We must take the lock to examine the contents of map and avoid
+ concurrent dlopens. */
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
+
+ /* At this point we are guaranteed nobody else is touching the list of
+ loaded maps, but a concurrent dlclose might have freed our map
+ before we took the lock. There is no way to detect this (see below)
+ so we proceed assuming this isn't the case. First see whether we
+ can remove the object at all. */
if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
{
- assert (map->l_init_called);
/* Nope. Do nothing. */
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
return;
}
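/* User-level illustration of the DF_1_NODELETE early return above.
   Objects opened with RTLD_NODELETE (or linked with -z nodelete) carry
   the flag, so dlclose returns success without unloading them.
   "libfoo.so" is a placeholder name.  */
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  void *h = dlopen ("libfoo.so", RTLD_NOW | RTLD_NODELETE);
  if (h == NULL)
    {
      fprintf (stderr, "dlopen: %s\n", dlerror ());
      return 1;
    }
  /* Succeeds, but the object stays mapped; a later dlopen of the same
     name reuses the resident copy.  */
  dlclose (h);
  return 0;
}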
+ /* At present this is an unreliable check except in the case where the
+ caller has recursively called dlclose and we are sure the link map
+ has not been freed. In a non-recursive dlclose the map itself
+ might have been freed and this access is potentially a data race
+ with whatever other use this memory might have now, or worse we
+ might silently corrupt memory if it looks enough like a link map.
+ POSIX has language in dlclose that appears to guarantee that this
+ should be a detectable case, and given that dlclose should be
+ thread-safe, we need this to be a reliable detection.
+ This is bug 20990. */
if (__builtin_expect (map->l_direct_opencount, 1) == 0)
- GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));
-
- /* Acquire the lock. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
+ {
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
+ }
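/* Sketch of the usage pattern the comment above is about (bug 20990):
   two threads closing the same handle concurrently.  If one dlclose
   frees the link map first, the other reads freed memory when it
   inspects l_direct_opencount.  "libfoo.so" is a placeholder name.  */
#include <dlfcn.h>
#include <pthread.h>

static void *
closer (void *handle)
{
  dlclose (handle);             /* Both threads pass the same handle.  */
  return NULL;
}

int
main (void)
{
  void *h = dlopen ("libfoo.so", RTLD_NOW);
  if (h == NULL)
    return 1;

  pthread_t t1, t2;
  pthread_create (&t1, NULL, closer, h);
  pthread_create (&t2, NULL, closer, h);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  return 0;
}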
- _dl_close_worker (map);
+ _dl_close_worker (map, false);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
}