From 0330a08d8a0a3e4ab29573f045138f9754d0cda4 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Wed, 6 Apr 2005 02:49:59 +0000 Subject: [PATCH] 2005-03-18 Ulrich Drepper [BZ #821] * elf/dl-fini.c (_dl_fini): Split sorting of the maps in separate function _dl_sort_fini. (_dl_sort_fini): New function. * sysdeps/generic/ldsodefs.h: Declare _dl_sort_fini. * elf/dl-close.c (_dl_close): Call _dl_sort_fini before running destructors to call them in the right order. [BZ #821] * include/link.h (struct link_map): Remove l_opencount. Add l_removed. Change type of l_idx to int. * elf/dl-close.c: Basically rewrite. Do not use l_opencount to determine whether a DSO has to be unloaded. Instead compute this in this function. * elf/dl-deps.c: No need to manipulate l_opencount anymore. * elf/dl-lookup.c: Likewise. * elf/rtld.c: Likewise * elf/dl-open.c: Likewise. Use l_init_called to determine whether object was just loaded. * elf/dl-fini.c: Bump l_direct_opencount instead of l_opencount. * elf/dl-load.c (_dl_map_object_from_fd): Do not recognize DSO which is about to be unloaded as a match. (_dl_map_object): Likewise. * elf/do-lookup.h (do_lookup_x): Do not look into DSO which is about to be unloaded. * elf/circleload1.c: Don't use l_opencount anymore. * elf/neededtest.c: Likewise. * elf/neededtest2.c: Likewise. * elf/neededtest3.c: Likewise. * elf/neededtest4.c: Likewise. * elf/unload.c: Likewise. * elf/unload2.c: Likewise. * elf/loadtest.c: Likewise. --- elf/dl-close.c | 360 +++++++++++++++++++++---------------------------- elf/dl-fini.c | 183 ++++++++++++++----------- 2 files changed, 257 insertions(+), 286 deletions(-) diff --git a/elf/dl-close.c b/elf/dl-close.c index f40d5b0d89c..df4f5aed873 100644 --- a/elf/dl-close.c +++ b/elf/dl-close.c @@ -102,18 +102,9 @@ void internal_function _dl_close (void *_map) { - struct reldep_list - { - struct link_map **rellist; - unsigned int nrellist; - unsigned int nhandled; - struct reldep_list *next; - bool handled[0]; - } *reldeps = NULL; - struct link_map **list; struct link_map *map = _map; unsigned int i; - unsigned int *new_opencount; + Lmid_t ns = map->l_ns; #ifdef USE_TLS bool any_tls = false; #endif @@ -124,162 +115,134 @@ _dl_close (void *_map) /* Nope. Do nothing. */ return; - if (__builtin_expect (map->l_opencount, 1) == 0) + if (__builtin_expect (map->l_direct_opencount, 1) == 0) GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open")); /* Acquire the lock. */ __rtld_lock_lock_recursive (GL(dl_load_lock)); /* One less direct use. */ - assert (map->l_direct_opencount > 0); --map->l_direct_opencount; /* Decrement the reference count. */ - if (map->l_opencount > 1 || map->l_type != lt_loaded) + if (map->l_direct_opencount > 1 || map->l_type != lt_loaded) { /* There are still references to this object. Do nothing more. */ if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)) - GLRO(dl_debug_printf) ("\nclosing file=%s; opencount == %u\n", - map->l_name, map->l_opencount); - - /* Decrement the object's reference counter, not the dependencies'. */ - --map->l_opencount; - - /* If the direct use counter reaches zero we have to decrement - all the dependencies' usage counter. 
*/ - if (map->l_direct_opencount == 0) - for (i = 1; i < map->l_searchlist.r_nlist; ++i) - --map->l_searchlist.r_list[i]->l_opencount; + GLRO(dl_debug_printf) ("\nclosing file=%s; direct_opencount == %u\n", + map->l_name, map->l_direct_opencount); __rtld_lock_unlock_recursive (GL(dl_load_lock)); return; } - list = map->l_initfini; + const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded; + char used[nloaded]; + char done[nloaded]; + struct link_map *maps[nloaded]; - /* Compute the new l_opencount values. */ - i = map->l_searchlist.r_nlist; - if (__builtin_expect (i == 0, 0)) - /* This can happen if we handle relocation dependencies for an - object which wasn't loaded directly. */ - for (i = 1; list[i] != NULL; ++i) - ; + /* Run over the list and assign indeces to the link maps and enter + them into the MAPS array. */ + int idx = 0; + for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next) + { + l->l_idx = idx; + maps[idx] = l; + ++idx; + } + assert (idx == nloaded); - unsigned int nopencount = i; - new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int)); + /* Prepare the bitmaps. */ + memset (used, '\0', sizeof (used)); + memset (done, '\0', sizeof (done)); - for (i = 0; list[i] != NULL; ++i) + /* Keep track of the lowest index link map we have covered already. */ + int done_index = -1; + while (++done_index < nloaded) { - list[i]->l_idx = i; - new_opencount[i] = list[i]->l_opencount; - } - --new_opencount[0]; - for (i = 1; list[i] != NULL; ++i) - if ((list[i]->l_flags_1 & DF_1_NODELETE) == 0 - /* Decrement counter. */ - && (assert (new_opencount[i] > 0), --new_opencount[i] == 0)) - { - void mark_removed (struct link_map *remmap) - { - /* Test whether this object was also loaded directly. */ - if (remmap->l_searchlist.r_list != NULL - && remmap->l_direct_opencount > 0) - { - /* In this case we have to decrement all the dependencies of - this object. They are all in MAP's dependency list. */ - unsigned int j; - struct link_map **dep_list = remmap->l_searchlist.r_list; - - for (j = 1; j < remmap->l_searchlist.r_nlist; ++j) - if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE) - || ! dep_list[j]->l_init_called) + struct link_map *l = maps[done_index]; + + if (done[done_index]) + /* Already handled. */ + continue; + + /* Check whether this object is still used. */ + if (l->l_type == lt_loaded + && l->l_direct_opencount == 0 + && (l->l_flags_1 & DF_1_NODELETE) == 0 + && !used[done_index]) + continue; + + /* We need this object and we handle it now. */ + done[done_index] = 1; + used[done_index] = 1; + /* Signal the object is still needed. */ + l->l_idx = -1; + + /* Mark all dependencies as used. */ + if (l->l_initfini != NULL) + { + struct link_map **lp = &l->l_initfini[1]; + while (*lp != NULL) + { + if ((*lp)->l_idx != -1) { - assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist); - assert (new_opencount[dep_list[j]->l_idx] > 0); - if (--new_opencount[dep_list[j]->l_idx] == 0) + assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded); + + if (!used[(*lp)->l_idx]) { - assert (dep_list[j]->l_type == lt_loaded); - mark_removed (dep_list[j]); + used[(*lp)->l_idx] = 1; + if ((*lp)->l_idx - 1 < done_index) + done_index = (*lp)->l_idx - 1; } } - } - if (remmap->l_reldeps != NULL) + ++lp; + } + } + /* And the same for relocation dependencies. 
*/ + if (l->l_reldeps != NULL) + for (unsigned int j = 0; j < l->l_reldepsact; ++j) + { + struct link_map *jmap = l->l_reldeps[j]; + + if (jmap->l_idx != -1) { - unsigned int j; - for (j = 0; j < remmap->l_reldepsact; ++j) + assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded); + + if (!used[jmap->l_idx]) { - struct link_map *depmap = remmap->l_reldeps[j]; - - /* Find out whether this object is in our list. */ - if (depmap->l_idx < nopencount - && list[depmap->l_idx] == depmap) - { - /* Yes, it is. If is has a search list, make a - recursive call to handle this. */ - if (depmap->l_searchlist.r_list != NULL) - { - assert (new_opencount[depmap->l_idx] > 0); - if (--new_opencount[depmap->l_idx] == 0) - { - /* This one is now gone, too. */ - assert (depmap->l_type == lt_loaded); - mark_removed (depmap); - } - } - else - { - /* Otherwise we have to handle the dependency - deallocation here. */ - unsigned int k; - for (k = 0; depmap->l_initfini[k] != NULL; ++k) - { - struct link_map *rl = depmap->l_initfini[k]; - - if (rl->l_idx < nopencount - && list[rl->l_idx] == rl) - { - assert (new_opencount[rl->l_idx] > 0); - if (--new_opencount[rl->l_idx] == 0) - { - /* Another module to remove. */ - assert (rl->l_type == lt_loaded); - mark_removed (rl); - } - } - else - { - assert (rl->l_opencount > 0); - if (--rl->l_opencount == 0) - mark_removed (rl); - } - } - } - } + used[jmap->l_idx] = 1; + if (jmap->l_idx - 1 < done_index) + done_index = jmap->l_idx - 1; } } } + } - mark_removed (list[i]); - } - assert (new_opencount[0] == 0); + /* Sort the entries. */ + _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns); - /* Call all termination functions at once. */ - for (i = 0; list[i] != NULL; ++i) + bool unload_any = false; + unsigned int first_loaded = ~0; + for (i = 0; i < nloaded; ++i) { - struct link_map *imap = list[i]; - if (new_opencount[i] == 0 && imap->l_type == lt_loaded - && (imap->l_flags_1 & DF_1_NODELETE) == 0) - { - /* When debugging print a message first. */ - if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0)) - GLRO(dl_debug_printf) ("\ncalling fini: %s [%lu]\n\n", - imap->l_name, imap->l_ns); + struct link_map *imap = maps[i]; + + if (!used[i]) + { + assert (imap->l_type == lt_loaded + && (imap->l_flags_1 & DF_1_NODELETE) == 0); /* Call its termination function. Do not do it for half-cooked objects. */ if (imap->l_init_called) { + /* When debugging print a message first. */ + if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0)) + GLRO(dl_debug_printf) ("\ncalling fini: %s [%lu]\n\n", + imap->l_name, ns); + if (imap->l_info[DT_FINI_ARRAY] != NULL) { ElfW(Addr) *array = @@ -299,42 +262,60 @@ _dl_close (void *_map) + imap->l_info[DT_FINI]->d_un.d_ptr))) (); } - /* This object must not be used anymore. We must remove the - reference from the scope. */ - unsigned int j; - struct link_map **searchlist = map->l_searchlist.r_list; - unsigned int nsearchlist = map->l_searchlist.r_nlist; - -#ifndef NDEBUG - bool found = false; -#endif - for (j = 0; j < nsearchlist; ++j) - if (imap == searchlist[j]) - { - /* This is the object to remove. Copy all the - following ones. */ - while (++j < nsearchlist) - searchlist[j - 1] = searchlist[j]; - - searchlist[j - 1] = NULL; + /* This object must not be used anymore. */ + imap->l_removed = 1; - --map->l_searchlist.r_nlist; + /* We indeed have an object to remove. */ + unload_any = true; -#ifndef NDEBUG - found = true; -#endif - break; - } - assert (found); + /* Remember where the first dynamically loaded object is. 
*/ + if (i < first_loaded) + first_loaded = i; } + /* Else used[i]. */ + else if (imap->l_type == lt_loaded) + { + if (imap->l_searchlist.r_list == NULL + && imap->l_initfini != NULL) + { + /* The object is still used. But the object we are + unloading right now is responsible for loading it. If + the current object does not have it's own scope yet we + have to create one. This has to be done before running + the finalizers. + + To do this count the number of dependencies. */ + unsigned int cnt; + for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt) + ; + + /* We simply reuse the l_initfini list. */ + imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1]; + imap->l_searchlist.r_nlist = cnt; + + for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt) + if (imap->l_scope[cnt] == &map->l_searchlist) + { + imap->l_scope[cnt] = &imap->l_searchlist; + break; + } + } - /* Store the new l_opencount value. */ - imap->l_opencount = new_opencount[i]; + /* The loader is gone, so mark the object as not having one. + Note: l_idx == -1 -> object will be removed. */ + if (imap->l_loader != NULL && imap->l_loader->l_idx != -1) + imap->l_loader = NULL; - /* Just a sanity check. */ - assert (imap->l_type == lt_loaded || imap->l_opencount > 0); + /* Remember where the first dynamically loaded object is. */ + if (i < first_loaded) + first_loaded = i; + } } + /* If there are no objects to unload, do nothing further. */ + if (!unload_any) + goto out; + /* Notify the debugger we are about to remove some loaded objects. */ _r_debug.r_state = RT_DELETE; GLRO(dl_debug_state) (); @@ -347,12 +328,12 @@ _dl_close (void *_map) /* Check each element of the search list to see if all references to it are gone. */ - for (i = 0; list[i] != NULL; ++i) + for (i = first_loaded; i < nloaded; ++i) { - struct link_map *imap = list[i]; - if (imap->l_opencount == 0 && imap->l_type == lt_loaded) + struct link_map *imap = maps[i]; + if (!used[i]) { - struct libname_list *lnp; + assert (imap->l_type == lt_loaded); /* That was the last reference, and this was a dlopen-loaded object. We can unmap it. */ @@ -482,39 +463,13 @@ _dl_close (void *_map) if (imap->l_origin != (char *) -1) free ((char *) imap->l_origin); - /* If the object has relocation dependencies save this - information for latter. */ - if (__builtin_expect (imap->l_reldeps != NULL, 0)) - { - struct reldep_list *newrel; - - newrel = (struct reldep_list *) alloca (sizeof (*reldeps) - + (imap->l_reldepsact - * sizeof (bool))); - newrel->rellist = imap->l_reldeps; - newrel->nrellist = imap->l_reldepsact; - newrel->next = reldeps; - - newrel->nhandled = imap->l_reldepsact; - unsigned int j; - for (j = 0; j < imap->l_reldepsact; ++j) - { - /* Find out whether this object is in our list. */ - if (imap->l_reldeps[j]->l_idx < nopencount - && list[imap->l_reldeps[j]->l_idx] == imap->l_reldeps[j]) - /* Yes, it is. */ - newrel->handled[j] = true; - else - newrel->handled[j] = false; - } - - reldeps = newrel; - } + free (imap->l_reldeps); /* This name always is allocated. */ free (imap->l_name); /* Remove the list with all the names of the shared object. */ - lnp = imap->l_libname; + + struct libname_list *lnp = imap->l_libname; do { struct libname_list *this = lnp; @@ -525,8 +480,7 @@ _dl_close (void *_map) while (lnp != NULL); /* Remove the searchlists. */ - if (imap != map) - free (imap->l_initfini); + free (imap->l_initfini); /* Remove the scope array if we allocated it. 
*/ if (imap->l_scope != imap->l_scope_mem) @@ -560,26 +514,8 @@ _dl_close (void *_map) _r_debug.r_state = RT_CONSISTENT; GLRO(dl_debug_state) (); - /* Now we can perhaps also remove the modules for which we had - dependencies because of symbol lookup. */ - while (__builtin_expect (reldeps != NULL, 0)) - { - while (reldeps->nrellist-- > 0) - /* Some of the relocation dependencies might be on the - dependency list of the object we are closing right now. - They were already handled. Do not close them again. */ - if (reldeps->nrellist < reldeps->nhandled - && ! reldeps->handled[reldeps->nrellist]) - _dl_close (reldeps->rellist[reldeps->nrellist]); - - free (reldeps->rellist); - - reldeps = reldeps->next; - } - - free (list); - /* Release the lock. */ + out: __rtld_lock_unlock_recursive (GL(dl_load_lock)); } libc_hidden_def (_dl_close) @@ -657,3 +593,7 @@ libc_freeres_fn (free_mem) } #endif } + +#ifdef SHARED +#include "dl-fini.c" +#endif diff --git a/elf/dl-fini.c b/elf/dl-fini.c index f43f4a00ed2..09643968288 100644 --- a/elf/dl-fini.c +++ b/elf/dl-fini.c @@ -23,10 +23,97 @@ #include +void +internal_function +_dl_sort_fini (struct link_map *l, struct link_map **maps, size_t nmaps, + char *used, Lmid_t ns) +{ + if (ns == LM_ID_BASE) + /* The main executable always comes first. */ + l = l->l_next; + + for (; l != NULL; l = l->l_next) + /* Do not handle ld.so in secondary namespaces and object which + are not removed. */ + if (l == l->l_real && l->l_idx != -1) + { + /* Find the place in the 'maps' array. */ + unsigned int j; + for (j = ns == LM_ID_BASE ? 1 : 0; maps[j] != l; ++j) + assert (j < nmaps); + + /* Find all object for which the current one is a dependency + and move the found object (if necessary) in front. */ + for (unsigned int k = j + 1; k < nmaps; ++k) + { + struct link_map **runp = maps[k]->l_initfini; + if (runp != NULL) + { + while (*runp != NULL) + if (*runp == l) + { + struct link_map *here = maps[k]; + + /* Move it now. */ + memmove (&maps[j] + 1, + &maps[j], (k - j) * sizeof (struct link_map *)); + maps[j] = here; + + if (used != NULL) + { + char here_used = used[k]; + + memmove (&used[j] + 1, + &used[j], (k - j) * sizeof (char)); + used[j] = here_used; + } + + ++j; + + break; + } + else + ++runp; + } + + if (__builtin_expect (maps[k]->l_reldeps != NULL, 0)) + { + unsigned int m = maps[k]->l_reldepsact; + struct link_map **relmaps = maps[k]->l_reldeps; + + while (m-- > 0) + { + if (relmaps[m] == l) + { + struct link_map *here = maps[k]; + + /* Move it now. */ + memmove (&maps[j] + 1, + &maps[j], + (k - j) * sizeof (struct link_map *)); + maps[j] = here; + + if (used != NULL) + { + char here_used = used[k]; + + memmove (&used[j] + 1, + &used[j], (k - j) * sizeof (char)); + used[j] = here_used; + } + + break; + } + } + } + } + } +} + +#if !defined SHARED || defined IS_IN_rtld /* Type of the constructor functions. */ typedef void (*fini_t) (void); - void internal_function _dl_fini (void) @@ -48,16 +135,16 @@ _dl_fini (void) /* We run the destructors of the main namespaces last. As for the other namespaces, we pick run the destructors in them in reverse order of the namespace ID. */ - for (Lmid_t cnt = DL_NNS - 1; cnt >= 0; --cnt) + for (Lmid_t ns = DL_NNS - 1; ns >= 0; --ns) { /* Protect against concurrent loads and unloads. */ __rtld_lock_lock_recursive (GL(dl_load_lock)); - unsigned int nloaded = GL(dl_ns)[cnt]._ns_nloaded; + unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded; /* XXX Could it be (in static binaries) that there is no object loaded? 
*/ - assert (cnt != LM_ID_BASE || nloaded > 0); + assert (ns != LM_ID_BASE || nloaded > 0); /* Now we can allocate an array to hold all the pointers and copy the pointers in. */ @@ -76,86 +163,27 @@ _dl_fini (void) unsigned int i; struct link_map *l; - for (l = GL(dl_ns)[cnt]._ns_loaded, i = 0; l != NULL; l = l->l_next) + for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next) /* Do not handle ld.so in secondary namespaces. */ if (l == l->l_real) { assert (i < nloaded); - maps[i++] = l; + maps[i] = l; + l->l_idx = i; + ++i; - /* Bump l_opencount of all objects so that they are not - dlclose()ed from underneath us. */ - ++l->l_opencount; + /* Bump l_direct_opencount of all objects so that they are + not dlclose()ed from underneath us. */ + ++l->l_direct_opencount; } - assert (cnt != LM_ID_BASE || i == nloaded); - assert (cnt == LM_ID_BASE || i == nloaded || i == nloaded - 1); + assert (ns != LM_ID_BASE || i == nloaded); + assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1); unsigned int nmaps = i; if (nmaps != 0) - { - /* Now we have to do the sorting. */ - l = GL(dl_ns)[cnt]._ns_loaded; - if (cnt == LM_ID_BASE) - /* The main executable always comes first. */ - l = l->l_next; - for (; l != NULL; l = l->l_next) - /* Do not handle ld.so in secondary namespaces. */ - if (l == l->l_real) - { - /* Find the place in the 'maps' array. */ - unsigned int j; - for (j = cnt == LM_ID_BASE ? 1 : 0; maps[j] != l; ++j) - assert (j < nmaps); - - /* Find all object for which the current one is a dependency - and move the found object (if necessary) in front. */ - for (unsigned int k = j + 1; k < nmaps; ++k) - { - struct link_map **runp = maps[k]->l_initfini; - if (runp != NULL) - { - while (*runp != NULL) - if (*runp == l) - { - struct link_map *here = maps[k]; - - /* Move it now. */ - memmove (&maps[j] + 1, - &maps[j], - (k - j) * sizeof (struct link_map *)); - maps[j++] = here; - - break; - } - else - ++runp; - } - - if (__builtin_expect (maps[k]->l_reldeps != NULL, 0)) - { - unsigned int m = maps[k]->l_reldepsact; - struct link_map **relmaps = maps[k]->l_reldeps; - - while (m-- > 0) - { - if (relmaps[m] == l) - { - struct link_map *here = maps[k]; - - /* Move it now. */ - memmove (&maps[j] + 1, - &maps[j], - (k - j) * sizeof (struct link_map *)); - maps[j] = here; - - break; - } - } - } - } - } - } + /* Now we have to do the sorting. */ + _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nmaps, NULL, ns); /* We do not rely on the linked list of loaded object anymore from this point on. We have our own list here (maps). The various @@ -191,7 +219,7 @@ _dl_fini (void) 0)) _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", l->l_name[0] ? l->l_name : rtld_progname, - cnt); + ns); /* First see whether an array is given. */ if (l->l_info[DT_FINI_ARRAY] != NULL) @@ -211,14 +239,17 @@ _dl_fini (void) } /* Correct the previous increment. */ - --l->l_opencount; + --l->l_direct_opencount; } } +#ifdef SHARED if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0)) _dl_debug_printf ("\nruntime linker statistics:\n" " final number of relocations: %lu\n" "final number of relocations from cache: %lu\n", GL(dl_num_relocations), GL(dl_num_cache_relocations)); +#endif } +#endif -- 2.47.2
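A note on the approach, with a minimal standalone sketch below. This is not glibc code: struct module, mark_used, and the scenario in main are invented for illustration, with struct module standing in for struct link_map and carrying only the fields the sketch needs. The rewritten _dl_close no longer keeps a per-object l_opencount in sync. Instead it computes the unload set on the fly: an object is marked used if it was opened directly, carries DF_1_NODELETE, or is reachable through the l_initfini or l_reldeps list of another used object. Whatever stays unmarked is unloaded, after _dl_sort_fini has reordered the map array so that a dependent object's destructors run before those of the objects it depends on.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  /* Illustrative model only, not glibc code.  struct module stands in
     for struct link_map and carries only what this sketch needs.  */
  struct module
  {
    const char *name;
    int idx;			/* Position in the maps[] array.  */
    unsigned int direct_opencount;
    bool nodelete;
    struct module **deps;	/* NULL-terminated, like l_initfini[1..].  */
  };

  /* Decide which modules are still needed.  A module is used if it was
     opened directly (direct_opencount > 0), is marked NODELETE, or is a
     dependency of a used module.  Whenever a module with a smaller index
     becomes used, rewind so its own dependencies get marked too,
     mirroring the done_index loop in the rewritten _dl_close.  */
  static void
  mark_used (struct module **maps, int nmaps, bool *used)
  {
    bool done[nmaps];
    memset (done, 0, sizeof (done));
    memset (used, 0, nmaps * sizeof (bool));

    int idx = -1;
    while (++idx < nmaps)
      {
	struct module *m = maps[idx];

	if (done[idx])
	  /* Already handled.  */
	  continue;
	if (m->direct_opencount == 0 && !m->nodelete && !used[idx])
	  /* Nothing references this module: candidate for unloading.  */
	  continue;

	done[idx] = true;
	used[idx] = true;

	/* Everything this module depends on is used as well.  */
	if (m->deps != NULL)
	  for (struct module **dp = m->deps; *dp != NULL; ++dp)
	    if (!used[(*dp)->idx])
	      {
		used[(*dp)->idx] = true;
		if ((*dp)->idx - 1 < idx)
		  idx = (*dp)->idx - 1;
	      }
      }
  }

  int
  main (void)
  {
    /* a (dlopen'ed, now being closed) depends on b, b on c; d is still
       open and also depends on c, so only a and b may be unloaded.  */
    struct module c = { "c", 2, 0, false, NULL };
    struct module *bdeps[] = { &c, NULL };
    struct module b = { "b", 1, 0, false, bdeps };
    struct module *adeps[] = { &b, &c, NULL };
    struct module a = { "a", 0, 0, false, adeps };
    struct module *ddeps[] = { &c, NULL };
    struct module d = { "d", 3, 1, false, ddeps };

    struct module *maps[] = { &a, &b, &c, &d };
    bool used[4];

    mark_used (maps, 4, used);

    /* maps[] is already ordered dependents-first here; _dl_sort_fini
       would otherwise reorder it before this loop.  */
    for (int i = 0; i < 4; ++i)
      if (!used[i])
	printf ("calling fini: %s\n", maps[i]->name);
    return 0;
  }

Compiled as C99 this prints fini calls for "a" and "b" only; "c" stays loaded because the still-open "d" depends on it, which is the situation the used[]/done[] bookkeeping in the new dl-close.c is there to detect.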