/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>

/* Type of the destructor functions called via DT_FINI and
   DT_FINI_ARRAY.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


#ifdef USE_TLS
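/* The dtv slotinfo information lives in a chained list of array chunks
   (struct dtv_slotinfo_list).  IDX is a global TLS module index and
   DISP the number of entries in the chunks already skipped, so
   IDX - DISP indexes into the current chunk.  */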
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }
  /* No non-empty entry in this list element.  */
  return false;
}
#endif


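/* _dl_close_worker does the actual work of dlclose: it marks every
   object that is still reachable (directly opened, DF_1_NODELETE, or a
   regular or relocation dependency of a survivor), runs the destructors
   of the unreachable ones, rebuilds the scopes of the survivors, lets
   the debugger and any LD_AUDIT modules know, and finally unmaps the
   dead objects and reclaims their TLS slots and bookkeeping data.  The
   caller must hold GL(dl_load_lock).  */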
void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
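
  /* Now do a conservative mark pass over the objects: an object
     survives if it was opened directly, must not be deleted, or is
     reachable from another survivor.  When marking a dependency whose
     index lies below DONE_INDEX, the loop below is rewound so that the
     dependency's own dependencies get marked as well.  */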

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
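
  /* The list of objects to be finalized is sorted so that, as far as
     the dependency graph allows, the destructor of a dependent object
     runs before the destructors of the objects it depends on.  */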

  /* Sort the entries.  */
  _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

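              /* The DT_FINI_ARRAY functions are called in reverse order
                 of the array.  Functions a compiler registers for
                 __attribute__ ((destructor)) typically end up here; the
                 legacy DT_FINI entry point follows afterwards.  */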
              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we are about to remove this object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list, count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on each l_scope[] entry always pointing either
               to the map's own l_symbolic_searchlist or to some map's
               l_searchlist.  */
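            /* Subtracting offsetof (struct link_map, l_searchlist) maps
               a scope element back to the link map embedding it, in the
               style of a container_of macro.  */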
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      struct dl_scope_free_list *fsl;

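      /* Wait for all threads that might still be walking one of the old
         scope lists; after THREAD_GSCOPE_WAIT returns, no thread can
         reference them anymore and the queued old arrays can be
         freed.  */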
      THREAD_GSCOPE_WAIT ();
      /* Now we can free any queued old scopes.  */
      if ((fsl = GL(dl_scope_free_list)) != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

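  /* TLS_FREE_START and TLS_FREE_END delimit one contiguous chunk of
     static TLS space being reclaimed.  Only a chunk that ends at the
     end of the currently used area can really be returned; a
     non-adjacent block stays lost until process exit.  */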
#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS, bump the generation
     counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck whether we need to retry; the load lock is released by the
     caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


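/* _dl_close is the low-level backend of the public dlclose.  A minimal
   sketch of how it is reached (user code, not part of this file;
   "libfoo.so" is a hypothetical library):

     void *h = dlopen ("libfoo.so", RTLD_NOW);
     ...
     dlclose (h);    // eventually calls _dl_close with this handle

   The opaque handle returned by dlopen is the object's link_map.  */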
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}


#ifdef USE_TLS
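/* Free the chain of slotinfo list elements, starting from the tail.
   An element can only be freed if its successors are already gone and
   none of its own slots still references a loaded module.  */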
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif


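/* libc_freeres_fn registers FREE_MEM with __libc_freeres, which
   memory-debugging tools such as valgrind can invoke at process exit
   so that memory deliberately kept allocated by the dynamic linker is
   not reported as leaked.  */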
libc_freeres_fn (free_mem)
{
  for (Lmid_t nsid = 0; nsid < DL_NNS; ++nsid)
    if (__builtin_expect (GL(dl_ns)[nsid]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[nsid]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program have been
           unloaded.  Free the memory allocated for the global scope
           variable.  */
        struct link_map **old = GL(dl_ns)[nsid]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[nsid]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[nsid]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup; it was set up later using the
           normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif

  void *scope_free_list = GL(dl_scope_free_list);
  GL(dl_scope_free_list) = NULL;
  free (scope_free_list);
}