elf/dl-close.c
1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2023 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
18
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stddef.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <libc-lock.h>
29 #include <ldsodefs.h>
30 #include <sys/types.h>
31 #include <sys/mman.h>
32 #include <sysdep-cancel.h>
33 #include <tls.h>
34 #include <stap-probe.h>
35 #include <dl-find_object.h>
36
37 #include <dl-unmap-segments.h>
38
39 /* Special l_idx value used to indicate which objects remain loaded. */
40 #define IDX_STILL_USED -1
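/* During _dl_close_worker, any other l_idx value is a temporary index
   assigned to the namespace's link maps for the mark pass below.  */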
41
42
43 /* Returns true if a non-empty entry was found.  */
44 static bool
45 remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
46 bool should_be_there)
47 {
48 if (idx - disp >= listp->len)
49 {
50 if (listp->next == NULL)
51 {
52 /* The index is not actually valid in the slotinfo list,
53 because this object was closed before it was fully set
54 up due to some error. */
55 assert (! should_be_there);
56 }
57 else
58 {
59 if (remove_slotinfo (idx, listp->next, disp + listp->len,
60 should_be_there))
61 return true;
62
63 /* No non-empty entry. Search from the end of this element's
64 slotinfo array. */
65 idx = disp + listp->len;
66 }
67 }
68 else
69 {
70 struct link_map *old_map = listp->slotinfo[idx - disp].map;
71
72 /* The entry might still be in its unused state if we are closing an
73 object that wasn't fully set up. */
74 if (__glibc_likely (old_map != NULL))
75 {
76 /* Mark the entry as unused. These can be read concurrently. */
77 atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
78 GL(dl_tls_generation) + 1);
79 atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
80 }
81
82 	  /* If this is not the last currently used entry, no need to look
83 further. */
84 if (idx != GL(dl_tls_max_dtv_idx))
85 {
86 /* There is an unused dtv entry in the middle. */
87 GL(dl_tls_dtv_gaps) = true;
88 return true;
89 }
90 }
91
92 while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
93 {
94 --idx;
95
96 if (listp->slotinfo[idx - disp].map != NULL)
97 {
98 /* Found a new last used index. This can be read concurrently. */
99 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
100 return true;
101 }
102 }
103
104   /* No non-empty entry in this list element.  */
105 return false;
106 }
107
108 void
109 _dl_close_worker (struct link_map *map, bool force)
110 {
111 /* One less direct use. */
112 --map->l_direct_opencount;
113
114   /* If _dl_close is called recursively (some destructor calls dlclose),
115 just record that the parent _dl_close will need to do garbage collection
116 again and return. */
117 static enum { not_pending, pending, rerun } dl_close_state;
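  /* Illustrative (hypothetical) case: a destructor registered by MAP runs
       void fini (void) { dlclose (other_handle); }
     The nested call only flips dl_close_state to rerun; the outermost
     _dl_close_worker then repeats its garbage collection pass via the
     retry label below.  */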
118
119 if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
120 || dl_close_state != not_pending)
121 {
122 if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
123 dl_close_state = rerun;
124
125 /* There are still references to this object. Do nothing more. */
126 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
127 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
128 map->l_name, map->l_direct_opencount);
129
130 return;
131 }
132
133 Lmid_t nsid = map->l_ns;
134 struct link_namespaces *ns = &GL(dl_ns)[nsid];
135
136 retry:
137 dl_close_state = pending;
138
139 bool any_tls = false;
140 const unsigned int nloaded = ns->_ns_nloaded;
141
142 /* Run over the list and assign indexes to the link maps. */
143 int idx = 0;
144 for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
145 {
146 l->l_map_used = 0;
147 l->l_map_done = 0;
148 l->l_idx = idx;
149 ++idx;
150 }
151 assert (idx == nloaded);
152
153 /* Keep marking link maps until no new link maps are found. */
154 for (struct link_map *l = ns->_ns_loaded; l != NULL; )
155 {
156 /* next is reset to earlier link maps for remarking. */
157 struct link_map *next = l->l_next;
158 int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL. */
159
160 if (l->l_map_done)
161 {
162 /* Already handled. */
163 l = next;
164 continue;
165 }
166
167 /* Check whether this object is still used. */
168 if (l->l_type == lt_loaded
169 && l->l_direct_opencount == 0
170 && !l->l_nodelete_active
171 /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
172 acquire is sufficient and correct. */
173 && atomic_load_acquire (&l->l_tls_dtor_count) == 0
174 && !l->l_map_used)
175 {
176 l = next;
177 continue;
178 }
179
180 /* We need this object and we handle it now. */
181 l->l_map_used = 1;
182 l->l_map_done = 1;
183 /* Signal the object is still needed. */
184 l->l_idx = IDX_STILL_USED;
185
186 /* Mark all dependencies as used. */
187 if (l->l_initfini != NULL)
188 {
189 /* We are always the zeroth entry, and since we don't include
190 	 ourselves in the dependency analysis, start at 1. */
191 struct link_map **lp = &l->l_initfini[1];
192 while (*lp != NULL)
193 {
194 if ((*lp)->l_idx != IDX_STILL_USED)
195 {
196 assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
197
198 if (!(*lp)->l_map_used)
199 {
200 (*lp)->l_map_used = 1;
201 /* If we marked a new object as used, and we've
202 already processed it, then we need to go back
203 and process again from that point forward to
204 ensure we keep all of its dependencies also. */
205 if ((*lp)->l_idx < next_idx)
206 {
207 next = *lp;
208 next_idx = next->l_idx;
209 }
210 }
211 }
212
213 ++lp;
214 }
215 }
216 /* And the same for relocation dependencies. */
217 if (l->l_reldeps != NULL)
218 for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
219 {
220 struct link_map *jmap = l->l_reldeps->list[j];
221
222 if (jmap->l_idx != IDX_STILL_USED)
223 {
224 assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
225
226 if (!jmap->l_map_used)
227 {
228 jmap->l_map_used = 1;
229 if (jmap->l_idx < next_idx)
230 {
231 next = jmap;
232 next_idx = next->l_idx;
233 }
234 }
235 }
236 }
237
238 l = next;
239 }
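  /* Any link map still showing l_map_used == 0 here is not referenced by
     anything that must stay loaded in this namespace and is therefore a
     candidate for unloading.  */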
240
241 /* Call the destructors in reverse constructor order, and remove the
242 closed link maps from the list. */
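  /* _dl_init_called_list has the most recently constructed object at its
     head, so walking it front to back visits objects in reverse
     constructor order.  */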
243 for (struct link_map **init_called_head = &_dl_init_called_list;
244 *init_called_head != NULL; )
245 {
246 struct link_map *imap = *init_called_head;
247
248       /* _dl_init_called_list is global, to produce a global ordering.
249 Ignore the other namespaces (and link maps that are still used). */
250 if (imap->l_ns != nsid || imap->l_map_used)
251 init_called_head = &imap->l_init_called_next;
252 else
253 {
254 assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
255
256 /* _dl_init_called_list is updated at the same time as
257 l_init_called. */
258 assert (imap->l_init_called);
259
260 if (imap->l_info[DT_FINI_ARRAY] != NULL
261 || imap->l_info[DT_FINI] != NULL)
262 _dl_catch_exception (NULL, _dl_call_fini, imap);
263
264 #ifdef SHARED
265 /* Auditing checkpoint: we remove an object. */
266 _dl_audit_objclose (imap);
267 #endif
268 /* Unlink this link map. */
269 *init_called_head = imap->l_init_called_next;
270 }
271 }
272
273
274 bool unload_any = false;
275 bool scope_mem_left = false;
276 unsigned int unload_global = 0;
277
278 /* For skipping un-unloadable link maps in the second loop. */
279 struct link_map *first_loaded = ns->_ns_loaded;
280
281 /* Iterate over the namespace to find objects to unload. Some
282 unloadable objects may not be on _dl_init_called_list due to
283 dlopen failure. */
284 for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
285 {
286 if (!imap->l_map_used)
287 {
288 /* This object must not be used anymore. */
289 imap->l_removed = 1;
290
291 /* We indeed have an object to remove. */
292 unload_any = true;
293
294 if (imap->l_global)
295 ++unload_global;
296
297 /* Remember where the first dynamically loaded object is. */
298 if (first_loaded == NULL)
299 first_loaded = imap;
300 }
301 /* Else imap->l_map_used. */
302 else if (imap->l_type == lt_loaded)
303 {
304 struct r_scope_elem *new_list = NULL;
305
306 if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
307 {
308 /* The object is still used. But one of the objects we are
309 unloading right now is responsible for loading it. If
310 	     the current object does not have its own scope yet, we
311 have to create one. This has to be done before running
312 the finalizers.
313
314 	     To do this, count the number of dependencies.  */
315 unsigned int cnt;
316 for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
317 ;
318
319 /* We simply reuse the l_initfini list. */
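	  /* This relies on the layout created when the dependencies were
	     mapped: a dependency-ordered copy of the search list is kept in
	     the same allocation, directly after the terminating NULL entry
	     of l_initfini, so the slice starting at cnt + 1 can serve as a
	     scope without allocating new memory.  */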
320 imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
321 imap->l_searchlist.r_nlist = cnt;
322
323 new_list = &imap->l_searchlist;
324 }
325
326 /* Count the number of scopes which remain after the unload.
327 	 When we add the local search list, count it.  Always add
328 one for the terminating NULL pointer. */
329 size_t remain = (new_list != NULL) + 1;
330 bool removed_any = false;
331 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
332 	/* This relies on l_scope[] entries always being set either to the
333 	   map's own l_symbolic_searchlist address, or to some other map's
334 	   l_searchlist address. */
335 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
336 {
337 struct link_map *tmap = (struct link_map *)
338 ((char *) imap->l_scope[cnt]
339 - offsetof (struct link_map, l_searchlist));
340 assert (tmap->l_ns == nsid);
341 if (tmap->l_idx == IDX_STILL_USED)
342 ++remain;
343 else
344 removed_any = true;
345 }
346 else
347 ++remain;
348
349 if (removed_any)
350 {
351 /* Always allocate a new array for the scope. This is
352 necessary since we must be able to determine the last
353 user of the current array. If possible use the link map's
354 memory. */
355 size_t new_size;
356 struct r_scope_elem **newp;
357
358 #define SCOPE_ELEMS(imap) \
359 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
360
361 if (imap->l_scope != imap->l_scope_mem
362 && remain < SCOPE_ELEMS (imap))
363 {
364 new_size = SCOPE_ELEMS (imap);
365 newp = imap->l_scope_mem;
366 }
367 else
368 {
369 new_size = imap->l_scope_max;
370 newp = (struct r_scope_elem **)
371 malloc (new_size * sizeof (struct r_scope_elem *));
372 if (newp == NULL)
373 _dl_signal_error (ENOMEM, "dlclose", NULL,
374 N_("cannot create scope list"));
375 }
376
377 /* Copy over the remaining scope elements. */
378 remain = 0;
379 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
380 {
381 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
382 {
383 struct link_map *tmap = (struct link_map *)
384 ((char *) imap->l_scope[cnt]
385 - offsetof (struct link_map, l_searchlist));
386 if (tmap->l_idx != IDX_STILL_USED)
387 {
388 		  /* Remove the scope, or replace it with the map's own
389 		     scope. */
390 if (new_list != NULL)
391 {
392 newp[remain++] = new_list;
393 new_list = NULL;
394 }
395 continue;
396 }
397 }
398
399 newp[remain++] = imap->l_scope[cnt];
400 }
401 newp[remain] = NULL;
402
403 struct r_scope_elem **old = imap->l_scope;
404
405 imap->l_scope = newp;
406
407 /* No user anymore, we can free it now. */
408 if (old != imap->l_scope_mem)
409 {
410 if (_dl_scope_free (old))
411 /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
412 no need to repeat it. */
413 scope_mem_left = false;
414 }
415 else
416 scope_mem_left = true;
417
418 imap->l_scope_max = new_size;
419 }
420 else if (new_list != NULL)
421 {
422 /* We didn't change the scope array, so reset the search
423 list. */
424 imap->l_searchlist.r_list = NULL;
425 imap->l_searchlist.r_nlist = 0;
426 }
427
428 /* The loader is gone, so mark the object as not having one.
429 Note: l_idx != IDX_STILL_USED -> object will be removed. */
430 if (imap->l_loader != NULL
431 && imap->l_loader->l_idx != IDX_STILL_USED)
432 imap->l_loader = NULL;
433
434 /* Remember where the first dynamically loaded object is. */
435 if (first_loaded == NULL)
436 first_loaded = imap;
437 }
438 }
439
440 /* If there are no objects to unload, do nothing further. */
441 if (!unload_any)
442 goto out;
443
444 #ifdef SHARED
445 /* Auditing checkpoint: we will start deleting objects. */
446 _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
447 #endif
448
449 /* Notify the debugger we are about to remove some loaded objects. */
450 struct r_debug *r = _dl_debug_update (nsid);
451 r->r_state = RT_DELETE;
452 _dl_debug_state ();
453 LIBC_PROBE (unmap_start, 2, nsid, r);
454
455 if (unload_global)
456 {
457 /* Some objects are in the global scope list. Remove them. */
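      /* The surviving entries are compacted in place, preserving their
	 relative order; if all removed objects sit at the tail of the list,
	 it is simply truncated.  */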
458 struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
459 unsigned int i;
460 unsigned int j = 0;
461 unsigned int cnt = ns_msl->r_nlist;
462
463 while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
464 --cnt;
465
466 if (cnt + unload_global == ns_msl->r_nlist)
467 /* Speed up removing most recently added objects. */
468 j = cnt;
469 else
470 for (i = 0; i < cnt; i++)
471 if (ns_msl->r_list[i]->l_removed == 0)
472 {
473 if (i != j)
474 ns_msl->r_list[j] = ns_msl->r_list[i];
475 j++;
476 }
477 ns_msl->r_nlist = j;
478 }
479
480 if (!RTLD_SINGLE_THREAD_P
481 && (unload_global
482 || scope_mem_left
483 || (GL(dl_scope_free_list) != NULL
484 && GL(dl_scope_free_list)->count)))
485 {
486 THREAD_GSCOPE_WAIT ();
487
488 /* Now we can free any queued old scopes. */
489 struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
490 if (fsl != NULL)
491 while (fsl->count > 0)
492 free (fsl->list[--fsl->count]);
493 }
494
495 size_t tls_free_start;
496 size_t tls_free_end;
497 tls_free_start = tls_free_end = NO_TLS_OFFSET;
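  /* NO_TLS_OFFSET doubles as "no chunk collected yet" for the static TLS
     reclamation below.  */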
498
499   /* Protects global and module specific TLS state. */
500 __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
501
502 /* We modify the list of loaded objects. */
503 __rtld_lock_lock_recursive (GL(dl_load_write_lock));
504
505 /* Check each element of the search list to see if all references to
506 it are gone. */
507 for (struct link_map *imap = first_loaded; imap != NULL; )
508 {
509 if (imap->l_map_used)
510 imap = imap->l_next;
511 else
512 {
513 assert (imap->l_type == lt_loaded);
514
515 /* That was the last reference, and this was a dlopen-loaded
516 object. We can unmap it. */
517
518 /* Remove the object from the dtv slotinfo array if it uses TLS. */
519 if (__glibc_unlikely (imap->l_tls_blocksize > 0))
520 {
521 any_tls = true;
522
523 if (GL(dl_tls_dtv_slotinfo_list) != NULL
524 && ! remove_slotinfo (imap->l_tls_modid,
525 GL(dl_tls_dtv_slotinfo_list), 0,
526 imap->l_init_called))
527 /* All dynamically loaded modules with TLS are unloaded. */
528 /* Can be read concurrently. */
529 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
530 GL(dl_tls_static_nelem));
531
532 if (imap->l_tls_offset != NO_TLS_OFFSET
533 && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
534 {
535 /* Collect a contiguous chunk built from the objects in
536 this search list, going in either direction. When the
537 whole chunk is at the end of the used area then we can
538 reclaim it. */
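	      /* For example, with TLS_TCB_AT_TP a block occupies the offsets
		 [l_tls_offset - l_tls_blocksize, l_tls_offset); a block whose
		 end coincides with GL(dl_tls_static_used) lets the used size
		 shrink right away, while other gaps are remembered in
		 tls_free_start/tls_free_end and reclaimed only once they
		 reach the end of the used area.  */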
539 #if TLS_TCB_AT_TP
540 if (tls_free_start == NO_TLS_OFFSET
541 || (size_t) imap->l_tls_offset == tls_free_start)
542 {
543 /* Extend the contiguous chunk being reclaimed. */
544 tls_free_start
545 = imap->l_tls_offset - imap->l_tls_blocksize;
546
547 if (tls_free_end == NO_TLS_OFFSET)
548 tls_free_end = imap->l_tls_offset;
549 }
550 else if (imap->l_tls_offset - imap->l_tls_blocksize
551 == tls_free_end)
552 /* Extend the chunk backwards. */
553 tls_free_end = imap->l_tls_offset;
554 else
555 {
556 /* This isn't contiguous with the last chunk freed.
557 One of them will be leaked unless we can free
558 one block right away. */
559 if (tls_free_end == GL(dl_tls_static_used))
560 {
561 GL(dl_tls_static_used) = tls_free_start;
562 tls_free_end = imap->l_tls_offset;
563 tls_free_start
564 = tls_free_end - imap->l_tls_blocksize;
565 }
566 else if ((size_t) imap->l_tls_offset
567 == GL(dl_tls_static_used))
568 GL(dl_tls_static_used)
569 = imap->l_tls_offset - imap->l_tls_blocksize;
570 else if (tls_free_end < (size_t) imap->l_tls_offset)
571 {
572 /* We pick the later block. It has a chance to
573 be freed. */
574 tls_free_end = imap->l_tls_offset;
575 tls_free_start
576 = tls_free_end - imap->l_tls_blocksize;
577 }
578 }
579 #elif TLS_DTV_AT_TP
580 if (tls_free_start == NO_TLS_OFFSET)
581 {
582 tls_free_start = imap->l_tls_firstbyte_offset;
583 tls_free_end = (imap->l_tls_offset
584 + imap->l_tls_blocksize);
585 }
586 else if (imap->l_tls_firstbyte_offset == tls_free_end)
587 /* Extend the contiguous chunk being reclaimed. */
588 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
589 else if (imap->l_tls_offset + imap->l_tls_blocksize
590 == tls_free_start)
591 /* Extend the chunk backwards. */
592 tls_free_start = imap->l_tls_firstbyte_offset;
593 /* This isn't contiguous with the last chunk freed.
594 One of them will be leaked unless we can free
595 one block right away. */
596 else if (imap->l_tls_offset + imap->l_tls_blocksize
597 == GL(dl_tls_static_used))
598 GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
599 else if (tls_free_end == GL(dl_tls_static_used))
600 {
601 GL(dl_tls_static_used) = tls_free_start;
602 tls_free_start = imap->l_tls_firstbyte_offset;
603 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
604 }
605 else if (tls_free_end < imap->l_tls_firstbyte_offset)
606 {
607 /* We pick the later block. It has a chance to
608 be freed. */
609 tls_free_start = imap->l_tls_firstbyte_offset;
610 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
611 }
612 #else
613 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
614 #endif
615 }
616 }
617
618 /* Reset unique symbols if forced. */
619 if (force)
620 {
621 struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
622 __rtld_lock_lock_recursive (tab->lock);
623 struct unique_sym *entries = tab->entries;
624 if (entries != NULL)
625 {
626 size_t idx, size = tab->size;
627 for (idx = 0; idx < size; ++idx)
628 {
629 /* Clear unique symbol entries that belong to this
630 object. */
631 if (entries[idx].name != NULL
632 && entries[idx].map == imap)
633 {
634 entries[idx].name = NULL;
635 entries[idx].hashval = 0;
636 tab->n_elements--;
637 }
638 }
639 }
640 __rtld_lock_unlock_recursive (tab->lock);
641 }
642
643 /* We can unmap all the maps at once. We determined the
644 start address and length when we loaded the object and
645 the `munmap' call does the rest. */
646 DL_UNMAP (imap);
647
648 /* Finally, unlink the data structure and free it. */
649 #if DL_NNS == 1
650 /* The assert in the (imap->l_prev == NULL) case gives
651 the compiler license to warn that NS points outside
652 the dl_ns array bounds in that case (as nsid != LM_ID_BASE
653 is tantamount to nsid >= DL_NNS). That should be impossible
654 in this configuration, so just assert about it instead. */
655 assert (nsid == LM_ID_BASE);
656 assert (imap->l_prev != NULL);
657 #else
658 if (imap->l_prev == NULL)
659 {
660 assert (nsid != LM_ID_BASE);
661 ns->_ns_loaded = imap->l_next;
662
663 /* Update the pointer to the head of the list
664 we leave for debuggers to examine. */
665 r->r_map = (void *) ns->_ns_loaded;
666 }
667 else
668 #endif
669 imap->l_prev->l_next = imap->l_next;
670
671 --ns->_ns_nloaded;
672 if (imap->l_next != NULL)
673 imap->l_next->l_prev = imap->l_prev;
674
675 /* Update the data used by _dl_find_object. */
676 _dl_find_object_dlclose (imap);
677
678 free (imap->l_versions);
679 if (imap->l_origin != (char *) -1)
680 free ((char *) imap->l_origin);
681
682 free (imap->l_reldeps);
683
684 /* Print debugging message. */
685 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
686 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
687 imap->l_name, imap->l_ns);
688
689 	  /* This name is always allocated.  */
690 free (imap->l_name);
691 /* Remove the list with all the names of the shared object. */
692
693 struct libname_list *lnp = imap->l_libname;
694 do
695 {
696 struct libname_list *this = lnp;
697 lnp = lnp->next;
698 if (!this->dont_free)
699 free (this);
700 }
701 while (lnp != NULL);
702
703 /* Remove the searchlists. */
704 free (imap->l_initfini);
705
706 /* Remove the scope array if we allocated it. */
707 if (imap->l_scope != imap->l_scope_mem)
708 free (imap->l_scope);
709
710 if (imap->l_phdr_allocated)
711 free ((void *) imap->l_phdr);
712
713 if (imap->l_rpath_dirs.dirs != (void *) -1)
714 free (imap->l_rpath_dirs.dirs);
715 if (imap->l_runpath_dirs.dirs != (void *) -1)
716 free (imap->l_runpath_dirs.dirs);
717
718 /* Clear GL(dl_initfirst) when freeing its link_map memory. */
719 if (imap == GL(dl_initfirst))
720 GL(dl_initfirst) = NULL;
721
722 struct link_map *next = imap->l_next;
723 free (imap);
724 imap = next;
725 }
726 }
727
728 __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
729
730   /* If we removed any object which uses TLS, bump the generation counter. */
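  /* Stale DTVs are detected lazily: __tls_get_addr compares the generation
     recorded in a thread's DTV against this counter, so the release store
     below pairs with the acquire loads on the reader side.  */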
731 if (any_tls)
732 {
733 size_t newgen = GL(dl_tls_generation) + 1;
734 if (__glibc_unlikely (newgen == 0))
735 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");
736 /* Can be read concurrently. */
737 atomic_store_release (&GL(dl_tls_generation), newgen);
738
739 if (tls_free_end == GL(dl_tls_static_used))
740 GL(dl_tls_static_used) = tls_free_start;
741 }
742
743 /* TLS is cleaned up for the unloaded modules. */
744 __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
745
746 #ifdef SHARED
747 /* Auditing checkpoint: we have deleted all objects. Also, do not notify
748 auditors of the cleanup of a failed audit module loading attempt. */
749 _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
750 #endif
751
752 if (__builtin_expect (ns->_ns_loaded == NULL, 0)
753 && nsid == GL(dl_nns) - 1)
754 do
755 --GL(dl_nns);
756 while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
757
758 /* Notify the debugger those objects are finalized and gone. */
759 r->r_state = RT_CONSISTENT;
760 _dl_debug_state ();
761 LIBC_PROBE (unmap_complete, 2, nsid, r);
762
763  /* Recheck whether we need to retry before the caller releases the lock. */
764 out:
765 if (dl_close_state == rerun)
766 goto retry;
767
768 dl_close_state = not_pending;
769 }
770
771
772 void
773 _dl_close (void *_map)
774 {
775 struct link_map *map = _map;
776
777 /* We must take the lock to examine the contents of map and avoid
778 concurrent dlopens. */
779 __rtld_lock_lock_recursive (GL(dl_load_lock));
780
781 /* At this point we are guaranteed nobody else is touching the list of
782 loaded maps, but a concurrent dlclose might have freed our map
783 before we took the lock. There is no way to detect this (see below)
784 so we proceed assuming this isn't the case. First see whether we
785 can remove the object at all. */
786 if (__glibc_unlikely (map->l_nodelete_active))
787 {
788 /* Nope. Do nothing. */
789 __rtld_lock_unlock_recursive (GL(dl_load_lock));
790 return;
791 }
792
793 /* At present this is an unreliable check except in the case where the
794 caller has recursively called dlclose and we are sure the link map
795 has not been freed. In a non-recursive dlclose the map itself
796 might have been freed and this access is potentially a data race
797 with whatever other use this memory might have now, or worse we
798 might silently corrupt memory if it looks enough like a link map.
799 POSIX has language in dlclose that appears to guarantee that this
800      should be a detectable case, and given that dlclose should be thread-safe
801 we need this to be a reliable detection.
802 This is bug 20990. */
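  /* Example of the unreliable case: two threads hold the same handle from a
     single dlopen and both call dlclose; the second call may then be reading
     memory the first call already freed.  */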
803 if (__builtin_expect (map->l_direct_opencount, 1) == 0)
804 {
805 __rtld_lock_unlock_recursive (GL(dl_load_lock));
806 _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
807 }
808
809 _dl_close_worker (map, false);
810
811 __rtld_lock_unlock_recursive (GL(dl_load_lock));
812 }