/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

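  /* The removed module was the last used entry.  Scan backwards through
     this list element for the new highest module ID still in use.  In
     the first element (disp == 0) stop before the slots of the
     initially loaded modules and the unused slot 0, which never go
     away.  */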
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

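/* The core of dlclose: drop the direct reference count of MAP, compute
   which objects in its namespace are no longer reachable, run their
   destructors, and unmap them.  If FORCE, also clear matching entries
   from the namespace's unique symbol table.  The caller must hold
   GL(dl_load_lock).  */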
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Put the dlclose'd map first, so that its destructor runs first.
     The map variable is NULL after a retry.  */
  if (map != NULL)
    {
      maps[map->l_idx] = maps[0];
      maps[map->l_idx]->l_idx = map->l_idx;
      maps[0] = map;
      maps[0]->l_idx = 0;
    }

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  Unless retrying, the maps[0] object (the
     original argument to dlclose) needs to remain first, so that its
     destructor runs first.  */
  _dl_sort_maps (maps, nloaded, /* force_first */ map != NULL, true);

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            _dl_catch_exception (NULL, _dl_call_fini, imap);

#ifdef SHARED
          /* Auditing checkpoint: we will start deleting objects.
             This is supposed to happen before la_objclose (see _dl_fini),
             but only once per non-recursive dlclose call.  */
          if (!unload_any)
            _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);

          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list: the dependency-order
                 copy of the list is stored right behind the NULL
                 terminator, starting at l_initfini[cnt + 1].  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             If we add the local search list, count it as well.  Always
             add one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope, or replace it with the
                             map's own scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

  /* Notify the debugger we are about to remove some loaded objects.
     LA_ACT_DELETE has already been signalled above, when the first
     object to be unloaded was found.  */
  struct r_debug *r = _dl_debug_update (nsid);
  _dl_debug_change_state (r, RT_DELETE);
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

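  /* Wait for all threads to leave their GSCOPE critical sections (the
     lock-free read side of the scope lists) before the old scope arrays
     and the global scope entries just removed can safely go away.  */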
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

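  /* tls_free_start/tls_free_end delimit a single contiguous range of
     static TLS space belonging to unloaded modules; if it ends up at
     the end of the used area, GL(dl_tls_static_used) is lowered below
     to reclaim it.  */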
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module-specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
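                  /* With TLS_TCB_AT_TP, offsets are measured downwards
                     from the thread pointer, so a module's block covers
                     the offset range [l_tls_offset - l_tls_blocksize,
                     l_tls_offset].  */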
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
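                  /* With TLS_DTV_AT_TP, offsets grow upwards from the
                     thread pointer, so a module's block covers
                     [l_tls_firstbyte_offset,
                     l_tls_offset + l_tls_blocksize).  */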
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS, bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_release (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

  /* Notify the debugger those objects are finalized and gone.  */
  _dl_debug_change_state (r, RT_CONSISTENT);
  LIBC_PROBE (unmap_complete, 2, nsid, r);

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

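  /* If the highest-numbered namespaces are now empty, lower GL(dl_nns)
     so their slots can be reused.  */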
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Recheck whether we need to rerun the garbage collection; a
     recursive dlclose may have been recorded while the destructors
     ran.  */
 out:
  if (dl_close_state == rerun)
    {
      /* The map may have been deallocated.  */
      map = NULL;
      goto retry;
    }

  dl_close_state = not_pending;
}


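/* Implementation of dlclose; MAP is the link map of the object to be
   closed.  Takes GL(dl_load_lock) and calls _dl_close_worker.  */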
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be
     thread-safe we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}