/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
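
/* Worked example (added for exposition; not part of the upstream file):
   assume dynamically loaded TLS modules occupy slotinfo entries up to
   GL(dl_tls_max_dtv_idx) == N, and module N is dlclose'd.
   remove_slotinfo clears slot N (bumping its generation so stale dtv
   entries can be detected) and then scans backwards for the new highest
   in-use ID.  Closing a module below N instead leaves a hole and sets
   GL(dl_tls_dtv_gaps), allowing a later dlopen to reuse the ID.  */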

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage
     collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
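
  /* Illustration (added for exposition): the recursion guarded against
     above is a destructor that itself calls dlclose, e.g.

         __attribute__ ((destructor)) static void
         fini (void)
         {
           dlclose (other_handle);   // re-enters _dl_close_worker
         }

     where other_handle is a placeholder.  The nested call merely sets
     dl_close_state to rerun and returns; the outermost call detects
     this at the end and repeats its garbage collection pass.  */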

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Put the dlclose'd map first, so that its destructor runs first.
     The map variable is NULL after a retry.  */
  if (map != NULL)
    {
      maps[map->l_idx] = maps[0];
      maps[map->l_idx]->l_idx = map->l_idx;
      maps[0] = map;
      maps[0]->l_idx = 0;
    }

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
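
  /* At this point the mark phase is complete: l_map_used is set on every
     object reachable through l_initfini or l_reldeps from an object that
     must stay.  Rewinding done_index whenever an already-processed map
     gains a mark turns the loop above into a fixpoint iteration over the
     dependency graph.  (Comment added for exposition.)  */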

  /* Sort the entries.  Unless retrying, the maps[0] object (the
     original argument to dlclose) needs to remain first, so that its
     destructor runs first.  */
  _dl_sort_maps (maps, nloaded, /* force_first */ map != NULL, true);
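
  /* The sort places dependent objects before their dependencies, so each
     destructor runs before the destructors of the objects it may still
     call into (see _dl_sort_maps in elf/dl-sort-maps.c; comment added
     for exposition).  */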

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            _dl_catch_exception (NULL, _dl_call_fini, imap);

#ifdef SHARED
          /* Auditing checkpoint: we will start deleting objects.
             This is supposed to happen before la_objclose (see _dl_fini),
             but only once per non-recursive dlclose call.  */
          if (!unload_any)
            _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);

          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
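
  /* Note (added for exposition): the scope pruning above recovers the
     link map owning a scope entry with a container_of-style cast:

         struct link_map *tmap = (struct link_map *)
           ((char *) imap->l_scope[cnt]
            - offsetof (struct link_map, l_searchlist));

     This is valid because, as noted in the counting loop, every
     l_scope[] element is either the map's own l_symbolic_searchlist or
     an l_searchlist embedded in some link map.  */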

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

  /* Notify the debugger we are about to remove some loaded objects.
     LA_ACT_DELETE has already been signalled above for !unload_any.  */
  struct r_debug *r = _dl_debug_update (nsid);
  _dl_debug_change_state (r, RT_DELETE);
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
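
  /* THREAD_GSCOPE_WAIT provides an RCU-style grace period: it blocks
     until every thread has left the global scope, which threads enter
     while performing symbol lookups.  After that, no thread can still
     hold a pointer to one of the old scope arrays queued on
     GL(dl_scope_free_list), so they can be freed.  (Comment added for
     exposition.)  */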

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
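
          /* Worked example of the reclaim arithmetic (added for
             exposition): with TLS_TCB_AT_TP, static TLS blocks live
             below the TCB, so a module with l_tls_offset == 0x200 and
             l_tls_blocksize == 0x80 occupies [0x180, 0x200).  If
             GL(dl_tls_static_used) is 0x200, that chunk ends the used
             area, and dl_tls_static_used can drop to 0x180 (done after
             this loop, once tls_free_end matches).  Only one candidate
             range is tracked at a time; a non-adjacent chunk is leaked,
             as the comments above note.  */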

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
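
          /* Note (added for exposition): this table records
             STB_GNU_UNIQUE symbol bindings.  Such bindings normally mark
             the defining object NODELETE, so the entries only need
             purging on a forced close, which dlopen uses when cleaning
             up after a failed load.  */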

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_release (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
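
  /* Rationale (added for exposition): __tls_get_addr compares the
     thread's dtv generation with GL(dl_tls_generation) and refreshes
     stale dtv entries on mismatch; bumping the counter here is what
     invalidates the slots of the modules removed above.  The release
     store pairs with the atomic loads on the reader side.  */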

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

  /* Notify the debugger those objects are finalized and gone.  */
  _dl_debug_change_state (r, RT_CONSISTENT);
  LIBC_PROBE (unmap_complete, 2, nsid, r);

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    {
      /* The map may have been deallocated.  */
      map = NULL;
      goto retry;
    }

  dl_close_state = not_pending;
}
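
/* Illustrative entry path (added for exposition):

       void *h = dlopen ("libfoo.so", RTLD_NOW);  // l_direct_opencount == 1
       dlclose (h);                               // drops the count to 0

   dlclose reaches _dl_close below through the libc wrapper; _dl_close
   takes GL(dl_load_lock) and runs _dl_close_worker with force == false.
   "libfoo.so" is a placeholder name.  */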


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be
     thread-safe we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}