/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>
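
/* In outline: _dl_close_worker below proceeds in two phases.  It
   first runs a mark pass over the whole namespace to find the objects
   that must survive (still directly opened, NODELETE, holding pending
   TLS destructors, or a dependency of such an object), and then runs
   the ELF destructors of and unmaps everything left unmarked.  */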

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
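/* IDX is the TLS module ID of the object being removed; DISP is the
   number of slotinfo entries held by the list elements before LISTP.
   The entry for IDX is cleared, and if IDX was the highest used
   index, the downward scan at the end also lowers
   GL(dl_tls_max_dtv_idx) to the new highest used entry.  */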
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

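      /* The ELF gABI specifies that DT_FINI_ARRAY entries run in
         reverse array order, the inverse of the constructor order,
         which is what the backwards walk here implements.  */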
      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}
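
/* The unload engine proper.  Must be called with GL(dl_load_lock)
   held; the _dl_close entry point below takes it first.  If FORCE,
   additionally drop MAP's entries from the namespace's unique symbol
   table (see the STB_GNU_UNIQUE handling further down).  */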
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
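
  /* Mark pass.  An object survives if it is still directly opened,
     NODELETE, has pending TLS destructors, or is reachable from a
     surviving object through l_initfini or l_reldeps.  Whenever an
     already-visited map is newly marked, DONE_INDEX is wound back so
     that its dependencies get rescanned too.  */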
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
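
  /* After sorting, dependencies come after the objects that use them,
     so the loop below runs each unloaded object's destructors before
     those of the objects it depends on.  */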

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;
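
          /* Some scope entries belong to maps about to disappear, so
             the scope array has to be rebuilt.  A fresh array is used
             rather than editing in place because other threads may
             still be iterating over the old one; the old array is
             handed to _dl_scope_free, which defers the actual free
             until no thread can be referencing it anymore.  */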
          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
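
  /* From here on the debugger is kept informed: r_state goes to
     RT_DELETE before the maps are torn down and back to RT_CONSISTENT
     afterwards, with _dl_debug_state serving as the breakpoint hook a
     debugger can set to observe both transitions.  */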

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
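
  /* Synchronize with concurrent symbol lookups before the old scope
     arrays (and the global scope entries removed above) can go away:
     each thread doing a lookup holds a GSCOPE flag, and
     THREAD_GSCOPE_WAIT blocks until all such threads have left their
     lookup critical sections.  */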
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
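
  /* Static TLS reclaim strategy: the unmap loop below tries to merge
     the TLS blocks of the unloaded maps into one contiguous
     [tls_free_start, tls_free_end) chunk.  Only a chunk that ends up
     adjacent to GL(dl_tls_static_used) can actually be returned to
     the static TLS area; anything else stays leaked for now.  */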

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
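
          /* (Binding to an STB_GNU_UNIQUE symbol normally makes the
             providing object NODELETE, so this cleanup should only
             matter on forced-unload error paths, where the object is
             going away regardless.)  */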

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
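
  /* Bumping the generation counter is what makes every thread's dtv
     notice the unload: __tls_get_addr compares the dtv's generation
     against GL(dl_tls_generation) and refreshes stale slots lazily.  */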
  /* If we removed any object which uses TLS, bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck whether a recursive dlclose requested another pass.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

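
/* Implementation behind the public dlclose, reached via the dlfcn
   wrappers.  _MAP is the handle dlopen returned, which in this
   implementation is the object's own link map.  */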
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}