/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

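      /* The ELF specification has DT_FINI_ARRAY entries run in reverse
         order of their appearance in the array, hence the backwards
         walk.  */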
      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
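
  /* dl_close_state is protected by GL(dl_load_lock); _dl_close_worker
     always runs with that lock held.  The state can therefore only
     change under our feet through a recursive call on this same thread,
     i.e. a destructor that calls dlclose.  A sketch of the situation
     this guards against (hypothetical user code):

         void fini (void)          -- destructor of object A
         {
           dlclose (handle_b);     -- re-enters _dl_close_worker
         }

     The recursive call merely records `rerun'; the outermost call then
     repeats the garbage collection at the `retry' label below.  */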

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && l->l_nodelete != link_map_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
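
  /* At this point USED is the mark bitmap of a mark-and-sweep pass:
     every map reachable from a still-referenced map through l_initfini
     or l_reldeps has been marked, with the scan restarted whenever a
     mark reached back into an already-processed map.  Everything left
     unmarked is garbage.  */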

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
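
  /* After the sort each object slated for unloading precedes the
     objects it depends on, so the loop below runs an object's
     destructors before the destructors of its dependencies.  */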

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && imap->l_nodelete != link_map_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
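
  /* THREAD_GSCOPE_WAIT synchronizes with the lock-free scope walks
     performed during lazy symbol resolution: it returns only once no
     thread is inside a GSCOPE critical section anymore, after which
     the superseded scope arrays queued on dl_scope_free_list can be
     freed without racing with concurrent lookups.  */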
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
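
  /* TLS_FREE_START/TLS_FREE_END accumulate one contiguous range of
     static TLS space freed by this unload; only a range ending at
     GL(dl_tls_static_used) can actually be returned to the pool (see
     the end of this function).  */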

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
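
          /* This forced reset matters on the error path: when a failed
             dlopen unwinds by calling _dl_close_worker with FORCE set,
             stale entries for STB_GNU_UNIQUE symbols must not be left
             pointing into a mapping that is about to disappear.  */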

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif
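
  /* If the namespaces with the highest IDs are now empty, shrink
     GL(dl_nns); presumably this lets later dlmopen calls reuse the
     freed slots.  */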
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry; the load lock is released by the
     caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete == link_map_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}