/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
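/* While _dl_close_worker runs, l_idx temporarily holds each map's position
   in the local maps[] array built below; IDX_STILL_USED overwrites that
   index for objects which must stay loaded.  */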


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
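          /* Using GL(dl_tls_generation) + 1 tags the slot with the
             generation the caller is about to install, so threads that
             later update their DTV treat this entry as stale.  */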
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (e.g. some destructor calls
     dlclose), just record that the parent _dl_close will need to do
     garbage collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
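  /* A plain static is safe here: _dl_close_worker always runs with
     GL(dl_load_lock) held (taken in _dl_close below), so a recursive
     invocation can only come from a destructor on the same thread.  */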

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);
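  /* After sorting, an object precedes the objects it depends on, so its
     finalizers below run before those of its dependencies.  */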

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));
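
                  /* DT_FINI_ARRAY functions are run in reverse array
                     order, mirroring the forward order used for
                     DT_INIT_ARRAY at load time.  */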
                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;
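              /* (This relies on _dl_map_object_deps keeping a second
                 copy of the dependency list, in search order, right
                 after the NULL terminator of l_initfini.)  */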

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
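      /* Wait until every thread has left its GSCOPE critical section;
         after that no thread can still be walking a scope array we
         detached above, so the queued arrays are safe to free.  */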
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
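  /* This shrinks GL(dl_nns) past trailing namespaces that have become
     empty, so later scans over the namespace array stop at the highest
     one still in use.  */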

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry; the lock is released by our caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}