/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif


void
_dl_close (void *_map)
{
  struct link_map *map = _map;
  Lmid_t ns = map->l_ns;
  unsigned int i;
  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

 retry:
  dl_close_state = pending;

#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
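          /* The first slot of l_initfini holds the map itself, so the
             dependency scan starts at index 1.  */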
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
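
  /* At this point used[] is set for every map still reachable from a map
     with a nonzero direct open count or the DF_1_NODELETE flag; whenever a
     dependency with a smaller index was newly marked, done_index was wound
     back so that map gets rescanned.  Everything left unmarked can go.  */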

  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == ns);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, ns);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

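                  /* DT_FINI_ARRAY functions are run in reverse order of
                     their registration, hence the backwards walk.  */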
                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we are about to remove this object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scoperec->scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == ns);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scoperec *newp;
              if (imap->l_scoperec != &imap->l_scoperec_mem
                  && remain < NINIT_SCOPE_ELEMS (imap)
                  && imap->l_scoperec_mem.nusers == 0)
                {
                  new_size = NINIT_SCOPE_ELEMS (imap);
                  newp = &imap->l_scoperec_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scoperec *)
                    malloc (sizeof (struct r_scoperec)
                            + new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              newp->nusers = 0;
              newp->remove_after_use = false;
              newp->notify = false;

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scoperec->scope[cnt]
                      != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scoperec->scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp->scope[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp->scope[remain++] = imap->l_scoperec->scope[cnt];
                }
              newp->scope[remain] = NULL;

              struct r_scoperec *old = imap->l_scoperec;

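              /* Install the new scope.  In the multi-threaded case other
                 threads may still be walking the old array, so it can only
                 be freed once the last reader has dropped it.  */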
              if (SINGLE_THREAD_P)
                imap->l_scoperec = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scoperec_lock);
                  imap->l_scoperec = newp;
                  __rtld_mrlock_done (imap->l_scoperec_lock);

                  if (atomic_increment_val (&old->nusers) != 1)
                    {
                      old->remove_after_use = true;
                      old->notify = true;
                      if (atomic_decrement_val (&old->nusers) != 0)
                        __rtld_waitzero (old->nusers);
                    }
                }

              /* No user anymore, we can free it now.  */
              if (old != &imap->l_scoperec_mem)
                free (old);

              imap->l_scope_max = new_size;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, ns);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
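  /* These track a single contiguous range of static TLS space occupied by
     the unloaded objects; if it ends up at the top of the area in use it
     can be handed back by lowering GL(dl_tls_static_used).  */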
#endif

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt = GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;
            }

#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
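                  /* The two cases below mirror each other: with
                     TLS_TCB_AT_TP the static TLS blocks sit below the
                     thread pointer, with TLS_DTV_AT_TP above it, so the
                     direction of the offset arithmetic differs.  */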
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (ns != LM_ID_BASE);
#endif
              GL(dl_ns)[ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scoperec != &imap->l_scoperec_mem)
            free (imap->l_scoperec);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}


#ifdef USE_TLS
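/* Free one element of the dtv slotinfo list, provided everything after it
   could be freed and none of its own entries are still in use.  Clears
   *ELEMP so the caller's next pointer does not dangle.  */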
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif


libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}