/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

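/* Remove the slotinfo entry for TLS module IDX from the (chunked) dtv
   slotinfo list and lower GL(dl_tls_max_dtv_idx) when IDX was the
   highest module index still in use.  */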
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

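/* Do the real work of dlclose: decrement MAP's direct use count and,
   if nothing references the object anymore, garbage-collect the now
   unreachable objects of its namespace, run their finalizers, rebuild
   the scopes of the surviving objects, reclaim static TLS and unmap
   them.  Called with GL(dl_load_lock) held.  */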
void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage
     collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

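  /* Mark phase of the garbage collection: propagate the "used" flag from
     every object that has to stay loaded to all of its dependencies and
     relocation dependencies.  Whenever marking reaches an object whose
     index lies below the current position, the walk below rewinds to it
     so that its own dependencies get marked as well.  */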
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

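  /* Sort MAPS so that objects are finalized before the objects they
     depend on, then run the sweep below: unused objects get their
     DT_FINI_ARRAY entries called in reverse order followed by DT_FINI,
     while objects that stay loaded get their scopes cleaned up.  */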
  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
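      /* The object stays loaded.  If its scope still points into maps
         that are about to be unloaded, a fresh scope array is built;
         the old one may still be in use by other threads, so it is only
         freed after the GSCOPE wait further down.  */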
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

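  /* Other threads may still be walking the scope arrays we just
     replaced.  Wait until every thread has left its GSCOPE critical
     section before the superseded arrays are freed.  */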
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

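  /* TLS_FREE_START/TLS_FREE_END track a contiguous range of static TLS
     space used by the objects being unmapped; if the range ends up at
     the top of the used area it is given back at the end.  */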
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

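  /* Bumping the generation counter makes every thread update its DTV
     lazily on the next TLS access, which compares the DTV's generation
     against GL(dl_tls_generation).  */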
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

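  /* If the highest-numbered namespaces have become empty, shrink
     GL(dl_nns) so they are no longer considered.  */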
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      {
        --GL(dl_nns);
#ifndef SHARED
        if (GL(dl_nns) == 0)
          break;
#endif
      }
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

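/* Entry point corresponding to dlclose: refuse to unload NODELETE
   objects, then do the real work in _dl_close_worker while holding
   GL(dl_load_lock).  */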
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}