/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

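  /* The object being removed may have provided the highest TLS module
     index.  Scan backwards through this list element for a new highest
     index that is still in use.  */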
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry found in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
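
  /* Per-object state for the mark-and-sweep pass below: USED marks objects
     that must stay loaded, DONE marks objects whose dependencies have
     already been traversed.  */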
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
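  /* State accumulated while running destructors below: whether any object
     will actually be unloaded, whether an old scope array kept in some
     map's l_scope_mem may still be in use by other threads, how many
     unloaded objects were in the global scope, and the index of the first
     object the unmap pass has to visit.  */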
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries always being set either
               to the map's own l_symbolic_searchlist address, or to some
               map's l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

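  /* Range of the static TLS block that can be reclaimed once the unused
     objects below have been removed; NO_TLS_OFFSET means nothing has been
     collected yet.  */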
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck whether we need to retry; the lock is released by the caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}