1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2025 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>
#include <dl-unmap-segments.h>
39 /* Special l_idx value used to indicate which objects remain loaded. */
40 #define IDX_STILL_USED -1
43 /* Returns true we an non-empty was found. */
45 remove_slotinfo (size_t idx
, struct dtv_slotinfo_list
*listp
, size_t disp
,
48 if (idx
- disp
>= listp
->len
)
50 if (listp
->next
== NULL
)
52 /* The index is not actually valid in the slotinfo list,
53 because this object was closed before it was fully set
54 up due to some error. */
55 assert (! should_be_there
);
59 if (remove_slotinfo (idx
, listp
->next
, disp
+ listp
->len
,
63 /* No non-empty entry. Search from the end of this element's
65 idx
= disp
+ listp
->len
;
70 struct link_map
*old_map
= listp
->slotinfo
[idx
- disp
].map
;
72 /* The entry might still be in its unused state if we are closing an
73 object that wasn't fully set up. */
74 if (__glibc_likely (old_map
!= NULL
))
76 /* Mark the entry as unused. These can be read concurrently. */
77 atomic_store_relaxed (&listp
->slotinfo
[idx
- disp
].gen
,
78 GL(dl_tls_generation
) + 1);
79 atomic_store_relaxed (&listp
->slotinfo
[idx
- disp
].map
, NULL
);
82 /* If this is not the last currently used entry no need to look
84 if (idx
!= GL(dl_tls_max_dtv_idx
))
86 /* There is an unused dtv entry in the middle. */
87 GL(dl_tls_dtv_gaps
) = true;
92 while (idx
- disp
> (disp
== 0 ? 1 + GL(dl_tls_static_nelem
) : 0))
96 if (listp
->slotinfo
[idx
- disp
].map
!= NULL
)
98 /* Found a new last used index. This can be read concurrently. */
99 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx
), idx
);
104 /* No non-entry in this list element. */
109 _dl_close_worker (struct link_map
*map
, bool force
)
111 /* One less direct use. */
112 --map
->l_direct_opencount
;
114 /* If _dl_close is called recursively (some destructor call dlclose),
115 just record that the parent _dl_close will need to do garbage collection
117 static enum { not_pending
, pending
, rerun
} dl_close_state
;
119 if (map
->l_direct_opencount
> 0 || map
->l_type
!= lt_loaded
120 || dl_close_state
!= not_pending
)
122 if (map
->l_direct_opencount
== 0 && map
->l_type
== lt_loaded
)
123 dl_close_state
= rerun
;
125 /* There are still references to this object. Do nothing more. */
126 if (__glibc_unlikely (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
))
127 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
128 map
->l_name
, map
->l_direct_opencount
);
133 Lmid_t nsid
= map
->l_ns
;
134 struct link_namespaces
*ns
= &GL(dl_ns
)[nsid
];
137 dl_close_state
= pending
;
139 bool any_tls
= false;
140 const unsigned int nloaded
= ns
->_ns_nloaded
;
141 struct link_map
*maps
[nloaded
];
143 /* Run over the list and assign indexes to the link maps and enter
144 them into the MAPS array. */
146 for (struct link_map
*l
= ns
->_ns_loaded
; l
!= NULL
; l
= l
->l_next
)
154 assert (idx
== nloaded
);
156 /* Put the dlclose'd map first, so that its destructor runs first.
157 The map variable is NULL after a retry. */
160 maps
[map
->l_idx
] = maps
[0];
161 maps
[map
->l_idx
]->l_idx
= map
->l_idx
;
166 /* Keep track of the lowest index link map we have covered already. */
168 while (++done_index
< nloaded
)
170 struct link_map
*l
= maps
[done_index
];
173 /* Already handled. */
176 /* Check whether this object is still used. */
177 if (l
->l_type
== lt_loaded
178 && l
->l_direct_opencount
== 0
179 && !l
->l_nodelete_active
180 /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
181 acquire is sufficient and correct. */
182 && atomic_load_acquire (&l
->l_tls_dtor_count
) == 0
186 /* We need this object and we handle it now. */
189 /* Signal the object is still needed. */
190 l
->l_idx
= IDX_STILL_USED
;
192 /* Mark all dependencies as used. */
193 if (l
->l_initfini
!= NULL
)
195 /* We are always the zeroth entry, and since we don't include
196 ourselves in the dependency analysis start at 1. */
197 struct link_map
**lp
= &l
->l_initfini
[1];
200 if ((*lp
)->l_idx
!= IDX_STILL_USED
)
202 assert ((*lp
)->l_idx
>= 0 && (*lp
)->l_idx
< nloaded
);
204 if (!(*lp
)->l_map_used
)
206 (*lp
)->l_map_used
= 1;
207 /* If we marked a new object as used, and we've
208 already processed it, then we need to go back
209 and process again from that point forward to
210 ensure we keep all of its dependencies also. */
211 if ((*lp
)->l_idx
- 1 < done_index
)
212 done_index
= (*lp
)->l_idx
- 1;
219 /* And the same for relocation dependencies. */
220 if (l
->l_reldeps
!= NULL
)
221 for (unsigned int j
= 0; j
< l
->l_reldeps
->act
; ++j
)
223 struct link_map
*jmap
= l
->l_reldeps
->list
[j
];
225 if (jmap
->l_idx
!= IDX_STILL_USED
)
227 assert (jmap
->l_idx
>= 0 && jmap
->l_idx
< nloaded
);
229 if (!jmap
->l_map_used
)
231 jmap
->l_map_used
= 1;
232 if (jmap
->l_idx
- 1 < done_index
)
233 done_index
= jmap
->l_idx
- 1;
239 /* Sort the entries. Unless retrying, the maps[0] object (the
240 original argument to dlclose) needs to remain first, so that its
241 destructor runs first. */
242 _dl_sort_maps (maps
, nloaded
, /* force_first */ map
!= NULL
, true);
244 /* Call all termination functions at once. */
245 bool unload_any
= false;
246 bool scope_mem_left
= false;
247 unsigned int unload_global
= 0;
248 unsigned int first_loaded
= ~0;
249 for (unsigned int i
= 0; i
< nloaded
; ++i
)
251 struct link_map
*imap
= maps
[i
];
253 /* All elements must be in the same namespace. */
254 assert (imap
->l_ns
== nsid
);
256 if (!imap
->l_map_used
)
258 assert (imap
->l_type
== lt_loaded
&& !imap
->l_nodelete_active
);
260 /* Call its termination function. Do not do it for
261 half-cooked objects. Temporarily disable exception
262 handling, so that errors are fatal. */
263 if (imap
->l_init_called
)
264 _dl_catch_exception (NULL
, _dl_call_fini
, imap
);
267 /* Auditing checkpoint: we will start deleting objects.
268 This is supposed to happen before la_objclose (see _dl_fini),
269 but only once per non-recursive dlclose call. */
271 _dl_audit_activity_nsid (nsid
, LA_ACT_DELETE
);
273 /* Auditing checkpoint: we remove an object. */
274 _dl_audit_objclose (imap
);
277 /* This object must not be used anymore. */
280 /* We indeed have an object to remove. */
286 /* Remember where the first dynamically loaded object is. */
287 if (i
< first_loaded
)
290 /* Else imap->l_map_used. */
291 else if (imap
->l_type
== lt_loaded
)
293 struct r_scope_elem
*new_list
= NULL
;
295 if (imap
->l_searchlist
.r_list
== NULL
&& imap
->l_initfini
!= NULL
)
297 /* The object is still used. But one of the objects we are
298 unloading right now is responsible for loading it. If
299 the current object does not have it's own scope yet we
300 have to create one. This has to be done before running
303 To do this count the number of dependencies. */
305 for (cnt
= 1; imap
->l_initfini
[cnt
] != NULL
; ++cnt
)
308 /* We simply reuse the l_initfini list. */
309 imap
->l_searchlist
.r_list
= &imap
->l_initfini
[cnt
+ 1];
310 imap
->l_searchlist
.r_nlist
= cnt
;
312 new_list
= &imap
->l_searchlist
;
315 /* Count the number of scopes which remain after the unload.
316 When we add the local search list count it. Always add
317 one for the terminating NULL pointer. */
318 size_t remain
= (new_list
!= NULL
) + 1;
319 bool removed_any
= false;
320 for (size_t cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
321 /* This relies on l_scope[] entries being always set either
322 to its own l_symbolic_searchlist address, or some map's
323 l_searchlist address. */
324 if (imap
->l_scope
[cnt
] != &imap
->l_symbolic_searchlist
)
326 struct link_map
*tmap
= (struct link_map
*)
327 ((char *) imap
->l_scope
[cnt
]
328 - offsetof (struct link_map
, l_searchlist
));
329 assert (tmap
->l_ns
== nsid
);
330 if (tmap
->l_idx
== IDX_STILL_USED
)
340 /* Always allocate a new array for the scope. This is
341 necessary since we must be able to determine the last
342 user of the current array. If possible use the link map's
345 struct r_scope_elem
**newp
;
347 #define SCOPE_ELEMS(imap) \
348 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
350 if (imap
->l_scope
!= imap
->l_scope_mem
351 && remain
< SCOPE_ELEMS (imap
))
353 new_size
= SCOPE_ELEMS (imap
);
354 newp
= imap
->l_scope_mem
;
358 new_size
= imap
->l_scope_max
;
359 newp
= (struct r_scope_elem
**)
360 malloc (new_size
* sizeof (struct r_scope_elem
*));
362 _dl_signal_error (ENOMEM
, "dlclose", NULL
,
363 N_("cannot create scope list"));
366 /* Copy over the remaining scope elements. */
368 for (size_t cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
370 if (imap
->l_scope
[cnt
] != &imap
->l_symbolic_searchlist
)
372 struct link_map
*tmap
= (struct link_map
*)
373 ((char *) imap
->l_scope
[cnt
]
374 - offsetof (struct link_map
, l_searchlist
));
375 if (tmap
->l_idx
!= IDX_STILL_USED
)
377 /* Remove the scope. Or replace with own map's
379 if (new_list
!= NULL
)
381 newp
[remain
++] = new_list
;
388 newp
[remain
++] = imap
->l_scope
[cnt
];
392 struct r_scope_elem
**old
= imap
->l_scope
;
394 imap
->l_scope
= newp
;
396 /* No user anymore, we can free it now. */
397 if (old
!= imap
->l_scope_mem
)
399 if (_dl_scope_free (old
))
400 /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
401 no need to repeat it. */
402 scope_mem_left
= false;
405 scope_mem_left
= true;
407 imap
->l_scope_max
= new_size
;
409 else if (new_list
!= NULL
)
411 /* We didn't change the scope array, so reset the search
413 imap
->l_searchlist
.r_list
= NULL
;
414 imap
->l_searchlist
.r_nlist
= 0;
417 /* The loader is gone, so mark the object as not having one.
418 Note: l_idx != IDX_STILL_USED -> object will be removed. */
419 if (imap
->l_loader
!= NULL
420 && imap
->l_loader
->l_idx
!= IDX_STILL_USED
)
421 imap
->l_loader
= NULL
;
423 /* Remember where the first dynamically loaded object is. */
424 if (i
< first_loaded
)
429 /* If there are no objects to unload, do nothing further. */
433 /* Notify the debugger we are about to remove some loaded objects.
434 LA_ACT_DELETE has already been signalled above for !unload_any. */
435 struct r_debug
*r
= _dl_debug_update (nsid
);
436 _dl_debug_change_state (r
, RT_DELETE
);
437 LIBC_PROBE (unmap_start
, 2, nsid
, r
);
441 /* Some objects are in the global scope list. Remove them. */
442 struct r_scope_elem
*ns_msl
= ns
->_ns_main_searchlist
;
445 unsigned int cnt
= ns_msl
->r_nlist
;
447 while (cnt
> 0 && ns_msl
->r_list
[cnt
- 1]->l_removed
)
450 if (cnt
+ unload_global
== ns_msl
->r_nlist
)
451 /* Speed up removing most recently added objects. */
454 for (i
= 0; i
< cnt
; i
++)
455 if (ns_msl
->r_list
[i
]->l_removed
== 0)
458 ns_msl
->r_list
[j
] = ns_msl
->r_list
[i
];
464 if (!RTLD_SINGLE_THREAD_P
467 || (GL(dl_scope_free_list
) != NULL
468 && GL(dl_scope_free_list
)->count
)))
470 THREAD_GSCOPE_WAIT ();
472 /* Now we can free any queued old scopes. */
473 struct dl_scope_free_list
*fsl
= GL(dl_scope_free_list
);
475 while (fsl
->count
> 0)
476 free (fsl
->list
[--fsl
->count
]);
479 size_t tls_free_start
;
481 tls_free_start
= tls_free_end
= NO_TLS_OFFSET
;
483 /* Protects global and module specitic TLS state. */
484 __rtld_lock_lock_recursive (GL(dl_load_tls_lock
));
486 /* We modify the list of loaded objects. */
487 __rtld_lock_lock_recursive (GL(dl_load_write_lock
));
489 /* Check each element of the search list to see if all references to
491 for (unsigned int i
= first_loaded
; i
< nloaded
; ++i
)
493 struct link_map
*imap
= maps
[i
];
494 if (!imap
->l_map_used
)
496 assert (imap
->l_type
== lt_loaded
);
498 /* That was the last reference, and this was a dlopen-loaded
499 object. We can unmap it. */
501 /* Remove the object from the dtv slotinfo array if it uses TLS. */
502 if (__glibc_unlikely (imap
->l_tls_blocksize
> 0))
506 if (GL(dl_tls_dtv_slotinfo_list
) != NULL
507 && ! remove_slotinfo (imap
->l_tls_modid
,
508 GL(dl_tls_dtv_slotinfo_list
), 0,
509 imap
->l_init_called
))
510 /* All dynamically loaded modules with TLS are unloaded. */
511 /* Can be read concurrently. */
512 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx
),
513 GL(dl_tls_static_nelem
));
515 if (imap
->l_tls_offset
!= NO_TLS_OFFSET
516 && imap
->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET
)
518 /* Collect a contiguous chunk built from the objects in
519 this search list, going in either direction. When the
520 whole chunk is at the end of the used area then we can
523 if (tls_free_start
== NO_TLS_OFFSET
524 || (size_t) imap
->l_tls_offset
== tls_free_start
)
526 /* Extend the contiguous chunk being reclaimed. */
528 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
530 if (tls_free_end
== NO_TLS_OFFSET
)
531 tls_free_end
= imap
->l_tls_offset
;
533 else if (imap
->l_tls_offset
- imap
->l_tls_blocksize
535 /* Extend the chunk backwards. */
536 tls_free_end
= imap
->l_tls_offset
;
539 /* This isn't contiguous with the last chunk freed.
540 One of them will be leaked unless we can free
541 one block right away. */
542 if (tls_free_end
== GL(dl_tls_static_used
))
544 GL(dl_tls_static_used
) = tls_free_start
;
545 tls_free_end
= imap
->l_tls_offset
;
547 = tls_free_end
- imap
->l_tls_blocksize
;
549 else if ((size_t) imap
->l_tls_offset
550 == GL(dl_tls_static_used
))
551 GL(dl_tls_static_used
)
552 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
553 else if (tls_free_end
< (size_t) imap
->l_tls_offset
)
555 /* We pick the later block. It has a chance to
557 tls_free_end
= imap
->l_tls_offset
;
559 = tls_free_end
- imap
->l_tls_blocksize
;
563 if (tls_free_start
== NO_TLS_OFFSET
)
565 tls_free_start
= imap
->l_tls_firstbyte_offset
;
566 tls_free_end
= (imap
->l_tls_offset
567 + imap
->l_tls_blocksize
);
569 else if (imap
->l_tls_firstbyte_offset
== tls_free_end
)
570 /* Extend the contiguous chunk being reclaimed. */
571 tls_free_end
= imap
->l_tls_offset
+ imap
->l_tls_blocksize
;
572 else if (imap
->l_tls_offset
+ imap
->l_tls_blocksize
574 /* Extend the chunk backwards. */
575 tls_free_start
= imap
->l_tls_firstbyte_offset
;
576 /* This isn't contiguous with the last chunk freed.
577 One of them will be leaked unless we can free
578 one block right away. */
579 else if (imap
->l_tls_offset
+ imap
->l_tls_blocksize
580 == GL(dl_tls_static_used
))
581 GL(dl_tls_static_used
) = imap
->l_tls_firstbyte_offset
;
582 else if (tls_free_end
== GL(dl_tls_static_used
))
584 GL(dl_tls_static_used
) = tls_free_start
;
585 tls_free_start
= imap
->l_tls_firstbyte_offset
;
586 tls_free_end
= imap
->l_tls_offset
+ imap
->l_tls_blocksize
;
588 else if (tls_free_end
< imap
->l_tls_firstbyte_offset
)
590 /* We pick the later block. It has a chance to
592 tls_free_start
= imap
->l_tls_firstbyte_offset
;
593 tls_free_end
= imap
->l_tls_offset
+ imap
->l_tls_blocksize
;
596 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
601 /* Reset unique symbols if forced. */
604 struct unique_sym_table
*tab
= &ns
->_ns_unique_sym_table
;
605 __rtld_lock_lock_recursive (tab
->lock
);
606 struct unique_sym
*entries
= tab
->entries
;
609 size_t idx
, size
= tab
->size
;
610 for (idx
= 0; idx
< size
; ++idx
)
612 /* Clear unique symbol entries that belong to this
614 if (entries
[idx
].name
!= NULL
615 && entries
[idx
].map
== imap
)
617 entries
[idx
].name
= NULL
;
618 entries
[idx
].hashval
= 0;
623 __rtld_lock_unlock_recursive (tab
->lock
);
626 /* We can unmap all the maps at once. We determined the
627 start address and length when we loaded the object and
628 the `munmap' call does the rest. */
631 /* Finally, unlink the data structure and free it. */
633 /* The assert in the (imap->l_prev == NULL) case gives
634 the compiler license to warn that NS points outside
635 the dl_ns array bounds in that case (as nsid != LM_ID_BASE
636 is tantamount to nsid >= DL_NNS). That should be impossible
637 in this configuration, so just assert about it instead. */
638 assert (nsid
== LM_ID_BASE
);
639 assert (imap
->l_prev
!= NULL
);
641 if (imap
->l_prev
== NULL
)
643 assert (nsid
!= LM_ID_BASE
);
644 ns
->_ns_loaded
= imap
->l_next
;
646 /* Update the pointer to the head of the list
647 we leave for debuggers to examine. */
648 r
->r_map
= (void *) ns
->_ns_loaded
;
652 imap
->l_prev
->l_next
= imap
->l_next
;
655 if (imap
->l_next
!= NULL
)
656 imap
->l_next
->l_prev
= imap
->l_prev
;
658 /* Update the data used by _dl_find_object. */
659 _dl_find_object_dlclose (imap
);
661 free (imap
->l_versions
);
662 if (imap
->l_origin
!= (char *) -1)
663 free ((char *) imap
->l_origin
);
665 free (imap
->l_reldeps
);
667 /* Print debugging message. */
668 if (__glibc_unlikely (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
))
669 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
670 imap
->l_name
, imap
->l_ns
);
672 /* This name always is allocated. */
674 /* Remove the list with all the names of the shared object. */
676 struct libname_list
*lnp
= imap
->l_libname
;
679 struct libname_list
*this = lnp
;
681 if (!this->dont_free
)
686 /* Remove the searchlists. */
687 free (imap
->l_initfini
);
689 /* Remove the scope array if we allocated it. */
690 if (imap
->l_scope
!= imap
->l_scope_mem
)
691 free (imap
->l_scope
);
693 if (imap
->l_phdr_allocated
)
694 free ((void *) imap
->l_phdr
);
696 if (imap
->l_rpath_dirs
.dirs
!= (void *) -1)
697 free (imap
->l_rpath_dirs
.dirs
);
698 if (imap
->l_runpath_dirs
.dirs
!= (void *) -1)
699 free (imap
->l_runpath_dirs
.dirs
);
701 /* Clear GL(dl_initfirst) when freeing its link_map memory. */
702 if (imap
== GL(dl_initfirst
))
703 GL(dl_initfirst
) = NULL
;
709 __rtld_lock_unlock_recursive (GL(dl_load_write_lock
));
711 /* If we removed any object which uses TLS bump the generation counter. */
714 size_t newgen
= GL(dl_tls_generation
) + 1;
715 if (__glibc_unlikely (newgen
== 0))
716 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO
".\n");
717 /* Can be read concurrently. */
718 atomic_store_release (&GL(dl_tls_generation
), newgen
);
720 if (tls_free_end
== GL(dl_tls_static_used
))
721 GL(dl_tls_static_used
) = tls_free_start
;
724 /* TLS is cleaned up for the unloaded modules. */
725 __rtld_lock_unlock_recursive (GL(dl_load_tls_lock
));
727 /* Notify the debugger those objects are finalized and gone. */
728 _dl_debug_change_state (r
, RT_CONSISTENT
);
729 LIBC_PROBE (unmap_complete
, 2, nsid
, r
);
732 /* Auditing checkpoint: we have deleted all objects. Also, do not notify
733 auditors of the cleanup of a failed audit module loading attempt. */
734 _dl_audit_activity_nsid (nsid
, LA_ACT_CONSISTENT
);
737 if (__builtin_expect (ns
->_ns_loaded
== NULL
, 0)
738 && nsid
== GL(dl_nns
) - 1)
741 while (GL(dl_ns
)[GL(dl_nns
) - 1]._ns_loaded
== NULL
);
743 /* Recheck if we need to retry, release the lock. */
745 if (dl_close_state
== rerun
)
747 /* The map may have been deallocated. */
752 dl_close_state
= not_pending
;
757 _dl_close (void *_map
)
759 struct link_map
*map
= _map
;
761 /* We must take the lock to examine the contents of map and avoid
762 concurrent dlopens. */
763 __rtld_lock_lock_recursive (GL(dl_load_lock
));
765 /* At this point we are guaranteed nobody else is touching the list of
766 loaded maps, but a concurrent dlclose might have freed our map
767 before we took the lock. There is no way to detect this (see below)
768 so we proceed assuming this isn't the case. First see whether we
769 can remove the object at all. */
770 if (__glibc_unlikely (map
->l_nodelete_active
))
772 /* Nope. Do nothing. */
773 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
777 /* At present this is an unreliable check except in the case where the
778 caller has recursively called dlclose and we are sure the link map
779 has not been freed. In a non-recursive dlclose the map itself
780 might have been freed and this access is potentially a data race
781 with whatever other use this memory might have now, or worse we
782 might silently corrupt memory if it looks enough like a link map.
783 POSIX has language in dlclose that appears to guarantee that this
784 should be a detectable case and given that dlclose should be threadsafe
785 we need this to be a reliable detection.
786 This is bug 20990. */
787 if (__builtin_expect (map
->l_direct_opencount
, 1) == 0)
789 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
790 _dl_signal_error (0, map
->l_name
, NULL
, N_("shared object not open"));
793 _dl_close_worker (map
, false);
795 __rtld_lock_unlock_recursive (GL(dl_load_lock
));