/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

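/* Rough shape of the data remove_slotinfo below operates on (see
   ldsodefs.h and dl-tls.c for the authoritative definitions): the dtv
   slotinfo list is a chain of fixed-size arrays indexed by TLS module
   ID, approximately

     struct dtv_slotinfo { size_t gen; struct link_map *map; };
     struct dtv_slotinfo_list
       {
         size_t len;
         struct dtv_slotinfo_list *next;
         struct dtv_slotinfo slotinfo[];
       };

   remove_slotinfo clears the entry of the module being unloaded and
   recomputes the highest module ID still in use.  */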
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

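/* Core of dlclose.  Called from _dl_close below and, with FORCE set,
   from the error path of dlopen when a partially loaded object has to
   be torn down again (see dl-open.c); both callers hold
   GL(dl_load_lock).  */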
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Put the dlclose'd map first, so that its destructor runs first.
     The map variable is NULL after a retry.  */
  if (map != NULL)
    {
      maps[map->l_idx] = maps[0];
      maps[map->l_idx]->l_idx = map->l_idx;
      maps[0] = map;
      maps[0]->l_idx = 0;
    }

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
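
  /* At this point l_map_used works like the mark bit of a mark-and-sweep
     pass over the namespace: everything reachable from a still referenced
     object through l_initfini or l_reldeps has been marked, and rewinding
     done_index above ensures the marks propagate even to maps that had
     already been visited.  */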

  /* Sort the entries.  Unless retrying, the maps[0] object (the
     original argument to dlclose) needs to remain first, so that its
     destructor runs first.  */
  _dl_sort_maps (maps, nloaded, /* force_first */ map != NULL, true);

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            _dl_catch_exception (NULL, _dl_call_fini, imap);

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

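  /* Threads doing lazy symbol resolution may still be walking the scope
     arrays that were replaced above.  The global-scope wait below only
     returns once every thread has left such a lookup, after which the
     queued old arrays can be freed safely.  */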
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

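          /* The namespace's unique symbol table records STB_GNU_UNIQUE
             bindings.  Entries that point into the object being removed
             are cleared below when the caller requests it (FORCE is set,
             for example on the dlopen failure path), so later lookups
             cannot resolve to unmapped memory.  */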
          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

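  /* Bumping the TLS generation is what makes the unload visible to other
     threads: on their next pass through __tls_get_addr they compare the
     generation recorded in their DTV against GL(dl_tls_generation) and
     drop slots belonging to modules that are gone (that update lives in
     dl-tls.c).  */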
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_release (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    {
      /* The map may have been deallocated.  */
      map = NULL;
      goto retry;
    }

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
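
/* Illustration (a sketch, not part of the library sources): the reference
   counting handled above means an object is only torn down once its last
   handle is closed.  With a hypothetical plugin "plugin.so":

     #include <dlfcn.h>

     void *h1 = dlopen ("plugin.so", RTLD_NOW);   // l_direct_opencount == 1
     void *h2 = dlopen ("plugin.so", RTLD_NOW);   // same link map, count == 2
     dlclose (h1);                                // count drops to 1, stays mapped
     dlclose (h2);                                // count reaches 0, destructors run,
                                                  // object may be unmapped

   Only the second dlclose performs the actual unload, and no unload
   happens at all if the object was opened with RTLD_NODELETE or carries
   the DF_1_NODELETE flag.  */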