/* glibc source: elf/dl-close.c (dynamic linker object unloading).  */
1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <assert.h>
21 #include <dlfcn.h>
22 #include <libintl.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <bits/libc-lock.h>
27 #include <ldsodefs.h>
28 #include <sys/types.h>
29 #include <sys/mman.h>
30
31
/* Type of the destructor functions called via DT_FINI/DT_FINI_ARRAY.  */
33 typedef void (*fini_t) (void);
34
35
#ifdef USE_TLS
/* Remove the dtv slotinfo entry for TLS module number IDX.

   LISTP is the slotinfo list element to search; DISP is the number of
   slots covered by earlier elements, so LISTP's array covers module ids
   [DISP, DISP + LISTP->len).  SHOULD_BE_THERE is false only when the
   object being closed was never fully set up, in which case IDX may be
   absent from the list entirely.

   Returns true if a non-empty (still-used) entry was found; false if
   this list element and all following ones hold no used entry, so the
   caller must reset GL(dl_tls_max_dtv_idx) itself.

   Caller must hold GL(dl_load_lock) (called from _dl_close).  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
		 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      /* IDX lies beyond this list element: recurse into the tail.  */
      if (listp->next == NULL)
	{
	  /* The index is not actually valid in the slotinfo list,
	     because this object was closed before it was fully set
	     up due to some error.  */
	  assert (! should_be_there);
	}
      else
	{
	  if (remove_slotinfo (idx, listp->next, disp + listp->len,
			       should_be_there))
	    return true;

	  /* No non-empty entry.  Search from the end of this element's
	     slotinfo array.  */
	  idx = disp + listp->len;
	}
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
	 object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
	{
	  assert (old_map->l_tls_modid == idx);

	  /* Mark the entry as unused.  The generation is set one past the
	     current global generation so that existing DTVs notice the
	     slot changed when they are next updated.  */
	  listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
	  listp->slotinfo[idx - disp].map = NULL;
	}

      /* If this is not the last currently used entry no need to look
	 further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
	return true;
    }

  /* Scan backwards through this element for the new highest used index.
     In the first element (disp == 0) stop above the static TLS modules,
     whose slots are never reclaimed here.  */
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
	{
	  /* Found a new last used index.  */
	  GL(dl_tls_max_dtv_idx) = idx;
	  return true;
	}
    }

  /* No non-empty entry found in this list element.  */
  return false;
}
#endif
99
100
/* Close the shared object whose link map is _MAP (opened by _dl_open).

   Decrements the object's reference count.  When the count drops to
   zero for a dlopen-loaded object, computes which of its dependencies
   also become unreferenced, runs their destructors (DT_FINI_ARRAY then
   DT_FINI), removes them from scope and namespace lists, reclaims
   their TLS slots and static TLS area where possible, unmaps them and
   frees all associated bookkeeping.  Objects marked DF_1_NODELETE that
   finished initialization are never removed.

   Takes and releases GL(dl_load_lock) (recursively — this function can
   re-enter itself for relocation dependencies).  Signals an error via
   _dl_signal_error if the object is not open.  */
void
internal_function
_dl_close (void *_map)
{
  /* Pending relocation-dependency lists of unmapped objects, processed
     after the debugger has been notified.  Allocated with alloca, so
     they live until this function returns.  */
  struct reldep_list
  {
    struct link_map **rellist;
    unsigned int nrellist;
    unsigned int nhandled;
    struct reldep_list *next;
    /* Old-style (pre-C99) flexible array member: one flag per entry of
       rellist, true when the entry was already handled in this call.  */
    bool handled[0];
  } *reldeps = NULL;
  struct link_map **list;
  struct link_map *map = _map;
  unsigned int i;
  unsigned int *new_opencount;
#ifdef USE_TLS
  bool any_tls = false;
#endif

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_opencount, 1) == 0)
    _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* Decrement the reference count.  */
  if (map->l_opencount > 1 || map->l_type != lt_loaded)
    {
      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("\nclosing file=%s; opencount == %u\n",
			  map->l_name, map->l_opencount);

      /* Decrement the object's reference counter, not the dependencies'.  */
      --map->l_opencount;

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  list = map->l_initfini;

  /* Compute the new l_opencount values.  */
  i = map->l_searchlist.r_nlist;
  if (__builtin_expect (i == 0, 0))
    /* This can happen if we handle relocation dependencies for an
       object which wasn't loaded directly.  */
    for (i = 1; list[i] != NULL; ++i)
      ;

  unsigned int nopencount = i;
  /* Scratch copy of every dependency's reference count; committed back
     to the link maps only after all decrements are done.  */
  new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int));

  for (i = 0; list[i] != NULL; ++i)
    {
      /* l_idx is (re)used as the index of this map within LIST, so the
	 nested helper below can find its counter.  */
      list[i]->l_idx = i;
      new_opencount[i] = list[i]->l_opencount;
    }
  /* Entry 0 is MAP itself.  */
  --new_opencount[0];
  for (i = 1; list[i] != NULL; ++i)
    if ((list[i]->l_flags_1 & DF_1_NODELETE) == 0
	/* Decrement counter.  */
	&& --new_opencount[i] == 0)
      {
	/* GNU C nested function (recursive): propagate the removal of
	   REMMAP by decrementing the counts of everything it keeps
	   alive — its own search list and its relocation deps.  */
	void mark_removed (struct link_map *remmap)
	  {
	    /* Test whether this object was also loaded directly.  */
	    if (remmap->l_searchlist.r_list != NULL)
	      {
		/* In this case we have to decrement all the dependencies of
		   this object.  They are all in MAP's dependency list.  */
		unsigned int j;
		struct link_map **dep_list = remmap->l_searchlist.r_list;

		for (j = 1; j < remmap->l_searchlist.r_nlist; ++j)
		  if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE)
		      || ! dep_list[j]->l_init_called)
		    {
		      assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist);
		      if (--new_opencount[dep_list[j]->l_idx] == 0)
			{
			  assert (dep_list[j]->l_type == lt_loaded);
			  mark_removed (dep_list[j]);
			}
		    }
	      }

	    if (remmap->l_reldeps != NULL)
	      {
		unsigned int j;
		for (j = 0; j < remmap->l_reldepsact; ++j)
		  {
		    /* Find out whether this object is in our list.  */
		    if (remmap->l_reldeps[j]->l_idx < nopencount
			&& (list[remmap->l_reldeps[j]->l_idx]
			    == remmap->l_reldeps[j]))
		      /* Yes, it is.  */
		      if (--new_opencount[remmap->l_reldeps[j]->l_idx] == 0)
			{
			  /* This one is now gone, too.  */
			  assert (remmap->l_reldeps[j]->l_type == lt_loaded);
			  mark_removed (remmap->l_reldeps[j]);
			}
		  }
	      }
	  }

	mark_removed (list[i]);
      }
  /* MAP itself must now be unreferenced.  */
  assert (new_opencount[0] == 0);

  /* Call all termination functions at once.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (new_opencount[i] == 0 && imap->l_type == lt_loaded
	  && (imap->l_flags_1 & DF_1_NODELETE) == 0)
	{
	  /* When debugging print a message first.  */
	  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
	    _dl_debug_printf ("\ncalling fini: %s\n\n", imap->l_name);

	  /* Call its termination function.  Do not do it for
	     half-cooked objects.  */
	  if (imap->l_init_called)
	    {
	      /* DT_FINI_ARRAY entries are run first, in reverse order,
		 per the ELF gABI.  */
	      if (imap->l_info[DT_FINI_ARRAY] != NULL)
		{
		  ElfW(Addr) *array =
		    (ElfW(Addr) *) (imap->l_addr
				    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
		  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
				     / sizeof (ElfW(Addr)));

		  while (sz-- > 0)
		    ((fini_t) array[sz]) ();
		}

	      /* Next try the old-style destructor.  */
	      if (imap->l_info[DT_FINI] != NULL)
		(*(void (*) (void)) DL_DT_FINI_ADDRESS
		 (imap, ((void *) imap->l_addr
			 + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
	    }

	  /* This object must not be used anymore.  We must remove the
	     reference from the scope.  */
	  unsigned int j;
	  struct link_map **searchlist = map->l_searchlist.r_list;
	  unsigned int nsearchlist = map->l_searchlist.r_nlist;

#ifndef NDEBUG
	  bool found = false;
#endif
	  for (j = 0; j < nsearchlist; ++j)
	    if (imap == searchlist[j])
	      {
		/* This is the object to remove.  Copy all the
		   following ones.  */
		while (++j < nsearchlist)
		  searchlist[j - 1] = searchlist[j];

		searchlist[j - 1] = NULL;

		--map->l_searchlist.r_nlist;

#ifndef NDEBUG
		found = true;
#endif
		break;
	      }
	  assert (found);
	}
      else if (new_opencount[i] != 0 && imap->l_type == lt_loaded
	       && imap->l_searchlist.r_list == NULL
	       && imap->l_initfini != NULL)
	{
	  /* The object is still used.  But the object we are
	     unloading right now is responsible for loading it.  If
	     the current object does not have it's own scope yet we
	     have to create one.  This has to be done before running
	     the finalizers.

	     To do this count the number of dependencies.  */
	  unsigned int cnt;
	  for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
	    /* Dependencies still inside our LIST (and not yet
	       processed, l_idx >= i) get their scratch count bumped;
	       others get their real count bumped directly.  */
	    if (imap->l_initfini[cnt]->l_idx >= i
		&& imap->l_initfini[cnt]->l_idx < nopencount)
	      ++new_opencount[imap->l_initfini[cnt]->l_idx];
	    else
	      ++imap->l_initfini[cnt]->l_opencount;

	  /* We simply reuse the l_initfini list.  */
	  imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
	  imap->l_searchlist.r_nlist = cnt;

	  /* Replace MAP's search list in IMAP's scope with IMAP's own
	     new search list.  */
	  for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
	    if (imap->l_scope[cnt] == &map->l_searchlist)
	      {
		imap->l_scope[cnt] = &imap->l_searchlist;
		break;
	      }
	}

      /* Store the new l_opencount value.  */
      imap->l_opencount = new_opencount[i];

      /* Just a sanity check.  */
      assert (imap->l_type == lt_loaded || imap->l_opencount > 0);
    }

  /* Notify the debugger we are about to remove some loaded objects.  */
  _r_debug.r_state = RT_DELETE;
  _dl_debug_state ();

#ifdef USE_TLS
  /* Bounds of a contiguous chunk of static TLS area being reclaimed;
     NO_TLS_OFFSET means no chunk collected yet.  */
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (imap->l_opencount == 0 && imap->l_type == lt_loaded)
	{
	  struct libname_list *lnp;

	  /* That was the last reference, and this was a dlopen-loaded
	     object.  We can unmap it.  */
	  if (__builtin_expect (imap->l_global, 0))
	    {
	      /* This object is in the global scope list.  Remove it.  */
	      unsigned int cnt = GL(dl_main_searchlist)->r_nlist;

	      do
		--cnt;
	      while (GL(dl_main_searchlist)->r_list[cnt] != imap);

	      /* The object was already correctly registered.  */
	      while (++cnt < GL(dl_main_searchlist)->r_nlist)
		GL(dl_main_searchlist)->r_list[cnt - 1]
		  = GL(dl_main_searchlist)->r_list[cnt];

	      --GL(dl_main_searchlist)->r_nlist;
	    }

#ifdef USE_TLS
	  /* Remove the object from the dtv slotinfo array if it uses TLS.  */
	  if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
	    {
	      any_tls = true;

	      if (! remove_slotinfo (imap->l_tls_modid,
				     GL(dl_tls_dtv_slotinfo_list), 0,
				     imap->l_init_called))
		/* All dynamically loaded modules with TLS are unloaded.  */
		GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

	      if (imap->l_tls_offset != NO_TLS_OFFSET)
		{
		  /* Collect a contiguous chunk built from the objects in
		     this search list, going in either direction.  When the
		     whole chunk is at the end of the used area then we can
		     reclaim it.  */
# if TLS_TCB_AT_TP
		  /* TCB-at-TP layout: blocks grow downward, an object's
		     block occupies [offset - blocksize, offset).  */
		  if (tls_free_start == NO_TLS_OFFSET
		      || (size_t) imap->l_tls_offset == tls_free_start)
		    {
		      /* Extend the contiguous chunk being reclaimed.  */
		      tls_free_start
			= imap->l_tls_offset - imap->l_tls_blocksize;

		      if (tls_free_end == NO_TLS_OFFSET)
			tls_free_end = imap->l_tls_offset;
		    }
		  else if (imap->l_tls_offset - imap->l_tls_blocksize
			   == tls_free_end)
		    /* Extend the chunk backwards.  */
		    tls_free_end = imap->l_tls_offset;
		  else
		    {
		      /* This isn't contiguous with the last chunk freed.
			 One of them will be leaked unless we can free
			 one block right away.  */
		      if (tls_free_end == GL(dl_tls_static_used))
			{
			  GL(dl_tls_static_used) = tls_free_start;
			  tls_free_end = imap->l_tls_offset;
			  tls_free_start
			    = tls_free_end - imap->l_tls_blocksize;
			}
		      else if ((size_t) imap->l_tls_offset
			       == GL(dl_tls_static_used))
			GL(dl_tls_static_used)
			  = imap->l_tls_offset - imap->l_tls_blocksize;
		      else if (tls_free_end < (size_t) imap->l_tls_offset)
			{
			  /* We pick the later block.  It has a chance to
			     be freed.  */
			  tls_free_end = imap->l_tls_offset;
			  tls_free_start
			    = tls_free_end - imap->l_tls_blocksize;
			}
		    }
# elif TLS_DTV_AT_TP
		  /* DTV-at-TP layout: blocks grow upward, an object's
		     block occupies [offset, offset + blocksize).  */
		  if ((size_t) imap->l_tls_offset == tls_free_end)
		    /* Extend the contiguous chunk being reclaimed.  */
		    tls_free_end -= imap->l_tls_blocksize;
		  else if (imap->l_tls_offset + imap->l_tls_blocksize
			   == tls_free_start)
		    /* Extend the chunk backwards.  */
		    tls_free_start = imap->l_tls_offset;
		  else
		    {
		      /* This isn't contiguous with the last chunk freed.
			 One of them will be leaked.  */
		      if (tls_free_end == GL(dl_tls_static_used))
			GL(dl_tls_static_used) = tls_free_start;
		      tls_free_start = imap->l_tls_offset;
		      tls_free_end = tls_free_start + imap->l_tls_blocksize;
		    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
		}
	    }
#endif

	  /* We can unmap all the maps at once.  We determined the
	     start address and length when we loaded the object and
	     the `munmap' call does the rest.  */
	  DL_UNMAP (imap);

	  /* Finally, unlink the data structure and free it.  */
#ifdef SHARED
	  /* We will unlink the first object only if this is a statically
	     linked program.  */
	  assert (imap->l_prev != NULL);
	  imap->l_prev->l_next = imap->l_next;
#else
	  if (imap->l_prev != NULL)
	    imap->l_prev->l_next = imap->l_next;
	  else
	    GL(dl_loaded) = imap->l_next;
#endif
	  --GL(dl_nloaded);
	  if (imap->l_next)
	    imap->l_next->l_prev = imap->l_prev;

	  free (imap->l_versions);
	  if (imap->l_origin != (char *) -1)
	    free ((char *) imap->l_origin);

	  /* If the object has relocation dependencies save this
	     information for latter.  */
	  if (__builtin_expect (imap->l_reldeps != NULL, 0))
	    {
	      struct reldep_list *newrel;

	      newrel = (struct reldep_list *) alloca (sizeof (*reldeps)
						      + (imap->l_reldepsact
							 * sizeof (bool)));
	      newrel->rellist = imap->l_reldeps;
	      newrel->nrellist = imap->l_reldepsact;
	      newrel->next = reldeps;

	      newrel->nhandled = imap->l_reldepsact;
	      unsigned int j;
	      for (j = 0; j < imap->l_reldepsact; ++j)
		{
		  /* Find out whether this object is in our list.  */
		  if (imap->l_reldeps[j]->l_idx < nopencount
		      && list[imap->l_reldeps[j]->l_idx] == imap->l_reldeps[j])
		    /* Yes, it is.  */
		    newrel->handled[j] = true;
		  else
		    newrel->handled[j] = false;
		}

	      reldeps = newrel;
	    }

	  /* This name always is allocated.  */
	  free (imap->l_name);
	  /* Remove the list with all the names of the shared object.  */
	  lnp = imap->l_libname;
	  do
	    {
	      struct libname_list *this = lnp;
	      lnp = lnp->next;
	      if (!this->dont_free)
		free (this);
	    }
	  while (lnp != NULL);

	  /* Remove the searchlists.  */
	  if (imap != map)
	    free (imap->l_initfini);

	  /* Remove the scope array if we allocated it.  */
	  if (imap->l_scope != imap->l_scope_mem)
	    free (imap->l_scope);

	  if (imap->l_phdr_allocated)
	    free ((void *) imap->l_phdr);

	  if (imap->l_rpath_dirs.dirs != (void *) -1)
	    free (imap->l_rpath_dirs.dirs);
	  if (imap->l_runpath_dirs.dirs != (void *) -1)
	    free (imap->l_runpath_dirs.dirs);

	  free (imap);
	}
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
	__libc_fatal (_("TLS generation counter wrapped!  Please send report with the 'glibcbug' script."));

      if (tls_free_end == GL(dl_tls_static_used))
	GL(dl_tls_static_used) = tls_free_start;
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  _r_debug.r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Now we can perhaps also remove the modules for which we had
     dependencies because of symbol lookup.  */
  while (__builtin_expect (reldeps != NULL, 0))
    {
      while (reldeps->nrellist-- > 0)
	/* Some of the relocation dependencies might be on the
	   dependency list of the object we are closing right now.
	   They were already handled.  Do not close them again.  */
	if (reldeps->nrellist < reldeps->nhandled
	    && ! reldeps->handled[reldeps->nrellist])
	  /* Recursive close; GL(dl_load_lock) is recursive, so this
	     is safe while we still hold it.  */
	  _dl_close (reldeps->rellist[reldeps->nrellist]);

      free (reldeps->rellist);

      reldeps = reldeps->next;
    }

  free (list);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
libc_hidden_def (_dl_close)
565
566
#ifdef USE_TLS
/* Free the tail of the dtv slotinfo list starting at *ELEMP, but only
   the elements in which no slot is still mapped.  Returns true when
   *ELEMP (and everything after it) was released and *ELEMP set to NULL;
   false when this element must be kept because some module still uses
   one of its slots (or a later element had to be kept).  */
static bool
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  struct dtv_slotinfo_list *elem = *elemp;

  /* An empty tail is trivially "freed".  */
  if (elem == NULL)
    return true;

  /* Free from the back of the list forward; if the tail cannot go,
     neither can this element.  On success this also cleared our next
     pointer for us.  */
  if (! free_slotinfo (&elem->next))
    return false;

  /* Keep the element if any of its slots is still in use.  */
  for (size_t cnt = 0; cnt < elem->len; ++cnt)
    if (elem->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* Nothing in use: release this list element.  */
  free (elem);
  *elemp = NULL;

  return true;
}
#endif
595
596
/* __libc_freeres hook: release memory the dynamic linker allocated, once
   it is safe to do so (all dynamically loaded objects unloaded).  Used so
   memory-debugging tools do not report ld.so bookkeeping as leaks.  */
libc_freeres_fn (free_mem)
{
  /* Free the enlarged global scope only if it was heap-allocated and has
     shrunk back to the size of the initial (startup) search list.  */
  if (__builtin_expect (GL(dl_global_scope_alloc), 0) != 0
      && GL(dl_main_searchlist)->r_nlist == GL(dl_initial_searchlist).r_nlist)
    {
      /* All object dynamically loaded by the program are unloaded.  Free
	 the memory allocated for the global scope variable.  */
      struct link_map **old = GL(dl_main_searchlist)->r_list;

      /* Put the old map in.  */
      GL(dl_main_searchlist)->r_list = GL(dl_initial_searchlist).r_list;
      /* Signal that the original map is used.  */
      GL(dl_global_scope_alloc) = 0;

      /* Now free the old map.  */
      free (old);
    }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
	 this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
	/* There was no initial TLS setup, it was set up later when
	   it used the normal malloc.  */
	free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
# endif
      /* The first element of the list does not have to be deallocated.
	 It was allocated in the dynamic linker (i.e., with a different
	 malloc), and in the static library it's in .bss space.  */
      free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}