]>
git.ipfire.org Git - thirdparty/glibc.git/blob - elf/dl-fini.c
1 /* Call the termination functions of loaded shared objects.
2 Copyright (C) 1995,96,1998-2002,2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA.  */
26 /* Type of the destructor functions.  */
27 typedef void (*fini_t
) (void);
34 /* Lots of fun ahead. We have to call the destructors for all still
35 loaded objects, in all namespaces. The problem is that the ELF
36 specification now demands that dependencies between the modules
37 are taken into account. I.e., the destructor for a module is
38 called before the ones for any of its dependencies.
40 To make things more complicated, we cannot simply use the reverse
41 order of the constructors. Since the user might have loaded objects
42 using `dlopen' there are possibly several other modules with its
43 dependencies to be taken into account. Therefore we have to start
44 determining the order of the modules once again from the beginning. */
45 struct link_map
**maps
= NULL
;
48 /* We run the destructors of the main namespace last.  As for the
49 other namespaces, we run the destructors in them in reverse
50 order of the namespace ID.  */
51 for (Lmid_t cnt
= DL_NNS
- 1; cnt
>= 0; --cnt
)
53 /* Protect against concurrent loads and unloads. */
54 __rtld_lock_lock_recursive (GL(dl_load_lock
));
56 unsigned int nloaded
= GL(dl_ns
)[cnt
]._ns_nloaded
;
58 /* XXX Could it be (in static binaries) that there is no object
59 loaded at all?  */
60 assert (cnt
!= LM_ID_BASE
|| nloaded
> 0);
62 /* Now we can allocate an array to hold all the pointers and copy
63 the pointers in.  */
64 if (maps_size
< nloaded
* sizeof (struct link_map
*))
68 maps_size
= nloaded
* sizeof (struct link_map
*);
69 maps
= (struct link_map
**) alloca (maps_size
);
72 maps
= (struct link_map
**)
73 extend_alloca (maps
, maps_size
,
74 nloaded
* sizeof (struct link_map
*));
79 for (l
= GL(dl_ns
)[cnt
]._ns_loaded
, i
= 0; l
!= NULL
; l
= l
->l_next
)
80 /* Do not handle ld.so in secondary namespaces. */
87 /* Bump l_opencount of all objects so that they are not
88 dlclose()ed from underneath us. */
91 assert (cnt
!= LM_ID_BASE
|| i
== nloaded
);
92 assert (cnt
== LM_ID_BASE
|| i
== nloaded
|| i
== nloaded
- 1);
93 unsigned int nmaps
= i
;
97 /* Now we have to do the sorting. */
98 l
= GL(dl_ns
)[cnt
]._ns_loaded
;
99 if (cnt
== LM_ID_BASE
)
100 /* The main executable always comes first. */
102 for (; l
!= NULL
; l
= l
->l_next
)
103 /* Do not handle ld.so in secondary namespaces. */
106 /* Find the place in the 'maps' array. */
108 for (j
= cnt
== LM_ID_BASE
? 1 : 0; maps
[j
] != l
; ++j
)
111 /* Find all objects for which the current one is a dependency
112 and move the found object (if necessary) in front.  */
113 for (unsigned int k
= j
+ 1; k
< nmaps
; ++k
)
115 struct link_map
**runp
= maps
[k
]->l_initfini
;
118 while (*runp
!= NULL
)
121 struct link_map
*here
= maps
[k
];
124 memmove (&maps
[j
] + 1,
126 (k
- j
) * sizeof (struct link_map
*));
135 if (__builtin_expect (maps
[k
]->l_reldeps
!= NULL
, 0))
137 unsigned int m
= maps
[k
]->l_reldepsact
;
138 struct link_map
**relmaps
= maps
[k
]->l_reldeps
;
144 struct link_map
*here
= maps
[k
];
147 memmove (&maps
[j
] + 1,
149 (k
- j
) * sizeof (struct link_map
*));
160 /* We do not rely on the linked list of loaded object anymore from
161 this point on. We have our own list here (maps). The various
162 members of this list cannot vanish since the open count is too
163 high and will be decremented in this loop. So we release the
164 lock so that some code which might be called from a destructor
165 can directly or indirectly access the lock. */
166 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
168 /* 'maps' now contains the objects in the right order. Now call the
169 destructors. We have to process this array from the front. */
170 for (i
= 0; i
< nmaps
; ++i
)
174 if (l
->l_init_called
)
176 /* Make sure nothing happens if we are called twice. */
177 l
->l_init_called
= 0;
179 /* Don't call the destructors for objects we are not
180 supposed to.  */
181 if (l
->l_name
[0] == '\0' && l
->l_type
== lt_executable
)
184 /* Is there a destructor function? */
185 if (l
->l_info
[DT_FINI_ARRAY
] == NULL
186 && l
->l_info
[DT_FINI
] == NULL
)
189 /* When debugging print a message first. */
190 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_IMPCALLS
,
192 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
193 l
->l_name
[0] ? l
->l_name
: rtld_progname
,
196 /* First see whether an array is given. */
197 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
)
200 (ElfW(Addr
) *) (l
->l_addr
201 + l
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
202 unsigned int i
= (l
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
203 / sizeof (ElfW(Addr
)));
205 ((fini_t
) array
[i
]) ();
208 /* Next try the old-style destructor. */
209 if (l
->l_info
[DT_FINI
] != NULL
)
210 ((fini_t
) DL_DT_FINI_ADDRESS (l
, l
->l_addr
+ l
->l_info
[DT_FINI
]->d_un
.d_ptr
)) ();
213 /* Correct the previous increment. */
218 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_STATISTICS
, 0))
219 _dl_debug_printf ("\nruntime linker statistics:\n"
220 " final number of relocations: %lu\n"
221 "final number of relocations from cache: %lu\n",
222 GL(dl_num_relocations
),
223 GL(dl_num_cache_relocations
));