/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>

#include <dl-dst.h>


extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry,
						     ElfW(auxv_t) *auxv));
weak_extern (_dl_sysdep_start)

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */

/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};


static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;
  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc
     was loaded) the memory it uses comes from the malloc() stub
     in ld.so.  By the time we get here those stubs are no longer
     used; the malloc() implementation of libc is used instead.
     This means the block from the main map cannot be passed to
     realloc().  Therefore we allocate a completely new array the
     first time we have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

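      /* Threads performing lookups may still be walking the old array
	 under GSCOPE protection; wait for them to finish before it is
	 freed.  */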
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
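
/* add_to_global has two callers in dl_open_worker below: it promotes an
   already-open object into the global scope when dlopen is called on it
   with RTLD_GLOBAL, and it publishes a freshly loaded object after its
   initializers have run.  */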

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
internal_function
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
	  && (l->l_contiguous
	      || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
	{
	  assert (ns == l->l_ns);
	  return l;
	}
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
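
/* Illustrative note (a sketch, not part of the original source): any
   code address can be mapped back to its DSO this way, e.g.

     struct link_map *l
       = _dl_find_dso_for_object ((ElfW(Addr)) __builtin_return_address (0));

   yields the link map of the calling object.  dl_open_worker below does
   exactly this with the dlopen caller's address in order to choose the
   namespace and RUNPATH/RPATH search path.  */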

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID in which to put
     the new object, or when the file name has no path component, in
     which case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

#ifdef SHARED
      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#endif

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
	call_map = l;

      if (args->nsid == __LM_ID_CALLER)
	{
#ifndef SHARED
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
#endif
	    args->nsid = call_map->l_ns;
	}
    }

  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL, the RTLD_NOLOAD flag is set and
     the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work, and it also means copy
     relocations of dependencies are overwritten when necessary.  */
  size_t nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
	++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
	maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  if (nmaps > 1)
    {
      uint16_t seen[nmaps];
      /* Clear the whole array, not just the first NMAPS bytes.  */
      memset (seen, '\0', sizeof (seen));
      size_t i = 0;
      while (1)
	{
	  ++seen[i];
	  struct link_map *thisp = maps[i];

	  /* Find the last object in the list for which the current one is
	     a dependency and move the current object behind the object
	     with the dependency.  */
	  size_t k = nmaps - 1;
	  while (k > i)
	    {
	      struct link_map **runp = maps[k]->l_initfini;
	      if (runp != NULL)
		/* Look through the dependencies of the object.  */
		while (*runp != NULL)
		  if (__builtin_expect (*runp++ == thisp, 0))
		    {
		      /* Move the current object to the back past the last
			 object with it as the dependency.  */
		      memmove (&maps[i], &maps[i + 1],
			       (k - i) * sizeof (maps[0]));
		      maps[k] = thisp;

		      if (seen[i + 1] > nmaps - i)
			{
			  ++i;
			  goto next_clear;
			}

		      uint16_t this_seen = seen[i];
		      memmove (&seen[i], &seen[i + 1],
			       (k - i) * sizeof (seen[0]));
		      seen[k] = this_seen;

		      goto next;
		    }

	      --k;
	    }

	  if (++i == nmaps)
	    break;
	next_clear:
	  memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
	next:;
	}
    }
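
  /* The loop above is an insertion-style dependency sort: maps[i] is
     moved behind the last object whose l_initfini list contains it, so
     every object ends up after all objects that depend on it, and the
     backwards relocation loop below processes dependencies first.  A
     small illustration (hypothetical maps, not from this file): with
     maps = { C, A } where A's l_initfini contains C, C is moved behind
     A, giving { A, C }, and C is relocated before A.  The seen[]
     counters bound how often an entry can be re-shuffled so dependency
     cycles cannot make the loop spin forever.  */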

  int relocation_in_progress = 0;

  for (size_t i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	{
	  /* If this is the shared object we want to profile, make sure
	     profiling is started.  We can tell whether this is necessary
	     by observing the `_dl_profile_map' variable.  If it was NULL
	     before but is not NULL afterwards, we must start profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object was not
	 loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'r_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  /* Print only new scope information.  */
	  from_scope = cnt;
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully, add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}

      /* Print scope information.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	_dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static TLS data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now.  This includes allocating memory in the DTV.  But we
	     cannot change any DTV other than our own.  So, if we
	     cannot guarantee that there is room in the DTV we don't
	     even try it and fail the load.

	     XXX We could track the minimum DTV slots allocated in
	     all threads.  */
	  if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
	    _dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  GL(dl_init_static_tls) (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}


void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since the object will be removed by the following _dl_close
	     call we have to mark the dtv array as having gaps to fill
	     the holes.  This is a pessimistic assumption which won't
	     hurt if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
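
/* Illustrative call (a sketch; the real wrapper lives in dlfcn/, and the
   argc/argv arguments shown here are placeholders):

     void *handle = _dl_open ("libfoo.so", RTLD_LAZY | __RTLD_DLOPEN,
			      RETURN_ADDRESS (0), __LM_ID_CALLER,
			      argc, argv, __environ);

   On success the returned link map pointer serves as the dlopen
   handle.  */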


void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    *l->l_name ? l->l_name : rtld_progname, l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", rtld_progname);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}

#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

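  /* Note: ElfW(Addr) is unsigned, so the `>= 0' test below is always
     true; the real containment check is the second comparison, where an
     address below the segment wraps around to a huge value and fails
     the `< p_memsz' test.  */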
  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif