elf/dl-load.c
1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2002, 2003, 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <elf.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <libintl.h>
24 #include <stdbool.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <ldsodefs.h>
29 #include <bits/wordsize.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include "dynamic-link.h"
35 #include <abi-tag.h>
36 #include <dl-osinfo.h>
37 #include <stackinfo.h>
38 #include <caller.h>
39
40 #include <dl-dst.h>
41
42 /* On some systems, no flag bits are given to specify file mapping. */
43 #ifndef MAP_FILE
44 # define MAP_FILE 0
45 #endif
46
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version. */
54 #ifndef MAP_COPY
55 # define MAP_COPY MAP_PRIVATE
56 #endif
57
58 /* We want to prevent people from modifying DSOs which are currently in
59 use. This is what MAP_DENYWRITE is for. */
60 #ifndef MAP_DENYWRITE
61 # define MAP_DENYWRITE 0
62 #endif
63
64 /* Some systems link their relocatable objects at a base address other
65    than 0.  We want to know the base address for these so that we can
66 subtract this address from the segment addresses during mapping.
67 This results in a more efficient address space usage. Defaults to
68 zero for almost all systems. */
69 #ifndef MAP_BASE_ADDR
70 # define MAP_BASE_ADDR(l) 0
71 #endif
72
73
74 #include <endian.h>
75 #if BYTE_ORDER == BIG_ENDIAN
76 # define byteorder ELFDATA2MSB
77 #elif BYTE_ORDER == LITTLE_ENDIAN
78 # define byteorder ELFDATA2LSB
79 #else
80 # error "Unknown BYTE_ORDER " BYTE_ORDER
81 # define byteorder ELFDATANONE
82 #endif
83
84 #define STRING(x) __STRING (x)
85
86 #ifdef MAP_ANON
87 /* The fd is not examined when using MAP_ANON. */
88 # define ANONFD -1
89 #else
90 int _dl_zerofd = -1;
91 # define ANONFD _dl_zerofd
92 #endif
93
94 /* Handle situations where we have a preferred location in memory for
95 the shared objects. */
96 #ifdef ELF_PREFERRED_ADDRESS_DATA
97 ELF_PREFERRED_ADDRESS_DATA;
98 #endif
99 #ifndef ELF_PREFERRED_ADDRESS
100 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
101 #endif
102 #ifndef ELF_FIXED_ADDRESS
103 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
104 #endif
105
106
107 int __stack_prot attribute_hidden attribute_relro
108 = (PROT_READ|PROT_WRITE
109 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
110 |PROT_GROWSDOWN
111 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
112 |PROT_GROWSUP
113 #endif
114 );
115
116
117 /* Type for the buffer in which we put the ELF header and hopefully the
118    program header.  This buffer does not really have to be too large.
119    In most cases the program header follows the ELF header directly.  If
120    this is not the case all bets are off: the program header may lie
121    arbitrarily far into the file, and no fixed-size buffer is guaranteed
122    to capture it.  So the only question is how large the ELF and program
123    headers combined are.  The ELF header in 32-bit files is 52 bytes long
124    and in 64-bit files is 64 bytes long.  Each program header entry is 32
125    and 56 bytes long respectively.  I.e., even a file with 7 program
126    header entries needs only 276 or 456 bytes.  Add to this a bit of
127    margin for program notes and reading 512B and 640B for 32-bit and
128    64-bit files respectively is enough.  If this heuristic should
129    really fail for some file the code in `_dl_map_object_from_fd'
130    knows how to recover.  */
131 struct filebuf
132 {
133 ssize_t len;
134 #if __WORDSIZE == 32
135 # define FILEBUF_SIZE 512
136 #else
137 # define FILEBUF_SIZE 640
138 #endif
139 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
140 };
141
142 /* This is the decomposed LD_LIBRARY_PATH search path. */
143 static struct r_search_path_struct env_path_list attribute_relro;
144
145 /* List of the hardware capabilities we might end up using. */
146 static const struct r_strlenpair *capstr attribute_relro;
147 static size_t ncapstr attribute_relro;
148 static size_t max_capstrlen attribute_relro;
149
150
151 /* Get the generated information about the trusted directories. */
152 #include "trusted-dirs.h"
153
154 static const char system_dirs[] = SYSTEM_DIRS;
155 static const size_t system_dirs_len[] =
156 {
157 SYSTEM_DIRS_LEN
158 };
159 #define nsystem_dirs_len \
160 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
161
162
163 /* Local version of `strdup' function. */
164 static inline char *
165 local_strdup (const char *s)
166 {
167 size_t len = strlen (s) + 1;
168 void *new = malloc (len);
169
170 if (new == NULL)
171 return NULL;
172
173 return (char *) memcpy (new, s, len);
174 }
175
176
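/* Check whether NAME, which begins just past a '$' in the string that
   starts at START, names the dynamic string token STR, written either as
   "$STR" or as "${STR}".  Return the number of characters the token
   occupies after the '$' (including any braces), or 0 if it does not
   match.  IS_PATH says the string is a colon-separated path list, so a
   ':' may also terminate the token.  If SECURE is nonzero the token is
   only accepted when it makes up the entire string or path element.  */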
177 static size_t
178 is_dst (const char *start, const char *name, const char *str,
179 int is_path, int secure)
180 {
181 size_t len;
182 bool is_curly = false;
183
184 if (name[0] == '{')
185 {
186 is_curly = true;
187 ++name;
188 }
189
190 len = 0;
191 while (name[len] == str[len] && name[len] != '\0')
192 ++len;
193
194 if (is_curly)
195 {
196 if (name[len] != '}')
197 return 0;
198
199 /* Point again at the beginning of the name. */
200 --name;
201 /* Skip over closing curly brace and adjust for the --name. */
202 len += 2;
203 }
204 else if (name[len] != '\0' && name[len] != '/'
205 && (!is_path || name[len] != ':'))
206 return 0;
207
208 if (__builtin_expect (secure, 0)
209 && ((name[len] != '\0' && (!is_path || name[len] != ':'))
210 || (name != start + 1 && (!is_path || name[-2] != ':'))))
211 return 0;
212
213 return len;
214 }
215
216
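/* Return the number of dynamic string tokens ($ORIGIN, $PLATFORM, $LIB)
   that occur in NAME.  IS_PATH is nonzero when NAME is a colon-separated
   search path rather than a single file name.  */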
217 size_t
218 _dl_dst_count (const char *name, int is_path)
219 {
220 const char *const start = name;
221 size_t cnt = 0;
222
223 do
224 {
225 size_t len;
226
227       /* $ORIGIN is not expanded for SUID/SGID programs (except if it
228          is $ORIGIN alone) and it must always appear first in the path.  */
229 ++name;
230 if ((len = is_dst (start, name, "ORIGIN", is_path,
231 INTUSE(__libc_enable_secure))) != 0
232 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
233 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
234 ++cnt;
235
236 name = strchr (name + len, '$');
237 }
238 while (name != NULL);
239
240 return cnt;
241 }
242
243
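/* Expand the dynamic string tokens in NAME into RESULT, which must be
   large enough (see DL_DST_REQUIRED).  $ORIGIN, $PLATFORM and $LIB are
   replaced by their values, L supplying the origin; a path element whose
   replacement value is unknown is dropped entirely.  Returns RESULT.  */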
244 char *
245 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
246 int is_path)
247 {
248 const char *const start = name;
249 char *last_elem, *wp;
250
251 /* Now fill the result path. While copying over the string we keep
252    track of the start of the last path element.  When we come across
253 a DST we copy over the value or (if the value is not available)
254 leave the entire path element out. */
255 last_elem = wp = result;
256
257 do
258 {
259 if (__builtin_expect (*name == '$', 0))
260 {
261 const char *repl = NULL;
262 size_t len;
263
264 ++name;
265 if ((len = is_dst (start, name, "ORIGIN", is_path,
266 INTUSE(__libc_enable_secure))) != 0)
267 repl = l->l_origin;
268 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
269 repl = GLRO(dl_platform);
270 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
271 repl = DL_DST_LIB;
272
273 if (repl != NULL && repl != (const char *) -1)
274 {
275 wp = __stpcpy (wp, repl);
276 name += len;
277 }
278 else if (len > 1)
279 {
280 /* We cannot use this path element, the value of the
281 replacement is unknown. */
282 wp = last_elem;
283 name += len;
284 while (*name != '\0' && (!is_path || *name != ':'))
285 ++name;
286 }
287 else
288 /* No DST we recognize. */
289 *wp++ = '$';
290 }
291 else
292 {
293 *wp++ = *name++;
294 if (is_path && *name == ':')
295 last_elem = wp;
296 }
297 }
298 while (*name != '\0');
299
300 *wp = '\0';
301
302 return result;
303 }
304
305
306 /* Return copy of argument with all recognized dynamic string tokens
307    ($ORIGIN, $PLATFORM and $LIB) replaced.  On some platforms it
308 might not be possible to determine the path from which the object
309 belonging to the map is loaded. In this case the path element
310 containing $ORIGIN is left out. */
311 static char *
312 expand_dynamic_string_token (struct link_map *l, const char *s)
313 {
314 /* We make two runs over the string. First we determine how large the
315    resulting string is and then we copy it over.  Since this is not a
316    frequently executed operation we are looking here not for performance
317 but rather for code size. */
318 size_t cnt;
319 size_t total;
320 char *result;
321
322 /* Determine the number of DST elements. */
323 cnt = DL_DST_COUNT (s, 1);
324
325 /* If we do not have to replace anything simply copy the string. */
326 if (__builtin_expect (cnt, 0) == 0)
327 return local_strdup (s);
328
329 /* Determine the length of the substituted string. */
330 total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
331
332 /* Allocate the necessary memory. */
333 result = (char *) malloc (total + 1);
334 if (result == NULL)
335 return NULL;
336
337 return _dl_dst_substitute (l, s, result, 1);
338 }
339
340
341 /* Add `name' to the list of names for a particular shared object.
342    The string is copied into the new name record, so the caller keeps
343    ownership of `name'.  If the object already has this name, nothing
344    is done.  */
345 static void
346 internal_function
347 add_name_to_object (struct link_map *l, const char *name)
348 {
349 struct libname_list *lnp, *lastp;
350 struct libname_list *newname;
351 size_t name_len;
352
353 lastp = NULL;
354 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
355 if (strcmp (name, lnp->name) == 0)
356 return;
357
358 name_len = strlen (name) + 1;
359 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
360 if (newname == NULL)
361 {
362 /* No more memory. */
363 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
364 return;
365 }
366 /* The object should have a libname set from _dl_new_object. */
367 assert (lastp != NULL);
368
369 newname->name = memcpy (newname + 1, name, name_len);
370 newname->next = NULL;
371 newname->dont_free = 0;
372 lastp->next = newname;
373 }
374
375 /* Standard search directories. */
376 static struct r_search_path_struct rtld_search_dirs attribute_relro;
377
378 static size_t max_dirnamelen;
379
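/* Split the search path string RPATH at the separator characters in SEP
   and append an entry for each directory to RESULT, reusing entries
   already recorded in GL(dl_all_dirs) where possible.  When CHECK_TRUSTED
   is nonzero (SUID/SGID execution), directories outside the built-in
   system directories are silently dropped.  WHAT and WHERE describe the
   origin of the path for debugging output.  The array is terminated with
   a NULL entry and RESULT is returned.  */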
380 static struct r_search_path_elem **
381 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
382 int check_trusted, const char *what, const char *where)
383 {
384 char *cp;
385 size_t nelems = 0;
386
387 while ((cp = __strsep (&rpath, sep)) != NULL)
388 {
389 struct r_search_path_elem *dirp;
390 size_t len = strlen (cp);
391
392 /* `strsep' can pass an empty string. This has to be
393 interpreted as `use the current directory'. */
394 if (len == 0)
395 {
396 static const char curwd[] = "./";
397 cp = (char *) curwd;
398 }
399
400 /* Remove trailing slashes (except for "/"). */
401 while (len > 1 && cp[len - 1] == '/')
402 --len;
403
404 /* Now add one if there is none so far. */
405 if (len > 0 && cp[len - 1] != '/')
406 cp[len++] = '/';
407
408 /* Make sure we don't use untrusted directories if we run SUID. */
409 if (__builtin_expect (check_trusted, 0))
410 {
411 const char *trun = system_dirs;
412 size_t idx;
413 int unsecure = 1;
414
415 /* All trusted directories must be complete names. */
416 if (cp[0] == '/')
417 {
418 for (idx = 0; idx < nsystem_dirs_len; ++idx)
419 {
420 if (len == system_dirs_len[idx]
421 && memcmp (trun, cp, len) == 0)
422 {
423 /* Found it. */
424 unsecure = 0;
425 break;
426 }
427
428 trun += system_dirs_len[idx] + 1;
429 }
430 }
431
432 if (unsecure)
433 /* Simply drop this directory. */
434 continue;
435 }
436
437 /* See if this directory is already known. */
438 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
439 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
440 break;
441
442 if (dirp != NULL)
443 {
444 /* It is available, see whether it's on our own list. */
445 size_t cnt;
446 for (cnt = 0; cnt < nelems; ++cnt)
447 if (result[cnt] == dirp)
448 break;
449
450 if (cnt == nelems)
451 result[nelems++] = dirp;
452 }
453 else
454 {
455 size_t cnt;
456 enum r_dir_status init_val;
457 size_t where_len = where ? strlen (where) + 1 : 0;
458
459 /* It's a new directory. Create an entry and add it. */
460 dirp = (struct r_search_path_elem *)
461 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
462 + where_len + len + 1);
463 if (dirp == NULL)
464 _dl_signal_error (ENOMEM, NULL, NULL,
465 N_("cannot create cache for search path"));
466
467 dirp->dirname = ((char *) dirp + sizeof (*dirp)
468 + ncapstr * sizeof (enum r_dir_status));
469 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
470 dirp->dirnamelen = len;
471
472 if (len > max_dirnamelen)
473 max_dirnamelen = len;
474
475 /* We have to make sure all the relative directories are
476 never ignored. The current directory might change and
477 all our saved information would be void. */
478 init_val = cp[0] != '/' ? existing : unknown;
479 for (cnt = 0; cnt < ncapstr; ++cnt)
480 dirp->status[cnt] = init_val;
481
482 dirp->what = what;
483 if (__builtin_expect (where != NULL, 1))
484 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
485 + (ncapstr * sizeof (enum r_dir_status)),
486 where, where_len);
487 else
488 dirp->where = NULL;
489
490 dirp->next = GL(dl_all_dirs);
491 GL(dl_all_dirs) = dirp;
492
493 /* Put it in the result array. */
494 result[nelems++] = dirp;
495 }
496 }
497
498 /* Terminate the array. */
499 result[nelems] = NULL;
500
501 return result;
502 }
503
504
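/* Turn the DT_RPATH/DT_RUNPATH string RPATH of object L into a search
   path array and store it in *SPS.  Dynamic string tokens are expanded
   first.  If L is on the inhibit-rpath list (GLRO(dl_inhibit_rpath)) and
   we are not running SUID/SGID, an empty list is stored instead.  WHAT
   names the tag for debugging output.  */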
505 static void
506 internal_function
507 decompose_rpath (struct r_search_path_struct *sps,
508 const char *rpath, struct link_map *l, const char *what)
509 {
510 /* Make a copy we can work with. */
511 const char *where = l->l_name;
512 char *copy;
513 char *cp;
514 struct r_search_path_elem **result;
515 size_t nelems;
516 /* Initialize to please the compiler. */
517 const char *errstring = NULL;
518
519 /* First see whether we must forget the RUNPATH and RPATH from this
520 object. */
521 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
522 && !INTUSE(__libc_enable_secure))
523 {
524 const char *inhp = GLRO(dl_inhibit_rpath);
525
526 do
527 {
528 const char *wp = where;
529
530 while (*inhp == *wp && *wp != '\0')
531 {
532 ++inhp;
533 ++wp;
534 }
535
536 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
537 {
538 /* This object is on the list of objects for which the
539 RUNPATH and RPATH must not be used. */
540 result = calloc (1, sizeof *result);
541 if (result == NULL)
542 {
543 signal_error_cache:
544 errstring = N_("cannot create cache for search path");
545 signal_error:
546 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
547 }
548
549 sps->dirs = result;
550 sps->malloced = 1;
551
552 return;
553 }
554
555 while (*inhp != '\0')
556 if (*inhp++ == ':')
557 break;
558 }
559 while (*inhp != '\0');
560 }
561
562 /* Make a writable copy. At the same time expand possible dynamic
563 string tokens. */
564 copy = expand_dynamic_string_token (l, rpath);
565 if (copy == NULL)
566 {
567 errstring = N_("cannot create RUNPATH/RPATH copy");
568 goto signal_error;
569 }
570
571 /* Count the number of necessary elements in the result array. */
572 nelems = 0;
573 for (cp = copy; *cp != '\0'; ++cp)
574 if (*cp == ':')
575 ++nelems;
576
577 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
578 number of necessary entries. */
579 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
580 * sizeof (*result));
581 if (result == NULL)
582 goto signal_error_cache;
583
584 fillin_rpath (copy, result, ":", 0, what, where);
585
586   /* Free the copied RPATH string.  `fillin_rpath' makes its own copies if
587 necessary. */
588 free (copy);
589
590 sps->dirs = result;
591 /* The caller will change this value if we haven't used a real malloc. */
592 sps->malloced = 1;
593 }
594
595 /* Make sure cached path information is stored in *SP
596 and return true if there are any paths to search there. */
597 static bool
598 cache_rpath (struct link_map *l,
599 struct r_search_path_struct *sp,
600 int tag,
601 const char *what)
602 {
603 if (sp->dirs == (void *) -1)
604 return false;
605
606 if (sp->dirs != NULL)
607 return true;
608
609 if (l->l_info[tag] == NULL)
610 {
611 /* There is no path. */
612 sp->dirs = (void *) -1;
613 return false;
614 }
615
616 /* Make sure the cache information is available. */
617 decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
618 + l->l_info[tag]->d_un.d_val),
619 l, what);
620 return true;
621 }
622
623
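/* Set up the search path state used by the dynamic linker: the hardware
   capability strings, the built-in system directories, the main program's
   RPATH or RUNPATH, and the directories named in LLP (the value of
   LD_LIBRARY_PATH, split at ':' or ';').  */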
624 void
625 internal_function
626 _dl_init_paths (const char *llp)
627 {
628 size_t idx;
629 const char *strp;
630 struct r_search_path_elem *pelem, **aelem;
631 size_t round_size;
632 #ifdef SHARED
633 struct link_map *l;
634 #endif
635 /* Initialize to please the compiler. */
636 const char *errstring = NULL;
637
638 /* Fill in the information about the application's RPATH and the
639 directories addressed by the LD_LIBRARY_PATH environment variable. */
640
641 /* Get the capabilities. */
642 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
643 &ncapstr, &max_capstrlen);
644
645 /* First set up the rest of the default search directory entries. */
646 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
647 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
648 if (rtld_search_dirs.dirs == NULL)
649 {
650 errstring = N_("cannot create search path array");
651 signal_error:
652 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
653 }
654
655 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
656 + ncapstr * sizeof (enum r_dir_status))
657 / sizeof (struct r_search_path_elem));
658
659 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
660 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
661 * round_size * sizeof (struct r_search_path_elem));
662 if (rtld_search_dirs.dirs[0] == NULL)
663 {
664 errstring = N_("cannot create cache for search path");
665 goto signal_error;
666 }
667
668 rtld_search_dirs.malloced = 0;
669 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
670 strp = system_dirs;
671 idx = 0;
672
673 do
674 {
675 size_t cnt;
676
677 *aelem++ = pelem;
678
679 pelem->what = "system search path";
680 pelem->where = NULL;
681
682 pelem->dirname = strp;
683 pelem->dirnamelen = system_dirs_len[idx];
684 strp += system_dirs_len[idx] + 1;
685
686 /* System paths must be absolute. */
687 assert (pelem->dirname[0] == '/');
688 for (cnt = 0; cnt < ncapstr; ++cnt)
689 pelem->status[cnt] = unknown;
690
691 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
692
693 pelem += round_size;
694 }
695 while (idx < nsystem_dirs_len);
696
697 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
698 *aelem = NULL;
699
700 #ifdef SHARED
701 /* This points to the map of the main object. */
702 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
703 if (l != NULL)
704 {
705 assert (l->l_type != lt_loaded);
706
707 if (l->l_info[DT_RUNPATH])
708 {
709 /* Allocate room for the search path and fill in information
710 from RUNPATH. */
711 decompose_rpath (&l->l_runpath_dirs,
712 (const void *) (D_PTR (l, l_info[DT_STRTAB])
713 + l->l_info[DT_RUNPATH]->d_un.d_val),
714 l, "RUNPATH");
715
716 /* The RPATH is ignored. */
717 l->l_rpath_dirs.dirs = (void *) -1;
718 }
719 else
720 {
721 l->l_runpath_dirs.dirs = (void *) -1;
722
723 if (l->l_info[DT_RPATH])
724 {
725 /* Allocate room for the search path and fill in information
726 from RPATH. */
727 decompose_rpath (&l->l_rpath_dirs,
728 (const void *) (D_PTR (l, l_info[DT_STRTAB])
729 + l->l_info[DT_RPATH]->d_un.d_val),
730 l, "RPATH");
731 l->l_rpath_dirs.malloced = 0;
732 }
733 else
734 l->l_rpath_dirs.dirs = (void *) -1;
735 }
736 }
737 #endif /* SHARED */
738
739 if (llp != NULL && *llp != '\0')
740 {
741 size_t nllp;
742 const char *cp = llp;
743 char *llp_tmp = strdupa (llp);
744
745 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
746 elements it has. */
747 nllp = 1;
748 while (*cp)
749 {
750 if (*cp == ':' || *cp == ';')
751 ++nllp;
752 ++cp;
753 }
754
755 env_path_list.dirs = (struct r_search_path_elem **)
756 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
757 if (env_path_list.dirs == NULL)
758 {
759 errstring = N_("cannot create cache for search path");
760 goto signal_error;
761 }
762
763 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
764 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
765 NULL);
766
767 if (env_path_list.dirs[0] == NULL)
768 {
769 free (env_path_list.dirs);
770 env_path_list.dirs = (void *) -1;
771 }
772
773 env_path_list.malloced = 0;
774 }
775 else
776 env_path_list.dirs = (void *) -1;
777
778 /* Remember the last search directory added at startup. */
779 GLRO(dl_init_all_dirs) = GL(dl_all_dirs);
780 }
781
782
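/* Abort loading: close FD if it is still open, unlink the half-set-up
   link map L from its namespace and free it, free REALNAME, and signal
   the error described by CODE and MSG for object NAME.  Does not return.  */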
783 static void
784 __attribute__ ((noreturn, noinline))
785 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
786 const char *msg)
787 {
788 /* The file might already be closed. */
789 if (fd != -1)
790 (void) __close (fd);
791 if (l != NULL)
792 {
793 /* Remove the stillborn object from the list and free it. */
794 assert (l->l_next == NULL);
795 if (l->l_prev == NULL)
796 /* No other module loaded. This happens only in the static library,
797 or in rtld under --verify. */
798 GL(dl_ns)[l->l_ns]._ns_loaded = NULL;
799 else
800 l->l_prev->l_next = NULL;
801 --GL(dl_ns)[l->l_ns]._ns_nloaded;
802 free (l);
803 }
804 free (realname);
805 _dl_signal_error (code, name, NULL, msg);
806 }
807
808
809 /* Map in the shared object NAME, actually located in REALNAME, and already
810 opened on FD. */
811
812 #ifndef EXTERNAL_MAP_FROM_FD
813 static
814 #endif
815 struct link_map *
816 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
817 char *realname, struct link_map *loader, int l_type,
818 int mode, void **stack_endp, Lmid_t nsid)
819 {
820 struct link_map *l = NULL;
821 const ElfW(Ehdr) *header;
822 const ElfW(Phdr) *phdr;
823 const ElfW(Phdr) *ph;
824 size_t maplength;
825 int type;
826 struct stat64 st;
827 /* Initialize to keep the compiler happy. */
828 const char *errstring = NULL;
829 int errval = 0;
830
831 /* Get file information. */
832 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
833 {
834 errstring = N_("cannot stat shared object");
835 call_lose_errno:
836 errval = errno;
837 call_lose:
838 lose (errval, fd, name, realname, l, errstring);
839 }
840
841 /* Look again to see if the real name matched another already loaded. */
842 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
843 if (l->l_ino == st.st_ino && l->l_dev == st.st_dev)
844 {
845 /* The object is already loaded.
846 Just bump its reference count and return it. */
847 __close (fd);
848
849 /* If the name is not in the list of names for this object add
850 it. */
851 free (realname);
852 add_name_to_object (l, name);
853
854 return l;
855 }
856
857 #ifdef SHARED
858 /* When loading into a namespace other than the base one we must
859 avoid loading ld.so since there can only be one copy. Ever. */
860 if (__builtin_expect (nsid != LM_ID_BASE, 0)
861 && ((st.st_ino == GL(dl_rtld_map).l_ino
862 && st.st_dev == GL(dl_rtld_map).l_dev)
863 || _dl_name_match_p (name, &GL(dl_rtld_map))))
864 {
865 /* This is indeed ld.so. Create a new link_map which refers to
866 the real one for almost everything. */
867 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
868 if (l == NULL)
869 goto fail_new;
870
871 /* Refer to the real descriptor. */
872 l->l_real = &GL(dl_rtld_map);
873
874 /* No need to bump the refcount of the real object, ld.so will
875 never be unloaded. */
876 __close (fd);
877
878 return l;
879 }
880 #endif
881
882 if (mode & RTLD_NOLOAD)
883 /* We are not supposed to load the object unless it is already
884 loaded. So return now. */
885 return NULL;
886
887 /* Print debugging message. */
888 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
889 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
890
891 /* This is the ELF header. We read it in `open_verify'. */
892 header = (void *) fbp->buf;
893
894 #ifndef MAP_ANON
895 # define MAP_ANON 0
896 if (_dl_zerofd == -1)
897 {
898 _dl_zerofd = _dl_sysdep_open_zero_fill ();
899 if (_dl_zerofd == -1)
900 {
901 __close (fd);
902 _dl_signal_error (errno, NULL, NULL,
903 N_("cannot open zero fill device"));
904 }
905 }
906 #endif
907
908 /* Enter the new object in the list of loaded objects. */
909 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
910 if (__builtin_expect (l == NULL, 0))
911 {
912 #ifdef SHARED
913 fail_new:
914 #endif
915 errstring = N_("cannot create shared object descriptor");
916 goto call_lose_errno;
917 }
918
919 /* Extract the remaining details we need from the ELF header
920 and then read in the program header table. */
921 l->l_entry = header->e_entry;
922 type = header->e_type;
923 l->l_phnum = header->e_phnum;
924
925 maplength = header->e_phnum * sizeof (ElfW(Phdr));
926 if (header->e_phoff + maplength <= (size_t) fbp->len)
927 phdr = (void *) (fbp->buf + header->e_phoff);
928 else
929 {
930 phdr = alloca (maplength);
931 __lseek (fd, header->e_phoff, SEEK_SET);
932 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
933 {
934 errstring = N_("cannot read file data");
935 goto call_lose_errno;
936 }
937 }
938
939   /* Default stack flags, used when no PT_GNU_STACK segment is present.  */
940 uint_fast16_t stack_flags = PF_R|PF_W|PF_X;
941
942 {
943 /* Scan the program header table, collecting its load commands. */
944 struct loadcmd
945 {
946 ElfW(Addr) mapstart, mapend, dataend, allocend;
947 off_t mapoff;
948 int prot;
949 } loadcmds[l->l_phnum], *c;
950 size_t nloadcmds = 0;
951 bool has_holes = false;
952
953 /* The struct is initialized to zero so this is not necessary:
954 l->l_ld = 0;
955 l->l_phdr = 0;
956 l->l_addr = 0; */
957 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
958 switch (ph->p_type)
959 {
960 /* These entries tell us where to find things once the file's
961 segments are mapped in. We record the addresses it says
962 verbatim, and later correct for the run-time load address. */
963 case PT_DYNAMIC:
964 l->l_ld = (void *) ph->p_vaddr;
965 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
966 break;
967
968 case PT_PHDR:
969 l->l_phdr = (void *) ph->p_vaddr;
970 break;
971
972 case PT_LOAD:
973 /* A load command tells us to map in part of the file.
974 We record the load commands and process them all later. */
975 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
976 0))
977 {
978 errstring = N_("ELF load command alignment not page-aligned");
979 goto call_lose;
980 }
981 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
982 & (ph->p_align - 1)) != 0, 0))
983 {
984 errstring
985 = N_("ELF load command address/offset not properly aligned");
986 goto call_lose;
987 }
988
989 c = &loadcmds[nloadcmds++];
990 c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
991 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
992 & ~(GLRO(dl_pagesize) - 1));
993 c->dataend = ph->p_vaddr + ph->p_filesz;
994 c->allocend = ph->p_vaddr + ph->p_memsz;
995 c->mapoff = ph->p_offset & ~(ph->p_align - 1);
996
997 /* Determine whether there is a gap between the last segment
998 and this one. */
999 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1000 has_holes = true;
1001
1002 /* Optimize a common case. */
1003 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1004 c->prot = (PF_TO_PROT
1005 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
1006 #else
1007 c->prot = 0;
1008 if (ph->p_flags & PF_R)
1009 c->prot |= PROT_READ;
1010 if (ph->p_flags & PF_W)
1011 c->prot |= PROT_WRITE;
1012 if (ph->p_flags & PF_X)
1013 c->prot |= PROT_EXEC;
1014 #endif
1015 break;
1016
1017 case PT_TLS:
1018 #ifdef USE_TLS
1019 if (ph->p_memsz == 0)
1020 /* Nothing to do for an empty segment. */
1021 break;
1022
1023 l->l_tls_blocksize = ph->p_memsz;
1024 l->l_tls_align = ph->p_align;
1025 if (ph->p_align == 0)
1026 l->l_tls_firstbyte_offset = 0;
1027 else
1028 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1029 l->l_tls_initimage_size = ph->p_filesz;
1030 /* Since we don't know the load address yet only store the
1031 offset. We will adjust it later. */
1032 l->l_tls_initimage = (void *) ph->p_vaddr;
1033
1034 /* If not loading the initial set of shared libraries,
1035 check whether we should permit loading a TLS segment. */
1036 if (__builtin_expect (l->l_type == lt_library, 1)
1037 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1038 not set up TLS data structures, so don't use them now. */
1039 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1040 {
1041 /* Assign the next available module ID. */
1042 l->l_tls_modid = _dl_next_tls_modid ();
1043 break;
1044 }
1045
1046 # ifdef SHARED
1047 if (l->l_prev == NULL)
1048 /* We are loading the executable itself when the dynamic linker
1049 was executed directly. The setup will happen later. */
1050 break;
1051
1052 /* In a static binary there is no way to tell if we dynamically
1053 loaded libpthread. */
1054 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1055 # endif
1056 {
1057 /* We have not yet loaded libpthread.
1058 We can do the TLS setup right now! */
1059
1060 void *tcb;
1061
1062 /* The first call allocates TLS bookkeeping data structures.
1063 Then we allocate the TCB for the initial thread. */
1064 if (__builtin_expect (_dl_tls_setup (), 0)
1065 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1066 0))
1067 {
1068 errval = ENOMEM;
1069 errstring = N_("\
1070 cannot allocate TLS data structures for initial thread");
1071 goto call_lose;
1072 }
1073
1074 /* Now we install the TCB in the thread register. */
1075 errstring = TLS_INIT_TP (tcb, 0);
1076 if (__builtin_expect (errstring == NULL, 1))
1077 {
1078 /* Now we are all good. */
1079 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1080 break;
1081 }
1082
1083 /* The kernel is too old or somesuch. */
1084 errval = 0;
1085 _dl_deallocate_tls (tcb, 1);
1086 goto call_lose;
1087 }
1088 #endif
1089
1090 /* Uh-oh, the binary expects TLS support but we cannot
1091 provide it. */
1092 errval = 0;
1093 errstring = N_("cannot handle TLS data");
1094 goto call_lose;
1095 break;
1096
1097 case PT_GNU_STACK:
1098 stack_flags = ph->p_flags;
1099 break;
1100
1101 case PT_GNU_RELRO:
1102 l->l_relro_addr = ph->p_vaddr;
1103 l->l_relro_size = ph->p_memsz;
1104 break;
1105 }
1106
1107 if (__builtin_expect (nloadcmds == 0, 0))
1108 {
1109 /* This only happens for a bogus object that will be caught with
1110 another error below. But we don't want to go through the
1111 calculations below using NLOADCMDS - 1. */
1112 errstring = N_("object file has no loadable segments");
1113 goto call_lose;
1114 }
1115
1116 /* Now process the load commands and map segments into memory. */
1117 c = loadcmds;
1118
1119     /* Length of the segments to be loaded.  */
1120 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1121
1122 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1123 {
1124 /* This is a position-independent shared object. We can let the
1125 kernel map it anywhere it likes, but we must have space for all
1126 the segments in their specified positions relative to the first.
1127 So we map the first segment without MAP_FIXED, but with its
1128         extent increased to cover all the segments.  Then we remove
1129         access from the excess portion, and there is known to be
1130         sufficient space there into which to remap the later segments.
1131
1132 As a refinement, sometimes we have an address that we would
1133 prefer to map such objects at; but this is only a preference,
1134 the OS can do whatever it likes. */
1135 ElfW(Addr) mappref;
1136 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1137 c->mapstart & GLRO(dl_use_load_bias))
1138 - MAP_BASE_ADDR (l));
1139
1140 /* Remember which part of the address space this object uses. */
1141 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1142 c->prot,
1143 MAP_COPY|MAP_FILE|MAP_DENYWRITE,
1144 fd, c->mapoff);
1145 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1146 {
1147 map_error:
1148 errstring = N_("failed to map segment from shared object");
1149 goto call_lose_errno;
1150 }
1151
1152 l->l_map_end = l->l_map_start + maplength;
1153 l->l_addr = l->l_map_start - c->mapstart;
1154
1155 if (has_holes)
1156 /* Change protection on the excess portion to disallow all access;
1157 the portions we do not remap later will be inaccessible as if
1158 unallocated. Then jump into the normal segment-mapping loop to
1159 handle the portion of the segment past the end of the file
1160 mapping. */
1161 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1162 loadcmds[nloadcmds - 1].allocend - c->mapend,
1163 PROT_NONE);
1164
1165 goto postmap;
1166 }
1167
1168 /* This object is loaded at a fixed address. This must never
1169 happen for objects loaded with dlopen(). */
1170 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1171 {
1172 errstring = N_("cannot dynamically load executable");
1173 goto call_lose;
1174 }
1175
1176 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1177 fixed. */
1178 ELF_FIXED_ADDRESS (loader, c->mapstart);
1179
1180
1181 /* Remember which part of the address space this object uses. */
1182 l->l_map_start = c->mapstart + l->l_addr;
1183 l->l_map_end = l->l_map_start + maplength;
1184
1185 while (c < &loadcmds[nloadcmds])
1186 {
1187 if (c->mapend > c->mapstart
1188 /* Map the segment contents from the file. */
1189 && (__mmap ((void *) (l->l_addr + c->mapstart),
1190 c->mapend - c->mapstart, c->prot,
1191 MAP_FIXED|MAP_COPY|MAP_FILE|MAP_DENYWRITE,
1192 fd, c->mapoff)
1193 == MAP_FAILED))
1194 goto map_error;
1195
1196 postmap:
1197 if (c->prot & PROT_EXEC)
1198 l->l_text_end = l->l_addr + c->mapend;
1199
1200 if (l->l_phdr == 0
1201 && (ElfW(Off)) c->mapoff <= header->e_phoff
1202 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1203 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1204 /* Found the program header in this segment. */
1205 l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
1206
1207 if (c->allocend > c->dataend)
1208 {
1209 /* Extra zero pages should appear at the end of this segment,
1210 after the data mapped from the file. */
1211 ElfW(Addr) zero, zeroend, zeropage;
1212
1213 zero = l->l_addr + c->dataend;
1214 zeroend = l->l_addr + c->allocend;
1215 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1216 & ~(GLRO(dl_pagesize) - 1));
1217
1218 if (zeroend < zeropage)
1219 /* All the extra data is in the last page of the segment.
1220 We can just zero it. */
1221 zeropage = zeroend;
1222
1223 if (zeropage > zero)
1224 {
1225 /* Zero the final part of the last page of the segment. */
1226 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1227 {
1228 /* Dag nab it. */
1229 if (__mprotect ((caddr_t) (zero
1230 & ~(GLRO(dl_pagesize) - 1)),
1231 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1232 {
1233 errstring = N_("cannot change memory protections");
1234 goto call_lose_errno;
1235 }
1236 }
1237 memset ((void *) zero, '\0', zeropage - zero);
1238 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1239 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1240 GLRO(dl_pagesize), c->prot);
1241 }
1242
1243 if (zeroend > zeropage)
1244 {
1245 /* Map the remaining zero pages in from the zero fill FD. */
1246 caddr_t mapat;
1247 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1248 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1249 ANONFD, 0);
1250 if (__builtin_expect (mapat == MAP_FAILED, 0))
1251 {
1252 errstring = N_("cannot map zero-fill pages");
1253 goto call_lose_errno;
1254 }
1255 }
1256 }
1257
1258 ++c;
1259 }
1260 }
1261
1262 if (l->l_ld == 0)
1263 {
1264 if (__builtin_expect (type == ET_DYN, 0))
1265 {
1266 errstring = N_("object file has no dynamic section");
1267 goto call_lose;
1268 }
1269 }
1270 else
1271 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1272
1273 elf_get_dynamic_info (l, NULL);
1274
1275 /* Make sure we are not dlopen'ing an object that has the
1276 DF_1_NOOPEN flag set. */
1277 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1278 && (mode & __RTLD_DLOPEN))
1279 {
1280 /* We are not supposed to load this object. Free all resources. */
1281 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1282
1283 if (!l->l_libname->dont_free)
1284 free (l->l_libname);
1285
1286 if (l->l_phdr_allocated)
1287 free ((void *) l->l_phdr);
1288
1289 errstring = N_("shared object cannot be dlopen()ed");
1290 goto call_lose;
1291 }
1292
1293 if (l->l_phdr == NULL)
1294 {
1295 /* The program header is not contained in any of the segments.
1296         We have to allocate memory ourselves and copy it over from our
1297         temporary place.  */
1298 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1299 * sizeof (ElfW(Phdr)));
1300 if (newp == NULL)
1301 {
1302 errstring = N_("cannot allocate memory for program header");
1303 goto call_lose_errno;
1304 }
1305
1306 l->l_phdr = memcpy (newp, phdr,
1307 (header->e_phnum * sizeof (ElfW(Phdr))));
1308 l->l_phdr_allocated = 1;
1309 }
1310 else
1311 /* Adjust the PT_PHDR value by the runtime load address. */
1312 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1313
1314 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1315 {
1316 /* The stack is presently not executable, but this module
1317 requires that it be executable. We must change the
1318 protection of the variable which contains the flags used in
1319 the mprotect calls. */
1320 #ifdef HAVE_Z_RELRO
1321 if (mode & __RTLD_DLOPEN)
1322 {
1323 uintptr_t p = ((uintptr_t) &__stack_prot) & ~(GLRO(dl_pagesize) - 1);
1324 size_t s = (uintptr_t) &__stack_prot - p + sizeof (int);
1325
1326 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1327 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
1328 allow_ldso|allow_libc) == 0,
1329 0))
1330 __stack_prot |= PROT_EXEC;
1331 __mprotect ((void *) p, s, PROT_READ);
1332 }
1333 else
1334 #endif
1335 __stack_prot |= PROT_EXEC;
1336
1337 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1338 if (errval)
1339 {
1340 errstring = N_("\
1341 cannot enable executable stack as shared object requires");
1342 goto call_lose;
1343 }
1344 }
1345
1346 #ifdef USE_TLS
1347 /* Adjust the address of the TLS initialization image. */
1348 if (l->l_tls_initimage != NULL)
1349 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1350 #endif
1351
1352 /* We are done mapping in the file. We no longer need the descriptor. */
1353 if (__builtin_expect (__close (fd) != 0, 0))
1354 {
1355 errstring = N_("cannot close file descriptor");
1356 goto call_lose_errno;
1357 }
1358 /* Signal that we closed the file. */
1359 fd = -1;
1360
1361 if (l->l_type == lt_library && type == ET_EXEC)
1362 l->l_type = lt_executable;
1363
1364 l->l_entry += l->l_addr;
1365
1366 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1367 _dl_debug_printf ("\
1368 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1369 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1370 (int) sizeof (void *) * 2,
1371 (unsigned long int) l->l_ld,
1372 (int) sizeof (void *) * 2,
1373 (unsigned long int) l->l_addr,
1374 (int) sizeof (void *) * 2, maplength,
1375 (int) sizeof (void *) * 2,
1376 (unsigned long int) l->l_entry,
1377 (int) sizeof (void *) * 2,
1378 (unsigned long int) l->l_phdr,
1379 (int) sizeof (void *) * 2, l->l_phnum);
1380
1381 /* Set up the symbol hash table. */
1382 _dl_setup_hash (l);
1383
1384   /* If this object has DT_SYMBOLIC set, modify its scope now.  We don't
1385 have to do this for the main map. */
1386 if ((mode & RTLD_DEEPBIND) == 0
1387 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1388 && &l->l_searchlist != l->l_scope[0])
1389 {
1390 /* Create an appropriate searchlist. It contains only this map.
1391 This is the definition of DT_SYMBOLIC in SysVr4. */
1392 l->l_symbolic_searchlist.r_list =
1393 (struct link_map **) malloc (sizeof (struct link_map *));
1394
1395 if (l->l_symbolic_searchlist.r_list == NULL)
1396 {
1397 errstring = N_("cannot create searchlist");
1398 goto call_lose_errno;
1399 }
1400
1401 l->l_symbolic_searchlist.r_list[0] = l;
1402 l->l_symbolic_searchlist.r_nlist = 1;
1403
1404 /* Now move the existing entries one back. */
1405 memmove (&l->l_scope[1], &l->l_scope[0],
1406 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1407
1408 /* Now add the new entry. */
1409 l->l_scope[0] = &l->l_symbolic_searchlist;
1410 }
1411
1412 /* Remember whether this object must be initialized first. */
1413 if (l->l_flags_1 & DF_1_INITFIRST)
1414 GL(dl_initfirst) = l;
1415
1416 /* Finally the file information. */
1417 l->l_dev = st.st_dev;
1418 l->l_ino = st.st_ino;
1419
1420   /* When we profile, the SONAME might be needed for something other than
1421 loading. Add it right away. */
1422 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1423 && l->l_info[DT_SONAME] != NULL)
1424 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1425 + l->l_info[DT_SONAME]->d_un.d_val));
1426
1427 return l;
1428 }
1429 \f
1430 /* Print search path. */
1431 static void
1432 print_search_path (struct r_search_path_elem **list,
1433 const char *what, const char *name)
1434 {
1435 char buf[max_dirnamelen + max_capstrlen];
1436 int first = 1;
1437
1438 _dl_debug_printf (" search path=");
1439
1440 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1441 {
1442 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1443 size_t cnt;
1444
1445 for (cnt = 0; cnt < ncapstr; ++cnt)
1446 if ((*list)->status[cnt] != nonexisting)
1447 {
1448 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1449 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1450 cp[0] = '\0';
1451 else
1452 cp[-1] = '\0';
1453
1454 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1455 first = 0;
1456 }
1457
1458 ++list;
1459 }
1460
1461 if (name != NULL)
1462 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1463 name[0] ? name : rtld_progname);
1464 else
1465 _dl_debug_printf_c ("\t\t(%s)\n", what);
1466 }
1467 \f
1468 /* Open a file and verify it is an ELF file for this architecture. We
1469 ignore only ELF files for other architectures. Non-ELF files and
1470 ELF files with different header information cause fatal errors since
1471 this could mean there is something wrong in the installation and the
1472 user might want to know about this. */
1473 static int
1474 open_verify (const char *name, struct filebuf *fbp)
1475 {
1476 /* This is the expected ELF header. */
1477 #define ELF32_CLASS ELFCLASS32
1478 #define ELF64_CLASS ELFCLASS64
1479 #ifndef VALID_ELF_HEADER
1480 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1481 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1482 # define VALID_ELF_ABIVERSION(ver) (ver == 0)
1483 #endif
1484 static const unsigned char expected[EI_PAD] =
1485 {
1486 [EI_MAG0] = ELFMAG0,
1487 [EI_MAG1] = ELFMAG1,
1488 [EI_MAG2] = ELFMAG2,
1489 [EI_MAG3] = ELFMAG3,
1490 [EI_CLASS] = ELFW(CLASS),
1491 [EI_DATA] = byteorder,
1492 [EI_VERSION] = EV_CURRENT,
1493 [EI_OSABI] = ELFOSABI_SYSV,
1494 [EI_ABIVERSION] = 0
1495 };
1496 static const struct
1497 {
1498 ElfW(Word) vendorlen;
1499 ElfW(Word) datalen;
1500 ElfW(Word) type;
1501 char vendor[4];
1502 } expected_note = { 4, 16, 1, "GNU" };
1503 int fd;
1504 /* Initialize it to make the compiler happy. */
1505 const char *errstring = NULL;
1506 int errval = 0;
1507
1508 /* Open the file. We always open files read-only. */
1509 fd = __open (name, O_RDONLY);
1510 if (fd != -1)
1511 {
1512 ElfW(Ehdr) *ehdr;
1513 ElfW(Phdr) *phdr, *ph;
1514 ElfW(Word) *abi_note, abi_note_buf[8];
1515 unsigned int osversion;
1516 size_t maplength;
1517
1518       /* We successfully opened the file.  Now verify it is a file
1519 we can use. */
1520 __set_errno (0);
1521 fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));
1522
1523 /* This is where the ELF header is loaded. */
1524 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1525 ehdr = (ElfW(Ehdr) *) fbp->buf;
1526
1527 /* Now run the tests. */
1528 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1529 {
1530 errval = errno;
1531 errstring = (errval == 0
1532 ? N_("file too short") : N_("cannot read file data"));
1533 call_lose:
1534 lose (errval, fd, name, NULL, NULL, errstring);
1535 }
1536
1537 /* See whether the ELF header is what we expect. */
1538 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1539 EI_PAD), 0))
1540 {
1541 /* Something is wrong. */
1542 if (*(Elf32_Word *) &ehdr->e_ident !=
1543 #if BYTE_ORDER == LITTLE_ENDIAN
1544 ((ELFMAG0 << (EI_MAG0 * 8)) |
1545 (ELFMAG1 << (EI_MAG1 * 8)) |
1546 (ELFMAG2 << (EI_MAG2 * 8)) |
1547 (ELFMAG3 << (EI_MAG3 * 8)))
1548 #else
1549 ((ELFMAG0 << (EI_MAG3 * 8)) |
1550 (ELFMAG1 << (EI_MAG2 * 8)) |
1551 (ELFMAG2 << (EI_MAG1 * 8)) |
1552 (ELFMAG3 << (EI_MAG0 * 8)))
1553 #endif
1554 )
1555 errstring = N_("invalid ELF header");
1556 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1557 /* This is not a fatal error. On architectures where
1558 32-bit and 64-bit binaries can be run this might
1559 happen. */
1560 goto close_and_out;
1561 else if (ehdr->e_ident[EI_DATA] != byteorder)
1562 {
1563 if (BYTE_ORDER == BIG_ENDIAN)
1564 errstring = N_("ELF file data encoding not big-endian");
1565 else
1566 errstring = N_("ELF file data encoding not little-endian");
1567 }
1568 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1569 errstring
1570 = N_("ELF file version ident does not match current one");
1571           /* XXX We should be able to set system specific versions which are
1572 allowed here. */
1573 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1574 errstring = N_("ELF file OS ABI invalid");
1575 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_ABIVERSION]))
1576 errstring = N_("ELF file ABI version invalid");
1577 else
1578 /* Otherwise we don't know what went wrong. */
1579 errstring = N_("internal error");
1580
1581 goto call_lose;
1582 }
1583
1584 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1585 {
1586 errstring = N_("ELF file version does not match current one");
1587 goto call_lose;
1588 }
1589 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1590 goto close_and_out;
1591 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1592 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1593 {
1594 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1595 goto call_lose;
1596 }
1597 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1598 != sizeof (ElfW(Phdr)))
1599 {
1600 errstring = N_("ELF file's phentsize not the expected size");
1601 goto call_lose;
1602 }
1603
1604 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1605 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1606 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1607 else
1608 {
1609 phdr = alloca (maplength);
1610 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1611 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1612 {
1613 read_error:
1614 errval = errno;
1615 errstring = N_("cannot read file data");
1616 goto call_lose;
1617 }
1618 }
1619
1620 /* Check .note.ABI-tag if present. */
1621 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1622 if (ph->p_type == PT_NOTE && ph->p_filesz == 32 && ph->p_align >= 4)
1623 {
1624 if (ph->p_offset + 32 <= (size_t) fbp->len)
1625 abi_note = (void *) (fbp->buf + ph->p_offset);
1626 else
1627 {
1628 __lseek (fd, ph->p_offset, SEEK_SET);
1629 if (__libc_read (fd, (void *) abi_note_buf, 32) != 32)
1630 goto read_error;
1631
1632 abi_note = abi_note_buf;
1633 }
1634
1635 if (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1636 continue;
1637
1638 osversion = (abi_note[5] & 0xff) * 65536
1639 + (abi_note[6] & 0xff) * 256
1640 + (abi_note[7] & 0xff);
1641 if (abi_note[4] != __ABI_TAG_OS
1642 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
1643 {
1644 close_and_out:
1645 __close (fd);
1646 __set_errno (ENOENT);
1647 fd = -1;
1648 }
1649
1650 break;
1651 }
1652 }
1653
1654 return fd;
1655 }
1656 \f
1657 /* Try to open NAME in one of the directories in *DIRSP.
1658 Return the fd, or -1. If successful, fill in *REALNAME
1659 with the malloc'd full directory name. If it turns out
1660 that none of the directories in *DIRSP exists, *DIRSP is
1661 replaced with (void *) -1, and the old value is free()d
1662 if MAY_FREE_DIRS is true. */
1663
1664 static int
1665 open_path (const char *name, size_t namelen, int preloaded,
1666 struct r_search_path_struct *sps, char **realname,
1667 struct filebuf *fbp)
1668 {
1669 struct r_search_path_elem **dirs = sps->dirs;
1670 char *buf;
1671 int fd = -1;
1672 const char *current_what = NULL;
1673 int any = 0;
1674
1675 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
1676 do
1677 {
1678 struct r_search_path_elem *this_dir = *dirs;
1679 size_t buflen = 0;
1680 size_t cnt;
1681 char *edp;
1682 int here_any = 0;
1683 int err;
1684
1685 /* If we are debugging the search for libraries print the path
1686          now if it hasn't happened yet.  */
1687 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1688 && current_what != this_dir->what)
1689 {
1690 current_what = this_dir->what;
1691 print_search_path (dirs, current_what, this_dir->where);
1692 }
1693
1694 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1695 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1696 {
1697 /* Skip this directory if we know it does not exist. */
1698 if (this_dir->status[cnt] == nonexisting)
1699 continue;
1700
1701 buflen =
1702 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1703 capstr[cnt].len),
1704 name, namelen)
1705 - buf);
1706
1707 /* Print name we try if this is wanted. */
1708 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1709 _dl_debug_printf (" trying file=%s\n", buf);
1710
1711 fd = open_verify (buf, fbp);
1712 if (this_dir->status[cnt] == unknown)
1713 {
1714 if (fd != -1)
1715 this_dir->status[cnt] = existing;
1716 else
1717 {
1718                   /* We failed to open a machine-dependent library.  Let's
1719 test whether there is any directory at all. */
1720 struct stat64 st;
1721
1722 buf[buflen - namelen - 1] = '\0';
1723
1724 if (__xstat64 (_STAT_VER, buf, &st) != 0
1725 || ! S_ISDIR (st.st_mode))
1726                     /* The directory does not exist or it is not a directory.  */
1727 this_dir->status[cnt] = nonexisting;
1728 else
1729 this_dir->status[cnt] = existing;
1730 }
1731 }
1732
1733 /* Remember whether we found any existing directory. */
1734 here_any |= this_dir->status[cnt] == existing;
1735
1736 if (fd != -1 && __builtin_expect (preloaded, 0)
1737 && INTUSE(__libc_enable_secure))
1738 {
1739 /* This is an extra security effort to make sure nobody can
1740 preload broken shared objects which are in the trusted
1741 directories and so exploit the bugs. */
1742 struct stat64 st;
1743
1744 if (__fxstat64 (_STAT_VER, fd, &st) != 0
1745 || (st.st_mode & S_ISUID) == 0)
1746 {
1747 /* The shared object cannot be tested for being SUID
1748 or this bit is not set. In this case we must not
1749 use this object. */
1750 __close (fd);
1751 fd = -1;
1752 /* We simply ignore the file, signal this by setting
1753 the error value which would have been set by `open'. */
1754 errno = ENOENT;
1755 }
1756 }
1757 }
1758
1759 if (fd != -1)
1760 {
1761 *realname = (char *) malloc (buflen);
1762 if (*realname != NULL)
1763 {
1764 memcpy (*realname, buf, buflen);
1765 return fd;
1766 }
1767 else
1768 {
1769 /* No memory for the name, we certainly won't be able
1770 to load and link it. */
1771 __close (fd);
1772 return -1;
1773 }
1774 }
1775 if (here_any && (err = errno) != ENOENT && err != EACCES)
1776 /* The file exists and is readable, but something went wrong. */
1777 return -1;
1778
1779 /* Remember whether we found anything. */
1780 any |= here_any;
1781 }
1782 while (*++dirs != NULL);
1783
1784 /* Remove the whole path if none of the directories exists. */
1785 if (__builtin_expect (! any, 0))
1786 {
1787 /* Paths which were allocated using the minimal malloc() in ld.so
1788 must not be freed using the general free() in libc. */
1789 if (sps->malloced)
1790 free (sps->dirs);
1791 sps->dirs = (void *) -1;
1792 }
1793
1794 return -1;
1795 }
1796
1797 /* Map in the shared object file NAME. */
1798
1799 struct link_map *
1800 internal_function
1801 _dl_map_object (struct link_map *loader, const char *name, int preloaded,
1802 int type, int trace_mode, int mode, Lmid_t nsid)
1803 {
1804 int fd;
1805 char *realname;
1806 char *name_copy;
1807 struct link_map *l;
1808 struct filebuf fb;
1809
1810 assert (nsid >= 0);
1811 assert (nsid < DL_NNS);
1812
1813 /* Look for this name among those already loaded. */
1814 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
1815 {
1816 /* If the requested name matches the soname of a loaded object,
1817 use that object. Elide this check for names that have not
1818 yet been opened. */
1819 if (__builtin_expect (l->l_faked, 0) != 0)
1820 continue;
1821 if (!_dl_name_match_p (name, l))
1822 {
1823 const char *soname;
1824
1825 if (__builtin_expect (l->l_soname_added, 1)
1826 || l->l_info[DT_SONAME] == NULL)
1827 continue;
1828
1829 soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
1830 + l->l_info[DT_SONAME]->d_un.d_val);
1831 if (strcmp (name, soname) != 0)
1832 continue;
1833
1834 /* We have a match on a new name -- cache it. */
1835 add_name_to_object (l, soname);
1836 l->l_soname_added = 1;
1837 }
1838
1839 /* We have a match. */
1840 return l;
1841 }
1842
1843 /* Display information if we are debugging. */
1844 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
1845 && loader != NULL)
1846 _dl_debug_printf ("\nfile=%s [%lu]; needed by %s [%lu]\n", name, nsid,
1847 loader->l_name[0]
1848 ? loader->l_name : rtld_progname, loader->l_ns);
1849
1850 if (strchr (name, '/') == NULL)
1851 {
1852 /* Search for NAME in several places. */
1853
1854 size_t namelen = strlen (name) + 1;
1855
1856 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1857 _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
1858
1859 fd = -1;
1860
1861 /* When the object has the RUNPATH information we don't use any
1862 RPATHs. */
1863 if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
1864 {
1865 /* First try the DT_RPATH of the dependent object that caused NAME
1866 to be loaded. Then that object's dependent, and on up. */
1867 for (l = loader; fd == -1 && l; l = l->l_loader)
1868 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
1869 fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
1870 &realname, &fb);
1871
1872 /* If dynamically linked, try the DT_RPATH of the executable
1873 itself. NB: we do this for lookups in any namespace. */
1874 if (fd == -1)
1875 {
1876 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
1877 if (l && l->l_type != lt_loaded && l != loader
1878 && cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
1879 fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
1880 &realname, &fb);
1881 }
1882 }
1883
1884 /* Try the LD_LIBRARY_PATH environment variable. */
1885 if (fd == -1 && env_path_list.dirs != (void *) -1)
1886 fd = open_path (name, namelen, preloaded, &env_path_list,
1887 &realname, &fb);
1888
1889 /* Look at the RUNPATH information for this binary. */
1890 if (fd == -1 && loader != NULL
1891 && cache_rpath (loader, &loader->l_runpath_dirs,
1892 DT_RUNPATH, "RUNPATH"))
1893 fd = open_path (name, namelen, preloaded,
1894 &loader->l_runpath_dirs, &realname, &fb);
1895
1896 if (fd == -1
1897 && (__builtin_expect (! preloaded, 1)
1898 || ! INTUSE(__libc_enable_secure)))
1899 {
1900 /* Check the list of libraries in the file /etc/ld.so.cache,
1901 for compatibility with Linux's ldconfig program. */
1902 const char *cached = _dl_load_cache_lookup (name);
1903
1904 if (cached != NULL)
1905 {
1906 #ifdef SHARED
1907 // XXX Correct to unconditionally default to namespace 0?
1908 l = loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded;
1909 #else
1910 l = loader;
1911 #endif
1912
1913 /* If the loader has the DF_1_NODEFLIB flag set we must not
1914 use a cache entry from any of these directories. */
1915 if (
1916 #ifndef SHARED
1917 /* 'l' is always != NULL for dynamically linked objects. */
1918 l != NULL &&
1919 #endif
1920 __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
1921 {
1922 const char *dirp = system_dirs;
1923 unsigned int cnt = 0;
1924
1925 do
1926 {
1927 if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
1928 {
1929 /* The prefix matches. Don't use the entry. */
1930 cached = NULL;
1931 break;
1932 }
1933
1934 dirp += system_dirs_len[cnt] + 1;
1935 ++cnt;
1936 }
1937 while (cnt < nsystem_dirs_len);
1938 }
1939
1940 if (cached != NULL)
1941 {
1942 fd = open_verify (cached, &fb);
1943 if (__builtin_expect (fd != -1, 1))
1944 {
1945 realname = local_strdup (cached);
1946 if (realname == NULL)
1947 {
1948 __close (fd);
1949 fd = -1;
1950 }
1951 }
1952 }
1953 }
1954 }
1955
1956 /* Finally, try the default path. */
1957 if (fd == -1
1958 && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
1959 || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
1960 && rtld_search_dirs.dirs != (void *) -1)
1961 fd = open_path (name, namelen, preloaded, &rtld_search_dirs,
1962 &realname, &fb);
1963
1964 /* Add another newline when we are tracing the library loading. */
1965 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1966 _dl_debug_printf ("\n");
1967 }
1968 else
1969 {
1970 /* The path may contain dynamic string tokens. */
1971 realname = (loader
1972 ? expand_dynamic_string_token (loader, name)
1973 : local_strdup (name));
1974 if (realname == NULL)
1975 fd = -1;
1976 else
1977 {
1978 fd = open_verify (realname, &fb);
1979 if (__builtin_expect (fd, 0) == -1)
1980 free (realname);
1981 }
1982 }
1983
1984 /* In case the LOADER information has only been provided to get to
1985 the appropriate RUNPATH/RPATH information we do not need it
1986 anymore. */
1987 if (mode & __RTLD_CALLMAP)
1988 loader = NULL;
1989
1990 if (__builtin_expect (fd, 0) == -1)
1991 {
1992 if (trace_mode
1993 && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
1994 {
1995 /* We haven't found an appropriate library. But since we
1996 are only interested in the list of libraries this isn't
1997 so severe. Fake an entry with all the information we
1998 have. */
1999 static const Elf_Symndx dummy_bucket = STN_UNDEF;
2000
2001 /* Enter the new object in the list of loaded objects. */
2002 if ((name_copy = local_strdup (name)) == NULL
2003 || (l = _dl_new_object (name_copy, name, type, loader,
2004 mode, nsid)) == NULL)
2005 _dl_signal_error (ENOMEM, name, NULL,
2006 N_("cannot create shared object descriptor"));
2007 /* Signal that this is a faked entry. */
2008 l->l_faked = 1;
2009 /* Since the descriptor is initialized with zero we do not
2010              have to do this here.
2011 l->l_reserved = 0; */
2012 l->l_buckets = &dummy_bucket;
2013 l->l_nbuckets = 1;
2014 l->l_relocated = 1;
2015
2016 return l;
2017 }
2018 else
2019 _dl_signal_error (errno, name, NULL,
2020 N_("cannot open shared object file"));
2021 }
2022
2023 void *stack_end = __libc_stack_end;
2024 return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
2025 &stack_end, nsid);
2026 }
2027
2028
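/* Fill in the Dl_serinfo structure SI with the search path that would be
   used to find a dependency of LOADER: its RPATH chain, LD_LIBRARY_PATH,
   its RUNPATH and the default directories.  When COUNTING is true only
   dls_cnt and dls_size are computed; otherwise the dls_serpath entries
   are filled in.  */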
2029 void
2030 internal_function
2031 _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
2032 {
2033 if (counting)
2034 {
2035 si->dls_cnt = 0;
2036 si->dls_size = 0;
2037 }
2038
2039 unsigned int idx = 0;
2040 char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
2041 void add_path (const struct r_search_path_struct *sps, unsigned int flags)
2042 # define add_path(sps, flags) add_path(sps, 0) /* XXX */
2043 {
2044 if (sps->dirs != (void *) -1)
2045 {
2046 struct r_search_path_elem **dirs = sps->dirs;
2047 do
2048 {
2049 const struct r_search_path_elem *const r = *dirs++;
2050 if (counting)
2051 {
2052 si->dls_cnt++;
2053 si->dls_size += r->dirnamelen;
2054 }
2055 else
2056 {
2057 Dl_serpath *const sp = &si->dls_serpath[idx++];
2058 sp->dls_name = allocptr;
2059 allocptr = __mempcpy (allocptr,
2060 r->dirname, r->dirnamelen - 1);
2061 *allocptr++ = '\0';
2062 sp->dls_flags = flags;
2063 }
2064 }
2065 while (*dirs != NULL);
2066 }
2067 }
2068
2069 /* When the object has the RUNPATH information we don't use any RPATHs. */
2070 if (loader->l_info[DT_RUNPATH] == NULL)
2071 {
2072 /* First try the DT_RPATH of the dependent object that caused NAME
2073 to be loaded. Then that object's dependent, and on up. */
2074
2075 struct link_map *l = loader;
2076 do
2077 {
2078 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2079 add_path (&l->l_rpath_dirs, XXX_RPATH);
2080 l = l->l_loader;
2081 }
2082 while (l != NULL);
2083
2084 /* If dynamically linked, try the DT_RPATH of the executable itself. */
2085 if (loader->l_ns == LM_ID_BASE)
2086 {
2087 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2088 if (l != NULL && l->l_type != lt_loaded && l != loader)
2089 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2090 add_path (&l->l_rpath_dirs, XXX_RPATH);
2091 }
2092 }
2093
2094 /* Try the LD_LIBRARY_PATH environment variable. */
2095 add_path (&env_path_list, XXX_ENV);
2096
2097 /* Look at the RUNPATH information for this binary. */
2098 if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
2099 add_path (&loader->l_runpath_dirs, XXX_RUNPATH);
2100
2101 /* XXX
2102 Here is where ld.so.cache gets checked, but we don't have
2103 a way to indicate that in the results for Dl_serinfo. */
2104
2105 /* Finally, try the default path. */
2106 if (!(loader->l_flags_1 & DF_1_NODEFLIB))
2107 add_path (&rtld_search_dirs, XXX_default);
2108
2109 if (counting)
2110 /* Count the struct size before the string area, which we didn't
2111 know before we completed dls_cnt. */
2112 si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;
2113 }