1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2007, 2009, 2010, 2011 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <elf.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <libintl.h>
24 #include <stdbool.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <ldsodefs.h>
29 #include <bits/wordsize.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include "dynamic-link.h"
35 #include <abi-tag.h>
36 #include <stackinfo.h>
37 #include <caller.h>
38 #include <sysdep.h>
39
40 #include <dl-dst.h>
41
42 /* On some systems, no flag bits are given to specify file mapping. */
43 #ifndef MAP_FILE
44 # define MAP_FILE 0
45 #endif
46
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version.
54
55 To make up for the lack and avoid the overwriting problem,
56 what Linux does have is MAP_DENYWRITE. This prevents anyone
57 from modifying the file while we have it mapped. */
58 #ifndef MAP_COPY
59 # ifdef MAP_DENYWRITE
60 # define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
61 # else
62 # define MAP_COPY MAP_PRIVATE
63 # endif
64 #endif
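/* With these definitions a file-backed segment mapping on Linux ends up
   as, e.g., __mmap (addr, len, prot, MAP_PRIVATE | MAP_DENYWRITE | MAP_FILE,
   fd, off).  Note that Linux kernels ignore MAP_DENYWRITE requested via
   mmap, so the overwriting problem described above is at best mitigated,
   not solved.  */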
65
66 /* Some systems link their relocatable objects at a base address other
67 than 0. We want to know that base address so that we can
68 subtract this address from the segment addresses during mapping.
69 This results in a more efficient address space usage. Defaults to
70 zero for almost all systems. */
71 #ifndef MAP_BASE_ADDR
72 # define MAP_BASE_ADDR(l) 0
73 #endif
74
75
76 #include <endian.h>
77 #if BYTE_ORDER == BIG_ENDIAN
78 # define byteorder ELFDATA2MSB
79 #elif BYTE_ORDER == LITTLE_ENDIAN
80 # define byteorder ELFDATA2LSB
81 #else
82 # error "Unknown BYTE_ORDER " BYTE_ORDER
83 # define byteorder ELFDATANONE
84 #endif
85
86 #define STRING(x) __STRING (x)
87
88 /* Handle situations where we have a preferred location in memory for
89 the shared objects. */
90 #ifdef ELF_PREFERRED_ADDRESS_DATA
91 ELF_PREFERRED_ADDRESS_DATA;
92 #endif
93 #ifndef ELF_PREFERRED_ADDRESS
94 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
95 #endif
96 #ifndef ELF_FIXED_ADDRESS
97 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
98 #endif
99
100
101 int __stack_prot attribute_hidden attribute_relro
102 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
103 = PROT_GROWSDOWN;
104 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
105 = PROT_GROWSUP;
106 #else
107 = 0;
108 #endif
109
110
111 /* Type for the buffer we put the ELF header and hopefully the program
112 header. This buffer does not really have to be too large. In most
113 cases the program header follows the ELF header directly. If this
114 is not the case all bets are off and no matter how large we make the
115 buffer we still might not get it read. This means the only
116 question is how large are the ELF and program header combined. The
117 ELF header in 32-bit files is 52 bytes long and in 64-bit files is 64
118 bytes long. Each program header entry is again 32 and 56 bytes
119 long respectively. I.e., even with a file which has 10 program
120 header entries we only have to read 372B/624B respectively. Add to
121 this a bit of margin for program notes and reading 512B and 832B
122 for 32-bit and 64-bit files respectively is enough. If this
123 heuristic should really fail for some file the code in
124 `_dl_map_object_from_fd' knows how to recover. */
125 struct filebuf
126 {
127 ssize_t len;
128 #if __WORDSIZE == 32
129 # define FILEBUF_SIZE 512
130 #else
131 # define FILEBUF_SIZE 832
132 #endif
133 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
134 };
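/* Worked example of the estimate above: a 64-bit file with 10 program
   header entries needs 64 + 10 * 56 = 624 bytes, a 32-bit file
   52 + 10 * 32 = 372 bytes.  FILEBUF_SIZE (832 and 512 respectively)
   therefore leaves roughly 200 and 140 bytes of margin for notes.  */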
135
136 /* This is the decomposed LD_LIBRARY_PATH search path. */
137 static struct r_search_path_struct env_path_list attribute_relro;
138
139 /* List of the hardware capabilities we might end up using. */
140 static const struct r_strlenpair *capstr attribute_relro;
141 static size_t ncapstr attribute_relro;
142 static size_t max_capstrlen attribute_relro;
143
144
145 /* Get the generated information about the trusted directories. */
146 #include "trusted-dirs.h"
147
148 static const char system_dirs[] = SYSTEM_DIRS;
149 static const size_t system_dirs_len[] =
150 {
151 SYSTEM_DIRS_LEN
152 };
153 #define nsystem_dirs_len \
154 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
155
156
157 /* Local version of `strdup' function. */
158 static char *
159 local_strdup (const char *s)
160 {
161 size_t len = strlen (s) + 1;
162 void *new = malloc (len);
163
164 if (new == NULL)
165 return NULL;
166
167 return (char *) memcpy (new, s, len);
168 }
169
170
171 static bool
172 is_trusted_path (const char *path, size_t len)
173 {
174 const char *trun = system_dirs;
175
176 for (size_t idx = 0; idx < nsystem_dirs_len; ++idx)
177 {
178 if (len == system_dirs_len[idx] && memcmp (trun, path, len) == 0)
179 /* Found it. */
180 return true;
181
182 trun += system_dirs_len[idx] + 1;
183 }
184
185 return false;
186 }
187
188
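/* Like is_trusted_path, but PATH is first normalized: "." and ".."
   components and doubled slashes are collapsed, and the result only has
   to start with one of the trusted directories.  For example (using
   illustrative paths), "/usr/lib/../lib/./foo/" normalizes to
   "/usr/lib/foo/" and is accepted when "/usr/lib/" is a system
   directory.  */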
189 static bool
190 is_trusted_path_normalize (const char *path, size_t len)
191 {
192 if (len == 0)
193 return false;
194
195 if (*path == ':')
196 {
197 ++path;
198 --len;
199 }
200
201 char *npath = (char *) alloca (len + 2);
202 char *wnp = npath;
203 while (*path != '\0')
204 {
205 if (path[0] == '/')
206 {
207 if (path[1] == '.')
208 {
209 if (path[2] == '.' && (path[3] == '/' || path[3] == '\0'))
210 {
211 while (wnp > npath && *--wnp != '/')
212 ;
213 path += 3;
214 continue;
215 }
216 else if (path[2] == '/' || path[2] == '\0')
217 {
218 path += 2;
219 continue;
220 }
221 }
222
223 if (wnp > npath && wnp[-1] == '/')
224 {
225 ++path;
226 continue;
227 }
228 }
229
230 *wnp++ = *path++;
231 }
232
233 if (wnp == npath || wnp[-1] != '/')
234 *wnp++ = '/';
235
236 const char *trun = system_dirs;
237
238 for (size_t idx = 0; idx < nsystem_dirs_len; ++idx)
239 {
240 if (wnp - npath >= system_dirs_len[idx]
241 && memcmp (trun, npath, system_dirs_len[idx]) == 0)
242 /* Found it. */
243 return true;
244
245 trun += system_dirs_len[idx] + 1;
246 }
247
248 return false;
249 }
250
251
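/* Check whether NAME, which points just past a '$', starts with the
   dynamic string token STR and return the token's length, or 0 if it
   does not match.  For example, with STR "ORIGIN" both "ORIGIN/../lib"
   and "{ORIGIN}/../lib" match, returning 6 and 8 respectively (the
   braces are counted).  With SECURE set the token must additionally
   begin its path element.  */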
252 static size_t
253 is_dst (const char *start, const char *name, const char *str,
254 int is_path, int secure)
255 {
256 size_t len;
257 bool is_curly = false;
258
259 if (name[0] == '{')
260 {
261 is_curly = true;
262 ++name;
263 }
264
265 len = 0;
266 while (name[len] == str[len] && name[len] != '\0')
267 ++len;
268
269 if (is_curly)
270 {
271 if (name[len] != '}')
272 return 0;
273
274 /* Point again at the beginning of the name. */
275 --name;
276 /* Skip over closing curly brace and adjust for the --name. */
277 len += 2;
278 }
279 else if (name[len] != '\0' && name[len] != '/'
280 && (!is_path || name[len] != ':'))
281 return 0;
282
283 if (__builtin_expect (secure, 0)
284 && ((name[len] != '\0' && name[len] != '/'
285 && (!is_path || name[len] != ':'))
286 || (name != start + 1 && (!is_path || name[-2] != ':'))))
287 return 0;
288
289 return len;
290 }
291
292
293 size_t
294 _dl_dst_count (const char *name, int is_path)
295 {
296 const char *const start = name;
297 size_t cnt = 0;
298
299 do
300 {
301 size_t len;
302
303 /* $ORIGIN is not expanded for SUID/SGID programs (except if it
304 is $ORIGIN alone) and it must always appear first in path. */
305 ++name;
306 if ((len = is_dst (start, name, "ORIGIN", is_path,
307 INTUSE(__libc_enable_secure))) != 0
308 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
309 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
310 ++cnt;
311
312 name = strchr (name + len, '$');
313 }
314 while (name != NULL);
315
316 return cnt;
317 }
318
319
320 char *
321 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
322 int is_path)
323 {
324 const char *const start = name;
325
326 /* Now fill the result path. While copying over the string we keep
327 track of the start of the last path element. When we come across
328 a DST we copy over the value or (if the value is not available)
329 leave the entire path element out. */
330 char *wp = result;
331 char *last_elem = result;
332 bool check_for_trusted = false;
333
334 do
335 {
336 if (__builtin_expect (*name == '$', 0))
337 {
338 const char *repl = NULL;
339 size_t len;
340
341 ++name;
342 if ((len = is_dst (start, name, "ORIGIN", is_path,
343 INTUSE(__libc_enable_secure))) != 0)
344 {
345 #ifndef SHARED
346 if (l == NULL)
347 repl = _dl_get_origin ();
348 else
349 #endif
350 repl = l->l_origin;
351
352 check_for_trusted = (INTUSE(__libc_enable_secure)
353 && l->l_type == lt_executable);
354 }
355 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
356 repl = GLRO(dl_platform);
357 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
358 repl = DL_DST_LIB;
359
360 if (repl != NULL && repl != (const char *) -1)
361 {
362 wp = __stpcpy (wp, repl);
363 name += len;
364 }
365 else if (len > 1)
366 {
367 /* We cannot use this path element, the value of the
368 replacement is unknown. */
369 wp = last_elem;
370 name += len;
371 while (*name != '\0' && (!is_path || *name != ':'))
372 ++name;
373 /* Also skip following colon if this is the first rpath
374 element, but keep an empty element at the end. */
375 if (wp == result && is_path && *name == ':' && name[1] != '\0')
376 ++name;
377 }
378 else
379 /* No DST we recognize. */
380 *wp++ = '$';
381 }
382 else
383 {
384 *wp++ = *name++;
385 if (is_path && *name == ':')
386 {
387 /* In SUID/SGID programs, after $ORIGIN expansion the
388 normalized path must be rooted in one of the trusted
389 directories. */
390 if (__builtin_expect (check_for_trusted, false)
391 && !is_trusted_path_normalize (last_elem, wp - last_elem))
392 wp = last_elem;
393 else
394 last_elem = wp;
395
396 check_for_trusted = false;
397 }
398 }
399 }
400 while (*name != '\0');
401
402 /* In SUID/SGID programs, after $ORIGIN expansion the normalized
403 path must be rooted in one of the trusted directories. */
404 if (__builtin_expect (check_for_trusted, false)
405 && !is_trusted_path_normalize (last_elem, wp - last_elem))
406 wp = last_elem;
407
408 *wp = '\0';
409
410 return result;
411 }
412
413
414 /* Return copy of argument with all recognized dynamic string tokens
415 ($ORIGIN and $PLATFORM for now) replaced. On some platforms it
416 might not be possible to determine the path from which the object
417 belonging to the map is loaded. In this case the path element
418 containing $ORIGIN is left out. */
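/* For example, if the referencing object lives in /opt/app/bin and has
   DT_RUNPATH "$ORIGIN/../lib", the string expands to "/opt/app/bin/../lib".
   Such entries are typically created at link time with something like
   "gcc -Wl,-rpath,'$ORIGIN/../lib' ..." (illustrative paths).  */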
419 static char *
420 expand_dynamic_string_token (struct link_map *l, const char *s, int is_path)
421 {
422 /* We make two runs over the string. First we determine how large the
423 resulting string is and then we copy it over. Since this is no
424 frequently executed operation we are looking here not for performance
425 but rather for code size. */
426 size_t cnt;
427 size_t total;
428 char *result;
429
430 /* Determine the number of DST elements. */
431 cnt = DL_DST_COUNT (s, is_path);
432
433 /* If we do not have to replace anything simply copy the string. */
434 if (__builtin_expect (cnt, 0) == 0)
435 return local_strdup (s);
436
437 /* Determine the length of the substituted string. */
438 total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
439
440 /* Allocate the necessary memory. */
441 result = (char *) malloc (total + 1);
442 if (result == NULL)
443 return NULL;
444
445 return _dl_dst_substitute (l, s, result, is_path);
446 }
447
448
449 /* Add `name' to the list of names for a particular shared object.
450 The string is copied into a newly allocated record, so the caller keeps
451 ownership of `name'. Nothing is done if the object already has this
452 name. */
453 static void
454 internal_function
455 add_name_to_object (struct link_map *l, const char *name)
456 {
457 struct libname_list *lnp, *lastp;
458 struct libname_list *newname;
459 size_t name_len;
460
461 lastp = NULL;
462 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
463 if (strcmp (name, lnp->name) == 0)
464 return;
465
466 name_len = strlen (name) + 1;
467 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
468 if (newname == NULL)
469 {
470 /* No more memory. */
471 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
472 return;
473 }
474 /* The object should have a libname set from _dl_new_object. */
475 assert (lastp != NULL);
476
477 newname->name = memcpy (newname + 1, name, name_len);
478 newname->next = NULL;
479 newname->dont_free = 0;
480 lastp->next = newname;
481 }
482
483 /* Standard search directories. */
484 static struct r_search_path_struct rtld_search_dirs attribute_relro;
485
486 static size_t max_dirnamelen;
487
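/* Split RPATH at the separator characters in SEP and append an entry for
   each directory to RESULT, reusing entries already on the global
   GL(dl_all_dirs) list.  For example (illustrative paths), with SEP ":"
   the string "/opt/app/lib::/usr/local/lib/" yields the elements
   "/opt/app/lib/", "./" (an empty element means the current directory)
   and "/usr/local/lib/".  */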
488 static struct r_search_path_elem **
489 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
490 int check_trusted, const char *what, const char *where)
491 {
492 char *cp;
493 size_t nelems = 0;
494
495 while ((cp = __strsep (&rpath, sep)) != NULL)
496 {
497 struct r_search_path_elem *dirp;
498 size_t len = strlen (cp);
499
500 /* `strsep' can pass an empty string. This has to be
501 interpreted as `use the current directory'. */
502 if (len == 0)
503 {
504 static const char curwd[] = "./";
505 cp = (char *) curwd;
506 }
507
508 /* Remove trailing slashes (except for "/"). */
509 while (len > 1 && cp[len - 1] == '/')
510 --len;
511
512 /* Now add one if there is none so far. */
513 if (len > 0 && cp[len - 1] != '/')
514 cp[len++] = '/';
515
516 /* Make sure we don't use untrusted directories if we run SUID. */
517 if (__builtin_expect (check_trusted, 0) && !is_trusted_path (cp, len))
518 continue;
519
520 /* See if this directory is already known. */
521 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
522 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
523 break;
524
525 if (dirp != NULL)
526 {
527 /* It is available, see whether it's on our own list. */
528 size_t cnt;
529 for (cnt = 0; cnt < nelems; ++cnt)
530 if (result[cnt] == dirp)
531 break;
532
533 if (cnt == nelems)
534 result[nelems++] = dirp;
535 }
536 else
537 {
538 size_t cnt;
539 enum r_dir_status init_val;
540 size_t where_len = where ? strlen (where) + 1 : 0;
541
542 /* It's a new directory. Create an entry and add it. */
543 dirp = (struct r_search_path_elem *)
544 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
545 + where_len + len + 1);
546 if (dirp == NULL)
547 _dl_signal_error (ENOMEM, NULL, NULL,
548 N_("cannot create cache for search path"));
549
550 dirp->dirname = ((char *) dirp + sizeof (*dirp)
551 + ncapstr * sizeof (enum r_dir_status));
552 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
553 dirp->dirnamelen = len;
554
555 if (len > max_dirnamelen)
556 max_dirnamelen = len;
557
558 /* We have to make sure all the relative directories are
559 never ignored. The current directory might change and
560 all our saved information would be void. */
561 init_val = cp[0] != '/' ? existing : unknown;
562 for (cnt = 0; cnt < ncapstr; ++cnt)
563 dirp->status[cnt] = init_val;
564
565 dirp->what = what;
566 if (__builtin_expect (where != NULL, 1))
567 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
568 + (ncapstr * sizeof (enum r_dir_status)),
569 where, where_len);
570 else
571 dirp->where = NULL;
572
573 dirp->next = GL(dl_all_dirs);
574 GL(dl_all_dirs) = dirp;
575
576 /* Put it in the result array. */
577 result[nelems++] = dirp;
578 }
579 }
580
581 /* Terminate the array. */
582 result[nelems] = NULL;
583
584 return result;
585 }
586
587
588 static bool
589 internal_function
590 decompose_rpath (struct r_search_path_struct *sps,
591 const char *rpath, struct link_map *l, const char *what)
592 {
593 /* Make a copy we can work with. */
594 const char *where = l->l_name;
595 char *copy;
596 char *cp;
597 struct r_search_path_elem **result;
598 size_t nelems;
599 /* Initialize to please the compiler. */
600 const char *errstring = NULL;
601
602 /* First see whether we must forget the RUNPATH and RPATH from this
603 object. */
604 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
605 && !INTUSE(__libc_enable_secure))
606 {
607 const char *inhp = GLRO(dl_inhibit_rpath);
608
609 do
610 {
611 const char *wp = where;
612
613 while (*inhp == *wp && *wp != '\0')
614 {
615 ++inhp;
616 ++wp;
617 }
618
619 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
620 {
621 /* This object is on the list of objects for which the
622 RUNPATH and RPATH must not be used. */
623 sps->dirs = (void *) -1;
624 return false;
625 }
626
627 while (*inhp != '\0')
628 if (*inhp++ == ':')
629 break;
630 }
631 while (*inhp != '\0');
632 }
633
634 /* Make a writable copy. At the same time expand possible dynamic
635 string tokens. */
636 copy = expand_dynamic_string_token (l, rpath, 1);
637 if (copy == NULL)
638 {
639 errstring = N_("cannot create RUNPATH/RPATH copy");
640 goto signal_error;
641 }
642
643 /* Ignore empty rpaths. */
644 if (*copy == 0)
645 {
646 free (copy);
647 sps->dirs = (struct r_search_path_elem **) -1;
648 return false;
649 }
650
651 /* Count the number of necessary elements in the result array. */
652 nelems = 0;
653 for (cp = copy; *cp != '\0'; ++cp)
654 if (*cp == ':')
655 ++nelems;
656
657 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
658 number of necessary entries. */
659 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
660 * sizeof (*result));
661 if (result == NULL)
662 {
663 free (copy);
664 errstring = N_("cannot create cache for search path");
665 signal_error:
666 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
667 }
668
669 fillin_rpath (copy, result, ":", 0, what, where);
670
671 /* Free the copied RPATH string. `fillin_rpath' makes its own copies if
672 necessary. */
673 free (copy);
674
675 sps->dirs = result;
676 /* The caller will change this value if we haven't used a real malloc. */
677 sps->malloced = 1;
678 return true;
679 }
680
681 /* Make sure cached path information is stored in *SP
682 and return true if there are any paths to search there. */
683 static bool
684 cache_rpath (struct link_map *l,
685 struct r_search_path_struct *sp,
686 int tag,
687 const char *what)
688 {
689 if (sp->dirs == (void *) -1)
690 return false;
691
692 if (sp->dirs != NULL)
693 return true;
694
695 if (l->l_info[tag] == NULL)
696 {
697 /* There is no path. */
698 sp->dirs = (void *) -1;
699 return false;
700 }
701
702 /* Make sure the cache information is available. */
703 return decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
704 + l->l_info[tag]->d_un.d_val),
705 l, what);
706 }
707
708
709 void
710 internal_function
711 _dl_init_paths (const char *llp)
712 {
713 size_t idx;
714 const char *strp;
715 struct r_search_path_elem *pelem, **aelem;
716 size_t round_size;
717 #ifdef SHARED
718 struct link_map *l;
719 #endif
720 /* Initialize to please the compiler. */
721 const char *errstring = NULL;
722
723 /* Fill in the information about the application's RPATH and the
724 directories addressed by the LD_LIBRARY_PATH environment variable. */
725
726 /* Get the capabilities. */
727 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
728 &ncapstr, &max_capstrlen);
729
730 /* First set up the rest of the default search directory entries. */
731 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
732 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
733 if (rtld_search_dirs.dirs == NULL)
734 {
735 errstring = N_("cannot create search path array");
736 signal_error:
737 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
738 }
739
740 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
741 + ncapstr * sizeof (enum r_dir_status))
742 / sizeof (struct r_search_path_elem));
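/* ROUND_SIZE is the size of one directory entry (the struct plus its
   per-hwcap status array) expressed in units of struct r_search_path_elem
   and rounded up, so the "pelem += round_size" steps below keep each
   entry properly aligned.  */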
743
744 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
745 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
746 * round_size * sizeof (struct r_search_path_elem));
747 if (rtld_search_dirs.dirs[0] == NULL)
748 {
749 errstring = N_("cannot create cache for search path");
750 goto signal_error;
751 }
752
753 rtld_search_dirs.malloced = 0;
754 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
755 strp = system_dirs;
756 idx = 0;
757
758 do
759 {
760 size_t cnt;
761
762 *aelem++ = pelem;
763
764 pelem->what = "system search path";
765 pelem->where = NULL;
766
767 pelem->dirname = strp;
768 pelem->dirnamelen = system_dirs_len[idx];
769 strp += system_dirs_len[idx] + 1;
770
771 /* System paths must be absolute. */
772 assert (pelem->dirname[0] == '/');
773 for (cnt = 0; cnt < ncapstr; ++cnt)
774 pelem->status[cnt] = unknown;
775
776 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
777
778 pelem += round_size;
779 }
780 while (idx < nsystem_dirs_len);
781
782 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
783 *aelem = NULL;
784
785 #ifdef SHARED
786 /* This points to the map of the main object. */
787 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
788 if (l != NULL)
789 {
790 assert (l->l_type != lt_loaded);
791
792 if (l->l_info[DT_RUNPATH])
793 {
794 /* Allocate room for the search path and fill in information
795 from RUNPATH. */
796 decompose_rpath (&l->l_runpath_dirs,
797 (const void *) (D_PTR (l, l_info[DT_STRTAB])
798 + l->l_info[DT_RUNPATH]->d_un.d_val),
799 l, "RUNPATH");
800
801 /* The RPATH is ignored. */
802 l->l_rpath_dirs.dirs = (void *) -1;
803 }
804 else
805 {
806 l->l_runpath_dirs.dirs = (void *) -1;
807
808 if (l->l_info[DT_RPATH])
809 {
810 /* Allocate room for the search path and fill in information
811 from RPATH. */
812 decompose_rpath (&l->l_rpath_dirs,
813 (const void *) (D_PTR (l, l_info[DT_STRTAB])
814 + l->l_info[DT_RPATH]->d_un.d_val),
815 l, "RPATH");
816 l->l_rpath_dirs.malloced = 0;
817 }
818 else
819 l->l_rpath_dirs.dirs = (void *) -1;
820 }
821 }
822 #endif /* SHARED */
823
824 if (llp != NULL && *llp != '\0')
825 {
826 size_t nllp;
827 const char *cp = llp;
828 char *llp_tmp;
829
830 #ifdef SHARED
831 /* Expand DSTs. */
832 size_t cnt = DL_DST_COUNT (llp, 1);
833 if (__builtin_expect (cnt == 0, 1))
834 llp_tmp = strdupa (llp);
835 else
836 {
837 /* Determine the length of the substituted string. */
838 size_t total = DL_DST_REQUIRED (l, llp, strlen (llp), cnt);
839
840 /* Allocate the necessary memory. */
841 llp_tmp = (char *) alloca (total + 1);
842 llp_tmp = _dl_dst_substitute (l, llp, llp_tmp, 1);
843 }
844 #else
845 llp_tmp = strdupa (llp);
846 #endif
847
848 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
849 elements it has. */
850 nllp = 1;
851 while (*cp)
852 {
853 if (*cp == ':' || *cp == ';')
854 ++nllp;
855 ++cp;
856 }
857
858 env_path_list.dirs = (struct r_search_path_elem **)
859 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
860 if (env_path_list.dirs == NULL)
861 {
862 errstring = N_("cannot create cache for search path");
863 goto signal_error;
864 }
865
866 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
867 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
868 NULL);
869
870 if (env_path_list.dirs[0] == NULL)
871 {
872 free (env_path_list.dirs);
873 env_path_list.dirs = (void *) -1;
874 }
875
876 env_path_list.malloced = 0;
877 }
878 else
879 env_path_list.dirs = (void *) -1;
880 }
881
882
883 static void
884 __attribute__ ((noreturn, noinline))
885 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
886 const char *msg, struct r_debug *r)
887 {
888 /* The file might already be closed. */
889 if (fd != -1)
890 (void) __close (fd);
891 if (l != NULL && l->l_origin != (char *) -1l)
892 free ((char *) l->l_origin);
893 free (l);
894 free (realname);
895
896 if (r != NULL)
897 {
898 r->r_state = RT_CONSISTENT;
899 _dl_debug_state ();
900 }
901
902 _dl_signal_error (code, name, NULL, msg);
903 }
904
905
906 /* Map in the shared object NAME, actually located in REALNAME, and already
907 opened on FD. */
908
909 #ifndef EXTERNAL_MAP_FROM_FD
910 static
911 #endif
912 struct link_map *
913 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
914 char *realname, struct link_map *loader, int l_type,
915 int mode, void **stack_endp, Lmid_t nsid)
916 {
917 struct link_map *l = NULL;
918 const ElfW(Ehdr) *header;
919 const ElfW(Phdr) *phdr;
920 const ElfW(Phdr) *ph;
921 size_t maplength;
922 int type;
923 struct stat64 st;
924 /* Initialize to keep the compiler happy. */
925 const char *errstring = NULL;
926 int errval = 0;
927 struct r_debug *r = _dl_debug_initialize (0, nsid);
928 bool make_consistent = false;
929
930 /* Get file information. */
931 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
932 {
933 errstring = N_("cannot stat shared object");
934 call_lose_errno:
935 errval = errno;
936 call_lose:
937 lose (errval, fd, name, realname, l, errstring,
938 make_consistent ? r : NULL);
939 }
940
941 /* Look again to see if the real name matched another already loaded. */
942 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
943 if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
944 {
945 /* The object is already loaded.
946 Just bump its reference count and return it. */
947 __close (fd);
948
949 /* If the name is not in the list of names for this object add
950 it. */
951 free (realname);
952 add_name_to_object (l, name);
953
954 return l;
955 }
956
957 #ifdef SHARED
958 /* When loading into a namespace other than the base one we must
959 avoid loading ld.so since there can only be one copy. Ever. */
960 if (__builtin_expect (nsid != LM_ID_BASE, 0)
961 && ((st.st_ino == GL(dl_rtld_map).l_ino
962 && st.st_dev == GL(dl_rtld_map).l_dev)
963 || _dl_name_match_p (name, &GL(dl_rtld_map))))
964 {
965 /* This is indeed ld.so. Create a new link_map which refers to
966 the real one for almost everything. */
967 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
968 if (l == NULL)
969 goto fail_new;
970
971 /* Refer to the real descriptor. */
972 l->l_real = &GL(dl_rtld_map);
973
974 /* No need to bump the refcount of the real object, ld.so will
975 never be unloaded. */
976 __close (fd);
977
978 /* Add the map for the mirrored object to the object list. */
979 _dl_add_to_namespace_list (l, nsid);
980
981 return l;
982 }
983 #endif
984
985 if (mode & RTLD_NOLOAD)
986 {
987 /* We are not supposed to load the object unless it is already
988 loaded. So return now. */
989 free (realname);
990 __close (fd);
991 return NULL;
992 }
993
994 /* Print debugging message. */
995 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
996 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
997
998 /* This is the ELF header. We read it in `open_verify'. */
999 header = (void *) fbp->buf;
1000
1001 #ifndef MAP_ANON
1002 # define MAP_ANON 0
1003 if (_dl_zerofd == -1)
1004 {
1005 _dl_zerofd = _dl_sysdep_open_zero_fill ();
1006 if (_dl_zerofd == -1)
1007 {
1008 free (realname);
1009 __close (fd);
1010 _dl_signal_error (errno, NULL, NULL,
1011 N_("cannot open zero fill device"));
1012 }
1013 }
1014 #endif
1015
1016 /* Signal that we are going to add new objects. */
1017 if (r->r_state == RT_CONSISTENT)
1018 {
1019 #ifdef SHARED
1020 /* Auditing checkpoint: we are going to add new objects. */
1021 if ((mode & __RTLD_AUDIT) == 0
1022 && __builtin_expect (GLRO(dl_naudit) > 0, 0))
1023 {
1024 struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
1025 /* Do not call the functions for any auditing object. */
1026 if (head->l_auditing == 0)
1027 {
1028 struct audit_ifaces *afct = GLRO(dl_audit);
1029 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1030 {
1031 if (afct->activity != NULL)
1032 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
1033
1034 afct = afct->next;
1035 }
1036 }
1037 }
1038 #endif
1039
1040 /* Notify the debugger we have added some objects. We need to
1041 call _dl_debug_initialize in a static program in case dynamic
1042 linking has not been used before. */
1043 r->r_state = RT_ADD;
1044 _dl_debug_state ();
1045 make_consistent = true;
1046 }
1047 else
1048 assert (r->r_state == RT_ADD);
1049
1050 /* Enter the new object in the list of loaded objects. */
1051 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
1052 if (__builtin_expect (l == NULL, 0))
1053 {
1054 #ifdef SHARED
1055 fail_new:
1056 #endif
1057 errstring = N_("cannot create shared object descriptor");
1058 goto call_lose_errno;
1059 }
1060
1061 /* Extract the remaining details we need from the ELF header
1062 and then read in the program header table. */
1063 l->l_entry = header->e_entry;
1064 type = header->e_type;
1065 l->l_phnum = header->e_phnum;
1066
1067 maplength = header->e_phnum * sizeof (ElfW(Phdr));
1068 if (header->e_phoff + maplength <= (size_t) fbp->len)
1069 phdr = (void *) (fbp->buf + header->e_phoff);
1070 else
1071 {
1072 phdr = alloca (maplength);
1073 __lseek (fd, header->e_phoff, SEEK_SET);
1074 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1075 {
1076 errstring = N_("cannot read file data");
1077 goto call_lose_errno;
1078 }
1079 }
1080
1081 /* On most platforms presume that PT_GNU_STACK is absent and the stack is
1082 executable.  Other platforms default to a nonexecutable stack and don't
1083 need PT_GNU_STACK to do so.  */
1084 uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;
1085
1086 {
1087 /* Scan the program header table, collecting its load commands. */
1088 struct loadcmd
1089 {
1090 ElfW(Addr) mapstart, mapend, dataend, allocend;
1091 off_t mapoff;
1092 int prot;
1093 } loadcmds[l->l_phnum], *c;
1094 size_t nloadcmds = 0;
1095 bool has_holes = false;
1096
1097 /* The struct is initialized to zero so this is not necessary:
1098 l->l_ld = 0;
1099 l->l_phdr = 0;
1100 l->l_addr = 0; */
1101 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
1102 switch (ph->p_type)
1103 {
1104 /* These entries tell us where to find things once the file's
1105 segments are mapped in. We record the addresses it says
1106 verbatim, and later correct for the run-time load address. */
1107 case PT_DYNAMIC:
1108 l->l_ld = (void *) ph->p_vaddr;
1109 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
1110 break;
1111
1112 case PT_PHDR:
1113 l->l_phdr = (void *) ph->p_vaddr;
1114 break;
1115
1116 case PT_LOAD:
1117 /* A load command tells us to map in part of the file.
1118 We record the load commands and process them all later. */
1119 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
1120 0))
1121 {
1122 errstring = N_("ELF load command alignment not page-aligned");
1123 goto call_lose;
1124 }
1125 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
1126 & (ph->p_align - 1)) != 0, 0))
1127 {
1128 errstring
1129 = N_("ELF load command address/offset not properly aligned");
1130 goto call_lose;
1131 }
1132
1133 c = &loadcmds[nloadcmds++];
1134 c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
1135 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
1136 & ~(GLRO(dl_pagesize) - 1));
1137 c->dataend = ph->p_vaddr + ph->p_filesz;
1138 c->allocend = ph->p_vaddr + ph->p_memsz;
1139 c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
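/* For example (illustrative values, 4096-byte pages): a segment with
   p_vaddr 0x10034, p_offset 0x34, p_filesz 0x1f00 and p_memsz 0x3000
   gives mapstart 0x10000, mapend 0x12000, dataend 0x11f34,
   allocend 0x13034 and mapoff 0.  */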
1140
1141 /* Determine whether there is a gap between the last segment
1142 and this one. */
1143 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1144 has_holes = true;
1145
1146 /* Optimize a common case. */
1147 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1148 c->prot = (PF_TO_PROT
1149 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
1150 #else
1151 c->prot = 0;
1152 if (ph->p_flags & PF_R)
1153 c->prot |= PROT_READ;
1154 if (ph->p_flags & PF_W)
1155 c->prot |= PROT_WRITE;
1156 if (ph->p_flags & PF_X)
1157 c->prot |= PROT_EXEC;
1158 #endif
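/* PF_TO_PROT is assumed to pack the PROT_* combination for each of the
   eight possible flag values into one 4-bit nibble, so shifting by
   (p_flags & 7) * 4 selects it; e.g. PF_R|PF_X (5) yields
   PROT_READ|PROT_EXEC.  */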
1159 break;
1160
1161 case PT_TLS:
1162 if (ph->p_memsz == 0)
1163 /* Nothing to do for an empty segment. */
1164 break;
1165
1166 l->l_tls_blocksize = ph->p_memsz;
1167 l->l_tls_align = ph->p_align;
1168 if (ph->p_align == 0)
1169 l->l_tls_firstbyte_offset = 0;
1170 else
1171 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1172 l->l_tls_initimage_size = ph->p_filesz;
1173 /* Since we don't know the load address yet only store the
1174 offset. We will adjust it later. */
1175 l->l_tls_initimage = (void *) ph->p_vaddr;
1176
1177 /* If not loading the initial set of shared libraries,
1178 check whether we should permit loading a TLS segment. */
1179 if (__builtin_expect (l->l_type == lt_library, 1)
1180 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1181 not set up TLS data structures, so don't use them now. */
1182 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1183 {
1184 /* Assign the next available module ID. */
1185 l->l_tls_modid = _dl_next_tls_modid ();
1186 break;
1187 }
1188
1189 #ifdef SHARED
1190 if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
1191 /* We are loading the executable itself when the dynamic linker
1192 was executed directly. The setup will happen later. */
1193 break;
1194
1195 /* In a static binary there is no way to tell if we dynamically
1196 loaded libpthread. */
1197 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1198 #endif
1199 {
1200 /* We have not yet loaded libpthread.
1201 We can do the TLS setup right now! */
1202
1203 void *tcb;
1204
1205 /* The first call allocates TLS bookkeeping data structures.
1206 Then we allocate the TCB for the initial thread. */
1207 if (__builtin_expect (_dl_tls_setup (), 0)
1208 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1209 0))
1210 {
1211 errval = ENOMEM;
1212 errstring = N_("\
1213 cannot allocate TLS data structures for initial thread");
1214 goto call_lose;
1215 }
1216
1217 /* Now we install the TCB in the thread register. */
1218 errstring = TLS_INIT_TP (tcb, 0);
1219 if (__builtin_expect (errstring == NULL, 1))
1220 {
1221 /* Now we are all good. */
1222 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1223 break;
1224 }
1225
1226 /* The kernel is too old or somesuch. */
1227 errval = 0;
1228 _dl_deallocate_tls (tcb, 1);
1229 goto call_lose;
1230 }
1231
1232 /* Uh-oh, the binary expects TLS support but we cannot
1233 provide it. */
1234 errval = 0;
1235 errstring = N_("cannot handle TLS data");
1236 goto call_lose;
1237 break;
1238
1239 case PT_GNU_STACK:
1240 stack_flags = ph->p_flags;
1241 break;
1242
1243 case PT_GNU_RELRO:
1244 l->l_relro_addr = ph->p_vaddr;
1245 l->l_relro_size = ph->p_memsz;
1246 break;
1247 }
1248
1249 if (__builtin_expect (nloadcmds == 0, 0))
1250 {
1251 /* This only happens for a bogus object that will be caught with
1252 another error below. But we don't want to go through the
1253 calculations below using NLOADCMDS - 1. */
1254 errstring = N_("object file has no loadable segments");
1255 goto call_lose;
1256 }
1257
1258 /* Now process the load commands and map segments into memory. */
1259 c = loadcmds;
1260
1261 /* Length of the sections to be loaded. */
1262 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1263
1264 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1265 {
1266 /* This is a position-independent shared object. We can let the
1267 kernel map it anywhere it likes, but we must have space for all
1268 the segments in their specified positions relative to the first.
1269 So we map the first segment without MAP_FIXED, but with its
1270 extent increased to cover all the segments. Then we remove
1271 access from the excess portion, and there is known to be sufficient
1272 space there to remap the later segments.
1273
1274 As a refinement, sometimes we have an address that we would
1275 prefer to map such objects at; but this is only a preference,
1276 the OS can do whatever it likes. */
1277 ElfW(Addr) mappref;
1278 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1279 c->mapstart & GLRO(dl_use_load_bias))
1280 - MAP_BASE_ADDR (l));
1281
1282 /* Remember which part of the address space this object uses. */
1283 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1284 c->prot,
1285 MAP_COPY|MAP_FILE,
1286 fd, c->mapoff);
1287 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1288 {
1289 map_error:
1290 errstring = N_("failed to map segment from shared object");
1291 goto call_lose_errno;
1292 }
1293
1294 l->l_map_end = l->l_map_start + maplength;
1295 l->l_addr = l->l_map_start - c->mapstart;
1296
1297 if (has_holes)
1298 /* Change protection on the excess portion to disallow all access;
1299 the portions we do not remap later will be inaccessible as if
1300 unallocated. Then jump into the normal segment-mapping loop to
1301 handle the portion of the segment past the end of the file
1302 mapping. */
1303 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1304 loadcmds[nloadcmds - 1].mapstart - c->mapend,
1305 PROT_NONE);
1306
1307 l->l_contiguous = 1;
1308
1309 goto postmap;
1310 }
1311
1312 /* This object is loaded at a fixed address. This must never
1313 happen for objects loaded with dlopen(). */
1314 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1315 {
1316 errstring = N_("cannot dynamically load executable");
1317 goto call_lose;
1318 }
1319
1320 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1321 fixed. */
1322 ELF_FIXED_ADDRESS (loader, c->mapstart);
1323
1324
1325 /* Remember which part of the address space this object uses. */
1326 l->l_map_start = c->mapstart + l->l_addr;
1327 l->l_map_end = l->l_map_start + maplength;
1328 l->l_contiguous = !has_holes;
1329
1330 while (c < &loadcmds[nloadcmds])
1331 {
1332 if (c->mapend > c->mapstart
1333 /* Map the segment contents from the file. */
1334 && (__mmap ((void *) (l->l_addr + c->mapstart),
1335 c->mapend - c->mapstart, c->prot,
1336 MAP_FIXED|MAP_COPY|MAP_FILE,
1337 fd, c->mapoff)
1338 == MAP_FAILED))
1339 goto map_error;
1340
1341 postmap:
1342 if (c->prot & PROT_EXEC)
1343 l->l_text_end = l->l_addr + c->mapend;
1344
1345 if (l->l_phdr == 0
1346 && (ElfW(Off)) c->mapoff <= header->e_phoff
1347 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1348 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1349 /* Found the program header in this segment. */
1350 l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
1351
1352 if (c->allocend > c->dataend)
1353 {
1354 /* Extra zero pages should appear at the end of this segment,
1355 after the data mapped from the file. */
1356 ElfW(Addr) zero, zeroend, zeropage;
1357
1358 zero = l->l_addr + c->dataend;
1359 zeroend = l->l_addr + c->allocend;
1360 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1361 & ~(GLRO(dl_pagesize) - 1));
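/* Continuing the example above with l_addr 0: the file data ends at
   zero = 0x11f34 while the segment extends to zeroend = 0x13034, so
   zeropage = 0x12000.  The tail of the page containing 0x11f34 is
   cleared with memset below and the range 0x12000-0x13034 is mapped
   anonymously (illustrative values).  */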
1362
1363 if (zeroend < zeropage)
1364 /* All the extra data is in the last page of the segment.
1365 We can just zero it. */
1366 zeropage = zeroend;
1367
1368 if (zeropage > zero)
1369 {
1370 /* Zero the final part of the last page of the segment. */
1371 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1372 {
1373 /* Dag nab it. */
1374 if (__mprotect ((caddr_t) (zero
1375 & ~(GLRO(dl_pagesize) - 1)),
1376 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1377 {
1378 errstring = N_("cannot change memory protections");
1379 goto call_lose_errno;
1380 }
1381 }
1382 memset ((void *) zero, '\0', zeropage - zero);
1383 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1384 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1385 GLRO(dl_pagesize), c->prot);
1386 }
1387
1388 if (zeroend > zeropage)
1389 {
1390 /* Map the remaining zero pages in from the zero fill FD. */
1391 caddr_t mapat;
1392 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1393 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1394 -1, 0);
1395 if (__builtin_expect (mapat == MAP_FAILED, 0))
1396 {
1397 errstring = N_("cannot map zero-fill pages");
1398 goto call_lose_errno;
1399 }
1400 }
1401 }
1402
1403 ++c;
1404 }
1405 }
1406
1407 if (l->l_ld == 0)
1408 {
1409 if (__builtin_expect (type == ET_DYN, 0))
1410 {
1411 errstring = N_("object file has no dynamic section");
1412 goto call_lose;
1413 }
1414 }
1415 else
1416 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1417
1418 elf_get_dynamic_info (l, NULL);
1419
1420 /* Make sure we are not dlopen'ing an object that has the
1421 DF_1_NOOPEN flag set. */
1422 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1423 && (mode & __RTLD_DLOPEN))
1424 {
1425 /* We are not supposed to load this object. Free all resources. */
1426 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1427
1428 if (!l->l_libname->dont_free)
1429 free (l->l_libname);
1430
1431 if (l->l_phdr_allocated)
1432 free ((void *) l->l_phdr);
1433
1434 errstring = N_("shared object cannot be dlopen()ed");
1435 goto call_lose;
1436 }
1437
1438 if (l->l_phdr == NULL)
1439 {
1440 /* The program header is not contained in any of the segments.
1441 We have to allocate memory ourselves and copy it over from our
1442 temporary place. */
1443 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1444 * sizeof (ElfW(Phdr)));
1445 if (newp == NULL)
1446 {
1447 errstring = N_("cannot allocate memory for program header");
1448 goto call_lose_errno;
1449 }
1450
1451 l->l_phdr = memcpy (newp, phdr,
1452 (header->e_phnum * sizeof (ElfW(Phdr))));
1453 l->l_phdr_allocated = 1;
1454 }
1455 else
1456 /* Adjust the PT_PHDR value by the runtime load address. */
1457 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1458
1459 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1460 {
1461 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
1462 0) != 0)
1463 {
1464 errstring = N_("invalid caller");
1465 goto call_lose;
1466 }
1467
1468 /* The stack is presently not executable, but this module
1469 requires that it be executable. We must change the
1470 protection of the variable which contains the flags used in
1471 the mprotect calls. */
1472 #ifdef SHARED
1473 if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
1474 {
1475 const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
1476 const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
1477
1478 struct link_map *const m = &GL(dl_rtld_map);
1479 const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
1480 + m->l_relro_size)
1481 & -GLRO(dl_pagesize));
1482 if (__builtin_expect (p + s <= relro_end, 1))
1483 {
1484 /* The variable lies in the region protected by RELRO. */
1485 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1486 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1487 __mprotect ((void *) p, s, PROT_READ);
1488 }
1489 else
1490 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1491 }
1492 else
1493 #endif
1494 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1495
1496 #ifdef check_consistency
1497 check_consistency ();
1498 #endif
1499
1500 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1501 if (errval)
1502 {
1503 errstring = N_("\
1504 cannot enable executable stack as shared object requires");
1505 goto call_lose;
1506 }
1507 }
1508
1509 /* Adjust the address of the TLS initialization image. */
1510 if (l->l_tls_initimage != NULL)
1511 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1512
1513 /* We are done mapping in the file. We no longer need the descriptor. */
1514 if (__builtin_expect (__close (fd) != 0, 0))
1515 {
1516 errstring = N_("cannot close file descriptor");
1517 goto call_lose_errno;
1518 }
1519 /* Signal that we closed the file. */
1520 fd = -1;
1521
1522 if (l->l_type == lt_library && type == ET_EXEC)
1523 l->l_type = lt_executable;
1524
1525 l->l_entry += l->l_addr;
1526
1527 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1528 _dl_debug_printf ("\
1529 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1530 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1531 (int) sizeof (void *) * 2,
1532 (unsigned long int) l->l_ld,
1533 (int) sizeof (void *) * 2,
1534 (unsigned long int) l->l_addr,
1535 (int) sizeof (void *) * 2, maplength,
1536 (int) sizeof (void *) * 2,
1537 (unsigned long int) l->l_entry,
1538 (int) sizeof (void *) * 2,
1539 (unsigned long int) l->l_phdr,
1540 (int) sizeof (void *) * 2, l->l_phnum);
1541
1542 /* Set up the symbol hash table. */
1543 _dl_setup_hash (l);
1544
1545 /* If this object has DT_SYMBOLIC set, modify its scope now. We don't
1546 have to do this for the main map. */
1547 if ((mode & RTLD_DEEPBIND) == 0
1548 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1549 && &l->l_searchlist != l->l_scope[0])
1550 {
1551 /* Create an appropriate searchlist. It contains only this map.
1552 This is the definition of DT_SYMBOLIC in SysVr4. */
1553 l->l_symbolic_searchlist.r_list[0] = l;
1554 l->l_symbolic_searchlist.r_nlist = 1;
1555
1556 /* Now move the existing entries one back. */
1557 memmove (&l->l_scope[1], &l->l_scope[0],
1558 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1559
1560 /* Now add the new entry. */
1561 l->l_scope[0] = &l->l_symbolic_searchlist;
1562 }
1563
1564 /* Remember whether this object must be initialized first. */
1565 if (l->l_flags_1 & DF_1_INITFIRST)
1566 GL(dl_initfirst) = l;
1567
1568 /* Finally the file information. */
1569 l->l_dev = st.st_dev;
1570 l->l_ino = st.st_ino;
1571
1572 /* When we profile the SONAME might be needed for something other than
1573 loading. Add it right away. */
1574 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1575 && l->l_info[DT_SONAME] != NULL)
1576 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1577 + l->l_info[DT_SONAME]->d_un.d_val));
1578
1579 /* Now that the object is fully initialized add it to the object list. */
1580 _dl_add_to_namespace_list (l, nsid);
1581
1582 #ifdef SHARED
1583 /* Auditing checkpoint: we have a new object. */
1584 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
1585 && !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
1586 {
1587 struct audit_ifaces *afct = GLRO(dl_audit);
1588 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1589 {
1590 if (afct->objopen != NULL)
1591 {
1592 l->l_audit[cnt].bindflags
1593 = afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
1594
1595 l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
1596 }
1597
1598 afct = afct->next;
1599 }
1600 }
1601 #endif
1602
1603 return l;
1604 }
1605 \f
1606 /* Print search path. */
1607 static void
1608 print_search_path (struct r_search_path_elem **list,
1609 const char *what, const char *name)
1610 {
1611 char buf[max_dirnamelen + max_capstrlen];
1612 int first = 1;
1613
1614 _dl_debug_printf (" search path=");
1615
1616 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1617 {
1618 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1619 size_t cnt;
1620
1621 for (cnt = 0; cnt < ncapstr; ++cnt)
1622 if ((*list)->status[cnt] != nonexisting)
1623 {
1624 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1625 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1626 cp[0] = '\0';
1627 else
1628 cp[-1] = '\0';
1629
1630 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1631 first = 0;
1632 }
1633
1634 ++list;
1635 }
1636
1637 if (name != NULL)
1638 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1639 name[0] ? name : rtld_progname);
1640 else
1641 _dl_debug_printf_c ("\t\t(%s)\n", what);
1642 }
1643 \f
1644 /* Open a file and verify it is an ELF file for this architecture. We
1645 ignore only ELF files for other architectures. Non-ELF files and
1646 ELF files with different header information cause fatal errors since
1647 this could mean there is something wrong in the installation and the
1648 user might want to know about this. */
1649 static int
1650 open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
1651 int whatcode, bool *found_other_class, bool free_name)
1652 {
1653 /* This is the expected ELF header. */
1654 #define ELF32_CLASS ELFCLASS32
1655 #define ELF64_CLASS ELFCLASS64
1656 #ifndef VALID_ELF_HEADER
1657 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1658 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1659 # define VALID_ELF_ABIVERSION(osabi,ver) (ver == 0)
1660 #elif defined MORE_ELF_HEADER_DATA
1661 MORE_ELF_HEADER_DATA;
1662 #endif
1663 static const unsigned char expected[EI_NIDENT] =
1664 {
1665 [EI_MAG0] = ELFMAG0,
1666 [EI_MAG1] = ELFMAG1,
1667 [EI_MAG2] = ELFMAG2,
1668 [EI_MAG3] = ELFMAG3,
1669 [EI_CLASS] = ELFW(CLASS),
1670 [EI_DATA] = byteorder,
1671 [EI_VERSION] = EV_CURRENT,
1672 [EI_OSABI] = ELFOSABI_SYSV,
1673 [EI_ABIVERSION] = 0
1674 };
1675 static const struct
1676 {
1677 ElfW(Word) vendorlen;
1678 ElfW(Word) datalen;
1679 ElfW(Word) type;
1680 char vendor[4];
1681 } expected_note = { 4, 16, 1, "GNU" };
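/* A matching .note.ABI-tag carries, after the header above, four 32-bit
   words: the OS tag (0 for Linux) and the minimum kernel version as
   major, minor, patch.  For example a requirement of Linux 2.6.32 is
   stored as the words { 0, 2, 6, 32 } and packed below into
   osversion 0x020620.  */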
1682 /* Initialize it to make the compiler happy. */
1683 const char *errstring = NULL;
1684 int errval = 0;
1685
1686 #ifdef SHARED
1687 /* Give the auditing libraries a chance. */
1688 if (__builtin_expect (GLRO(dl_naudit) > 0, 0) && whatcode != 0
1689 && loader->l_auditing == 0)
1690 {
1691 struct audit_ifaces *afct = GLRO(dl_audit);
1692 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1693 {
1694 if (afct->objsearch != NULL)
1695 {
1696 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
1697 whatcode);
1698 if (name == NULL)
1699 /* Ignore the path. */
1700 return -1;
1701 }
1702
1703 afct = afct->next;
1704 }
1705 }
1706 #endif
1707
1708 /* Open the file. We always open files read-only. */
1709 int fd = __open (name, O_RDONLY | O_CLOEXEC);
1710 if (fd != -1)
1711 {
1712 ElfW(Ehdr) *ehdr;
1713 ElfW(Phdr) *phdr, *ph;
1714 ElfW(Word) *abi_note;
1715 unsigned int osversion;
1716 size_t maplength;
1717
1718 /* We successfully opened the file. Now verify it is a file
1719 we can use. */
1720 __set_errno (0);
1721 fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));
1722
1723 /* This is where the ELF header is loaded. */
1724 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1725 ehdr = (ElfW(Ehdr) *) fbp->buf;
1726
1727 /* Now run the tests. */
1728 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1729 {
1730 errval = errno;
1731 errstring = (errval == 0
1732 ? N_("file too short") : N_("cannot read file data"));
1733 call_lose:
1734 if (free_name)
1735 {
1736 char *realname = (char *) name;
1737 name = strdupa (realname);
1738 free (realname);
1739 }
1740 lose (errval, fd, name, NULL, NULL, errstring, NULL);
1741 }
1742
1743 /* See whether the ELF header is what we expect. */
1744 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1745 EI_ABIVERSION)
1746 || !VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1747 ehdr->e_ident[EI_ABIVERSION])
1748 || memcmp (&ehdr->e_ident[EI_PAD],
1749 &expected[EI_PAD],
1750 EI_NIDENT - EI_PAD) != 0,
1751 0))
1752 {
1753 /* Something is wrong. */
1754 const Elf32_Word *magp = (const void *) ehdr->e_ident;
1755 if (*magp !=
1756 #if BYTE_ORDER == LITTLE_ENDIAN
1757 ((ELFMAG0 << (EI_MAG0 * 8)) |
1758 (ELFMAG1 << (EI_MAG1 * 8)) |
1759 (ELFMAG2 << (EI_MAG2 * 8)) |
1760 (ELFMAG3 << (EI_MAG3 * 8)))
1761 #else
1762 ((ELFMAG0 << (EI_MAG3 * 8)) |
1763 (ELFMAG1 << (EI_MAG2 * 8)) |
1764 (ELFMAG2 << (EI_MAG1 * 8)) |
1765 (ELFMAG3 << (EI_MAG0 * 8)))
1766 #endif
1767 )
1768 errstring = N_("invalid ELF header");
1769 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1770 {
1771 /* This is not a fatal error. On architectures where
1772 32-bit and 64-bit binaries can be run this might
1773 happen. */
1774 *found_other_class = true;
1775 goto close_and_out;
1776 }
1777 else if (ehdr->e_ident[EI_DATA] != byteorder)
1778 {
1779 if (BYTE_ORDER == BIG_ENDIAN)
1780 errstring = N_("ELF file data encoding not big-endian");
1781 else
1782 errstring = N_("ELF file data encoding not little-endian");
1783 }
1784 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1785 errstring
1786 = N_("ELF file version ident does not match current one");
1787 /* XXX We should be able to set system-specific versions which are
1788 allowed here. */
1789 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1790 errstring = N_("ELF file OS ABI invalid");
1791 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1792 ehdr->e_ident[EI_ABIVERSION]))
1793 errstring = N_("ELF file ABI version invalid");
1794 else if (memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD],
1795 EI_NIDENT - EI_PAD) != 0)
1796 errstring = N_("nonzero padding in e_ident");
1797 else
1798 /* Otherwise we don't know what went wrong. */
1799 errstring = N_("internal error");
1800
1801 goto call_lose;
1802 }
1803
1804 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1805 {
1806 errstring = N_("ELF file version does not match current one");
1807 goto call_lose;
1808 }
1809 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1810 goto close_and_out;
1811 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1812 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1813 {
1814 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1815 goto call_lose;
1816 }
1817 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1818 != sizeof (ElfW(Phdr)))
1819 {
1820 errstring = N_("ELF file's phentsize not the expected size");
1821 goto call_lose;
1822 }
1823
1824 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1825 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1826 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1827 else
1828 {
1829 phdr = alloca (maplength);
1830 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1831 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1832 {
1833 read_error:
1834 errval = errno;
1835 errstring = N_("cannot read file data");
1836 goto call_lose;
1837 }
1838 }
1839
1840 /* Check .note.ABI-tag if present. */
1841 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1842 if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
1843 {
1844 ElfW(Addr) size = ph->p_filesz;
1845
1846 if (ph->p_offset + size <= (size_t) fbp->len)
1847 abi_note = (void *) (fbp->buf + ph->p_offset);
1848 else
1849 {
1850 abi_note = alloca (size);
1851 __lseek (fd, ph->p_offset, SEEK_SET);
1852 if (__libc_read (fd, (void *) abi_note, size) != size)
1853 goto read_error;
1854 }
1855
1856 while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1857 {
1858 #define ROUND(len) (((len) + sizeof (ElfW(Word)) - 1) & -sizeof (ElfW(Word)))
1859 ElfW(Addr) note_size = 3 * sizeof (ElfW(Word))
1860 + ROUND (abi_note[0])
1861 + ROUND (abi_note[1]);
1862
1863 if (size - 32 < note_size)
1864 {
1865 size = 0;
1866 break;
1867 }
1868 size -= note_size;
1869 abi_note = (void *) abi_note + note_size;
1870 }
1871
1872 if (size == 0)
1873 continue;
1874
1875 osversion = (abi_note[5] & 0xff) * 65536
1876 + (abi_note[6] & 0xff) * 256
1877 + (abi_note[7] & 0xff);
1878 if (abi_note[4] != __ABI_TAG_OS
1879 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
1880 {
1881 close_and_out:
1882 __close (fd);
1883 __set_errno (ENOENT);
1884 fd = -1;
1885 }
1886
1887 break;
1888 }
1889 }
1890
1891 return fd;
1892 }
1893 \f
1894 /* Try to open NAME in one of the directories in *DIRSP.
1895 Return the fd, or -1. If successful, fill in *REALNAME
1896 with the malloc'd full directory name. If it turns out
1897 that none of the directories in *DIRSP exists, *DIRSP is
1898 replaced with (void *) -1, and the old value is free()d
1899 if MAY_FREE_DIRS is true. */
1900
1901 static int
1902 open_path (const char *name, size_t namelen, int secure,
1903 struct r_search_path_struct *sps, char **realname,
1904 struct filebuf *fbp, struct link_map *loader, int whatcode,
1905 bool *found_other_class)
1906 {
1907 struct r_search_path_elem **dirs = sps->dirs;
1908 char *buf;
1909 int fd = -1;
1910 const char *current_what = NULL;
1911 int any = 0;
1912
1913 if (__builtin_expect (dirs == NULL, 0))
1914 /* We're called before _dl_init_paths when loading the main executable
1915 given on the command line when rtld is run directly. */
1916 return -1;
1917
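/* The scratch buffer must hold the longest directory name, the
   longest hardware-capability subdirectory string, and the file
   name itself.  */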
1918 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
1919 do
1920 {
1921 struct r_search_path_elem *this_dir = *dirs;
1922 size_t buflen = 0;
1923 size_t cnt;
1924 char *edp;
1925 int here_any = 0;
1926 int err;
1927
1928 /* If we are debugging the search for libraries, print the search
1929 path now if it has not been printed yet. */
1930 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1931 && current_what != this_dir->what)
1932 {
1933 current_what = this_dir->what;
1934 print_search_path (dirs, current_what, this_dir->where);
1935 }
1936
1937 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1938 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1939 {
1940 /* Skip this directory if we know it does not exist. */
1941 if (this_dir->status[cnt] == nonexisting)
1942 continue;
1943
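/* Construct the full path name: the directory name already copied
   into BUF, the capability subdirectory (for example "tls/", or
   empty), and then NAME.  */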
1944 buflen =
1945 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1946 capstr[cnt].len),
1947 name, namelen)
1948 - buf);
1949
1950 /* Print the name we are about to try if debugging is enabled. */
1951 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1952 _dl_debug_printf (" trying file=%s\n", buf);
1953
1954 fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
1955 false);
1956 if (this_dir->status[cnt] == unknown)
1957 {
1958 if (fd != -1)
1959 this_dir->status[cnt] = existing;
1960 /* Do not update the directory information when loading
1961 auditing code. We must try to disturb the program as
1962 little as possible. */
1963 else if (loader == NULL
1964 || GL(dl_ns)[loader->l_ns]._ns_loaded->l_auditing == 0)
1965 {
1966 /* We failed to open the library in this machine-dependent
1967 subdirectory. Check whether the directory exists at all. */
1968 struct stat64 st;
1969
1970 buf[buflen - namelen - 1] = '\0';
1971
1972 if (__xstat64 (_STAT_VER, buf, &st) != 0
1973 || ! S_ISDIR (st.st_mode))
1974 /* The path does not exist or is not a directory. */
1975 this_dir->status[cnt] = nonexisting;
1976 else
1977 this_dir->status[cnt] = existing;
1978 }
1979 }
1980
1981 /* Remember whether we found any existing directory. */
1982 here_any |= this_dir->status[cnt] != nonexisting;
1983
1984 if (fd != -1 && __builtin_expect (secure, 0)
1985 && INTUSE(__libc_enable_secure))
1986 {
1987 /* This is an extra security measure: in secure mode only shared
1988 objects that are themselves marked set-user-ID may be preloaded
1989 from these directories, so a buggy object cannot be exploited. */
1990 struct stat64 st;
1991
1992 if (__fxstat64 (_STAT_VER, fd, &st) != 0
1993 || (st.st_mode & S_ISUID) == 0)
1994 {
1995 /* Either the shared object could not be checked for the SUID
1996 bit or the bit is not set. In either case we must not
1997 use this object. */
1998 __close (fd);
1999 fd = -1;
2000 /* Simply ignore the file; signal this by setting the error
2001 value which `open' would have set. */
2002 errno = ENOENT;
2003 }
2004 }
2005 }
2006
2007 if (fd != -1)
2008 {
2009 *realname = (char *) malloc (buflen);
2010 if (*realname != NULL)
2011 {
2012 memcpy (*realname, buf, buflen);
2013 return fd;
2014 }
2015 else
2016 {
2017 /* No memory for the name, we certainly won't be able
2018 to load and link it. */
2019 __close (fd);
2020 return -1;
2021 }
2022 }
2023 if (here_any && (err = errno) != ENOENT && err != EACCES)
2024 /* The file apparently exists, but the open failed for a reason other than the file being missing or inaccessible; give up. */
2025 return -1;
2026
2027 /* Remember whether we found anything. */
2028 any |= here_any;
2029 }
2030 while (*++dirs != NULL);
2031
2032 /* Remove the whole path if none of the directories exists. */
2033 if (__builtin_expect (! any, 0))
2034 {
2035 /* Paths which were allocated using the minimal malloc() in ld.so
2036 must not be freed using the general free() in libc. */
2037 if (sps->malloced)
2038 free (sps->dirs);
2039
2040 /* rtld_search_dirs is attribute_relro, therefore avoid writing
2041 into it. */
2042 if (sps != &rtld_search_dirs)
2043 sps->dirs = (void *) -1;
2044 }
2045
2046 return -1;
2047 }
2048
2049 /* Map in the shared object file NAME. */
2050
2051 struct link_map *
2052 internal_function
2053 _dl_map_object (struct link_map *loader, const char *name,
2054 int type, int trace_mode, int mode, Lmid_t nsid)
2055 {
2056 int fd;
2057 char *realname;
2058 char *name_copy;
2059 struct link_map *l;
2060 struct filebuf fb;
2061
2062 assert (nsid >= 0);
2063 assert (nsid < GL(dl_nns));
2064
2065 /* Look for this name among those already loaded. */
2066 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
2067 {
2068 /* If the requested name matches the soname of a loaded object,
2069 use that object. Elide this check for names that have not
2070 yet been opened. */
2071 if (__builtin_expect (l->l_faked, 0) != 0
2072 || __builtin_expect (l->l_removed, 0) != 0)
2073 continue;
2074 if (!_dl_name_match_p (name, l))
2075 {
2076 const char *soname;
2077
2078 if (__builtin_expect (l->l_soname_added, 1)
2079 || l->l_info[DT_SONAME] == NULL)
2080 continue;
2081
2082 soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
2083 + l->l_info[DT_SONAME]->d_un.d_val);
2084 if (strcmp (name, soname) != 0)
2085 continue;
2086
2087 /* We have a match on a new name -- cache it. */
2088 add_name_to_object (l, soname);
2089 l->l_soname_added = 1;
2090 }
2091
2092 /* We have a match. */
2093 return l;
2094 }
2095
2096 /* Display information if we are debugging. */
2097 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
2098 && loader != NULL)
2099 _dl_debug_printf ((mode & __RTLD_CALLMAP) == 0
2100 ? "\nfile=%s [%lu]; needed by %s [%lu]\n"
2101 : "\nfile=%s [%lu]; dynamically loaded by %s [%lu]\n",
2102 name, nsid, loader->l_name[0]
2103 ? loader->l_name : rtld_progname, loader->l_ns);
2104
2105 #ifdef SHARED
2106 /* Give the auditing libraries a chance to change the name before we
2107 try anything. */
2108 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
2109 && (loader == NULL || loader->l_auditing == 0))
2110 {
2111 struct audit_ifaces *afct = GLRO(dl_audit);
2112 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
2113 {
2114 if (afct->objsearch != NULL)
2115 {
2116 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
2117 LA_SER_ORIG);
2118 if (name == NULL)
2119 {
2120 /* Do not try anything further. */
2121 fd = -1;
2122 goto no_file;
2123 }
2124 }
2125
2126 afct = afct->next;
2127 }
2128 }
2129 #endif
2130
2131 /* Will be true if we found a DSO which is of the other ELF class. */
2132 bool found_other_class = false;
2133
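/* A name without a slash is looked up in the standard places, in
   this order: the DT_RPATH of the loader chain (skipped when the
   loader has DT_RUNPATH), the LD_LIBRARY_PATH environment variable,
   the loader's DT_RUNPATH, the ld.so.cache file, and finally the
   default search directories.  A name containing a slash is opened
   directly after expanding any dynamic string tokens.  */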
2134 if (strchr (name, '/') == NULL)
2135 {
2136 /* Search for NAME in several places. */
2137
2138 size_t namelen = strlen (name) + 1;
2139
2140 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2141 _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
2142
2143 fd = -1;
2144
2145 /* When the object has the RUNPATH information we don't use any
2146 RPATHs. */
2147 if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
2148 {
2149 /* This is the executable's map (if there is one). Make sure that
2150 we do not look at it twice. */
2151 struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2152 bool did_main_map = false;
2153
2154 /* First try the DT_RPATH of the object that caused NAME to be
2155 loaded, then of its loader, and so on up the loader chain. */
2156 for (l = loader; l; l = l->l_loader)
2157 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2158 {
2159 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2160 &l->l_rpath_dirs,
2161 &realname, &fb, loader, LA_SER_RUNPATH,
2162 &found_other_class);
2163 if (fd != -1)
2164 break;
2165
2166 did_main_map |= l == main_map;
2167 }
2168
2169 /* If dynamically linked, try the DT_RPATH of the executable
2170 itself. NB: we do this for lookups in any namespace. */
2171 if (fd == -1 && !did_main_map
2172 && main_map != NULL && main_map->l_type != lt_loaded
2173 && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
2174 "RPATH"))
2175 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2176 &main_map->l_rpath_dirs,
2177 &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
2178 &found_other_class);
2179 }
2180
2181 /* Try the LD_LIBRARY_PATH environment variable. */
2182 if (fd == -1 && env_path_list.dirs != (void *) -1)
2183 fd = open_path (name, namelen, mode & __RTLD_SECURE, &env_path_list,
2184 &realname, &fb,
2185 loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
2186 LA_SER_LIBPATH, &found_other_class);
2187
2188 /* Look at the RUNPATH information for this binary. */
2189 if (fd == -1 && loader != NULL
2190 && cache_rpath (loader, &loader->l_runpath_dirs,
2191 DT_RUNPATH, "RUNPATH"))
2192 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2193 &loader->l_runpath_dirs, &realname, &fb, loader,
2194 LA_SER_RUNPATH, &found_other_class);
2195
2196 if (fd == -1
2197 && (__builtin_expect (! (mode & __RTLD_SECURE), 1)
2198 || ! INTUSE(__libc_enable_secure)))
2199 {
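/* In a secure-execution (set-uid/set-gid) program a __RTLD_SECURE
   lookup skips the cache entirely; see the condition above.  */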
2200 /* Check the list of libraries in the file /etc/ld.so.cache,
2201 for compatibility with Linux's ldconfig program. */
2202 const char *cached = _dl_load_cache_lookup (name);
2203
2204 if (cached != NULL)
2205 {
2206 #ifdef SHARED
2207 // XXX Correct to unconditionally default to namespace 0?
2208 l = (loader
2209 ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded
2210 ?: &GL(dl_rtld_map));
2211 #else
2212 l = loader;
2213 #endif
2214
2215 /* If the loader has the DF_1_NODEFLIB flag set we must not
2216 use a cache entry from any of these directories. */
2217 if (
2218 #ifndef SHARED
2219 /* 'l' is always != NULL for dynamically linked objects. */
2220 l != NULL &&
2221 #endif
2222 __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
2223 {
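/* SYSTEM_DIRS is a single string containing the default trusted
   directories separated by NUL bytes; SYSTEM_DIRS_LEN gives the
   length of each entry.  Step through them and reject the cached
   path if it starts with any of these directories.  */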
2224 const char *dirp = system_dirs;
2225 unsigned int cnt = 0;
2226
2227 do
2228 {
2229 if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
2230 {
2231 /* The prefix matches. Don't use the entry. */
2232 cached = NULL;
2233 break;
2234 }
2235
2236 dirp += system_dirs_len[cnt] + 1;
2237 ++cnt;
2238 }
2239 while (cnt < nsystem_dirs_len);
2240 }
2241
2242 if (cached != NULL)
2243 {
2244 fd = open_verify (cached,
2245 &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
2246 LA_SER_CONFIG, &found_other_class, false);
2247 if (__builtin_expect (fd != -1, 1))
2248 {
2249 realname = local_strdup (cached);
2250 if (realname == NULL)
2251 {
2252 __close (fd);
2253 fd = -1;
2254 }
2255 }
2256 }
2257 }
2258 }
2259
2260 /* Finally, try the default path. */
2261 if (fd == -1
2262 && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
2263 || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
2264 && rtld_search_dirs.dirs != (void *) -1)
2265 fd = open_path (name, namelen, mode & __RTLD_SECURE, &rtld_search_dirs,
2266 &realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
2267
2268 /* Add another newline when we are tracing the library loading. */
2269 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2270 _dl_debug_printf ("\n");
2271 }
2272 else
2273 {
2274 /* The path may contain dynamic string tokens. */
2275 realname = (loader
2276 ? expand_dynamic_string_token (loader, name, 0)
2277 : local_strdup (name));
2278 if (realname == NULL)
2279 fd = -1;
2280 else
2281 {
2282 fd = open_verify (realname, &fb,
2283 loader ?: GL(dl_ns)[nsid]._ns_loaded, 0,
2284 &found_other_class, true);
2285 if (__builtin_expect (fd, 0) == -1)
2286 free (realname);
2287 }
2288 }
2289
2290 #ifdef SHARED
2291 no_file:
2292 #endif
2293 /* If the LOADER information was provided only so that we could
2294 find the appropriate RUNPATH/RPATH information, we do not need
2295 it any more. */
2296 if (mode & __RTLD_CALLMAP)
2297 loader = NULL;
2298
2299 if (__builtin_expect (fd, 0) == -1)
2300 {
2301 if (trace_mode
2302 && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
2303 {
2304 /* We have not found an appropriate library. But since we
2305 are only interested in the list of libraries this is not
2306 fatal. Fake an entry with all the information we
2307 have. */
2308 static const Elf_Symndx dummy_bucket = STN_UNDEF;
2309
2310 /* Allocate a new object map. */
2311 if ((name_copy = local_strdup (name)) == NULL
2312 || (l = _dl_new_object (name_copy, name, type, loader,
2313 mode, nsid)) == NULL)
2314 {
2315 free (name_copy);
2316 _dl_signal_error (ENOMEM, name, NULL,
2317 N_("cannot create shared object descriptor"));
2318 }
2319 /* Signal that this is a faked entry. */
2320 l->l_faked = 1;
2321 /* Since the descriptor is initialized with zeros we do not
2322 have to do this here.
2323 l->l_reserved = 0; */
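/* Point the fake map at an empty hash bucket and mark it as already
   relocated so that symbol lookups in it find nothing and relocation
   leaves it alone.  */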
2324 l->l_buckets = &dummy_bucket;
2325 l->l_nbuckets = 1;
2326 l->l_relocated = 1;
2327
2328 /* Enter the object in the object list. */
2329 _dl_add_to_namespace_list (l, nsid);
2330
2331 return l;
2332 }
2333 else if (found_other_class)
2334 _dl_signal_error (0, name, NULL,
2335 ELFW(CLASS) == ELFCLASS32
2336 ? N_("wrong ELF class: ELFCLASS64")
2337 : N_("wrong ELF class: ELFCLASS32"));
2338 else
2339 _dl_signal_error (errno, name, NULL,
2340 N_("cannot open shared object file"));
2341 }
2342
2343 void *stack_end = __libc_stack_end;
2344 return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
2345 &stack_end, nsid);
2346 }
2347
2348
2349 void
2350 internal_function
2351 _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
2352 {
2353 if (counting)
2354 {
2355 si->dls_cnt = 0;
2356 si->dls_size = 0;
2357 }
2358
2359 unsigned int idx = 0;
2360 char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
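/* ADD_PATH runs in two modes: when COUNTING it only accumulates
   dls_cnt and dls_size so that the caller can size the buffer;
   otherwise it fills in the Dl_serpath entries and copies the
   directory names into the string area behind them.  The macro
   below currently discards the FLAGS argument (see the XXX
   markers).  */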
2361 void add_path (const struct r_search_path_struct *sps, unsigned int flags)
2362 # define add_path(sps, flags) add_path(sps, 0) /* XXX */
2363 {
2364 if (sps->dirs != (void *) -1)
2365 {
2366 struct r_search_path_elem **dirs = sps->dirs;
2367 do
2368 {
2369 const struct r_search_path_elem *const r = *dirs++;
2370 if (counting)
2371 {
2372 si->dls_cnt++;
2373 si->dls_size += MAX (2, r->dirnamelen);
2374 }
2375 else
2376 {
2377 Dl_serpath *const sp = &si->dls_serpath[idx++];
2378 sp->dls_name = allocptr;
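/* Directory names are stored with a trailing '/', which is not
   copied into the result; an empty name becomes "." and the root
   directory stays "/".  */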
2379 if (r->dirnamelen < 2)
2380 *allocptr++ = r->dirnamelen ? '/' : '.';
2381 else
2382 allocptr = __mempcpy (allocptr,
2383 r->dirname, r->dirnamelen - 1);
2384 *allocptr++ = '\0';
2385 sp->dls_flags = flags;
2386 }
2387 }
2388 while (*dirs != NULL);
2389 }
2390 }
2391
2392 /* When the object has the RUNPATH information we don't use any RPATHs. */
2393 if (loader->l_info[DT_RUNPATH] == NULL)
2394 {
2395 /* First try the DT_RPATH of the object that caused NAME to be
2396 loaded, then of its loader, and so on up the loader chain. */
2397
2398 struct link_map *l = loader;
2399 do
2400 {
2401 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2402 add_path (&l->l_rpath_dirs, XXX_RPATH);
2403 l = l->l_loader;
2404 }
2405 while (l != NULL);
2406
2407 /* If dynamically linked, try the DT_RPATH of the executable itself. */
2408 if (loader->l_ns == LM_ID_BASE)
2409 {
2410 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2411 if (l != NULL && l->l_type != lt_loaded && l != loader)
2412 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2413 add_path (&l->l_rpath_dirs, XXX_RPATH);
2414 }
2415 }
2416
2417 /* Try the LD_LIBRARY_PATH environment variable. */
2418 add_path (&env_path_list, XXX_ENV);
2419
2420 /* Look at the RUNPATH information for this binary. */
2421 if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
2422 add_path (&loader->l_runpath_dirs, XXX_RUNPATH);
2423
2424 /* XXX
2425 Here is where ld.so.cache gets checked, but we don't have
2426 a way to indicate that in the results for Dl_serinfo. */
2427
2428 /* Finally, try the default path. */
2429 if (!(loader->l_flags_1 & DF_1_NODEFLIB))
2430 add_path (&rtld_search_dirs, XXX_default);
2431
2432 if (counting)
2433 /* Add in the size of the structure preceding the string area,
2434 which we could not know until dls_cnt was complete. */
2435 si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;
2436 }