git.ipfire.org Git - thirdparty/glibc.git/blob - elf/dl-load.c
Fix memory leak in dlopen with RTLD_NOLOAD.
1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2005, 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
19
20 #include <elf.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <libintl.h>
24 #include <stdbool.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <ldsodefs.h>
29 #include <bits/wordsize.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include "dynamic-link.h"
35 #include <abi-tag.h>
36 #include <stackinfo.h>
37 #include <caller.h>
38 #include <sysdep.h>
39
40 #include <dl-dst.h>
41
42 /* On some systems, no flag bits are given to specify file mapping. */
43 #ifndef MAP_FILE
44 # define MAP_FILE 0
45 #endif
46
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version.
54
55 To make up for the lack and avoid the overwriting problem,
56 what Linux does have is MAP_DENYWRITE. This prevents anyone
57 from modifying the file while we have it mapped. */
58 #ifndef MAP_COPY
59 # ifdef MAP_DENYWRITE
60 # define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
61 # else
62 # define MAP_COPY MAP_PRIVATE
63 # endif
64 #endif
65
66 /* Some systems link their relocatable objects at a base address other
67 than 0. We want to know the base address for these such that we can
68 subtract this address from the segment addresses during mapping.
69 This results in a more efficient address space usage. Defaults to
70 zero for almost all systems. */
71 #ifndef MAP_BASE_ADDR
72 # define MAP_BASE_ADDR(l) 0
73 #endif
74
75
76 #include <endian.h>
77 #if BYTE_ORDER == BIG_ENDIAN
78 # define byteorder ELFDATA2MSB
79 #elif BYTE_ORDER == LITTLE_ENDIAN
80 # define byteorder ELFDATA2LSB
81 #else
82 # error "Unknown BYTE_ORDER " BYTE_ORDER
83 # define byteorder ELFDATANONE
84 #endif
85
86 #define STRING(x) __STRING (x)
87
88 /* Handle situations where we have a preferred location in memory for
89 the shared objects. */
90 #ifdef ELF_PREFERRED_ADDRESS_DATA
91 ELF_PREFERRED_ADDRESS_DATA;
92 #endif
93 #ifndef ELF_PREFERRED_ADDRESS
94 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
95 #endif
96 #ifndef ELF_FIXED_ADDRESS
97 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
98 #endif
99
100
101 int __stack_prot attribute_hidden attribute_relro
102 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
103 = PROT_GROWSDOWN;
104 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
105 = PROT_GROWSUP;
106 #else
107 = 0;
108 #endif
109
110
111 /* Type for the buffer into which we put the ELF header and hopefully the program
112 header. This buffer does not really have to be too large. In most
113 cases the program header follows the ELF header directly. If this
114 is not the case all bets are off and we can make the buffer
115 arbitrarily large and still might not get it read. This means the only
116 question is how large are the ELF and program header combined. The
117 ELF header in 32-bit files is 52 bytes long and in 64-bit files is 64
118 bytes long. Each program header entry is again 32 and 56 bytes
119 long respectively. I.e., even with a file which has 10 program
120 header entries we only have to read 372B/624B respectively. Add to
121 this a bit of margin for program notes and reading 512B and 832B
122 for 32-bit and 64-bit files respectively is enough. If this
123 heuristic should really fail for some file the code in
124 `_dl_map_object_from_fd' knows how to recover. */
125 struct filebuf
126 {
127 ssize_t len;
128 #if __WORDSIZE == 32
129 # define FILEBUF_SIZE 512
130 #else
131 # define FILEBUF_SIZE 832
132 #endif
133 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
134 };
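/* A quick check of the arithmetic above (illustration only): a 64-bit file
   with 10 program header entries needs 64 + 10 * 56 = 624 bytes, well within
   the 832-byte buffer; the 32-bit case needs 52 + 10 * 32 = 372 bytes against
   a 512-byte buffer, leaving the promised margin for program notes.  */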
135
136 /* This is the decomposed LD_LIBRARY_PATH search path. */
137 static struct r_search_path_struct env_path_list attribute_relro;
138
139 /* List of the hardware capabilities we might end up using. */
140 static const struct r_strlenpair *capstr attribute_relro;
141 static size_t ncapstr attribute_relro;
142 static size_t max_capstrlen attribute_relro;
143
144
145 /* Get the generated information about the trusted directories. */
146 #include "trusted-dirs.h"
147
148 static const char system_dirs[] = SYSTEM_DIRS;
149 static const size_t system_dirs_len[] =
150 {
151 SYSTEM_DIRS_LEN
152 };
153 #define nsystem_dirs_len \
154 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
155
156
157 /* Local version of `strdup' function. */
158 static char *
159 local_strdup (const char *s)
160 {
161 size_t len = strlen (s) + 1;
162 void *new = malloc (len);
163
164 if (new == NULL)
165 return NULL;
166
167 return (char *) memcpy (new, s, len);
168 }
169
170
171 static size_t
172 is_dst (const char *start, const char *name, const char *str,
173 int is_path, int secure)
174 {
175 size_t len;
176 bool is_curly = false;
177
178 if (name[0] == '{')
179 {
180 is_curly = true;
181 ++name;
182 }
183
184 len = 0;
185 while (name[len] == str[len] && name[len] != '\0')
186 ++len;
187
188 if (is_curly)
189 {
190 if (name[len] != '}')
191 return 0;
192
193 /* Point again at the beginning of the name. */
194 --name;
195 /* Skip over closing curly brace and adjust for the --name. */
196 len += 2;
197 }
198 else if (name[len] != '\0' && name[len] != '/'
199 && (!is_path || name[len] != ':'))
200 return 0;
201
202 if (__builtin_expect (secure, 0)
203 && ((name[len] != '\0' && (!is_path || name[len] != ':'))
204 || (name != start + 1 && (!is_path || name[-2] != ':'))))
205 return 0;
206
207 return len;
208 }
209
210
211 size_t
212 _dl_dst_count (const char *name, int is_path)
213 {
214 const char *const start = name;
215 size_t cnt = 0;
216
217 do
218 {
219 size_t len;
220
221 /* $ORIGIN is not expanded for SUID/SGID programs (except if it
222 is $ORIGIN alone) and it must always appear first in path. */
223 ++name;
224 if ((len = is_dst (start, name, "ORIGIN", is_path,
225 INTUSE(__libc_enable_secure))) != 0
226 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
227 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
228 ++cnt;
229
230 name = strchr (name + len, '$');
231 }
232 while (name != NULL);
233
234 return cnt;
235 }
236
237
238 char *
239 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
240 int is_path)
241 {
242 const char *const start = name;
243 char *last_elem, *wp;
244
245 /* Now fill the result path. While copying over the string we keep
246 track of the start of the last path element. When we come across
247 a DST we copy over the value or (if the value is not available)
248 leave the entire path element out. */
249 last_elem = wp = result;
250
251 do
252 {
253 if (__builtin_expect (*name == '$', 0))
254 {
255 const char *repl = NULL;
256 size_t len;
257
258 ++name;
259 if ((len = is_dst (start, name, "ORIGIN", is_path,
260 INTUSE(__libc_enable_secure))) != 0)
261 {
262 #ifndef SHARED
263 if (l == NULL)
264 repl = _dl_get_origin ();
265 else
266 #endif
267 repl = l->l_origin;
268 }
269 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
270 repl = GLRO(dl_platform);
271 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
272 repl = DL_DST_LIB;
273
274 if (repl != NULL && repl != (const char *) -1)
275 {
276 wp = __stpcpy (wp, repl);
277 name += len;
278 }
279 else if (len > 1)
280 {
281 /* We cannot use this path element, the value of the
282 replacement is unknown. */
283 wp = last_elem;
284 name += len;
285 while (*name != '\0' && (!is_path || *name != ':'))
286 ++name;
287 }
288 else
289 /* No DST we recognize. */
290 *wp++ = '$';
291 }
292 else
293 {
294 *wp++ = *name++;
295 if (is_path && *name == ':')
296 last_elem = wp;
297 }
298 }
299 while (*name != '\0');
300
301 *wp = '\0';
302
303 return result;
304 }
305
306
307 /* Return copy of argument with all recognized dynamic string tokens
308 ($ORIGIN, $PLATFORM and $LIB) replaced. On some platforms it
309 might not be possible to determine the path from which the object
310 belonging to the map is loaded. In this case the path element
311 containing $ORIGIN is left out. */
312 static char *
313 expand_dynamic_string_token (struct link_map *l, const char *s)
314 {
315 /* We make two runs over the string. First we determine how large the
316 resulting string is and then we copy it over. Since this is not a
317 frequently executed operation we are looking here not for performance
318 but rather for code size. */
319 size_t cnt;
320 size_t total;
321 char *result;
322
323 /* Determine the number of DST elements. */
324 cnt = DL_DST_COUNT (s, 1);
325
326 /* If we do not have to replace anything simply copy the string. */
327 if (__builtin_expect (cnt, 0) == 0)
328 return local_strdup (s);
329
330 /* Determine the length of the substituted string. */
331 total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
332
333 /* Allocate the necessary memory. */
334 result = (char *) malloc (total + 1);
335 if (result == NULL)
336 return NULL;
337
338 return _dl_dst_substitute (l, s, result, 1);
339 }
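/* Illustration with hypothetical values (not part of the original source):
   for an object located in "/opt/app/lib", so that l->l_origin is
   "/opt/app/lib", the call

       expand_dynamic_string_token (l, "$ORIGIN/../plugins:/usr/lib")

   returns a freshly malloc'd string "/opt/app/lib/../plugins:/usr/lib".
   When $ORIGIN cannot be determined, the path element containing it is
   left out instead, as described in the comment above.  */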
340
341
342 /* Add `name' to the list of names for a particular shared object.
343 A copy of `name' is stored in the record, so the caller keeps
344 ownership of the string. Nothing is added if the object
345 already has this name. */
346 static void
347 internal_function
348 add_name_to_object (struct link_map *l, const char *name)
349 {
350 struct libname_list *lnp, *lastp;
351 struct libname_list *newname;
352 size_t name_len;
353
354 lastp = NULL;
355 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
356 if (strcmp (name, lnp->name) == 0)
357 return;
358
359 name_len = strlen (name) + 1;
360 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
361 if (newname == NULL)
362 {
363 /* No more memory. */
364 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
365 return;
366 }
367 /* The object should have a libname set from _dl_new_object. */
368 assert (lastp != NULL);
369
370 newname->name = memcpy (newname + 1, name, name_len);
371 newname->next = NULL;
372 newname->dont_free = 0;
373 lastp->next = newname;
374 }
375
376 /* Standard search directories. */
377 static struct r_search_path_struct rtld_search_dirs attribute_relro;
378
379 static size_t max_dirnamelen;
380
381 static struct r_search_path_elem **
382 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
383 int check_trusted, const char *what, const char *where)
384 {
385 char *cp;
386 size_t nelems = 0;
387
388 while ((cp = __strsep (&rpath, sep)) != NULL)
389 {
390 struct r_search_path_elem *dirp;
391 size_t len = strlen (cp);
392
393 /* `strsep' can pass an empty string. This has to be
394 interpreted as `use the current directory'. */
395 if (len == 0)
396 {
397 static const char curwd[] = "./";
398 cp = (char *) curwd;
399 }
400
401 /* Remove trailing slashes (except for "/"). */
402 while (len > 1 && cp[len - 1] == '/')
403 --len;
404
405 /* Now add one if there is none so far. */
406 if (len > 0 && cp[len - 1] != '/')
407 cp[len++] = '/';
408
409 /* Make sure we don't use untrusted directories if we run SUID. */
410 if (__builtin_expect (check_trusted, 0))
411 {
412 const char *trun = system_dirs;
413 size_t idx;
414 int unsecure = 1;
415
416 /* All trusted directories must be complete names. */
417 if (cp[0] == '/')
418 {
419 for (idx = 0; idx < nsystem_dirs_len; ++idx)
420 {
421 if (len == system_dirs_len[idx]
422 && memcmp (trun, cp, len) == 0)
423 {
424 /* Found it. */
425 unsecure = 0;
426 break;
427 }
428
429 trun += system_dirs_len[idx] + 1;
430 }
431 }
432
433 if (unsecure)
434 /* Simply drop this directory. */
435 continue;
436 }
437
438 /* See if this directory is already known. */
439 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
440 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
441 break;
442
443 if (dirp != NULL)
444 {
445 /* It is available, see whether it's on our own list. */
446 size_t cnt;
447 for (cnt = 0; cnt < nelems; ++cnt)
448 if (result[cnt] == dirp)
449 break;
450
451 if (cnt == nelems)
452 result[nelems++] = dirp;
453 }
454 else
455 {
456 size_t cnt;
457 enum r_dir_status init_val;
458 size_t where_len = where ? strlen (where) + 1 : 0;
459
460 /* It's a new directory. Create an entry and add it. */
461 dirp = (struct r_search_path_elem *)
462 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
463 + where_len + len + 1);
464 if (dirp == NULL)
465 _dl_signal_error (ENOMEM, NULL, NULL,
466 N_("cannot create cache for search path"));
467
468 dirp->dirname = ((char *) dirp + sizeof (*dirp)
469 + ncapstr * sizeof (enum r_dir_status));
470 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
471 dirp->dirnamelen = len;
472
473 if (len > max_dirnamelen)
474 max_dirnamelen = len;
475
476 /* We have to make sure all the relative directories are
477 never ignored. The current directory might change and
478 all our saved information would be void. */
479 init_val = cp[0] != '/' ? existing : unknown;
480 for (cnt = 0; cnt < ncapstr; ++cnt)
481 dirp->status[cnt] = init_val;
482
483 dirp->what = what;
484 if (__builtin_expect (where != NULL, 1))
485 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
486 + (ncapstr * sizeof (enum r_dir_status)),
487 where, where_len);
488 else
489 dirp->where = NULL;
490
491 dirp->next = GL(dl_all_dirs);
492 GL(dl_all_dirs) = dirp;
493
494 /* Put it in the result array. */
495 result[nelems++] = dirp;
496 }
497 }
498
499 /* Terminate the array. */
500 result[nelems] = NULL;
501
502 return result;
503 }
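/* Illustration with a hypothetical input: given a writable copy of
   "/opt/app/lib::/usr/local/lib/" and SEP ":", fillin_rpath produces the
   entries "/opt/app/lib/", "./" (the empty element stands for the current
   directory) and "/usr/local/lib/"; trailing slashes are normalized to
   exactly one and each directory appears in GL(dl_all_dirs) only once.  */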
504
505
506 static bool
507 internal_function
508 decompose_rpath (struct r_search_path_struct *sps,
509 const char *rpath, struct link_map *l, const char *what)
510 {
511 /* Make a copy we can work with. */
512 const char *where = l->l_name;
513 char *copy;
514 char *cp;
515 struct r_search_path_elem **result;
516 size_t nelems;
517 /* Initialize to please the compiler. */
518 const char *errstring = NULL;
519
520 /* First see whether we must forget the RUNPATH and RPATH from this
521 object. */
522 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
523 && !INTUSE(__libc_enable_secure))
524 {
525 const char *inhp = GLRO(dl_inhibit_rpath);
526
527 do
528 {
529 const char *wp = where;
530
531 while (*inhp == *wp && *wp != '\0')
532 {
533 ++inhp;
534 ++wp;
535 }
536
537 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
538 {
539 /* This object is on the list of objects for which the
540 RUNPATH and RPATH must not be used. */
541 sps->dirs = (void *) -1;
542 return false;
543 }
544
545 while (*inhp != '\0')
546 if (*inhp++ == ':')
547 break;
548 }
549 while (*inhp != '\0');
550 }
551
552 /* Make a writable copy. At the same time expand possible dynamic
553 string tokens. */
554 copy = expand_dynamic_string_token (l, rpath);
555 if (copy == NULL)
556 {
557 errstring = N_("cannot create RUNPATH/RPATH copy");
558 goto signal_error;
559 }
560
561 /* Count the number of necessary elements in the result array. */
562 nelems = 0;
563 for (cp = copy; *cp != '\0'; ++cp)
564 if (*cp == ':')
565 ++nelems;
566
567 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
568 number of necessary entries. */
569 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
570 * sizeof (*result));
571 if (result == NULL)
572 {
573 free (copy);
574 errstring = N_("cannot create cache for search path");
575 signal_error:
576 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
577 }
578
579 fillin_rpath (copy, result, ":", 0, what, where);
580
581 /* Free the copied RPATH string. `fillin_rpath' makes its own copies if
582 necessary. */
583 free (copy);
584
585 sps->dirs = result;
586 /* The caller will change this value if we haven't used a real malloc. */
587 sps->malloced = 1;
588 return true;
589 }
590
591 /* Make sure cached path information is stored in *SP
592 and return true if there are any paths to search there. */
593 static bool
594 cache_rpath (struct link_map *l,
595 struct r_search_path_struct *sp,
596 int tag,
597 const char *what)
598 {
599 if (sp->dirs == (void *) -1)
600 return false;
601
602 if (sp->dirs != NULL)
603 return true;
604
605 if (l->l_info[tag] == NULL)
606 {
607 /* There is no path. */
608 sp->dirs = (void *) -1;
609 return false;
610 }
611
612 /* Make sure the cache information is available. */
613 return decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
614 + l->l_info[tag]->d_un.d_val),
615 l, what);
616 }
617
618
619 void
620 internal_function
621 _dl_init_paths (const char *llp)
622 {
623 size_t idx;
624 const char *strp;
625 struct r_search_path_elem *pelem, **aelem;
626 size_t round_size;
627 #ifdef SHARED
628 struct link_map *l;
629 #endif
630 /* Initialize to please the compiler. */
631 const char *errstring = NULL;
632
633 /* Fill in the information about the application's RPATH and the
634 directories addressed by the LD_LIBRARY_PATH environment variable. */
635
636 /* Get the capabilities. */
637 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
638 &ncapstr, &max_capstrlen);
639
640 /* First set up the rest of the default search directory entries. */
641 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
642 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
643 if (rtld_search_dirs.dirs == NULL)
644 {
645 errstring = N_("cannot create search path array");
646 signal_error:
647 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
648 }
649
650 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
651 + ncapstr * sizeof (enum r_dir_status))
652 / sizeof (struct r_search_path_elem));
653
654 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
655 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
656 * round_size * sizeof (struct r_search_path_elem));
657 if (rtld_search_dirs.dirs[0] == NULL)
658 {
659 errstring = N_("cannot create cache for search path");
660 goto signal_error;
661 }
662
663 rtld_search_dirs.malloced = 0;
664 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
665 strp = system_dirs;
666 idx = 0;
667
668 do
669 {
670 size_t cnt;
671
672 *aelem++ = pelem;
673
674 pelem->what = "system search path";
675 pelem->where = NULL;
676
677 pelem->dirname = strp;
678 pelem->dirnamelen = system_dirs_len[idx];
679 strp += system_dirs_len[idx] + 1;
680
681 /* System paths must be absolute. */
682 assert (pelem->dirname[0] == '/');
683 for (cnt = 0; cnt < ncapstr; ++cnt)
684 pelem->status[cnt] = unknown;
685
686 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
687
688 pelem += round_size;
689 }
690 while (idx < nsystem_dirs_len);
691
692 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
693 *aelem = NULL;
694
695 #ifdef SHARED
696 /* This points to the map of the main object. */
697 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
698 if (l != NULL)
699 {
700 assert (l->l_type != lt_loaded);
701
702 if (l->l_info[DT_RUNPATH])
703 {
704 /* Allocate room for the search path and fill in information
705 from RUNPATH. */
706 decompose_rpath (&l->l_runpath_dirs,
707 (const void *) (D_PTR (l, l_info[DT_STRTAB])
708 + l->l_info[DT_RUNPATH]->d_un.d_val),
709 l, "RUNPATH");
710
711 /* The RPATH is ignored. */
712 l->l_rpath_dirs.dirs = (void *) -1;
713 }
714 else
715 {
716 l->l_runpath_dirs.dirs = (void *) -1;
717
718 if (l->l_info[DT_RPATH])
719 {
720 /* Allocate room for the search path and fill in information
721 from RPATH. */
722 decompose_rpath (&l->l_rpath_dirs,
723 (const void *) (D_PTR (l, l_info[DT_STRTAB])
724 + l->l_info[DT_RPATH]->d_un.d_val),
725 l, "RPATH");
726 l->l_rpath_dirs.malloced = 0;
727 }
728 else
729 l->l_rpath_dirs.dirs = (void *) -1;
730 }
731 }
732 #endif /* SHARED */
733
734 if (llp != NULL && *llp != '\0')
735 {
736 size_t nllp;
737 const char *cp = llp;
738 char *llp_tmp;
739
740 #ifdef SHARED
741 /* Expand DSTs. */
742 size_t cnt = DL_DST_COUNT (llp, 1);
743 if (__builtin_expect (cnt == 0, 1))
744 llp_tmp = strdupa (llp);
745 else
746 {
747 /* Determine the length of the substituted string. */
748 size_t total = DL_DST_REQUIRED (l, llp, strlen (llp), cnt);
749
750 /* Allocate the necessary memory. */
751 llp_tmp = (char *) alloca (total + 1);
752 llp_tmp = _dl_dst_substitute (l, llp, llp_tmp, 1);
753 }
754 #else
755 llp_tmp = strdupa (llp);
756 #endif
757
758 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
759 elements it has. */
760 nllp = 1;
761 while (*cp)
762 {
763 if (*cp == ':' || *cp == ';')
764 ++nllp;
765 ++cp;
766 }
767
768 env_path_list.dirs = (struct r_search_path_elem **)
769 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
770 if (env_path_list.dirs == NULL)
771 {
772 errstring = N_("cannot create cache for search path");
773 goto signal_error;
774 }
775
776 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
777 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
778 NULL);
779
780 if (env_path_list.dirs[0] == NULL)
781 {
782 free (env_path_list.dirs);
783 env_path_list.dirs = (void *) -1;
784 }
785
786 env_path_list.malloced = 0;
787 }
788 else
789 env_path_list.dirs = (void *) -1;
790 }
791
792
793 static void
794 __attribute__ ((noreturn, noinline))
795 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
796 const char *msg, struct r_debug *r)
797 {
798 /* The file might already be closed. */
799 if (fd != -1)
800 (void) __close (fd);
801 free (l);
802 free (realname);
803
804 if (r != NULL)
805 {
806 r->r_state = RT_CONSISTENT;
807 _dl_debug_state ();
808 }
809
810 _dl_signal_error (code, name, NULL, msg);
811 }
812
813
814 /* Map in the shared object NAME, actually located in REALNAME, and already
815 opened on FD. */
816
817 #ifndef EXTERNAL_MAP_FROM_FD
818 static
819 #endif
820 struct link_map *
821 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
822 char *realname, struct link_map *loader, int l_type,
823 int mode, void **stack_endp, Lmid_t nsid)
824 {
825 struct link_map *l = NULL;
826 const ElfW(Ehdr) *header;
827 const ElfW(Phdr) *phdr;
828 const ElfW(Phdr) *ph;
829 size_t maplength;
830 int type;
831 struct stat64 st;
832 /* Initialize to keep the compiler happy. */
833 const char *errstring = NULL;
834 int errval = 0;
835 struct r_debug *r = _dl_debug_initialize (0, nsid);
836 bool make_consistent = false;
837
838 /* Get file information. */
839 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
840 {
841 errstring = N_("cannot stat shared object");
842 call_lose_errno:
843 errval = errno;
844 call_lose:
845 lose (errval, fd, name, realname, l, errstring,
846 make_consistent ? r : NULL);
847 }
848
849 /* Look again to see if the real name matched another already loaded. */
850 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
851 if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
852 {
853 /* The object is already loaded.
854 Just bump its reference count and return it. */
855 __close (fd);
856
857 /* If the name is not in the list of names for this object add
858 it. */
859 free (realname);
860 add_name_to_object (l, name);
861
862 return l;
863 }
864
865 #ifdef SHARED
866 /* When loading into a namespace other than the base one we must
867 avoid loading ld.so since there can only be one copy. Ever. */
868 if (__builtin_expect (nsid != LM_ID_BASE, 0)
869 && ((st.st_ino == GL(dl_rtld_map).l_ino
870 && st.st_dev == GL(dl_rtld_map).l_dev)
871 || _dl_name_match_p (name, &GL(dl_rtld_map))))
872 {
873 /* This is indeed ld.so. Create a new link_map which refers to
874 the real one for almost everything. */
875 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
876 if (l == NULL)
877 goto fail_new;
878
879 /* Refer to the real descriptor. */
880 l->l_real = &GL(dl_rtld_map);
881
882 /* No need to bump the refcount of the real object, ld.so will
883 never be unloaded. */
884 __close (fd);
885
886 /* Add the map for the mirrored object to the object list. */
887 _dl_add_to_namespace_list (l, nsid);
888
889 return l;
890 }
891 #endif
892
893 if (mode & RTLD_NOLOAD)
894 {
895 /* We are not supposed to load the object unless it is already
896 loaded. So return now. */
897 free (realname);
898 __close (fd);
899 return NULL;
900 }
901
902 /* Print debugging message. */
903 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
904 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
905
906 /* This is the ELF header. We read it in `open_verify'. */
907 header = (void *) fbp->buf;
908
909 #ifndef MAP_ANON
910 # define MAP_ANON 0
911 if (_dl_zerofd == -1)
912 {
913 _dl_zerofd = _dl_sysdep_open_zero_fill ();
914 if (_dl_zerofd == -1)
915 {
916 free (realname);
917 __close (fd);
918 _dl_signal_error (errno, NULL, NULL,
919 N_("cannot open zero fill device"));
920 }
921 }
922 #endif
923
924 /* Signal that we are going to add new objects. */
925 if (r->r_state == RT_CONSISTENT)
926 {
927 #ifdef SHARED
928 /* Auditing checkpoint: we are going to add new objects. */
929 if ((mode & __RTLD_AUDIT) == 0
930 && __builtin_expect (GLRO(dl_naudit) > 0, 0))
931 {
932 struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
933 /* Do not call the functions for any auditing object. */
934 if (head->l_auditing == 0)
935 {
936 struct audit_ifaces *afct = GLRO(dl_audit);
937 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
938 {
939 if (afct->activity != NULL)
940 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
941
942 afct = afct->next;
943 }
944 }
945 }
946 #endif
947
948 /* Notify the debugger we have added some objects. We need to
949 call _dl_debug_initialize in a static program in case dynamic
950 linking has not been used before. */
951 r->r_state = RT_ADD;
952 _dl_debug_state ();
953 make_consistent = true;
954 }
955 else
956 assert (r->r_state == RT_ADD);
957
958 /* Enter the new object in the list of loaded objects. */
959 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
960 if (__builtin_expect (l == NULL, 0))
961 {
962 #ifdef SHARED
963 fail_new:
964 #endif
965 errstring = N_("cannot create shared object descriptor");
966 goto call_lose_errno;
967 }
968
969 /* Extract the remaining details we need from the ELF header
970 and then read in the program header table. */
971 l->l_entry = header->e_entry;
972 type = header->e_type;
973 l->l_phnum = header->e_phnum;
974
975 maplength = header->e_phnum * sizeof (ElfW(Phdr));
976 if (header->e_phoff + maplength <= (size_t) fbp->len)
977 phdr = (void *) (fbp->buf + header->e_phoff);
978 else
979 {
980 phdr = alloca (maplength);
981 __lseek (fd, header->e_phoff, SEEK_SET);
982 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
983 {
984 errstring = N_("cannot read file data");
985 goto call_lose_errno;
986 }
987 }
988
989 /* On most platforms presume that PT_GNU_STACK is absent and the stack is
990 executable. Other platforms default to a nonexecutable stack and don't
991 need PT_GNU_STACK to do so. */
992 uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;
993
994 {
995 /* Scan the program header table, collecting its load commands. */
996 struct loadcmd
997 {
998 ElfW(Addr) mapstart, mapend, dataend, allocend;
999 off_t mapoff;
1000 int prot;
1001 } loadcmds[l->l_phnum], *c;
1002 size_t nloadcmds = 0;
1003 bool has_holes = false;
1004
1005 /* The struct is initialized to zero so this is not necessary:
1006 l->l_ld = 0;
1007 l->l_phdr = 0;
1008 l->l_addr = 0; */
1009 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
1010 switch (ph->p_type)
1011 {
1012 /* These entries tell us where to find things once the file's
1013 segments are mapped in. We record the addresses it says
1014 verbatim, and later correct for the run-time load address. */
1015 case PT_DYNAMIC:
1016 l->l_ld = (void *) ph->p_vaddr;
1017 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
1018 break;
1019
1020 case PT_PHDR:
1021 l->l_phdr = (void *) ph->p_vaddr;
1022 break;
1023
1024 case PT_LOAD:
1025 /* A load command tells us to map in part of the file.
1026 We record the load commands and process them all later. */
1027 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
1028 0))
1029 {
1030 errstring = N_("ELF load command alignment not page-aligned");
1031 goto call_lose;
1032 }
1033 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
1034 & (ph->p_align - 1)) != 0, 0))
1035 {
1036 errstring
1037 = N_("ELF load command address/offset not properly aligned");
1038 goto call_lose;
1039 }
1040
1041 c = &loadcmds[nloadcmds++];
1042 c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
1043 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
1044 & ~(GLRO(dl_pagesize) - 1));
1045 c->dataend = ph->p_vaddr + ph->p_filesz;
1046 c->allocend = ph->p_vaddr + ph->p_memsz;
1047 c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
1048
1049 /* Determine whether there is a gap between the last segment
1050 and this one. */
1051 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1052 has_holes = true;
1053
1054 /* Optimize a common case. */
1055 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1056 c->prot = (PF_TO_PROT
1057 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
1058 #else
1059 c->prot = 0;
1060 if (ph->p_flags & PF_R)
1061 c->prot |= PROT_READ;
1062 if (ph->p_flags & PF_W)
1063 c->prot |= PROT_WRITE;
1064 if (ph->p_flags & PF_X)
1065 c->prot |= PROT_EXEC;
1066 #endif
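/* Illustration of the lookup above (PF_TO_PROT is assumed to pack one
   4-bit PROT_* value per p_flags combination): a read/write data segment
   has p_flags PF_R|PF_W = 6, so nibble 6 is selected and yields
   PROT_READ|PROT_WRITE = 3, the same result the fallback #else branch
   computes bit by bit.  */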
1067 break;
1068
1069 case PT_TLS:
1070 if (ph->p_memsz == 0)
1071 /* Nothing to do for an empty segment. */
1072 break;
1073
1074 l->l_tls_blocksize = ph->p_memsz;
1075 l->l_tls_align = ph->p_align;
1076 if (ph->p_align == 0)
1077 l->l_tls_firstbyte_offset = 0;
1078 else
1079 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1080 l->l_tls_initimage_size = ph->p_filesz;
1081 /* Since we don't know the load address yet only store the
1082 offset. We will adjust it later. */
1083 l->l_tls_initimage = (void *) ph->p_vaddr;
1084
1085 /* If not loading the initial set of shared libraries,
1086 check whether we should permit loading a TLS segment. */
1087 if (__builtin_expect (l->l_type == lt_library, 1)
1088 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1089 not set up TLS data structures, so don't use them now. */
1090 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1091 {
1092 /* Assign the next available module ID. */
1093 l->l_tls_modid = _dl_next_tls_modid ();
1094 break;
1095 }
1096
1097 #ifdef SHARED
1098 if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
1099 /* We are loading the executable itself when the dynamic linker
1100 was executed directly. The setup will happen later. */
1101 break;
1102
1103 /* In a static binary there is no way to tell if we dynamically
1104 loaded libpthread. */
1105 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1106 #endif
1107 {
1108 /* We have not yet loaded libpthread.
1109 We can do the TLS setup right now! */
1110
1111 void *tcb;
1112
1113 /* The first call allocates TLS bookkeeping data structures.
1114 Then we allocate the TCB for the initial thread. */
1115 if (__builtin_expect (_dl_tls_setup (), 0)
1116 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1117 0))
1118 {
1119 errval = ENOMEM;
1120 errstring = N_("\
1121 cannot allocate TLS data structures for initial thread");
1122 goto call_lose;
1123 }
1124
1125 /* Now we install the TCB in the thread register. */
1126 errstring = TLS_INIT_TP (tcb, 0);
1127 if (__builtin_expect (errstring == NULL, 1))
1128 {
1129 /* Now we are all good. */
1130 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1131 break;
1132 }
1133
1134 /* The kernel is too old or somesuch. */
1135 errval = 0;
1136 _dl_deallocate_tls (tcb, 1);
1137 goto call_lose;
1138 }
1139
1140 /* Uh-oh, the binary expects TLS support but we cannot
1141 provide it. */
1142 errval = 0;
1143 errstring = N_("cannot handle TLS data");
1144 goto call_lose;
1145 break;
1146
1147 case PT_GNU_STACK:
1148 stack_flags = ph->p_flags;
1149 break;
1150
1151 case PT_GNU_RELRO:
1152 l->l_relro_addr = ph->p_vaddr;
1153 l->l_relro_size = ph->p_memsz;
1154 break;
1155 }
1156
1157 if (__builtin_expect (nloadcmds == 0, 0))
1158 {
1159 /* This only happens for a bogus object that will be caught with
1160 another error below. But we don't want to go through the
1161 calculations below using NLOADCMDS - 1. */
1162 errstring = N_("object file has no loadable segments");
1163 goto call_lose;
1164 }
1165
1166 /* Now process the load commands and map segments into memory. */
1167 c = loadcmds;
1168
1169 /* Length of the sections to be loaded. */
1170 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1171
1172 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1173 {
1174 /* This is a position-independent shared object. We can let the
1175 kernel map it anywhere it likes, but we must have space for all
1176 the segments in their specified positions relative to the first.
1177 So we map the first segment without MAP_FIXED, but with its
1178 extent increased to cover all the segments. Then we remove
1179 access from excess portion, and there is known sufficient space
1180 there to remap from the later segments.
1181
1182 As a refinement, sometimes we have an address that we would
1183 prefer to map such objects at; but this is only a preference,
1184 the OS can do whatever it likes. */
1185 ElfW(Addr) mappref;
1186 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1187 c->mapstart & GLRO(dl_use_load_bias))
1188 - MAP_BASE_ADDR (l));
1189
1190 /* Remember which part of the address space this object uses. */
1191 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1192 c->prot,
1193 MAP_COPY|MAP_FILE,
1194 fd, c->mapoff);
1195 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1196 {
1197 map_error:
1198 errstring = N_("failed to map segment from shared object");
1199 goto call_lose_errno;
1200 }
1201
1202 l->l_map_end = l->l_map_start + maplength;
1203 l->l_addr = l->l_map_start - c->mapstart;
1204
1205 if (has_holes)
1206 /* Change protection on the excess portion to disallow all access;
1207 the portions we do not remap later will be inaccessible as if
1208 unallocated. Then jump into the normal segment-mapping loop to
1209 handle the portion of the segment past the end of the file
1210 mapping. */
1211 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1212 loadcmds[nloadcmds - 1].mapstart - c->mapend,
1213 PROT_NONE);
1214
1215 l->l_contiguous = 1;
1216
1217 goto postmap;
1218 }
1219
1220 /* This object is loaded at a fixed address. This must never
1221 happen for objects loaded with dlopen(). */
1222 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1223 {
1224 errstring = N_("cannot dynamically load executable");
1225 goto call_lose;
1226 }
1227
1228 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1229 fixed. */
1230 ELF_FIXED_ADDRESS (loader, c->mapstart);
1231
1232
1233 /* Remember which part of the address space this object uses. */
1234 l->l_map_start = c->mapstart + l->l_addr;
1235 l->l_map_end = l->l_map_start + maplength;
1236 l->l_contiguous = !has_holes;
1237
1238 while (c < &loadcmds[nloadcmds])
1239 {
1240 if (c->mapend > c->mapstart
1241 /* Map the segment contents from the file. */
1242 && (__mmap ((void *) (l->l_addr + c->mapstart),
1243 c->mapend - c->mapstart, c->prot,
1244 MAP_FIXED|MAP_COPY|MAP_FILE,
1245 fd, c->mapoff)
1246 == MAP_FAILED))
1247 goto map_error;
1248
1249 postmap:
1250 if (c->prot & PROT_EXEC)
1251 l->l_text_end = l->l_addr + c->mapend;
1252
1253 if (l->l_phdr == 0
1254 && (ElfW(Off)) c->mapoff <= header->e_phoff
1255 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1256 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1257 /* Found the program header in this segment. */
1258 l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
1259
1260 if (c->allocend > c->dataend)
1261 {
1262 /* Extra zero pages should appear at the end of this segment,
1263 after the data mapped from the file. */
1264 ElfW(Addr) zero, zeroend, zeropage;
1265
1266 zero = l->l_addr + c->dataend;
1267 zeroend = l->l_addr + c->allocend;
1268 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1269 & ~(GLRO(dl_pagesize) - 1));
1270
1271 if (zeroend < zeropage)
1272 /* All the extra data is in the last page of the segment.
1273 We can just zero it. */
1274 zeropage = zeroend;
1275
1276 if (zeropage > zero)
1277 {
1278 /* Zero the final part of the last page of the segment. */
1279 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1280 {
1281 /* Dag nab it. */
1282 if (__mprotect ((caddr_t) (zero
1283 & ~(GLRO(dl_pagesize) - 1)),
1284 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1285 {
1286 errstring = N_("cannot change memory protections");
1287 goto call_lose_errno;
1288 }
1289 }
1290 memset ((void *) zero, '\0', zeropage - zero);
1291 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1292 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1293 GLRO(dl_pagesize), c->prot);
1294 }
1295
1296 if (zeroend > zeropage)
1297 {
1298 /* Map the remaining zero pages in from the zero fill FD. */
1299 caddr_t mapat;
1300 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1301 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1302 -1, 0);
1303 if (__builtin_expect (mapat == MAP_FAILED, 0))
1304 {
1305 errstring = N_("cannot map zero-fill pages");
1306 goto call_lose_errno;
1307 }
1308 }
1309 }
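/* Worked example of the zero-fill logic above (hypothetical numbers,
   4 KiB pages, l_addr == 0): with dataend = 0x1234 and allocend = 0x3000,
   zero = 0x1234, zeroend = 0x3000 and zeropage = 0x2000.  The memset
   clears 0x1234..0x1fff in the last file-backed page and the anonymous
   mmap supplies the remaining BSS pages 0x2000..0x2fff.  */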
1310
1311 ++c;
1312 }
1313 }
1314
1315 if (l->l_ld == 0)
1316 {
1317 if (__builtin_expect (type == ET_DYN, 0))
1318 {
1319 errstring = N_("object file has no dynamic section");
1320 goto call_lose;
1321 }
1322 }
1323 else
1324 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1325
1326 elf_get_dynamic_info (l, NULL);
1327
1328 /* Make sure we are not dlopen'ing an object that has the
1329 DF_1_NOOPEN flag set. */
1330 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1331 && (mode & __RTLD_DLOPEN))
1332 {
1333 /* We are not supposed to load this object. Free all resources. */
1334 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1335
1336 if (!l->l_libname->dont_free)
1337 free (l->l_libname);
1338
1339 if (l->l_phdr_allocated)
1340 free ((void *) l->l_phdr);
1341
1342 errstring = N_("shared object cannot be dlopen()ed");
1343 goto call_lose;
1344 }
1345
1346 if (l->l_phdr == NULL)
1347 {
1348 /* The program header is not contained in any of the segments.
1349 We have to allocate memory ourselves and copy it over from our
1350 temporary place. */
1351 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1352 * sizeof (ElfW(Phdr)));
1353 if (newp == NULL)
1354 {
1355 errstring = N_("cannot allocate memory for program header");
1356 goto call_lose_errno;
1357 }
1358
1359 l->l_phdr = memcpy (newp, phdr,
1360 (header->e_phnum * sizeof (ElfW(Phdr))));
1361 l->l_phdr_allocated = 1;
1362 }
1363 else
1364 /* Adjust the PT_PHDR value by the runtime load address. */
1365 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1366
1367 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1368 {
1369 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
1370 0) != 0)
1371 {
1372 errstring = N_("invalid caller");
1373 goto call_lose;
1374 }
1375
1376 /* The stack is presently not executable, but this module
1377 requires that it be executable. We must change the
1378 protection of the variable which contains the flags used in
1379 the mprotect calls. */
1380 #ifdef SHARED
1381 if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
1382 {
1383 const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
1384 const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
1385
1386 struct link_map *const m = &GL(dl_rtld_map);
1387 const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
1388 + m->l_relro_size)
1389 & -GLRO(dl_pagesize));
1390 if (__builtin_expect (p + s <= relro_end, 1))
1391 {
1392 /* The variable lies in the region protected by RELRO. */
1393 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1394 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1395 __mprotect ((void *) p, s, PROT_READ);
1396 }
1397 else
1398 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1399 }
1400 else
1401 #endif
1402 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1403
1404 #ifdef check_consistency
1405 check_consistency ();
1406 #endif
1407
1408 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1409 if (errval)
1410 {
1411 errstring = N_("\
1412 cannot enable executable stack as shared object requires");
1413 goto call_lose;
1414 }
1415 }
1416
1417 /* Adjust the address of the TLS initialization image. */
1418 if (l->l_tls_initimage != NULL)
1419 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1420
1421 /* We are done mapping in the file. We no longer need the descriptor. */
1422 if (__builtin_expect (__close (fd) != 0, 0))
1423 {
1424 errstring = N_("cannot close file descriptor");
1425 goto call_lose_errno;
1426 }
1427 /* Signal that we closed the file. */
1428 fd = -1;
1429
1430 if (l->l_type == lt_library && type == ET_EXEC)
1431 l->l_type = lt_executable;
1432
1433 l->l_entry += l->l_addr;
1434
1435 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1436 _dl_debug_printf ("\
1437 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1438 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1439 (int) sizeof (void *) * 2,
1440 (unsigned long int) l->l_ld,
1441 (int) sizeof (void *) * 2,
1442 (unsigned long int) l->l_addr,
1443 (int) sizeof (void *) * 2, maplength,
1444 (int) sizeof (void *) * 2,
1445 (unsigned long int) l->l_entry,
1446 (int) sizeof (void *) * 2,
1447 (unsigned long int) l->l_phdr,
1448 (int) sizeof (void *) * 2, l->l_phnum);
1449
1450 /* Set up the symbol hash table. */
1451 _dl_setup_hash (l);
1452
1453 /* If this object has DT_SYMBOLIC set modify now its scope. We don't
1454 have to do this for the main map. */
1455 if ((mode & RTLD_DEEPBIND) == 0
1456 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1457 && &l->l_searchlist != l->l_scope[0])
1458 {
1459 /* Create an appropriate searchlist. It contains only this map.
1460 This is the definition of DT_SYMBOLIC in SysVr4. */
1461 l->l_symbolic_searchlist.r_list[0] = l;
1462 l->l_symbolic_searchlist.r_nlist = 1;
1463
1464 /* Now move the existing entries one back. */
1465 memmove (&l->l_scope[1], &l->l_scope[0],
1466 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1467
1468 /* Now add the new entry. */
1469 l->l_scope[0] = &l->l_symbolic_searchlist;
1470 }
1471
1472 /* Remember whether this object must be initialized first. */
1473 if (l->l_flags_1 & DF_1_INITFIRST)
1474 GL(dl_initfirst) = l;
1475
1476 /* Finally the file information. */
1477 l->l_dev = st.st_dev;
1478 l->l_ino = st.st_ino;
1479
1480 /* When we profile the SONAME might be needed for something other than
1481 loading. Add it right away. */
1482 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1483 && l->l_info[DT_SONAME] != NULL)
1484 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1485 + l->l_info[DT_SONAME]->d_un.d_val));
1486
1487 /* Now that the object is fully initialized add it to the object list. */
1488 _dl_add_to_namespace_list (l, nsid);
1489
1490 #ifdef SHARED
1491 /* Auditing checkpoint: we have a new object. */
1492 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
1493 && !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
1494 {
1495 struct audit_ifaces *afct = GLRO(dl_audit);
1496 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1497 {
1498 if (afct->objopen != NULL)
1499 {
1500 l->l_audit[cnt].bindflags
1501 = afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
1502
1503 l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
1504 }
1505
1506 afct = afct->next;
1507 }
1508 }
1509 #endif
1510
1511 return l;
1512 }
1513 \f
1514 /* Print search path. */
1515 static void
1516 print_search_path (struct r_search_path_elem **list,
1517 const char *what, const char *name)
1518 {
1519 char buf[max_dirnamelen + max_capstrlen];
1520 int first = 1;
1521
1522 _dl_debug_printf (" search path=");
1523
1524 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1525 {
1526 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1527 size_t cnt;
1528
1529 for (cnt = 0; cnt < ncapstr; ++cnt)
1530 if ((*list)->status[cnt] != nonexisting)
1531 {
1532 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1533 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1534 cp[0] = '\0';
1535 else
1536 cp[-1] = '\0';
1537
1538 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1539 first = 0;
1540 }
1541
1542 ++list;
1543 }
1544
1545 if (name != NULL)
1546 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1547 name[0] ? name : rtld_progname);
1548 else
1549 _dl_debug_printf_c ("\t\t(%s)\n", what);
1550 }
1551 \f
1552 /* Open a file and verify it is an ELF file for this architecture. We
1553 ignore only ELF files for other architectures. Non-ELF files and
1554 ELF files with different header information cause fatal errors since
1555 this could mean there is something wrong in the installation and the
1556 user might want to know about this. */
1557 static int
1558 open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
1559 int whatcode, bool *found_other_class, bool free_name)
1560 {
1561 /* This is the expected ELF header. */
1562 #define ELF32_CLASS ELFCLASS32
1563 #define ELF64_CLASS ELFCLASS64
1564 #ifndef VALID_ELF_HEADER
1565 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1566 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1567 # define VALID_ELF_ABIVERSION(osabi,ver) (ver == 0)
1568 #elif defined MORE_ELF_HEADER_DATA
1569 MORE_ELF_HEADER_DATA;
1570 #endif
1571 static const unsigned char expected[EI_NIDENT] =
1572 {
1573 [EI_MAG0] = ELFMAG0,
1574 [EI_MAG1] = ELFMAG1,
1575 [EI_MAG2] = ELFMAG2,
1576 [EI_MAG3] = ELFMAG3,
1577 [EI_CLASS] = ELFW(CLASS),
1578 [EI_DATA] = byteorder,
1579 [EI_VERSION] = EV_CURRENT,
1580 [EI_OSABI] = ELFOSABI_SYSV,
1581 [EI_ABIVERSION] = 0
1582 };
1583 static const struct
1584 {
1585 ElfW(Word) vendorlen;
1586 ElfW(Word) datalen;
1587 ElfW(Word) type;
1588 char vendor[4];
1589 } expected_note = { 4, 16, 1, "GNU" };
1590 /* Initialize it to make the compiler happy. */
1591 const char *errstring = NULL;
1592 int errval = 0;
1593
1594 #ifdef SHARED
1595 /* Give the auditing libraries a chance. */
1596 if (__builtin_expect (GLRO(dl_naudit) > 0, 0) && whatcode != 0
1597 && loader->l_auditing == 0)
1598 {
1599 struct audit_ifaces *afct = GLRO(dl_audit);
1600 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1601 {
1602 if (afct->objsearch != NULL)
1603 {
1604 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
1605 whatcode);
1606 if (name == NULL)
1607 /* Ignore the path. */
1608 return -1;
1609 }
1610
1611 afct = afct->next;
1612 }
1613 }
1614 #endif
1615
1616 /* Open the file. We always open files read-only. */
1617 int fd = __open (name, O_RDONLY);
1618 if (fd != -1)
1619 {
1620 ElfW(Ehdr) *ehdr;
1621 ElfW(Phdr) *phdr, *ph;
1622 ElfW(Word) *abi_note;
1623 unsigned int osversion;
1624 size_t maplength;
1625
1626 /* We successfully opened the file. Now verify it is a file
1627 we can use. */
1628 __set_errno (0);
1629 fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));
1630
1631 /* This is where the ELF header is loaded. */
1632 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1633 ehdr = (ElfW(Ehdr) *) fbp->buf;
1634
1635 /* Now run the tests. */
1636 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1637 {
1638 errval = errno;
1639 errstring = (errval == 0
1640 ? N_("file too short") : N_("cannot read file data"));
1641 call_lose:
1642 if (free_name)
1643 {
1644 char *realname = (char *) name;
1645 name = strdupa (realname);
1646 free (realname);
1647 }
1648 lose (errval, fd, name, NULL, NULL, errstring, NULL);
1649 }
1650
1651 /* See whether the ELF header is what we expect. */
1652 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1653 EI_ABIVERSION)
1654 || !VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1655 ehdr->e_ident[EI_ABIVERSION])
1656 || memcmp (&ehdr->e_ident[EI_PAD],
1657 &expected[EI_PAD],
1658 EI_NIDENT - EI_PAD) != 0,
1659 0))
1660 {
1661 /* Something is wrong. */
1662 const Elf32_Word *magp = (const void *) ehdr->e_ident;
1663 if (*magp !=
1664 #if BYTE_ORDER == LITTLE_ENDIAN
1665 ((ELFMAG0 << (EI_MAG0 * 8)) |
1666 (ELFMAG1 << (EI_MAG1 * 8)) |
1667 (ELFMAG2 << (EI_MAG2 * 8)) |
1668 (ELFMAG3 << (EI_MAG3 * 8)))
1669 #else
1670 ((ELFMAG0 << (EI_MAG3 * 8)) |
1671 (ELFMAG1 << (EI_MAG2 * 8)) |
1672 (ELFMAG2 << (EI_MAG1 * 8)) |
1673 (ELFMAG3 << (EI_MAG0 * 8)))
1674 #endif
1675 )
1676 errstring = N_("invalid ELF header");
1677 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1678 {
1679 /* This is not a fatal error. On architectures where
1680 32-bit and 64-bit binaries can be run this might
1681 happen. */
1682 *found_other_class = true;
1683 goto close_and_out;
1684 }
1685 else if (ehdr->e_ident[EI_DATA] != byteorder)
1686 {
1687 if (BYTE_ORDER == BIG_ENDIAN)
1688 errstring = N_("ELF file data encoding not big-endian");
1689 else
1690 errstring = N_("ELF file data encoding not little-endian");
1691 }
1692 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1693 errstring
1694 = N_("ELF file version ident does not match current one");
1695 /* XXX We should be able to set system specific versions which are
1696 allowed here. */
1697 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1698 errstring = N_("ELF file OS ABI invalid");
1699 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1700 ehdr->e_ident[EI_ABIVERSION]))
1701 errstring = N_("ELF file ABI version invalid");
1702 else if (memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD],
1703 EI_NIDENT - EI_PAD) != 0)
1704 errstring = N_("nonzero padding in e_ident");
1705 else
1706 /* Otherwise we don't know what went wrong. */
1707 errstring = N_("internal error");
1708
1709 goto call_lose;
1710 }
1711
1712 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1713 {
1714 errstring = N_("ELF file version does not match current one");
1715 goto call_lose;
1716 }
1717 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1718 goto close_and_out;
1719 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1720 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1721 {
1722 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1723 goto call_lose;
1724 }
1725 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1726 != sizeof (ElfW(Phdr)))
1727 {
1728 errstring = N_("ELF file's phentsize not the expected size");
1729 goto call_lose;
1730 }
1731
1732 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1733 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1734 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1735 else
1736 {
1737 phdr = alloca (maplength);
1738 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1739 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1740 {
1741 read_error:
1742 errval = errno;
1743 errstring = N_("cannot read file data");
1744 goto call_lose;
1745 }
1746 }
1747
1748 /* Check .note.ABI-tag if present. */
1749 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1750 if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
1751 {
1752 ElfW(Addr) size = ph->p_filesz;
1753
1754 if (ph->p_offset + size <= (size_t) fbp->len)
1755 abi_note = (void *) (fbp->buf + ph->p_offset);
1756 else
1757 {
1758 abi_note = alloca (size);
1759 __lseek (fd, ph->p_offset, SEEK_SET);
1760 if (__libc_read (fd, (void *) abi_note, size) != size)
1761 goto read_error;
1762 }
1763
1764 while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1765 {
1766 #define ROUND(len) (((len) + sizeof (ElfW(Word)) - 1) & -sizeof (ElfW(Word)))
1767 ElfW(Addr) note_size = 3 * sizeof (ElfW(Word))
1768 + ROUND (abi_note[0])
1769 + ROUND (abi_note[1]);
1770
1771 if (size - 32 < note_size)
1772 {
1773 size = 0;
1774 break;
1775 }
1776 size -= note_size;
1777 abi_note = (void *) abi_note + note_size;
1778 }
1779
1780 if (size == 0)
1781 continue;
1782
1783 osversion = (abi_note[5] & 0xff) * 65536
1784 + (abi_note[6] & 0xff) * 256
1785 + (abi_note[7] & 0xff);
1786 if (abi_note[4] != __ABI_TAG_OS
1787 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
1788 {
1789 close_and_out:
1790 __close (fd);
1791 __set_errno (ENOENT);
1792 fd = -1;
1793 }
1794
1795 break;
1796 }
1797 }
1798
1799 return fd;
1800 }
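/* A note on the .note.ABI-tag check in open_verify above (illustrative
   decoding with hypothetical values): the 16-byte note descriptor is four
   32-bit words, the OS tag followed by a three-part version.  For a note
   announcing Linux 2.6.32 the words are { __ABI_TAG_OS, 2, 6, 32 } and the
   osversion computed above is 2 * 65536 + 6 * 256 + 32 = 0x20620; the file
   is then skipped with ENOENT if this is newer than the version recorded
   in GLRO(dl_osversion).  */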
1801 \f
1802 /* Try to open NAME in one of the directories in *DIRSP.
1803 Return the fd, or -1. If successful, fill in *REALNAME
1804 with the malloc'd full directory name. If it turns out
1805 that none of the directories in *DIRSP exists, *DIRSP is
1806 replaced with (void *) -1, and the old value is free()d
1807 if MAY_FREE_DIRS is true. */
1808
1809 static int
1810 open_path (const char *name, size_t namelen, int secure,
1811 struct r_search_path_struct *sps, char **realname,
1812 struct filebuf *fbp, struct link_map *loader, int whatcode,
1813 bool *found_other_class)
1814 {
1815 struct r_search_path_elem **dirs = sps->dirs;
1816 char *buf;
1817 int fd = -1;
1818 const char *current_what = NULL;
1819 int any = 0;
1820
1821 if (__builtin_expect (dirs == NULL, 0))
1822 /* We're called before _dl_init_paths when loading the main executable
1823 given on the command line when rtld is run directly. */
1824 return -1;
1825
1826 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
1827 do
1828 {
1829 struct r_search_path_elem *this_dir = *dirs;
1830 size_t buflen = 0;
1831 size_t cnt;
1832 char *edp;
1833 int here_any = 0;
1834 int err;
1835
1836 /* If we are debugging the search for libraries print the path
1837 now if it hasn't happened yet. */
1838 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1839 && current_what != this_dir->what)
1840 {
1841 current_what = this_dir->what;
1842 print_search_path (dirs, current_what, this_dir->where);
1843 }
1844
1845 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1846 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1847 {
1848 /* Skip this directory if we know it does not exist. */
1849 if (this_dir->status[cnt] == nonexisting)
1850 continue;
1851
1852 buflen =
1853 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1854 capstr[cnt].len),
1855 name, namelen)
1856 - buf);
1857
1858 /* Print name we try if this is wanted. */
1859 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1860 _dl_debug_printf (" trying file=%s\n", buf);
1861
1862 fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
1863 false);
1864 if (this_dir->status[cnt] == unknown)
1865 {
1866 if (fd != -1)
1867 this_dir->status[cnt] = existing;
1868 /* Do not update the directory information when loading
1869 auditing code. We must try to disturb the program as
1870 little as possible. */
1871 else if (loader == NULL
1872 || GL(dl_ns)[loader->l_ns]._ns_loaded->l_auditing == 0)
1873 {
1874 /* We failed to open a machine dependent library. Let's
1875 test whether there is any directory at all. */
1876 struct stat64 st;
1877
1878 buf[buflen - namelen - 1] = '\0';
1879
1880 if (__xstat64 (_STAT_VER, buf, &st) != 0
1881 || ! S_ISDIR (st.st_mode))
1882 /* The directory does not exist or it is not a directory. */
1883 this_dir->status[cnt] = nonexisting;
1884 else
1885 this_dir->status[cnt] = existing;
1886 }
1887 }
1888
1889 /* Remember whether we found any existing directory. */
1890 here_any |= this_dir->status[cnt] != nonexisting;
1891
1892 if (fd != -1 && __builtin_expect (secure, 0)
1893 && INTUSE(__libc_enable_secure))
1894 {
1895 /* This is an extra security effort to make sure nobody can
1896 preload broken shared objects which are in the trusted
1897 directories and so exploit the bugs. */
1898 struct stat64 st;
1899
1900 if (__fxstat64 (_STAT_VER, fd, &st) != 0
1901 || (st.st_mode & S_ISUID) == 0)
1902 {
1903 /* The shared object cannot be tested for being SUID
1904 or this bit is not set. In this case we must not
1905 use this object. */
1906 __close (fd);
1907 fd = -1;
1908 /* We simply ignore the file, signal this by setting
1909 the error value which would have been set by `open'. */
1910 errno = ENOENT;
1911 }
1912 }
1913 }
1914
1915 if (fd != -1)
1916 {
1917 *realname = (char *) malloc (buflen);
1918 if (*realname != NULL)
1919 {
1920 memcpy (*realname, buf, buflen);
1921 return fd;
1922 }
1923 else
1924 {
1925 /* No memory for the name; we certainly won't be able
1926    to load and link it.  */
1927 __close (fd);
1928 return -1;
1929 }
1930 }
1931 if (here_any && (err = errno) != ENOENT && err != EACCES)
1932 /* The file exists and is readable, but something went wrong. */
1933 return -1;
1934
1935 /* Remember whether we found anything. */
1936 any |= here_any;
1937 }
1938 while (*++dirs != NULL);
1939
1940 /* Remove the whole path if none of the directories exists. */
1941 if (__builtin_expect (! any, 0))
1942 {
1943 /* Paths which were allocated using the minimal malloc() in ld.so
1944 must not be freed using the general free() in libc. */
1945 if (sps->malloced)
1946 free (sps->dirs);
1947
1948 /* rtld_search_dirs is attribute_relro, therefore avoid writing
1949 into it. */
1950 if (sps != &rtld_search_dirs)
1951 sps->dirs = (void *) -1;
1952 }
1953
1954 return -1;
1955 }
1956
1957 /* Map in the shared object file NAME. */
1958
1959 struct link_map *
1960 internal_function
1961 _dl_map_object (struct link_map *loader, const char *name,
1962 int type, int trace_mode, int mode, Lmid_t nsid)
1963 {
1964 int fd;
1965 char *realname;
1966 char *name_copy;
1967 struct link_map *l;
1968 struct filebuf fb;
1969
1970 assert (nsid >= 0);
1971 assert (nsid < GL(dl_nns));
1972
1973 /* Look for this name among those already loaded. */
1974 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
1975 {
1976 /* If the requested name matches the soname of a loaded object,
1977 use that object. Elide this check for names that have not
1978 yet been opened. */
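      /* (Illustration with made-up names: a request for "libfoo.so.1"
	 matches a map that was opened as "/opt/lib/libfoo-1.2.so" but
	 carries DT_SONAME "libfoo.so.1"; the code below then records the
	 soname via add_name_to_object so later lookups succeed directly
	 in _dl_name_match_p.)  */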
1979 if (__builtin_expect (l->l_faked, 0) != 0
1980 || __builtin_expect (l->l_removed, 0) != 0)
1981 continue;
1982 if (!_dl_name_match_p (name, l))
1983 {
1984 const char *soname;
1985
1986 if (__builtin_expect (l->l_soname_added, 1)
1987 || l->l_info[DT_SONAME] == NULL)
1988 continue;
1989
1990 soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
1991 + l->l_info[DT_SONAME]->d_un.d_val);
1992 if (strcmp (name, soname) != 0)
1993 continue;
1994
1995 /* We have a match on a new name -- cache it. */
1996 add_name_to_object (l, soname);
1997 l->l_soname_added = 1;
1998 }
1999
2000 /* We have a match. */
2001 return l;
2002 }
2003
2004 /* Display information if we are debugging. */
2005 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
2006 && loader != NULL)
2007 _dl_debug_printf ("\nfile=%s [%lu]; needed by %s [%lu]\n", name, nsid,
2008 loader->l_name[0]
2009 ? loader->l_name : rtld_progname, loader->l_ns);
2010
2011 #ifdef SHARED
2012 /* Give the auditing libraries a chance to change the name before we
2013 try anything. */
2014 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
2015 && (loader == NULL || loader->l_auditing == 0))
2016 {
2017 struct audit_ifaces *afct = GLRO(dl_audit);
2018 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
2019 {
2020 if (afct->objsearch != NULL)
2021 {
2022 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
2023 LA_SER_ORIG);
2024 if (name == NULL)
2025 {
2026 /* Do not try anything further. */
2027 fd = -1;
2028 goto no_file;
2029 }
2030 }
2031
2032 afct = afct->next;
2033 }
2034 }
2035 #endif
2036
2037 /* Will be true if we found a DSO which is of the other ELF class. */
2038 bool found_other_class = false;
2039
2040 if (strchr (name, '/') == NULL)
2041 {
2042 /* Search for NAME in several places. */
2043
2044 size_t namelen = strlen (name) + 1;
2045
2046 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2047 _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
2048
2049 fd = -1;
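      /* The lookups below are tried in this order: the DT_RPATH of the
	 loader, of the objects that loaded it, and of the executable
	 (all skipped if the loader has DT_RUNPATH); LD_LIBRARY_PATH;
	 the loader's DT_RUNPATH; /etc/ld.so.cache; and finally the
	 default directories in rtld_search_dirs.  */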
2050
2051 /* When the object has the RUNPATH information we don't use any
2052 RPATHs. */
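      /* (For instance, an object linked with the linker's
	 --enable-new-dtags option carries DT_RUNPATH, so any DT_RPATH
	 it also has is ignored here.)  */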
2053 if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
2054 {
2055 /* This is the executable's map (if there is one). Make sure that
2056 we do not look at it twice. */
2057 struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2058 bool did_main_map = false;
2059
2060 /* First try the DT_RPATH of the object that caused NAME to be
2061    loaded, then of the object that loaded that one, and so on up.  */
2062 for (l = loader; l; l = l->l_loader)
2063 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2064 {
2065 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2066 &l->l_rpath_dirs,
2067 &realname, &fb, loader, LA_SER_RUNPATH,
2068 &found_other_class);
2069 if (fd != -1)
2070 break;
2071
2072 did_main_map |= l == main_map;
2073 }
2074
2075 /* If dynamically linked, try the DT_RPATH of the executable
2076 itself. NB: we do this for lookups in any namespace. */
2077 if (fd == -1 && !did_main_map
2078 && main_map != NULL && main_map->l_type != lt_loaded
2079 && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
2080 "RPATH"))
2081 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2082 &main_map->l_rpath_dirs,
2083 &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
2084 &found_other_class);
2085 }
2086
2087 /* Try the LD_LIBRARY_PATH environment variable. */
2088 if (fd == -1 && env_path_list.dirs != (void *) -1)
2089 fd = open_path (name, namelen, mode & __RTLD_SECURE, &env_path_list,
2090 &realname, &fb,
2091 loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
2092 LA_SER_LIBPATH, &found_other_class);
2093
2094 /* Look at the RUNPATH information for this binary. */
2095 if (fd == -1 && loader != NULL
2096 && cache_rpath (loader, &loader->l_runpath_dirs,
2097 DT_RUNPATH, "RUNPATH"))
2098 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2099 &loader->l_runpath_dirs, &realname, &fb, loader,
2100 LA_SER_RUNPATH, &found_other_class);
2101
2102 if (fd == -1
2103 && (__builtin_expect (! (mode & __RTLD_SECURE), 1)
2104 || ! INTUSE(__libc_enable_secure)))
2105 {
2106 /* Check the list of libraries in the file /etc/ld.so.cache,
2107 for compatibility with Linux's ldconfig program. */
2108 const char *cached = _dl_load_cache_lookup (name);
2109
2110 if (cached != NULL)
2111 {
2112 #ifdef SHARED
2113 // XXX Correct to unconditionally default to namespace 0?
2114 l = loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2115 #else
2116 l = loader;
2117 #endif
2118
2119 /* If the loader has the DF_1_NODEFLIB flag set we must not
2120    use a cache entry from any of the default system directories.  */
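	  /* (Hypothetical example: with DF_1_NODEFLIB set, a cached
	     "/usr/lib/libbar.so.1" is rejected by the prefix check below
	     on a typical configuration, while a cache entry outside the
	     default directories is still acceptable.)  */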
2121 if (
2122 #ifndef SHARED
2123 /* 'l' is always != NULL for dynamically linked objects. */
2124 l != NULL &&
2125 #endif
2126 __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
2127 {
2128 const char *dirp = system_dirs;
2129 unsigned int cnt = 0;
2130
2131 do
2132 {
2133 if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
2134 {
2135 /* The prefix matches. Don't use the entry. */
2136 cached = NULL;
2137 break;
2138 }
2139
2140 dirp += system_dirs_len[cnt] + 1;
2141 ++cnt;
2142 }
2143 while (cnt < nsystem_dirs_len);
2144 }
2145
2146 if (cached != NULL)
2147 {
2148 fd = open_verify (cached,
2149 &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
2150 LA_SER_CONFIG, &found_other_class, false);
2151 if (__builtin_expect (fd != -1, 1))
2152 {
2153 realname = local_strdup (cached);
2154 if (realname == NULL)
2155 {
2156 __close (fd);
2157 fd = -1;
2158 }
2159 }
2160 }
2161 }
2162 }
2163
2164 /* Finally, try the default path. */
2165 if (fd == -1
2166 && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
2167 || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
2168 && rtld_search_dirs.dirs != (void *) -1)
2169 fd = open_path (name, namelen, mode & __RTLD_SECURE, &rtld_search_dirs,
2170 &realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
2171
2172 /* Add another newline when we are tracing the library loading. */
2173 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2174 _dl_debug_printf ("\n");
2175 }
2176 else
2177 {
2178 /* The path may contain dynamic string tokens. */
2179 realname = (loader
2180 ? expand_dynamic_string_token (loader, name)
2181 : local_strdup (name));
2182 if (realname == NULL)
2183 fd = -1;
2184 else
2185 {
2186 fd = open_verify (realname, &fb,
2187 loader ?: GL(dl_ns)[nsid]._ns_loaded, 0,
2188 &found_other_class, true);
2189 if (__builtin_expect (fd, 0) == -1)
2190 free (realname);
2191 }
2192 }
2193
2194 #ifdef SHARED
2195 no_file:
2196 #endif
2197 /* If the LOADER information was provided only so that we could
2198    find the appropriate RUNPATH/RPATH information, we do not need
2199    it any more.  */
2200 if (mode & __RTLD_CALLMAP)
2201 loader = NULL;
2202
2203 if (__builtin_expect (fd, 0) == -1)
2204 {
2205 if (trace_mode
2206 && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
2207 {
2208 /* We haven't found an appropriate library.  But since we
2209    are only interested in the list of libraries, this isn't
2210    so severe.  Fake an entry with all the information we
2211    have.  */
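	  /* (This is the path ldd follows via LD_TRACE_LOADED_OBJECTS when
	     a dependency cannot be found: the faked map lets the trace
	     finish and report the library as missing.)  */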
2212 static const Elf_Symndx dummy_bucket = STN_UNDEF;
2213
2214 /* Allocate a new object map. */
2215 if ((name_copy = local_strdup (name)) == NULL
2216 || (l = _dl_new_object (name_copy, name, type, loader,
2217 mode, nsid)) == NULL)
2218 {
2219 free (name_copy);
2220 _dl_signal_error (ENOMEM, name, NULL,
2221 N_("cannot create shared object descriptor"));
2222 }
2223 /* Signal that this is a faked entry. */
2224 l->l_faked = 1;
2225 /* Since the descriptor is initialized with zero we do not
2226    have to do this here.
2227    l->l_reserved = 0;  */
2228 l->l_buckets = &dummy_bucket;
2229 l->l_nbuckets = 1;
2230 l->l_relocated = 1;
2231
2232 /* Enter the object in the object list. */
2233 _dl_add_to_namespace_list (l, nsid);
2234
2235 return l;
2236 }
2237 else if (found_other_class)
2238 _dl_signal_error (0, name, NULL,
2239 ELFW(CLASS) == ELFCLASS32
2240 ? N_("wrong ELF class: ELFCLASS64")
2241 : N_("wrong ELF class: ELFCLASS32"));
2242 else
2243 _dl_signal_error (errno, name, NULL,
2244 N_("cannot open shared object file"));
2245 }
2246
2247 void *stack_end = __libc_stack_end;
2248 return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
2249 &stack_end, nsid);
2250 }
2251
2252
2253 void
2254 internal_function
2255 _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
2256 {
2257 if (counting)
2258 {
2259 si->dls_cnt = 0;
2260 si->dls_size = 0;
2261 }
2262
2263 unsigned int idx = 0;
2264 char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
2265 void add_path (const struct r_search_path_struct *sps, unsigned int flags)
2266 # define add_path(sps, flags) add_path(sps, 0) /* XXX */
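  /* The macro above forces the FLAGS argument of every add_path call to
     0, so the XXX_RPATH/XXX_ENV/XXX_RUNPATH/XXX_default values passed
     below are placeholders and dls_flags is always stored as 0.  */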
2267 {
2268 if (sps->dirs != (void *) -1)
2269 {
2270 struct r_search_path_elem **dirs = sps->dirs;
2271 do
2272 {
2273 const struct r_search_path_elem *const r = *dirs++;
2274 if (counting)
2275 {
2276 si->dls_cnt++;
2277 si->dls_size += MAX (2, r->dirnamelen);
2278 }
2279 else
2280 {
2281 Dl_serpath *const sp = &si->dls_serpath[idx++];
2282 sp->dls_name = allocptr;
2283 if (r->dirnamelen < 2)
2284 *allocptr++ = r->dirnamelen ? '/' : '.';
2285 else
2286 allocptr = __mempcpy (allocptr,
2287 r->dirname, r->dirnamelen - 1);
2288 *allocptr++ = '\0';
2289 sp->dls_flags = flags;
2290 }
2291 }
2292 while (*dirs != NULL);
2293 }
2294 }
2295
2296 /* When the object has the RUNPATH information we don't use any RPATHs. */
2297 if (loader->l_info[DT_RUNPATH] == NULL)
2298 {
2299 /* First add the DT_RPATH of LOADER itself, then of the object
2300    that loaded it, and so on up the chain of loaders.  */
2301
2302 struct link_map *l = loader;
2303 do
2304 {
2305 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2306 add_path (&l->l_rpath_dirs, XXX_RPATH);
2307 l = l->l_loader;
2308 }
2309 while (l != NULL);
2310
2311 /* If dynamically linked, try the DT_RPATH of the executable itself. */
2312 if (loader->l_ns == LM_ID_BASE)
2313 {
2314 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2315 if (l != NULL && l->l_type != lt_loaded && l != loader)
2316 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2317 add_path (&l->l_rpath_dirs, XXX_RPATH);
2318 }
2319 }
2320
2321 /* Try the LD_LIBRARY_PATH environment variable. */
2322 add_path (&env_path_list, XXX_ENV);
2323
2324 /* Look at the RUNPATH information for this binary. */
2325 if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
2326 add_path (&loader->l_runpath_dirs, XXX_RUNPATH);
2327
2328 /* XXX
2329 Here is where ld.so.cache gets checked, but we don't have
2330 a way to indicate that in the results for Dl_serinfo. */
2331
2332 /* Finally, try the default path. */
2333 if (!(loader->l_flags_1 & DF_1_NODEFLIB))
2334 add_path (&rtld_search_dirs, XXX_default);
2335
2336 if (counting)
2337 /* Add the size of the structure header and the Dl_serpath array,
2338    which could not be known until dls_cnt was complete.  */
2339 si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;
2340 }
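
/* Caller-side sketch (not part of this file): _dl_rtld_di_serinfo above
   backs the dlinfo requests RTLD_DI_SERINFOSIZE (the COUNTING pass) and
   RTLD_DI_SERINFO (the filling pass).  A hypothetical application, built
   with _GNU_SOURCE defined, could dump the search path like this:

	#define _GNU_SOURCE
	#include <dlfcn.h>
	#include <link.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void
	print_search_path (void *handle)
	{
	  Dl_serinfo size;

	  // First pass: only dls_size and dls_cnt are filled in.
	  if (dlinfo (handle, RTLD_DI_SERINFOSIZE, &size) != 0)
	    return;

	  Dl_serinfo *sip = malloc (size.dls_size);
	  if (sip == NULL)
	    return;

	  // Initialize the header of the full-size buffer, then fill it.
	  if (dlinfo (handle, RTLD_DI_SERINFOSIZE, sip) == 0
	      && dlinfo (handle, RTLD_DI_SERINFO, sip) == 0)
	    for (unsigned int i = 0; i < sip->dls_cnt; ++i)
	      printf ("%s\n", sip->dls_serpath[i].dls_name);

	  free (sip);
	}
*/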