/* _dl_map_object -- Map in a shared object's segments from the file.
   Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB. If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA. */

#include <link.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include "dynamic-link.h"


/* On some systems, no flag bits are given to specify file mapping. */
#ifndef MAP_FILE
#define MAP_FILE 0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version. */
#ifndef MAP_COPY
#define MAP_COPY MAP_PRIVATE
#endif


#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
#define byteorder ELFDATA2MSB
#define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
#define byteorder ELFDATA2LSB
#define byteorder_name "little-endian"
#else
#error "Unknown BYTE_ORDER " BYTE_ORDER
#define byteorder ELFDATANONE
#endif

#define STRING(x) #x

#ifdef MAP_ANON
/* The fd is not examined when using MAP_ANON. */
#define ANONFD -1
#else
int _dl_zerofd = -1;
#define ANONFD _dl_zerofd
#endif

size_t _dl_pagesize;


/* Local version of `strdup' function. */
static inline char *
local_strdup (const char *s)
{
  size_t len = strlen (s) + 1;
  void *new = malloc (len);

  if (new == NULL)
    return NULL;

  return (char *) memcpy (new, s, len);
}


/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD. */

struct link_map *
_dl_map_object_from_fd (char *name, int fd, char *realname,
                        struct link_map *loader, int l_type)
{
  struct link_map *l = NULL;
  void *file_mapping = NULL;
  size_t mapping_size = 0;

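  /* The helpers `lose', `map_segment' and `map' below are nested
     functions (a GNU C extension); they close over FD, FILE_MAPPING,
     MAPPING_SIZE and L from the enclosing scope.  */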
#define LOSE(s) lose (0, (s))
  void lose (int code, const char *msg)
    {
      (void) __close (fd);
      if (file_mapping)
        __munmap (file_mapping, mapping_size);
      if (l)
        {
          /* Remove the stillborn object from the list and free it. */
          if (l->l_prev)
            l->l_prev->l_next = l->l_next;
          if (l->l_next)
            l->l_next->l_prev = l->l_prev;
          free (l);
        }
      free (name);
      free (realname);
      _dl_signal_error (code, name, msg);
    }

  inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
                              int prot, int fixed, off_t offset)
    {
      caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
                              fixed|MAP_COPY|MAP_FILE,
                              fd, offset);
      if (mapat == (caddr_t) -1)
        lose (errno, "failed to map segment from shared object");
      return mapat;
    }
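  /* Note: FIXED above is either 0, letting the kernel pick the address,
     or MAP_FIXED, forcing the mapping at exactly MAPSTART; the segment
     loop below passes MAPSTART with the load bias already applied.  */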

  /* Make sure LOCATION is mapped in. */
  void *map (off_t location, size_t size)
    {
      if ((off_t) mapping_size <= location + (off_t) size)
        {
          void *result;
          if (file_mapping)
            __munmap (file_mapping, mapping_size);
          mapping_size = (location + size + 1 + _dl_pagesize - 1);
          mapping_size &= ~(_dl_pagesize - 1);
          result = __mmap (file_mapping, mapping_size, PROT_READ,
                           MAP_COPY|MAP_FILE, fd, 0);
          if (result == (void *) -1)
            lose (errno, "cannot map file data");
          file_mapping = result;
        }
      return file_mapping + location;
    }
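  /* For example, with 4 KiB pages the initial map (0, sizeof *header)
     call below rounds the window to (0 + 52 + 1 + 4095) & ~4095 == 4096
     bytes for a 32-bit ELF header; a later map (e_phoff, ...) request
     that still fits inside that size reuses the same mapping.  */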

  const ElfW(Ehdr) *header;
  const ElfW(Phdr) *phdr;
  const ElfW(Phdr) *ph;
  int type;

  /* Look again to see if the real name matched another already loaded. */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (realname, l->l_name))
      {
        /* The object is already loaded.
           Just bump its reference count and return it. */
        __close (fd);
        free (name);
        free (realname);
        ++l->l_opencount;
        return l;
      }

  if (_dl_pagesize == 0)
    _dl_pagesize = __getpagesize ();

  /* Map in the first page to read the header. */
  header = map (0, sizeof *header);

  /* Check the header for basic validity. */
  if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
      ((ELFMAG0 << (EI_MAG0 * 8)) |
       (ELFMAG1 << (EI_MAG1 * 8)) |
       (ELFMAG2 << (EI_MAG2 * 8)) |
       (ELFMAG3 << (EI_MAG3 * 8)))
#else
      ((ELFMAG0 << (EI_MAG3 * 8)) |
       (ELFMAG1 << (EI_MAG2 * 8)) |
       (ELFMAG2 << (EI_MAG1 * 8)) |
       (ELFMAG3 << (EI_MAG0 * 8)))
#endif
      )
    LOSE ("invalid ELF header");
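  /* On a little-endian host the 32-bit word just tested is 0x464c457f,
     i.e. the leading e_ident bytes "\177ELF" read as one Elf32_Word.  */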
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
  if (header->e_ident[EI_CLASS] != ELFW(CLASS))
    LOSE ("ELF file class not " STRING(__ELF_WORDSIZE) "-bit");
  if (header->e_ident[EI_DATA] != byteorder)
    LOSE ("ELF file data encoding not " byteorder_name);
  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    LOSE ("ELF file version ident not " STRING(EV_CURRENT));
  if (header->e_version != EV_CURRENT)
    LOSE ("ELF file version not " STRING(EV_CURRENT));
  if (! elf_machine_matches_host (header->e_machine))
    LOSE ("ELF file machine architecture not " ELF_MACHINE_NAME);
  if (header->e_phentsize != sizeof (ElfW(Phdr)))
    LOSE ("ELF file's phentsize not the expected size");

#ifndef MAP_ANON
#define MAP_ANON 0
  if (_dl_zerofd == -1)
    {
      _dl_zerofd = _dl_sysdep_open_zero_fill ();
      if (_dl_zerofd == -1)
        {
          __close (fd);
          _dl_signal_error (errno, NULL, "cannot open zero fill device");
        }
    }
#endif

  /* Enter the new object in the list of loaded objects. */
  l = _dl_new_object (realname, name, l_type);
  if (! l)
    lose (ENOMEM, "cannot create shared object descriptor");
  l->l_opencount = 1;
  l->l_loader = loader;

  /* Extract the remaining details we need from the ELF header
     and then map in the program header table. */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;
  phdr = map (header->e_phoff, l->l_phnum * sizeof (ElfW(Phdr)));

  {
    /* Scan the program header table, collecting its load commands. */
    struct loadcmd
      {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        off_t mapoff;
        int prot;
      } loadcmds[l->l_phnum], *c;
    size_t nloadcmds = 0;

    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
        {
          /* These entries tell us where to find things once the file's
             segments are mapped in. We record the addresses it says
             verbatim, and later correct for the run-time load address. */
        case PT_DYNAMIC:
          l->l_ld = (void *) ph->p_vaddr;
          break;
        case PT_PHDR:
          l->l_phdr = (void *) ph->p_vaddr;
          break;

        case PT_LOAD:
          /* A load command tells us to map in part of the file.
             We record the load commands and process them all later. */
          if (ph->p_align % _dl_pagesize != 0)
            LOSE ("ELF load command alignment not page-aligned");
          if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
            LOSE ("ELF load command address/offset not properly aligned");
          {
            struct loadcmd *c = &loadcmds[nloadcmds++];
            c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
            c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
                         & ~(_dl_pagesize - 1));
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ph->p_offset & ~(ph->p_align - 1);
            c->prot = 0;
            if (ph->p_flags & PF_R)
              c->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              c->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              c->prot |= PROT_EXEC;
            break;
          }
        }

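    /* For illustration, a PT_LOAD entry with p_offset = 0x450,
       p_vaddr = 0x1450, p_filesz = 0x300, p_memsz = 0x800 and
       p_align = 0x1000 on a 4 KiB-page machine yields mapstart = 0x1000,
       mapend = 0x2000, dataend = 0x1750, allocend = 0x1c50, mapoff = 0.  */
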
    /* We are done reading the file's headers now. Unmap them. */
    __munmap (file_mapping, mapping_size);

    /* Now process the load commands and map segments into memory. */
    c = loadcmds;

    if (type == ET_DYN || type == ET_REL)
      {
        /* This is a position-independent shared object. We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments. Then we remove
           access from excess portion, and there is known sufficient space
           there to remap from the later segments. */
        caddr_t mapat;
        mapat = map_segment (c->mapstart,
                             loadcmds[nloadcmds - 1].allocend - c->mapstart,
                             c->prot, 0, c->mapoff);
        l->l_addr = (ElfW(Addr)) mapat - c->mapstart;

        /* Change protection on the excess portion to disallow all access;
           the portions we do not remap later will be inaccessible as if
           unallocated. Then jump into the normal segment-mapping loop to
           handle the portion of the segment past the end of the file
           mapping. */
        __mprotect ((caddr_t) (l->l_addr + c->mapend),
                    loadcmds[nloadcmds - 1].allocend - c->mapend,
                    0);
        goto postmap;
      }
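
    /* l->l_addr is now the run-time load bias: if, say, the kernel placed
       the first segment at 0x40001000 and its mapstart was 0x1000, every
       p_vaddr recorded in the file is relocated by adding 0x40000000.  */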

    while (c < &loadcmds[nloadcmds])
      {
        if (c->mapend > c->mapstart)
          /* Map the segment contents from the file. */
          map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
                       c->prot, MAP_FIXED, c->mapoff);

      postmap:
        if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file. */
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            zeroend = l->l_addr + c->allocend;
            zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);

            if (zeroend < zeropage)
              /* All the extra data is in the last page of the segment.
                 We can just zero it. */
              zeropage = zeroend;

            if (zeropage > zero)
              {
                /* Zero the final part of the last page of the segment. */
                if ((c->prot & PROT_WRITE) == 0)
                  {
                    /* Dag nab it. */
                    if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                                    _dl_pagesize, c->prot|PROT_WRITE) < 0)
                      lose (errno, "cannot change memory protections");
                  }
                memset ((void *) zero, 0, zeropage - zero);
                if ((c->prot & PROT_WRITE) == 0)
                  __mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                              _dl_pagesize, c->prot);
              }

            if (zeroend > zeropage)
              {
                /* Map the remaining zero pages in from the zero fill FD. */
                caddr_t mapat;
                mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                                c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                                ANONFD, 0);
                if (mapat == (caddr_t) -1)
                  lose (errno, "cannot map zero-fill pages");
              }
          }
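
        /* For illustration, with 4 KiB pages, dataend = 0x1750 and
           allocend = 0x1c50 the whole BSS fits inside the last
           file-backed page: zeropage is clamped to zeroend, memset
           clears 0x1750..0x1c50 (plus the load bias), and no anonymous
           zero pages need to be mapped.  */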

        ++c;
      }

    if (l->l_phdr == 0)
      {
        /* There was no PT_PHDR specified. We need to find the phdr in the
           load image ourselves. We assume it is in fact in the load image
           somewhere, and that the first load command starts at the
           beginning of the file and thus contains the ELF file header. */
        ElfW(Addr) bof = l->l_addr + loadcmds[0].mapstart;
        assert (loadcmds[0].mapoff == 0);
        l->l_phdr = (void *) (bof + ((const ElfW(Ehdr) *) bof)->e_phoff);
      }
    else
      /* Adjust the PT_PHDR value by the runtime load address. */
      (ElfW(Addr)) l->l_phdr += l->l_addr;
  }

  /* We are done mapping in the file. We no longer need the descriptor. */
  __close (fd);

  if (l->l_type == lt_library && type == ET_EXEC)
    l->l_type = lt_executable;

  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
        LOSE ("object file has no dynamic section");
    }
  else
    (ElfW(Addr)) l->l_ld += l->l_addr;

  l->l_entry += l->l_addr;

  elf_get_dynamic_info (l->l_ld, l->l_info);
  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  return l;
}

/* Try to open NAME in one of the directories in DIRPATH.
   Return the fd, or -1. If successful, fill in *REALNAME
   with the malloc'd full directory name. */

static int
open_path (const char *name, size_t namelen,
           const char *dirpath,
           char **realname,
           const char *trusted_dirs[])
{
  char *buf;
  const char *p;
  int fd;

  p = dirpath;
  if (p == NULL || *p == '\0')
    {
      __set_errno (ENOENT);
      return -1;
    }

  buf = __alloca (strlen (dirpath) + 1 + namelen);
  do
    {
      size_t buflen;
      size_t this_len;

      dirpath = p;
      p = strpbrk (dirpath, ":;");
      if (p == NULL)
        p = strchr (dirpath, '\0');

      this_len = p - dirpath;

      /* When we run a setuid program we do not accept any directory. */
      if (__libc_enable_secure)
        {
          /* All trusted directories must be complete names. */
          if (dirpath[0] != '/')
            continue;

          /* If we got a list of trusted directories only accept one
             of these. */
          if (trusted_dirs != NULL)
            {
              const char **trust = trusted_dirs;

              while (*trust != NULL)
                if (memcmp (dirpath, *trust, this_len) == 0
                    && (*trust)[this_len] == '\0')
                  break;
                else
                  ++trust;

              /* If directory is not trusted, ignore this directory. */
              if (*trust == NULL)
                continue;
            }
        }

      if (this_len == 0)
        {
          /* Two adjacent colons, or a colon at the beginning or the end of
             the path means to search the current directory. */
          (void) memcpy (buf, name, namelen);
          buflen = namelen;
        }
      else
        {
          /* Construct the pathname to try. */
          (void) memcpy (buf, dirpath, this_len);
          buf[this_len] = '/';
          (void) memcpy (&buf[this_len + 1], name, namelen);
          buflen = this_len + 1 + namelen;
        }
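      /* E.g. for the dirpath entry "/usr/lib" and name "libm.so.6" this
         builds "/usr/lib/libm.so.6"; NAMELEN already includes the
         terminating NUL, so BUFLEN does too.  */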

      fd = __open (buf, O_RDONLY);
      if (fd != -1)
        {
          *realname = malloc (buflen);
          if (*realname)
            {
              memcpy (*realname, buf, buflen);
              return fd;
            }
          else
            {
              /* No memory for the name, we certainly won't be able
                 to load and link it. */
              __close (fd);
              return -1;
            }
        }
      if (errno != ENOENT && errno != EACCES)
        /* The file exists and is readable, but something went wrong. */
        return -1;
    }
  while (*p++ != '\0');

  return -1;
}

/* Map in the shared object file NAME. */

struct link_map *
_dl_map_object (struct link_map *loader, const char *name, int type,
                int trace_mode)
{
  int fd;
  char *realname;
  char *name_copy;
  struct link_map *l;

  /* Look for this name among those already loaded. */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (name, l->l_libname) ||   /* NAME was requested before. */
        ! strcmp (name, l->l_name) ||      /* NAME was found before. */
        /* If the requested name matches the soname of a loaded object,
           use that object. */
        (l->l_info[DT_SONAME] &&
         ! strcmp (name, (const char *) (l->l_addr +
                                         l->l_info[DT_STRTAB]->d_un.d_ptr +
                                         l->l_info[DT_SONAME]->d_un.d_val))))
      {
        /* The object is already loaded.
           Just bump its reference count and return it. */
        ++l->l_opencount;
        return l;
      }

  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places. */

      size_t namelen = strlen (name) + 1;

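      /* The lookup below tries, in order: the DT_RPATH of each object in
         the loader chain, the executable's own DT_RPATH, LD_LIBRARY_PATH
         (ignored when __libc_enable_secure is set, e.g. for set-user-ID
         programs), /etc/ld.so.cache, and finally the default path in
         _dl_rpath.  */
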
      inline void trypath (const char *dirpath, const char *trusted[])
        {
          fd = open_path (name, namelen, dirpath, &realname, trusted);
        }

      fd = -1;

      /* First try the DT_RPATH of the dependent object that caused NAME
         to be loaded. Then that object's dependent, and on up. */
      for (l = loader; fd == -1 && l; l = l->l_loader)
        if (l && l->l_info[DT_RPATH])
          trypath ((const char *) (l->l_addr +
                                   l->l_info[DT_STRTAB]->d_un.d_ptr +
                                   l->l_info[DT_RPATH]->d_un.d_val), NULL);
      /* If dynamically linked, try the DT_RPATH of the executable itself. */
      l = _dl_loaded;
      if (fd == -1 && l && l->l_type != lt_loaded && l->l_info[DT_RPATH])
        trypath ((const char *) (l->l_addr +
                                 l->l_info[DT_STRTAB]->d_un.d_ptr +
                                 l->l_info[DT_RPATH]->d_un.d_val), NULL);
      /* Try an environment variable (unless setuid). */
      if (fd == -1 && ! __libc_enable_secure)
        {
          static const char *trusted_dirs[] =
            {
#include "trusted-dirs.h"
              NULL
            };

          trypath (getenv ("LD_LIBRARY_PATH"), trusted_dirs);
        }
      if (fd == -1)
        {
          /* Check the list of libraries in the file /etc/ld.so.cache,
             for compatibility with Linux's ldconfig program. */
          extern const char *_dl_load_cache_lookup (const char *name);
          const char *cached = _dl_load_cache_lookup (name);
          if (cached)
            {
              fd = __open (cached, O_RDONLY);
              if (fd != -1)
                {
                  realname = local_strdup (cached);
                  if (realname == NULL)
                    {
                      __close (fd);
                      fd = -1;
                    }
                }
            }
        }
      /* Finally, try the default path. */
      if (fd == -1)
        {
          extern const char *_dl_rpath;   /* Set in rtld.c. */
          trypath (_dl_rpath, NULL);
        }
    }
  else
    {
      fd = __open (name, O_RDONLY);
      if (fd != -1)
        {
          realname = local_strdup (name);
          if (realname == NULL)
            {
              __close (fd);
              fd = -1;
            }
        }
    }

  if (fd != -1)
    {
      name_copy = local_strdup (name);
      if (name_copy == NULL)
        {
          __close (fd);
          fd = -1;
        }
    }

  if (fd == -1)
    {
      if (trace_mode)
        {
          /* We haven't found an appropriate library. But since we
             are only interested in the list of libraries this isn't
             so severe. Fake an entry with all the information we
             have (in fact only the name). */
          static const ElfW(Symndx) dummy_bucket = STN_UNDEF;

          /* Enter the new object in the list of loaded objects. */
          if ((name_copy = local_strdup (name)) == NULL
              || (l = _dl_new_object (name_copy, name, type)) == NULL)
            _dl_signal_error (ENOMEM, name,
                              "cannot create shared object descriptor");
          /* We use an opencount of 0 as a sign for the faked entry. */
          l->l_opencount = 0;
          l->l_reserved = 0;
          l->l_buckets = &dummy_bucket;
          l->l_nbuckets = 1;
          l->l_relocated = 1;

          return l;
        }
      else
        _dl_signal_error (errno, name, "cannot open shared object file");
    }

  return _dl_map_object_from_fd (name_copy, fd, realname, loader, type);
}