/* _dl_map_object -- Map in a shared object's segments from the file.
Copyright (C) 1995, 1996 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

#include <link.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include "dynamic-link.h"


/* On some systems, no flag bits are given to specify file mapping.  */
#ifndef MAP_FILE
#define MAP_FILE 0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten.  Some losing VM systems like Linux's lack MAP_COPY.  All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version.  */
#ifndef MAP_COPY
#define MAP_COPY MAP_PRIVATE
#endif


#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
#define byteorder ELFDATA2MSB
#define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
#define byteorder ELFDATA2LSB
#define byteorder_name "little-endian"
#else
#error "Unknown BYTE_ORDER " BYTE_ORDER
#define byteorder ELFDATANONE
#endif

#define STRING(x) #x

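/* File descriptor for the zero-fill device, opened on demand in
   _dl_map_object_from_fd and used to map the zero-filled (BSS) portion
   of segments.  _dl_pagesize caches the system page size.  */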
int _dl_zerofd = -1;
size_t _dl_pagesize;


/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD.  */

struct link_map *
_dl_map_object_from_fd (const char *name, int fd, char *realname,
                        struct link_map *loader, int l_type)
{
  struct link_map *l;
  void *file_mapping = NULL;
  size_t mapping_size = 0;

#define LOSE(s) lose (0, (s))
  void lose (int code, const char *msg)
    {
      (void) __close (fd);
      if (file_mapping)
        __munmap (file_mapping, mapping_size);
      if (l)
        {
          /* Remove the stillborn object from the list and free it.  */
          if (l->l_prev)
            l->l_prev->l_next = l->l_next;
          if (l->l_next)
            l->l_next->l_prev = l->l_prev;
          free (l);
        }
      free (realname);
      _dl_signal_error (code, name, msg);
    }

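  /* Map LEN bytes of the file starting at OFFSET with protection PROT.
     FIXED is either 0 or MAP_FIXED.  MAP_COPY (or MAP_PRIVATE where
     MAP_COPY is unavailable) keeps the mapping private to this process.  */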
  inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
                              int prot, int fixed, off_t offset)
    {
      caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
                              fixed|MAP_COPY|MAP_FILE,
                              fd, offset);
      if (mapat == (caddr_t) -1)
        lose (errno, "failed to map segment from shared object");
      return mapat;
    }

  /* Make sure LOCATION is mapped in.  */
  void *map (off_t location, size_t size)
    {
      if ((off_t) mapping_size <= location + (off_t) size)
        {
          void *result;
          if (file_mapping)
            __munmap (file_mapping, mapping_size);
          mapping_size = (location + size + 1 + _dl_pagesize - 1);
          mapping_size &= ~(_dl_pagesize - 1);
          result = __mmap (file_mapping, mapping_size, PROT_READ,
                           MAP_COPY|MAP_FILE, fd, 0);
          if (result == (void *) -1)
            lose (errno, "cannot map file data");
          file_mapping = result;
        }
      return file_mapping + location;
    }

  const ElfW(Ehdr) *header;
  const ElfW(Phdr) *phdr;
  const ElfW(Phdr) *ph;
  int type;

  /* Look again to see if the real name matched another already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (realname, l->l_name))
      {
        /* The object is already loaded.
           Just bump its reference count and return it.  */
        __close (fd);
        free (realname);
        ++l->l_opencount;
        return l;
      }

  if (_dl_pagesize == 0)
    _dl_pagesize = __getpagesize ();

  /* Map in the first page to read the header.  */
  header = map (0, sizeof *header);

  /* Check the header for basic validity.  */
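  /* The first four identification bytes are compared as one 32-bit word;
     the expected value is assembled below to match the host byte order,
     so a single comparison checks all four ELF magic bytes.  */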
  if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
      ((ELFMAG0 << (EI_MAG0 * 8)) |
       (ELFMAG1 << (EI_MAG1 * 8)) |
       (ELFMAG2 << (EI_MAG2 * 8)) |
       (ELFMAG3 << (EI_MAG3 * 8)))
#else
      ((ELFMAG0 << (EI_MAG3 * 8)) |
       (ELFMAG1 << (EI_MAG2 * 8)) |
       (ELFMAG2 << (EI_MAG1 * 8)) |
       (ELFMAG3 << (EI_MAG0 * 8)))
#endif
      )
    LOSE ("invalid ELF header");
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
  if (header->e_ident[EI_CLASS] != ELFW(CLASS))
    LOSE ("ELF file class not " STRING(__ELF_WORDSIZE) "-bit");
  if (header->e_ident[EI_DATA] != byteorder)
    LOSE ("ELF file data encoding not " byteorder_name);
  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    LOSE ("ELF file version ident not " STRING(EV_CURRENT));
  if (header->e_version != EV_CURRENT)
    LOSE ("ELF file version not " STRING(EV_CURRENT));
  if (! elf_machine_matches_host (header->e_machine))
    LOSE ("ELF file machine architecture not " ELF_MACHINE_NAME);
  if (header->e_phentsize != sizeof (ElfW(Phdr)))
    LOSE ("ELF file's phentsize not the expected size");

  if (_dl_zerofd == -1)
    {
      _dl_zerofd = _dl_sysdep_open_zero_fill ();
      if (_dl_zerofd == -1)
        {
          __close (fd);
          _dl_signal_error (errno, NULL, "cannot open zero fill device");
        }
    }

  /* Enter the new object in the list of loaded objects.  */
  l = _dl_new_object (realname, name, l_type);
  if (! l)
    lose (ENOMEM, "cannot create shared object descriptor");
  l->l_opencount = 1;
  l->l_loader = loader;

  /* Extract the remaining details we need from the ELF header
     and then map in the program header table.  */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;
  phdr = map (header->e_phoff, l->l_phnum * sizeof (ElfW(Phdr)));

  {
    /* Scan the program header table, collecting its load commands.  */
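    /* Each load command describes one PT_LOAD segment: MAPSTART and MAPEND
       are the page-aligned bounds mapped from the file, DATAEND is the end
       of the file-backed data (p_vaddr + p_filesz), ALLOCEND the end of the
       whole segment (p_vaddr + p_memsz), MAPOFF the aligned file offset,
       and PROT the mmap protection derived from p_flags.  */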
    struct loadcmd
      {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        off_t mapoff;
        int prot;
      } loadcmds[l->l_phnum], *c;
    size_t nloadcmds = 0;

    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
        {
          /* These entries tell us where to find things once the file's
             segments are mapped in.  We record the addresses it says
             verbatim, and later correct for the run-time load address.  */
        case PT_DYNAMIC:
          l->l_ld = (void *) ph->p_vaddr;
          break;
        case PT_PHDR:
          l->l_phdr = (void *) ph->p_vaddr;
          break;

        case PT_LOAD:
          /* A load command tells us to map in part of the file.
             We record the load commands and process them all later.  */
          if (ph->p_align % _dl_pagesize != 0)
            LOSE ("ELF load command alignment not page-aligned");
          if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
            LOSE ("ELF load command address/offset not properly aligned");
          {
            struct loadcmd *c = &loadcmds[nloadcmds++];
            c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
            c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
                         & ~(_dl_pagesize - 1));
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ph->p_offset & ~(ph->p_align - 1);
            c->prot = 0;
            if (ph->p_flags & PF_R)
              c->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              c->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              c->prot |= PROT_EXEC;
            break;
          }
        }

    /* We are done reading the file's headers now.  Unmap them.  */
    __munmap (file_mapping, mapping_size);

    /* Now process the load commands and map segments into memory.  */
    c = loadcmds;

    if (type == ET_DYN || type == ET_REL)
      {
        /* This is a position-independent shared object.  We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments.  Then we remove
           access from the excess portion, and there is known to be
           sufficient space there to remap the later segments.  */
        caddr_t mapat;
        mapat = map_segment (c->mapstart,
                             loadcmds[nloadcmds - 1].allocend - c->mapstart,
                             c->prot, 0, c->mapoff);
        l->l_addr = (ElfW(Addr)) mapat - c->mapstart;

        /* Change protection on the excess portion to disallow all access;
           the portions we do not remap later will be inaccessible as if
           unallocated.  Then jump into the normal segment-mapping loop to
           handle the portion of the segment past the end of the file
           mapping.  */
        __mprotect (mapat + c->mapend,
                    loadcmds[nloadcmds - 1].allocend - c->mapend,
                    0);
        goto postmap;
      }

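    /* Map each remaining segment at its place relative to L->l_addr and
       set up its trailing zero-fill (BSS) area.  For ET_DYN and ET_REL
       objects the first segment is already mapped, so the goto above
       enters the loop at POSTMAP to do only its zero fill.  */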
    while (c < &loadcmds[nloadcmds])
      {
        if (c->mapend > c->mapstart)
          /* Map the segment contents from the file.  */
          map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
                       c->prot, MAP_FIXED, c->mapoff);

      postmap:
        if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */
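            /* ZERO is where zeroing must begin (the end of the file data),
               ZEROEND is the end of the allocated region, and ZEROPAGE is
               the first page boundary at or after ZERO.  The partial page
               below ZEROPAGE is cleared with memset; whole pages beyond it
               are mapped from the zero-fill device.  */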
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            zeroend = l->l_addr + c->allocend;
            zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);

            if (zeroend < zeropage)
              /* All the extra data is in the last page of the segment.
                 We can just zero it.  */
              zeropage = zeroend;

            if (zeropage > zero)
              {
                /* Zero the final part of the last page of the segment.  */
                if ((c->prot & PROT_WRITE) == 0)
                  {
                    /* Dag nab it.  */
                    if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                                    _dl_pagesize, c->prot|PROT_WRITE) < 0)
                      lose (errno, "cannot change memory protections");
                  }
                memset ((void *) zero, 0, zeropage - zero);
                if ((c->prot & PROT_WRITE) == 0)
                  __mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                              _dl_pagesize, c->prot);
              }

            if (zeroend > zeropage)
              {
                /* Map the remaining zero pages in from the zero fill FD.  */
                caddr_t mapat;
                mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                                c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                                _dl_zerofd, 0);
                if (mapat == (caddr_t) -1)
                  lose (errno, "cannot map zero-fill pages");
              }
          }

        ++c;
      }
  }

  /* We are done mapping in the file.  We no longer need the descriptor.  */
  __close (fd);

  if (l->l_type == lt_library && type == ET_EXEC)
    l->l_type = lt_executable;

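  /* Adjust the recorded PT_DYNAMIC and PT_PHDR addresses, and the entry
     point, by the run-time load address.  */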
  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
        LOSE ("object file has no dynamic section");
    }
  else
    (ElfW(Addr)) l->l_ld += l->l_addr;

  if (l->l_phdr == 0)
    l->l_phdr = (void *) ((const ElfW(Ehdr) *) l->l_addr)->e_phoff;
  (ElfW(Addr)) l->l_phdr += l->l_addr;

  l->l_entry += l->l_addr;

  elf_get_dynamic_info (l->l_ld, l->l_info);
  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  return l;
}

/* Try to open NAME in one of the directories in DIRPATH.
   Return the fd, or -1.  If successful, fill in *REALNAME
   with the malloc'd full pathname.  */

static int
open_path (const char *name, size_t namelen,
           const char *dirpath,
           char **realname)
{
  char *buf;
  const char *p;
  int fd;

  p = dirpath;
  if (p == NULL || *p == '\0')
    {
      errno = ENOENT;
      return -1;
    }

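  /* BUF is big enough to hold the longest directory prefix in DIRPATH,
     a '/', and NAME (NAMELEN already counts its terminating NUL).
     Entries in DIRPATH are separated by ':' or ';'.  */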
  buf = __alloca (strlen (dirpath) + 1 + namelen);
  do
    {
      size_t buflen;

      dirpath = p;
      p = strpbrk (dirpath, ":;");
      if (p == NULL)
        p = strchr (dirpath, '\0');

      if (p == dirpath)
        {
          /* Two adjacent colons, or a colon at the beginning or the end of
             the path means to search the current directory.  */
          (void) memcpy (buf, name, namelen);
          buflen = namelen;
        }
      else
        {
          /* Construct the pathname to try.  */
          (void) memcpy (buf, dirpath, p - dirpath);
          buf[p - dirpath] = '/';
          (void) memcpy (&buf[(p - dirpath) + 1], name, namelen);
          buflen = p - dirpath + 1 + namelen;
        }

      fd = __open (buf, O_RDONLY);
      if (fd != -1)
        {
          *realname = malloc (buflen);
          if (*realname)
            {
              memcpy (*realname, buf, buflen);
              return fd;
            }
          else
            {
              /* No memory for the name, we certainly won't be able
                 to load and link it.  */
              __close (fd);
              return -1;
            }
        }
      if (errno != ENOENT && errno != EACCES)
        /* The file exists and is readable, but something went wrong.  */
        return -1;
    }
  while (*p++ != '\0');

  return -1;
}

/* Map in the shared object file NAME.  */

struct link_map *
_dl_map_object (struct link_map *loader, const char *name, int type)
{
  int fd;
  char *realname;
  struct link_map *l;

  /* Look for this name among those already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (name, l->l_libname))
      {
        /* The object is already loaded.
           Just bump its reference count and return it.  */
        ++l->l_opencount;
        return l;
      }

  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places.  */
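      /* The search order is: the DT_RPATH of the loader and of each object
         in its loader chain, then LD_LIBRARY_PATH (skipped when _dl_secure
         is set), and finally the default path _dl_rpath from rtld.c.  */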

      size_t namelen = strlen (name) + 1;

      inline void trypath (const char *dirpath)
        {
          fd = open_path (name, namelen, dirpath, &realname);
        }

      fd = -1;
      for (l = loader; l; l = l->l_loader)
        if (l && l->l_info[DT_RPATH])
          trypath ((const char *) (l->l_addr +
                                   l->l_info[DT_STRTAB]->d_un.d_ptr +
                                   l->l_info[DT_RPATH]->d_un.d_val));
      if (fd == -1 && ! _dl_secure)
        trypath (getenv ("LD_LIBRARY_PATH"));
      if (fd == -1)
        {
          extern const char *_dl_rpath;  /* Set in rtld.c.  */
          trypath (_dl_rpath);
        }
    }
  else
    {
      fd = __open (name, O_RDONLY);
      if (fd != -1)
        {
          size_t len = strlen (name) + 1;
          realname = malloc (len);
          if (realname)
            memcpy (realname, name, len);
          else
            {
              __close (fd);
              fd = -1;
            }
        }
    }

  if (fd == -1)
    _dl_signal_error (errno, name, "cannot open shared object file");

  return _dl_map_object_from_fd (name, fd, realname, loader, type);
}