]> git.ipfire.org Git - thirdparty/glibc.git/blob - sysdeps/x86_64/dl-machine.h
Update.
[thirdparty/glibc.git] / sysdeps / x86_64 / dl-machine.h
1 /* Machine-dependent ELF dynamic relocation inline functions. x86-64 version.
2 Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Andreas Jaeger <aj@suse.de>.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, write to the Free
18 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 02111-1307 USA. */
20
21 #ifndef dl_machine_h
22 #define dl_machine_h
23
24 #define ELF_MACHINE_NAME "x86_64"
25
26 #include <sys/param.h>
27 #include <sysdep.h>
28 #include <tls.h>
29
/* Return nonzero iff ELF header is compatible with the running host.
   For x86-64 the only requirement is that the object was built for
   the EM_X86_64 machine.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  int compatible = (ehdr->e_machine == EM_X86_64);
  return compatible;
}
36
37
38 /* Return the link-time address of _DYNAMIC. Conveniently, this is the
39 first element of the GOT. This must be inlined in a function which
40 uses global data. */
41 static inline Elf64_Addr __attribute__ ((unused))
42 elf_machine_dynamic (void)
43 {
44 Elf64_Addr addr;
45
46 /* This works because we have our GOT address available in the small PIC
47 model. */
48 addr = (Elf64_Addr) &_DYNAMIC;
49
50 return addr;
51 }
52
53
/* Return the run-time load address of the shared object, i.e. the
   difference between where ld.so actually sits in memory and where it
   was linked to sit.  */
static inline Elf64_Addr __attribute__ ((unused))
elf_machine_load_address (void)
{
  register Elf64_Addr addr, tmp;

  /* The easy way is just the same as on x86:
       leaq _dl_start, %0
       leaq _dl_start(%%rip), %1
       subq %0, %1
     but this does not work with binutils since we then have
     a R_X86_64_32S relocation in a shared lib.

     Instead we store the address of _dl_start in the data section
     and compare it with the current value that we can get via
     an RIP relative addressing mode.  */

  /* %1 = link-time address of _dl_start (the .quad emitted at local
     label 1 holds the value the static linker computed); %0 = run-time
     address of _dl_start obtained RIP-relative; the difference is the
     load bias.  "cc" is clobbered by the subq.  */
  asm ("movq 1f(%%rip), %1\n"
       "0:\tleaq _dl_start(%%rip), %0\n\t"
       "subq %1, %0\n\t"
       ".section\t.data\n"
       "1:\t.quad _dl_start\n\t"
       ".previous\n\t"
       : "=r" (addr), "=r" (tmp) : : "cc");

  return addr;
}
81
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.
   L is the object's link map; LAZY is nonzero when lazy binding is
   requested; PROFILE is nonzero when the PLT profiling extension is
   active.  Returns LAZY unchanged so the caller knows whether lazy
   binding is in effect.  */

static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve (Elf64_Word) attribute_hidden;
  extern void _dl_runtime_profile (Elf64_Word) attribute_hidden;

  /* Nothing to do unless the object has PLT relocations and lazy
     binding was requested.  */
  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been filled
	 in.  Their initial contents will arrange when called to push an
	 offset into the .rel.plt section, push _GLOBAL_OFFSET_TABLE_[1],
	 and then jump to _GLOBAL_OFFSET_TABLE[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
	 we have to be able to undo the prelinking of .got.plt.
	 The prelinker saved us here address of .plt + 0x16.  */
      if (got[1])
	{
	  /* Save the prelinker's data before got[1] is overwritten
	     below; elf_machine_lazy_rel needs these to reconstruct
	     the original slot contents.  */
	  l->l_mach.plt = got[1] + l->l_addr;
	  l->l_mach.gotplt = (Elf64_Addr) &got[3];
	}
      got[1] = (Elf64_Addr) l;	/* Identify this shared object.  */

      /* The got[2] entry contains the address of a function which gets
	 called to get the address of a so far unresolved function and
	 jump to it.  The profiling extension of the dynamic linker allows
	 to intercept the calls to collect information.  In this case we
	 don't store the address in the GOT so that all future calls also
	 end in this function.  */
      if (__builtin_expect (profile, 0))
	{
	  got[2] = (Elf64_Addr) &_dl_runtime_profile;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    /* This is the object we are looking for.  Say that we really
	       want profiling and the timers are started.  */
	    GL(dl_profile_map) = l;
	}
      else
	/* This function will get called to fix up the GOT entry indicated by
	   the offset on the stack, and then jump to the resolved address.  */
	got[2] = (Elf64_Addr) &_dl_runtime_resolve;
    }

  return lazy;
}
133
/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.

   Sequence: _start passes the initial stack pointer (argc/argv/envp
   block) to _dl_start.  _dl_start_user then drops any leading
   arguments that ld.so itself consumed (_dl_skip_args), calls
   _dl_init_internal to run the initializers, and finally jumps to the
   application entry point with %rdx holding _dl_fini, as the ELF ABI
   requires for the process finalizer.  */
#define RTLD_START asm ("\n\
.text\n\
	.align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
	movq %rsp, %rdi\n\
	call _dl_start\n\
_dl_start_user:\n\
	# Save the user entry point address in %r12.\n\
	movq %rax, %r12\n\
	# See if we were run as a command with the executable file\n\
	# name as an extra leading argument.\n\
	movl _dl_skip_args(%rip), %eax\n\
	# Pop the original argument count.\n\
	popq %rdx\n\
	# Adjust the stack pointer to skip _dl_skip_args words.\n\
	leaq (%rsp,%rax,8), %rsp\n\
	# Subtract _dl_skip_args from argc.\n\
	subl %eax, %edx\n\
	# Push argc back on the stack.\n\
	pushq %rdx\n\
	# Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
	# argc -> rsi\n\
	movq %rdx, %rsi\n\
	# Save %rsp value in %r13.\n\
	movq %rsp, %r13\n\
	# And align stack for the _dl_init_internal call. \n\
	andq $-16, %rsp\n\
	# _dl_loaded -> rdi\n\
	movq _rtld_local(%rip), %rdi\n\
	# env -> rcx\n\
	leaq 16(%r13,%rdx,8), %rcx\n\
	# argv -> rdx\n\
	leaq 8(%r13), %rdx\n\
	# Clear %rbp to mark outermost frame obviously even for constructors.\n\
	xorq %rbp, %rbp\n\
	# Call the function to run the initializers.\n\
	call _dl_init_internal@PLT\n\
	# Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
	leaq _dl_fini(%rip), %rdx\n\
	# And make sure %rsp points to argc stored on the stack.\n\
	movq %r13, %rsp\n\
	# Jump to the user's entry point.\n\
	jmp *%r12\n\
	.previous\n\
");
184
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
   TLS variable, so undefined references should not be allowed to
   define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD)
/* With TLS support the three TLS relocation types are classed together
   with JUMP_SLOT: none of them may be satisfied by an undefined
   symbol.  The multiply-by-boolean trick builds the class bitmask
   without branches.  */
# define elf_machine_type_class(type)					      \
  ((((type) == R_X86_64_JUMP_SLOT					      \
     || (type) == R_X86_64_DTPMOD64					      \
     || (type) == R_X86_64_DTPOFF64 || (type) == R_X86_64_TPOFF64)	      \
    * ELF_RTYPE_CLASS_PLT)						      \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* Without TLS only JUMP_SLOT gets the PLT class.  */
# define elf_machine_type_class(type) \
  ((((type) == R_X86_64_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif
202
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_X86_64_JUMP_SLOT

/* The x86-64 never uses Elf64_Rel relocations; everything is Rela
   (with explicit addends).  */
#define ELF_MACHINE_NO_REL	1

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()
212
213 static inline void __attribute__ ((unused))
214 dl_platform_init (void)
215 {
216 if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
217 /* Avoid an empty string which would disturb us. */
218 GLRO(dl_platform) = NULL;
219 }
220
221 static inline Elf64_Addr
222 elf_machine_fixup_plt (struct link_map *map, lookup_t t,
223 const Elf64_Rela *reloc,
224 Elf64_Addr *reloc_addr, Elf64_Addr value)
225 {
226 return *reloc_addr = value;
227 }
228
/* Return the final value of a plt relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  /* Neither MAP nor RELOC affect the result; the resolved value is
     already final.  */
  return value;
}
237
238
239 /* Names of the architecture-specific auditing callback functions. */
240 #define ARCH_LA_PLTENTER x86_64_gnu_pltenter
241 #define ARCH_LA_PLTEXIT x86_64_gnu_pltexit
242
243 #endif /* !dl_machine_h */
244
245 #ifdef RESOLVE_MAP
246
/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.

   RELOC_ADDR_ARG points at the (already relocated) location to patch.
   The function dispatches on the relocation type: RELATIVE and NONE
   are handled up front as the common/cheap cases, everything else
   goes through symbol resolution first.  This file is compiled in
   several configurations (RTLD_BOOTSTRAP for ld.so's self-relocation,
   RESOLVE_CONFLICT_FIND_MAP for dl-conflict.c, with or without TLS),
   hence the dense preprocessor conditionals.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
		  const Elf64_Sym *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_X86_64_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* This is defined in rtld.c, but nowhere in the static libc.a;
	 make the reference weak so static programs can still link.
	 This declaration cannot be done when compiling rtld.c
	 (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the
	 common defn for _dl_rtld_map, which is incompatible with a
	 weak decl in the same file.  */
#  ifndef SHARED
      weak_extern (GL(dl_rtld_map));
#  endif
      if (map != &GL(dl_rtld_map)) /* Already done in rtld itself.  */
# endif
	/* RELATIVE relocations need no symbol: just add the load bias
	   to the stored addend.  */
	*reloc_addr = map->l_addr + reloc->r_addend;
    }
  else
#endif
  if (__builtin_expect (r_type == R_X86_64_NONE, 0))
    return;
  else
    {
#ifndef RTLD_BOOTSTRAP
      /* Keep the unresolved symbol around for error messages and for
	 the COPY-reloc size comparison below.  */
      const Elf64_Sym *const refsym = sym;
#endif
#ifndef RTLD_BOOTSTRAP
      /* RESOLVE_MAP may replace SYM with the defining symbol and
	 returns the map it was found in (NULL for an undefined weak
	 symbol).  */
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      Elf64_Addr value = (sym == NULL ? 0
			  : (Elf64_Addr) sym_map->l_addr + sym->st_value);
#else
      Elf64_Addr value = RESOLVE (&sym, version, r_type);

      /* NOTE(review): this inner guard is inside the #else of an
	 identical #ifndef, so it can never be active here and
	 st_value is added unconditionally — presumably a leftover
	 from an earlier code organization; confirm before relying
	 on it.  */
# ifndef RTLD_BOOTSTRAP
      if (sym != NULL)
# endif
	value += sym->st_value;
#endif

#if defined RTLD_BOOTSTRAP && !USE___THREAD
      /* During self-relocation only these two types can occur.  */
      assert (r_type == R_X86_64_GLOB_DAT || r_type == R_X86_64_JUMP_SLOT);
      *reloc_addr = value + reloc->r_addend;
#else
      switch (r_type)
	{
	case R_X86_64_GLOB_DAT:
	case R_X86_64_JUMP_SLOT:
	  *reloc_addr = value + reloc->r_addend;
	  break;

#if defined USE_TLS && !defined RESOLVE_CONFLICT_FIND_MAP
	case R_X86_64_DTPMOD64:
# ifdef RTLD_BOOTSTRAP
	  /* During startup the dynamic linker is always the module
	     with index 1.
	     XXX If this relocation is necessary move before RESOLVE
	     call.  */
	  *reloc_addr = 1;
# else
	  /* Get the information from the link map returned by the
	     resolve function.  */
	  if (sym_map != NULL)
	    *reloc_addr = sym_map->l_tls_modid;
# endif
	  break;
	case R_X86_64_DTPOFF64:
# ifndef RTLD_BOOTSTRAP
	  /* During relocation all TLS symbols are defined and used.
	     Therefore the offset is already correct.  */
	  if (sym != NULL)
	    *reloc_addr = sym->st_value + reloc->r_addend;
# endif
	  break;
	case R_X86_64_TPOFF64:
	  /* The offset is negative, forward from the thread pointer.  */
# ifndef RTLD_BOOTSTRAP
	  if (sym != NULL)
# endif
	    {
# ifndef RTLD_BOOTSTRAP
	      /* Make sure the module's static TLS block is large
		 enough before using its offset.  */
	      CHECK_STATIC_TLS (map, sym_map);
# endif
	      /* We know the offset of the object the symbol is contained in.
		 It is a negative value which will be added to the
		 thread pointer.  */
	      *reloc_addr = (sym->st_value + reloc->r_addend
			     - sym_map->l_tls_offset);
	    }
	  break;
#endif	/* use TLS */

#ifndef RTLD_BOOTSTRAP
	case R_X86_64_64:
	  *reloc_addr = value + reloc->r_addend;
	  break;
	case R_X86_64_32:
	  /* 32-bit store; diagnose values that do not fit.  The store
	     happens first, matching the historical behavior of warning
	     without aborting.  */
	  *(unsigned int *) reloc_addr = value + reloc->r_addend;
	  if (value + reloc->r_addend > UINT_MAX)
	    {
	      const char *strtab;

	      strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

	      _dl_error_printf ("\
%s: Symbol `%s' causes overflow in R_X86_64_32 relocation\n",
				rtld_progname ?: "<program name unknown>",
				strtab + refsym->st_name);
	    }
	  break;
# ifndef RESOLVE_CONFLICT_FIND_MAP
	  /* Not needed for dl-conflict.c.  */
	case R_X86_64_PC32:
	  /* PC-relative 32-bit; warn when the displacement does not
	     fit in a sign-extended 32-bit value.  */
	  *(unsigned int *) reloc_addr = value + reloc->r_addend
	    - (Elf64_Addr) reloc_addr;
	  if (value + reloc->r_addend - (Elf64_Addr) reloc_addr
	      != (int)(value + reloc->r_addend - (Elf64_Addr) reloc_addr))
	    {
	      const char *strtab;

	      strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

	      _dl_error_printf ("\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n",
				rtld_progname ?: "<program name unknown>",
				strtab + refsym->st_name);
	    }
	  break;
	case R_X86_64_COPY:
	  if (sym == NULL)
	    /* This can happen in trace mode if an object could not be
	       found.  */
	    break;
	  /* Warn (but continue) when definition and reference disagree
	     on the object's size; copy the smaller of the two.  */
	  if (__builtin_expect (sym->st_size > refsym->st_size, 0)
	      || (__builtin_expect (sym->st_size < refsym->st_size, 0)
		  && GLRO(dl_verbose)))
	    {
	      const char *strtab;

	      strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);
	      _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
				rtld_progname ?: "<program name unknown>",
				strtab + refsym->st_name);
	    }
	  memcpy (reloc_addr_arg, (void *) value,
		  MIN (sym->st_size, refsym->st_size));
	  break;
# endif
	default:
	  _dl_reloc_bad_type (map, r_type, 0);
	  break;
#endif
	}
#endif
    }
}
415
/* Apply a R_X86_64_RELATIVE relocation: the word at RELOC_ADDR_ARG
   becomes the load address L_ADDR plus the relocation addend.  */
auto inline void
__attribute ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf64_Addr *const where = reloc_addr_arg;

  /* Only RELATIVE relocations may be routed here.  */
  assert (ELF64_R_TYPE (reloc->r_info) == R_X86_64_RELATIVE);

  *where = l_addr + reloc->r_addend;
}
425
426 auto inline void
427 __attribute ((always_inline))
428 elf_machine_lazy_rel (struct link_map *map,
429 Elf64_Addr l_addr, const Elf64_Rela *reloc)
430 {
431 Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
432 const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);
433
434 /* Check for unexpected PLT reloc type. */
435 if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
436 {
437 if (__builtin_expect (map->l_mach.plt, 0) == 0)
438 *reloc_addr += l_addr;
439 else
440 *reloc_addr =
441 map->l_mach.plt
442 + (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 2;
443 }
444 else
445 _dl_reloc_bad_type (map, r_type, 1);
446 }
447
448 #endif /* RESOLVE_MAP */