/* Machine-dependent ELF dynamic relocation inline functions.  x86-64 version.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Andreas Jaeger <aj@suse.de>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "x86_64"

#include <sys/param.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_X86_64;
}


/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  /* This produces an IP-relative reloc which is resolved at link time.  */
  extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}


/* Return the run-time load address of the shared object.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* Compute the difference between the runtime address of _DYNAMIC as seen
     by an IP-relative reference, and the link-time address found in the
     special unrelocated first GOT entry.  */
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
}

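/* Editorial sketch (not part of glibc): the same GOT[0] trick in a
   standalone PIE.  The static linker stores the link-time address of
   _DYNAMIC in the first GOT slot, while &_DYNAMIC is an IP-relative
   reference resolved at run time, so their difference is the load bias.
   Hypothetical example; it assumes a GNU toolchain building with -pie.  */
#if 0 /* illustration only, not compiled */
#include <elf.h>
#include <stdio.h>

extern const Elf64_Addr _GLOBAL_OFFSET_TABLE_[] __attribute__ ((visibility ("hidden")));
extern Elf64_Dyn _DYNAMIC[] __attribute__ ((visibility ("hidden")));

int
main (void)
{
  Elf64_Addr link_time = _GLOBAL_OFFSET_TABLE_[0];  /* link-time &_DYNAMIC */
  Elf64_Addr run_time = (Elf64_Addr) &_DYNAMIC;     /* run-time &_DYNAMIC */
  printf ("load bias: %#llx\n",
          (unsigned long long) (run_time - link_time));
  return 0;
}
#endif
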
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  Elf64_Addr *got;
  extern void _dl_runtime_resolve (ElfW(Word)) attribute_hidden;
  extern void _dl_runtime_profile (ElfW(Word)) attribute_hidden;

  if (l->l_info[DT_JMPREL] && lazy)
    {
      /* The GOT entries for functions in the PLT have not yet been filled
         in.  Their initial contents arrange, when called, to push an
         offset into the .rel.plt section, push _GLOBAL_OFFSET_TABLE_[1],
         and then jump through _GLOBAL_OFFSET_TABLE_[2].  */
      got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      /* If a library is prelinked but we have to relocate anyway,
         we have to be able to undo the prelinking of .got.plt.
         The prelinker saved the address of .plt + 0x16 here.  */
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
          l->l_mach.gotplt = (ElfW(Addr)) &got[3];
        }
      /* Identify this shared object.  */
      *(ElfW(Addr) *) (got + 1) = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker allows
         intercepting such calls to collect information.  In this case we
         don't store the address in the GOT so that all future calls also
         end up in this function.  */
      if (__glibc_unlikely (profile))
        {
          *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = l;
        }
      else
        /* This function will get called to fix up the GOT entry indicated by
           the offset on the stack, and then jump to the resolved address.  */
        *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve;
    }

  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;

  return lazy;
}

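/* Editorial sketch (not glibc source): the .got.plt layout that
   elf_machine_runtime_setup relies on, written out as a hypothetical C
   struct.  The real GOT is simply an array of Elf64_Addr; the field names
   below are invented for illustration.  */
#if 0 /* illustration only, not compiled */
struct gotplt_layout
{
  Elf64_Addr link_time_dynamic; /* got[0]: link-time address of _DYNAMIC,
                                   filled in by the static linker.  */
  Elf64_Addr link_map;          /* got[1]: set above to the struct link_map
                                   of this object.  */
  Elf64_Addr resolver;          /* got[2]: set above to _dl_runtime_resolve
                                   or _dl_runtime_profile.  */
  Elf64_Addr plt_slots[];       /* got[3..]: one slot per PLT entry; each
                                   initially points back into its own PLT
                                   stub, which pushes a .rela.plt index and
                                   got[1] and jumps through got[2].  */
};
#endif
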
/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */
#define RTLD_START asm ("\n\
.text\n\
        .align 16\n\
.globl _start\n\
.globl _dl_start_user\n\
_start:\n\
        movq %rsp, %rdi\n\
        call _dl_start\n\
_dl_start_user:\n\
        # Save the user entry point address in %r12.\n\
        movq %rax, %r12\n\
        # See if we were run as a command with the executable file\n\
        # name as an extra leading argument.\n\
        movl _dl_skip_args(%rip), %eax\n\
        # Pop the original argument count.\n\
        popq %rdx\n\
        # Adjust the stack pointer to skip _dl_skip_args words.\n\
        leaq (%rsp,%rax,8), %rsp\n\
        # Subtract _dl_skip_args from argc.\n\
        subl %eax, %edx\n\
        # Push argc back on the stack.\n\
        pushq %rdx\n\
        # Call _dl_init (struct link_map *main_map, int argc, char **argv, char **env)\n\
        # argc -> rsi\n\
        movq %rdx, %rsi\n\
        # Save %rsp value in %r13.\n\
        movq %rsp, %r13\n\
        # And align stack for the _dl_init_internal call.\n\
        andq $-16, %rsp\n\
        # _dl_loaded -> rdi\n\
        movq _rtld_local(%rip), %rdi\n\
        # env -> rcx\n\
        leaq 16(%r13,%rdx,8), %rcx\n\
        # argv -> rdx\n\
        leaq 8(%r13), %rdx\n\
        # Clear %rbp to mark outermost frame obviously even for constructors.\n\
        xorl %ebp, %ebp\n\
        # Call the function to run the initializers.\n\
        call _dl_init_internal@PLT\n\
        # Pass our finalizer function to the user in %rdx, as per ELF ABI.\n\
        leaq _dl_fini(%rip), %rdx\n\
        # And make sure %rsp points to argc stored on the stack.\n\
        movq %r13, %rsp\n\
        # Jump to the user's entry point.\n\
        jmp *%r12\n\
.previous\n\
");

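/* Editorial note (not glibc source): after the "pushq %rdx" above, %r13 is
   made to point at the adjusted argument block, which is why argv is
   8(%r13) and env is 16(%r13,%rdx,8) with %rdx holding argc.  A hedged C
   sketch of that layout:  */
#if 0 /* illustration only, not compiled */
struct dl_start_user_stack
{
  long argc;            /* 0(%r13)                               */
  char *argv[];         /* 8(%r13), NULL-terminated              */
  /* char *envp[];         16 + 8*argc (%r13), i.e. right after the
                           NULL that ends argv, also NULL-terminated.  */
};
#endif
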
/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
   TLS variable, so undefined references should not be allowed to
   define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_X86_64_JUMP_SLOT \
     || (type) == R_X86_64_DTPMOD64 \
     || (type) == R_X86_64_DTPOFF64 \
     || (type) == R_X86_64_TPOFF64 \
     || (type) == R_X86_64_TLSDESC) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_X86_64_JUMP_SLOT

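/* Editorial example (assumes the generic ldsodefs.h values
   ELF_RTYPE_CLASS_PLT == 1 and ELF_RTYPE_CLASS_COPY == 2): the
   multiply-and-or form above is just a branch-free classification.  */
#if 0 /* illustration only, not compiled */
_Static_assert (elf_machine_type_class (R_X86_64_JUMP_SLOT)
                == ELF_RTYPE_CLASS_PLT, "JUMP_SLOT is in the PLT class");
_Static_assert (elf_machine_type_class (R_X86_64_COPY)
                == ELF_RTYPE_CLASS_COPY, "COPY is in the COPY class");
_Static_assert (elf_machine_type_class (R_X86_64_GLOB_DAT) == 0,
                "GLOB_DAT is in neither class");
#endif
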
/* The relative ifunc relocation.  */
// XXX This is a work-around for a broken linker.  Remove!
#define ELF_MACHINE_IRELATIVE R_X86_64_IRELATIVE

/* The x86-64 never uses Elf64_Rel/Elf32_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
}

static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr, ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  On x86-64 the
   JUMP_SLOT relocation ignores the addend.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}


/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER x86_64_gnu_pltenter
#define ARCH_LA_PLTEXIT x86_64_gnu_pltexit

#endif /* !dl_machine_h */

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
                  const ElfW(Sym) *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      /* This is defined in rtld.c, but nowhere in the static libc.a;
         make the reference weak so static programs can still link.
         This declaration cannot be done when compiling rtld.c
         (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the
         common defn for _dl_rtld_map, which is incompatible with a
         weak decl in the same file.  */
# ifndef SHARED
      weak_extern (GL(dl_rtld_map));
# endif
      if (map != &GL(dl_rtld_map)) /* Already done in rtld itself.  */
# endif
        *reloc_addr = map->l_addr + reloc->r_addend;
    }
  else
# endif
# if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (r_type == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) map->l_addr + reloc->r_addend;
  else
# endif
  if (__glibc_unlikely (r_type == R_X86_64_NONE))
    return;
  else
    {
# ifndef RTLD_BOOTSTRAP
      const ElfW(Sym) *const refsym = sym;
# endif
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      ElfW(Addr) value = (sym == NULL ? 0
                          : (ElfW(Addr)) sym_map->l_addr + sym->st_value);

      if (sym != NULL
          && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC,
                               0)
          && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
          && __builtin_expect (!skip_ifunc, 1))
        value = ((ElfW(Addr) (*) (void)) value) ();

      switch (r_type)
        {
# ifndef RTLD_BOOTSTRAP
# ifdef __ILP32__
        case R_X86_64_SIZE64:
          /* Set to symbol size plus addend.  */
          *(Elf64_Addr *) (uintptr_t) reloc_addr
            = (Elf64_Addr) sym->st_size + reloc->r_addend;
          break;

        case R_X86_64_SIZE32:
# else
        case R_X86_64_SIZE64:
# endif
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
# endif
        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
          *reloc_addr = value + reloc->r_addend;
          break;

# ifndef RESOLVE_CONFLICT_FIND_MAP
        case R_X86_64_DTPMOD64:
# ifdef RTLD_BOOTSTRAP
          /* During startup the dynamic linker is always the module
             with index 1.
             XXX If this relocation is necessary move before RESOLVE
             call.  */
          *reloc_addr = 1;
# else
          /* Get the information from the link map returned by the
             resolve function.  */
          if (sym_map != NULL)
            *reloc_addr = sym_map->l_tls_modid;
# endif
          break;
        case R_X86_64_DTPOFF64:
# ifndef RTLD_BOOTSTRAP
          /* During relocation all TLS symbols are defined and used.
             Therefore the offset is already correct.  */
          if (sym != NULL)
            {
              value = sym->st_value + reloc->r_addend;
# ifdef __ILP32__
              /* This relocation type computes a signed offset that is
                 usually negative.  The symbol and addend values are 32
                 bits but the GOT entry is 64 bits wide and the whole
                 64-bit entry is used as a signed quantity, so we need
                 to sign-extend the computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
# else
              *reloc_addr = value;
# endif
            }
# endif
          break;
        case R_X86_64_TLSDESC:
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *)reloc_addr;

# ifndef RTLD_BOOTSTRAP
            if (! sym)
              {
                td->arg = (void*)reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
# endif
              {
# ifndef RTLD_BOOTSTRAP
# ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
# else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
# endif
# endif
                  {
                    td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
                                      + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
            break;
          }
        case R_X86_64_TPOFF64:
          /* The offset is negative, forward from the thread pointer.  */
# ifndef RTLD_BOOTSTRAP
          if (sym != NULL)
# endif
            {
# ifndef RTLD_BOOTSTRAP
              CHECK_STATIC_TLS (map, sym_map);
# endif
              /* We know the offset of the object the symbol is contained in.
                 It is a negative value which will be added to the
                 thread pointer.  */
              value = (sym->st_value + reloc->r_addend
                       - sym_map->l_tls_offset);
# ifdef __ILP32__
              /* The symbol and addend values are 32 bits but the GOT
                 entry is 64 bits wide and the whole 64-bit entry is used
                 as a signed quantity, so we need to sign-extend the
                 computed value to 64 bits.  */
              *(Elf64_Sxword *) reloc_addr = (Elf64_Sxword) (Elf32_Sword) value;
# else
              *reloc_addr = value;
# endif
            }
          break;
# endif

# ifndef RTLD_BOOTSTRAP
        case R_X86_64_64:
          /* value + r_addend may be > 0xffffffff and R_X86_64_64
             relocation updates the whole 64-bit entry.  */
          *(Elf64_Addr *) reloc_addr = (Elf64_Addr) value + reloc->r_addend;
          break;
# ifndef __ILP32__
        case R_X86_64_SIZE32:
          /* Set to symbol size plus addend.  */
          value = sym->st_size;
# endif
        case R_X86_64_32:
          value += reloc->r_addend;
          *(unsigned int *) reloc_addr = value;

          const char *fmt;
          if (__glibc_unlikely (value > UINT_MAX))
            {
              const char *strtab;

              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_32 relocation\n";
# ifndef RESOLVE_CONFLICT_FIND_MAP
            print_err:
# endif
              strtab = (const char *) D_PTR (map, l_info[DT_STRTAB]);

              _dl_error_printf (fmt, RTLD_PROGNAME, strtab + refsym->st_name);
            }
          break;
# ifndef RESOLVE_CONFLICT_FIND_MAP
          /* Not needed for dl-conflict.c.  */
        case R_X86_64_PC32:
          value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
          *(unsigned int *) reloc_addr = value;
          if (__glibc_unlikely (value != (int) value))
            {
              fmt = "\
%s: Symbol `%s' causes overflow in R_X86_64_PC32 relocation\n";
              goto print_err;
            }
          break;
        case R_X86_64_COPY:
          if (sym == NULL)
            /* This can happen in trace mode if an object could not be
               found.  */
            break;
          memcpy (reloc_addr_arg, (void *) value,
                  MIN (sym->st_size, refsym->st_size));
          if (__builtin_expect (sym->st_size > refsym->st_size, 0)
              || (__builtin_expect (sym->st_size < refsym->st_size, 0)
                  && GLRO(dl_verbose)))
            {
              fmt = "\
%s: Symbol `%s' has different size in shared object, consider re-linking\n";
              goto print_err;
            }
          break;
# endif
        case R_X86_64_IRELATIVE:
          value = map->l_addr + reloc->r_addend;
          value = ((ElfW(Addr) (*) (void)) value) ();
          *reloc_addr = value;
          break;
        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
# endif
        }
    }
}
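
/* Editorial sketch (not glibc source): the two overflow checks used in the
   switch above.  R_X86_64_32 stores a zero-extended 32-bit value, so
   anything above UINT_MAX overflows; R_X86_64_PC32 stores a sign-extended
   32-bit displacement, so the check is that truncation to int round-trips.
   Function names are invented for illustration.  */
#if 0 /* illustration only, not compiled */
#include <limits.h>
#include <stdint.h>

static inline int
fits_zero_extended_32 (uint64_t value)   /* as for R_X86_64_32 */
{
  return value <= UINT_MAX;
}

static inline int
fits_sign_extended_32 (uint64_t value)   /* as for R_X86_64_PC32 */
{
  return value == (uint64_t) (int) value;
}
#endif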

auto inline void
__attribute ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  /* l_addr + r_addend may be > 0xffffffff and R_X86_64_RELATIVE64
     relocation updates the whole 64-bit entry.  */
  if (__glibc_unlikely (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE64))
    *(Elf64_Addr *) reloc_addr = (Elf64_Addr) l_addr + reloc->r_addend;
  else
#endif
    {
      assert (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_RELATIVE);
      *reloc_addr = l_addr + reloc->r_addend;
    }
}

auto inline void
__attribute ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned long int r_type = ELFW(R_TYPE) (reloc->r_info);

  /* Check for unexpected PLT reloc type.  */
  if (__glibc_likely (r_type == R_X86_64_JUMP_SLOT))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
        *reloc_addr += l_addr;
      else
        *reloc_addr =
          map->l_mach.plt
          + (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
    }
  else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
    {
      struct tlsdesc volatile * __attribute__((__unused__)) td =
        (struct tlsdesc volatile *)reloc_addr;

      td->arg = (void*)reloc;
      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
                          + map->l_addr);
    }
  else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((ElfW(Addr) (*) (void)) value) ();
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
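
/* Editorial note (not glibc source): in the common, non-prelinked case the
   static linker initializes each .got.plt slot with the link-time address
   of the push/jmp sequence following that slot's PLT jump, so lazy binding
   only needs to rebase the slot by l_addr; the first call then lands in the
   PLT stub, which enters the resolver through got[2].  A hedged sketch of
   that branch (hypothetical helper name):  */
#if 0 /* illustration only, not compiled */
static void
lazy_fixup_jump_slot (Elf64_Addr *slot, Elf64_Addr l_addr)
{
  *slot += l_addr;      /* same effect as "*reloc_addr += l_addr" above */
}
#endif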

#endif /* RESOLVE_MAP */