/* Copyright (C) 1995-2017 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "aarch64"

#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-irel.h>
#include <cpu-features.c>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_AARCH64;
}

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  */
static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
  return _GLOBAL_OFFSET_TABLE_[0];
}

/* Return the run-time load address of the shared object.  */

static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  /* To figure out the load address we use the definition that for any symbol:
     dynamic_addr(symbol) = static_addr(symbol) + load_addr

     The _DYNAMIC symbol is used here because its link-time address is
     stored in the special, unrelocated first GOT entry.  */
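
  /* Illustrative example (the numbers are made up): if the static linker
     placed _DYNAMIC at 0x11000, that value is what GOT[0] still holds at
     this point; if the object was mapped so that &_DYNAMIC evaluates to
     0x5500011000 at run time, the difference 0x5500000000 is the load
     address returned below.  */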

  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
}

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      ElfW(Addr) *got;
      extern void _dl_runtime_resolve (ElfW(Word));
      extern void _dl_runtime_profile (ElfW(Word));

      got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
        }
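      /* got[1] is reserved for the dynamic linker: record this object's
         link_map there so that the _dl_runtime_resolve/_dl_runtime_profile
         trampolines (see dl-trampoline.S) can tell which object a lazy
         PLT call came from.  */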
      got[1] = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker allows
         it to intercept such calls to collect information.  In that case
         we don't store the address in the GOT so that all future calls
         also end up in this function.  */
      if (profile)
        {
          got[2] = (ElfW(Addr)) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* Say that we really want profiling and the timers are
               started.  */
            GL(dl_profile_map) = l;
        }
      else
        {
          /* This function will get called to fix up the GOT entry
             indicated by the offset on the stack, and then jump to
             the resolved address.  */
          got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
        }
    }

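  /* For lazy TLS descriptor relocations, DT_TLSDESC_GOT names the GOT slot
     through which the lazy TLSDESC PLT trampoline reaches the dynamic
     linker; point it at _dl_tlsdesc_resolve_rela (see dl-tlsdesc.S).  */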
  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;

  return lazy;
}

/* Initial entry point for the dynamic linker.  The C function
   _dl_start is the real entry point; its return value is the user
   program's entry point.  */
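
/* RTLD_START is instantiated from RTLD_START_1 for both ABIs: under
   __LP64__ the pointer-sized registers are the 64-bit "x" registers, a
   pointer is 1<<3 bytes and the stack pointer is written "sp"; under
   ILP32 the "w" registers, 1<<2-byte pointers and "wsp" are used.  */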
#ifdef __LP64__
# define RTLD_START RTLD_START_1 ("x", "3", "sp")
#else
# define RTLD_START RTLD_START_1 ("w", "2", "wsp")
#endif

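/* At process entry the kernel leaves argc at [sp], the NULL-terminated
   argv vector immediately above it, then the NULL-terminated environment
   vector, and finally the auxiliary vector (pairs of pointer-sized words
   terminated by AT_NULL).  The code below rewrites this block in place
   when _dl_skip_args is nonzero, i.e. when ld.so was invoked explicitly
   on an executable.  */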
#define RTLD_START_1(PTR, PTR_SIZE_LOG, PTR_SP) asm ("\
.text \n\
.globl _start \n\
.type _start, %function \n\
.globl _dl_start_user \n\
.type _dl_start_user, %function \n\
_start: \n\
        mov " PTR "0, " PTR_SP " \n\
        bl _dl_start \n\
        // returns user entry point in x0 \n\
        mov x21, x0 \n\
_dl_start_user: \n\
        // get the original arg count \n\
        ldr " PTR "1, [sp] \n\
        // get the argv address \n\
        add " PTR "2, " PTR_SP ", #(1<<" PTR_SIZE_LOG ") \n\
        // get _dl_skip_args to see if we were \n\
        // invoked as an executable \n\
        adrp x4, _dl_skip_args \n\
        ldr w4, [x4, #:lo12:_dl_skip_args] \n\
        // do we need to adjust argc/argv \n\
        cmp w4, 0 \n\
        beq .L_done_stack_adjust \n\
        // subtract _dl_skip_args from original arg count \n\
        sub " PTR "1, " PTR "1, " PTR "4 \n\
        // store adjusted argc back to stack \n\
        str " PTR "1, [sp] \n\
        // find the first unskipped argument \n\
        mov " PTR "3, " PTR "2 \n\
        add " PTR "4, " PTR "2, " PTR "4, lsl #" PTR_SIZE_LOG " \n\
        // shuffle argv down \n\
1:      ldr " PTR "5, [x4], #(1<<" PTR_SIZE_LOG ") \n\
        str " PTR "5, [x3], #(1<<" PTR_SIZE_LOG ") \n\
        cmp " PTR "5, #0 \n\
        bne 1b \n\
        // shuffle envp down \n\
1:      ldr " PTR "5, [x4], #(1<<" PTR_SIZE_LOG ") \n\
        str " PTR "5, [x3], #(1<<" PTR_SIZE_LOG ") \n\
        cmp " PTR "5, #0 \n\
        bne 1b \n\
        // shuffle auxv down \n\
1:      ldp " PTR "0, " PTR "5, [x4, #(2<<" PTR_SIZE_LOG ")]! \n\
        stp " PTR "0, " PTR "5, [x3], #(2<<" PTR_SIZE_LOG ") \n\
        cmp " PTR "0, #0 \n\
        bne 1b \n\
        // Update _dl_argv \n\
        adrp x3, __GI__dl_argv \n\
        str " PTR "2, [x3, #:lo12:__GI__dl_argv] \n\
.L_done_stack_adjust: \n\
        // compute envp \n\
        add " PTR "3, " PTR "2, " PTR "1, lsl #" PTR_SIZE_LOG " \n\
        add " PTR "3, " PTR "3, #(1<<" PTR_SIZE_LOG ") \n\
        adrp x16, _rtld_local \n\
        add " PTR "16, " PTR "16, #:lo12:_rtld_local \n\
        ldr " PTR "0, [x16] \n\
        bl _dl_init \n\
        // load the finalizer function \n\
        adrp x0, _dl_fini \n\
        add " PTR "0, " PTR "0, #:lo12:_dl_fini \n\
        // jump to the user's entry point \n\
        br x21 \n\
");

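/* The classes used below are defined in ldsodefs.h: ELF_RTYPE_CLASS_PLT
   marks relocation types that describe a PLT entry or TLS variable, so an
   undefined reference must not be allowed to supply the value;
   ELF_RTYPE_CLASS_COPY marks COPY relocations, which must not resolve to
   one of the main executable's own symbols; and
   ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA marks relocations against
   protected data whose address may be external because of a copy
   relocation.  */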
#define elf_machine_type_class(type) \
  ((((type) == AARCH64_R(JUMP_SLOT) \
     || (type) == AARCH64_R(TLS_DTPMOD) \
     || (type) == AARCH64_R(TLS_DTPREL) \
     || (type) == AARCH64_R(TLS_TPREL) \
     || (type) == AARCH64_R(TLSDESC)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == AARCH64_R(COPY)) * ELF_RTYPE_CLASS_COPY) \
   | (((type) == AARCH64_R(GLOB_DAT)) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA))

#define ELF_MACHINE_JMP_SLOT AARCH64_R(JUMP_SLOT)

/* AArch64 uses RELA not REL.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;

#ifdef SHARED
  /* In a static executable init_cpu_features has already been called
     early from __libc_start_main, so only the shared ld.so needs to do
     it here.  */
  init_cpu_features (&GLRO(dl_aarch64_cpu_features));
#endif
}


static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr,
                       ElfW(Addr) value)
{
  return *reloc_addr = value;
}

/* Return the final value of a PLT relocation.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}

#endif

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER aarch64_gnu_pltenter
#define ARCH_LA_PLTEXIT aarch64_gnu_pltexit

#ifdef RESOLVE_MAP

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
                  const ElfW(Sym) *sym, const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);

  if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
    return;
  else
    {
      const ElfW(Sym) *const refsym = sym;
      struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
      ElfW(Addr) value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;

      if (sym != NULL
          && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
          && __glibc_likely (sym->st_shndx != SHN_UNDEF)
          && __glibc_likely (!skip_ifunc))
        value = elf_ifunc_invoke (value);

      switch (r_type)
        {
        case AARCH64_R(COPY):
          if (sym == NULL)
            break;

          if (sym->st_size > refsym->st_size
              || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
            {
              const char *strtab;

              strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
              _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                                RTLD_PROGNAME, strtab + refsym->st_name);
            }
          memcpy (reloc_addr_arg, (void *) value,
                  sym->st_size < refsym->st_size
                  ? sym->st_size : refsym->st_size);
          break;

        case AARCH64_R(RELATIVE):
        case AARCH64_R(GLOB_DAT):
        case AARCH64_R(JUMP_SLOT):
        case AARCH64_R(ABS32):
#ifdef __LP64__
        case AARCH64_R(ABS64):
#endif
          *reloc_addr = value + reloc->r_addend;
          break;

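        /* TLS descriptor relocation: fill in the descriptor's {entry, arg}
           pair so that the TLSDESC call sequence in the relocated object
           obtains either a fixed offset (_dl_tlsdesc_return, or
           _dl_tlsdesc_undefweak for an undefined weak symbol) or a dynamic
           lookup via _dl_tlsdesc_dynamic, depending on whether the variable
           was given static TLS space.  */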
        case AARCH64_R(TLSDESC):
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *)reloc_addr;
#ifndef RTLD_BOOTSTRAP
            if (! sym)
              {
                td->arg = (void*)reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
#endif
              {
#ifndef RTLD_BOOTSTRAP
# ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
# else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
# endif
#endif
                  {
                    td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
                                      + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
            break;
          }

        case AARCH64_R(TLS_DTPMOD):
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = 1;
#else
          if (sym_map != NULL)
            {
              *reloc_addr = sym_map->l_tls_modid;
            }
#endif
          break;

        case AARCH64_R(TLS_DTPREL):
          if (sym)
            *reloc_addr = sym->st_value + reloc->r_addend;
          break;

        case AARCH64_R(TLS_TPREL):
          if (sym)
            {
              CHECK_STATIC_TLS (map, sym_map);
              *reloc_addr =
                sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
            }
          break;

        case AARCH64_R(IRELATIVE):
          value = map->l_addr + reloc->r_addend;
          value = elf_ifunc_invoke (value);
          *reloc_addr = value;
          break;

        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
        }
    }
}

inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr,
                           const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

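/* Lazy relocations: JUMP_SLOT and TLSDESC entries are left pointing at the
   resolver code set up by elf_machine_runtime_setup so the first use does
   the real work; IRELATIVE entries run their ifunc resolver immediately
   unless skip_ifunc is set.  */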
inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      ElfW(Addr) l_addr,
                      const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
  /* Check for unexpected PLT reloc type.  */
  if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
    {
      if (__builtin_expect (map->l_mach.plt, 0) == 0)
        *reloc_addr += l_addr;
      else
        *reloc_addr = map->l_mach.plt;
    }
  else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
    {
      struct tlsdesc volatile *td =
        (struct tlsdesc volatile *)reloc_addr;

      td->arg = (void*)reloc;
      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
                          + map->l_addr);
    }
  else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = elf_ifunc_invoke (value);
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif