/* Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "aarch64"

#include <sysdep.h>
#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-irel.h>
#include <cpu-features.c>

/* Translate a processor specific dynamic tag to the index in l_info array.  */
#define DT_AARCH64(x) (DT_AARCH64_##x - DT_LOPROC + DT_NUM)
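
/* For example, DT_AARCH64 (VARIANT_PCS) selects the l_info slot holding
   DT_AARCH64_VARIANT_PCS: processor specific tags are stored in the
   DT_THISPROCNUM entries that follow the first DT_NUM well-known ones.
   elf_machine_lazy_rel below uses this to detect variant PCS objects.  */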

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  return ehdr->e_machine == EM_AARCH64;
}

/* Return the run-time load address of the shared object.  */

static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
  extern const ElfW(Ehdr) __ehdr_start attribute_hidden;
  return (ElfW(Addr)) &__ehdr_start;
}

/* Return the link-time address of _DYNAMIC.  */

static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
  extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
  return (ElfW(Addr)) _DYNAMIC - elf_machine_load_address ();
}
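
/* Both helpers rely on PC-relative addressing: __ehdr_start is the ELF
   header, which is mapped at the very start of the object, so its run-time
   address is the load address; subtracting that load bias from the run-time
   address of _DYNAMIC recovers the link-time address recorded in the file.  */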

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int __attribute__ ((unused))
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      ElfW(Addr) *got;
      extern void _dl_runtime_resolve (ElfW(Word));
      extern void _dl_runtime_profile (ElfW(Word));

      got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
      if (got[1])
        {
          l->l_mach.plt = got[1] + l->l_addr;
        }
      got[1] = (ElfW(Addr)) l;

      /* The got[2] entry contains the address of a function which gets
         called to get the address of a so far unresolved function and
         jump to it.  The profiling extension of the dynamic linker makes
         it possible to intercept such calls to collect information.  In
         that case we don't store the address in the GOT so that all
         future calls also end in this function.  */
      if (profile)
        {
          got[2] = (ElfW(Addr)) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            /* Say that we really want profiling and the timers are
               started.  */
            GL(dl_profile_map) = l;
        }
      else
        {
          /* This function will get called to fix up the GOT entry
             indicated by the offset on the stack, and then jump to
             the resolved address.  */
          got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
        }
    }

  return lazy;
}
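
/* The first GOT entries are special on AArch64, as on most targets:
   got[0] is reserved, got[1] is set above to the struct link_map of
   this object, and got[2] to the resolver trampoline.  An unresolved
   PLT entry branches to the PLT header, which loads got[2] and jumps
   to it; the trampoline then uses got[1] to locate the link map and
   the PLT slot address to identify the relocation to apply.  */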

/* Initial entry point for the dynamic linker.  The C function
   _dl_start is the real entry point; its return value is the user
   program's entry point.  */
#ifdef __LP64__
# define RTLD_START RTLD_START_1 ("x", "3", "sp")
#else
# define RTLD_START RTLD_START_1 ("w", "2", "wsp")
#endif

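/* On entry from the kernel sp points at argc; above it are the argv
   pointers, a NULL terminator, the envp pointers, another NULL, and the
   auxiliary vector.  _start passes sp to _dl_start, and when the dynamic
   linker was invoked explicitly to run a program (_dl_skip_args nonzero),
   _dl_start_user slides argv, envp and auxv down to hide the linker's
   own arguments from the program.  */
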
#define RTLD_START_1(PTR, PTR_SIZE_LOG, PTR_SP) asm ("\
.text                                                           \n\
.globl _start                                                   \n\
.type _start, %function                                         \n\
.globl _dl_start_user                                           \n\
.type _dl_start_user, %function                                 \n\
_start:                                                         \n\
        // bti c                                                \n\
        hint    34                                              \n\
        mov     " PTR "0, " PTR_SP "                            \n\
        bl      _dl_start                                       \n\
        // returns user entry point in x0                       \n\
        mov     x21, x0                                         \n\
_dl_start_user:                                                 \n\
        // get the original arg count                           \n\
        ldr     " PTR "1, [sp]                                  \n\
        // get the argv address                                 \n\
        add     " PTR "2, " PTR_SP ", #(1<<" PTR_SIZE_LOG ")    \n\
        // get _dl_skip_args to see if we were                  \n\
        // invoked as an executable                             \n\
        adrp    x4, _dl_skip_args                               \n\
        ldr     w4, [x4, #:lo12:_dl_skip_args]                  \n\
        // do we need to adjust argc/argv?                      \n\
        cmp     w4, 0                                           \n\
        beq     .L_done_stack_adjust                            \n\
        // subtract _dl_skip_args from original arg count       \n\
        sub     " PTR "1, " PTR "1, " PTR "4                    \n\
        // store adjusted argc back on the stack                \n\
        str     " PTR "1, [sp]                                  \n\
        // find the first unskipped argument                    \n\
        mov     " PTR "3, " PTR "2                              \n\
        add     " PTR "4, " PTR "2, " PTR "4, lsl #" PTR_SIZE_LOG " \n\
        // shuffle argv down                                    \n\
1:      ldr     " PTR "5, [x4], #(1<<" PTR_SIZE_LOG ")          \n\
        str     " PTR "5, [x3], #(1<<" PTR_SIZE_LOG ")          \n\
        cmp     " PTR "5, #0                                    \n\
        bne     1b                                              \n\
        // shuffle envp down                                    \n\
1:      ldr     " PTR "5, [x4], #(1<<" PTR_SIZE_LOG ")          \n\
        str     " PTR "5, [x3], #(1<<" PTR_SIZE_LOG ")          \n\
        cmp     " PTR "5, #0                                    \n\
        bne     1b                                              \n\
        // shuffle auxv down                                    \n\
1:      ldp     " PTR "0, " PTR "5, [x4, #(2<<" PTR_SIZE_LOG ")]! \n\
        stp     " PTR "0, " PTR "5, [x3], #(2<<" PTR_SIZE_LOG ") \n\
        cmp     " PTR "0, #0                                    \n\
        bne     1b                                              \n\
        // update _dl_argv                                      \n\
        adrp    x3, __GI__dl_argv                               \n\
        str     " PTR "2, [x3, #:lo12:__GI__dl_argv]            \n\
.L_done_stack_adjust:                                           \n\
        // compute envp                                         \n\
        add     " PTR "3, " PTR "2, " PTR "1, lsl #" PTR_SIZE_LOG " \n\
        add     " PTR "3, " PTR "3, #(1<<" PTR_SIZE_LOG ")      \n\
        adrp    x16, _rtld_local                                \n\
        add     " PTR "16, " PTR "16, #:lo12:_rtld_local        \n\
        ldr     " PTR "0, [x16]                                 \n\
        bl      _dl_init                                        \n\
        // load the finalizer function                          \n\
        adrp    x0, _dl_fini                                    \n\
        add     " PTR "0, " PTR "0, #:lo12:_dl_fini             \n\
        // jump to the user's entry point                       \n\
        mov     x16, x21                                        \n\
        br      x16                                             \n\
");

#define elf_machine_type_class(type) \
  ((((type) == AARCH64_R(JUMP_SLOT)                             \
     || (type) == AARCH64_R(TLS_DTPMOD)                         \
     || (type) == AARCH64_R(TLS_DTPREL)                         \
     || (type) == AARCH64_R(TLS_TPREL)                          \
     || (type) == AARCH64_R(TLSDESC)) * ELF_RTYPE_CLASS_PLT)    \
   | (((type) == AARCH64_R(COPY)) * ELF_RTYPE_CLASS_COPY)       \
   | (((type) == AARCH64_R(GLOB_DAT)) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA))
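
/* Each (type) == ... comparison evaluates to 0 or 1, so multiplying by a
   class constant and OR-ing the terms builds the classification bit mask
   without branches.  The classes steer symbol lookup:
   ELF_RTYPE_CLASS_PLT for relocations that may be satisfied through the
   PLT (and the TLS relocations), ELF_RTYPE_CLASS_COPY for copy
   relocations, and ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA for GLOB_DAT
   references to possibly-protected data.  */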

#define ELF_MACHINE_JMP_SLOT AARCH64_R(JUMP_SLOT)

/* AArch64 uses RELA not REL.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;

#ifdef SHARED
  /* In a static executable init_cpu_features has already been called
     early from __libc_start_main, so only the dynamic case needs it
     here.  */
  init_cpu_features (&GLRO(dl_aarch64_cpu_features));
#endif
}


static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) *reloc_addr,
                       ElfW(Addr) value)
{
  return *reloc_addr = value;
}
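
/* On AArch64, fixing up a PLT slot is a plain store of the resolved
   address; elf_machine_plt_value below likewise returns the value
   unchanged, since the PLT entry just loads this GOT word and branches
   to it.  */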

/* Return the final value of a plt relocation.  */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map,
                       const ElfW(Rela) *reloc,
                       ElfW(Addr) value)
{
  return value;
}

#endif

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER aarch64_gnu_pltenter
#define ARCH_LA_PLTEXIT aarch64_gnu_pltexit

#ifdef RESOLVE_MAP

static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);

  if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
    *reloc_addr = map->l_addr + reloc->r_addend;
  else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
    return;
  else
    {
      const ElfW(Sym) *const refsym = sym;
      struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
                                              r_type);
      ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);

      if (sym != NULL
          && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
          && __glibc_likely (sym->st_shndx != SHN_UNDEF)
          && __glibc_likely (!skip_ifunc))
        value = elf_ifunc_invoke (value);

      switch (r_type)
        {
        case AARCH64_R(COPY):
          if (sym == NULL)
            break;

          if (sym->st_size > refsym->st_size
              || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
            {
              const char *strtab;

              strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
              _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                                RTLD_PROGNAME, strtab + refsym->st_name);
            }
          memcpy (reloc_addr_arg, (void *) value,
                  sym->st_size < refsym->st_size
                  ? sym->st_size : refsym->st_size);
          break;

        case AARCH64_R(RELATIVE):
        case AARCH64_R(GLOB_DAT):
        case AARCH64_R(JUMP_SLOT):
        case AARCH64_R(ABS32):
#ifdef __LP64__
        case AARCH64_R(ABS64):
#endif
          *reloc_addr = value + reloc->r_addend;
          break;

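        /* A TLS descriptor occupies two GOT words: td->entry is the
           function the compiled code calls to obtain the variable's
           offset from the thread pointer, and td->arg is that function's
           argument.  The three resolvers installed below
           (_dl_tlsdesc_undefweak, _dl_tlsdesc_dynamic and
           _dl_tlsdesc_return) cover undefined weak symbols, dynamically
           allocated TLS, and static TLS respectively.  */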
        case AARCH64_R(TLSDESC):
          {
            struct tlsdesc volatile *td =
              (struct tlsdesc volatile *) reloc_addr;
#ifndef RTLD_BOOTSTRAP
            if (! sym)
              {
                td->arg = (void *) reloc->r_addend;
                td->entry = _dl_tlsdesc_undefweak;
              }
            else
#endif
              {
#ifndef RTLD_BOOTSTRAP
# ifndef SHARED
                CHECK_STATIC_TLS (map, sym_map);
# else
                if (!TRY_STATIC_TLS (map, sym_map))
                  {
                    td->arg = _dl_make_tlsdesc_dynamic
                      (sym_map, sym->st_value + reloc->r_addend);
                    td->entry = _dl_tlsdesc_dynamic;
                  }
                else
# endif
#endif
                  {
                    td->arg = (void *) (sym->st_value + sym_map->l_tls_offset
                                        + reloc->r_addend);
                    td->entry = _dl_tlsdesc_return;
                  }
              }
            break;
          }

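        /* The remaining TLS relocations fill in raw values rather than
           descriptors: TLS_DTPMOD is the module ID used to index the
           dynamic thread vector, TLS_DTPREL the offset of the symbol
           within its module's TLS block, and TLS_TPREL the offset from
           the thread pointer, which requires the module to be in static
           TLS.  */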
        case AARCH64_R(TLS_DTPMOD):
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = 1;
#else
          if (sym_map != NULL)
            {
              *reloc_addr = sym_map->l_tls_modid;
            }
#endif
          break;

        case AARCH64_R(TLS_DTPREL):
          if (sym)
            *reloc_addr = sym->st_value + reloc->r_addend;
          break;

        case AARCH64_R(TLS_TPREL):
          if (sym)
            {
              CHECK_STATIC_TLS (map, sym_map);
              *reloc_addr =
                sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
            }
          break;

        case AARCH64_R(IRELATIVE):
          value = map->l_addr + reloc->r_addend;
          if (__glibc_likely (!skip_ifunc))
            value = elf_ifunc_invoke (value);
          *reloc_addr = value;
          break;

        default:
          _dl_reloc_bad_type (map, r_type, 0);
          break;
        }
    }
}

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr,
                           const ElfW(Rela) *reloc,
                           void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}
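
/* RELATIVE relocations need only the load bias, never a symbol lookup,
   which is why they get this dedicated fast path for processing the
   bulk of an object's relocations.  */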

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      ElfW(Addr) l_addr,
                      const ElfW(Rela) *reloc,
                      int skip_ifunc)
{
  ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
  /* Check for unexpected PLT reloc type.  */
  if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
    {
      if (__glibc_unlikely (map->l_info[DT_AARCH64 (VARIANT_PCS)] != NULL))
        {
          /* Check the symbol table for variant PCS symbols.  */
          const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
          const ElfW (Sym) *symtab =
            (const void *) D_PTR (map, l_info[DT_SYMTAB]);
          const ElfW (Sym) *sym = &symtab[symndx];
          if (__glibc_unlikely (sym->st_other & STO_AARCH64_VARIANT_PCS))
            {
              /* Avoid lazy resolution of variant PCS symbols.  */
              const struct r_found_version *version = NULL;
              if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
                {
                  const ElfW (Half) *vernum =
                    (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
                  version = &map->l_versions[vernum[symndx] & 0x7fff];
                }
              elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
                                skip_ifunc);
              return;
            }
        }

      if (map->l_mach.plt == 0)
        *reloc_addr += l_addr;
      else
        *reloc_addr = map->l_mach.plt;
    }
  else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
    {
      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
      const ElfW (Sym) *symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
      const ElfW (Sym) *sym = &symtab[symndx];
      const struct r_found_version *version = NULL;

      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
        {
          const ElfW (Half) *vernum =
            (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
          version = &map->l_versions[vernum[symndx] & 0x7fff];
        }

      /* Always initialize TLS descriptors completely, because lazy
         initialization requires synchronization at every TLS access.  */
      elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
                        skip_ifunc);
    }
  else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
    {
      ElfW(Addr) value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = elf_ifunc_invoke (value);
      *reloc_addr = value;
    }
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
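
/* Variant PCS symbols (STO_AARCH64_VARIANT_PCS) may depend on registers
   that the lazy resolver trampoline does not preserve, such as SVE
   argument registers, so elf_machine_lazy_rel resolves them eagerly via
   elf_machine_rela instead of leaving the PLT slot for
   _dl_runtime_resolve to fix up later.  */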

#endif