/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <cpu-features.c>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)

#if _CALL_ELF != 2
/* A PowerPC64 function descriptor.  The .plt (procedure linkage
   table) and .opd (official procedure descriptor) sections are
   arrays of these.  */
typedef struct
{
  Elf64_Addr fd_func;
  Elf64_Addr fd_toc;
  Elf64_Addr fd_aux;
} Elf64_FuncDesc;
#endif
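
#if 0
/* Illustrative sketch (not compiled): how a code address is obtained from
   an ELFv1 function descriptor.  Under that ABI a "function pointer"
   points at an Elf64_FuncDesc; fd_func is the real entry address and
   fd_toc is the callee's TOC (r2) value.  The helper name below is
   hypothetical.  */
static Elf64_Addr
example_fdesc_entry_address (const void *funcptr)
{
  const Elf64_FuncDesc *fd = (const Elf64_FuncDesc *) funcptr;
  return fd->fd_func;	/* fd->fd_toc would be loaded into r2 by a call.  */
}
#endif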

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}


/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm (" ld %0,-32768(2)\n"
       " subf %0,%0,2\n"
       : "=r" (ret));
  return ret;
}

/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm (" addis %0,2,_DYNAMIC@toc@ha\n"
       " addi %0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}
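
/* Worked example (the numbers are illustrative assumptions): if this object
   was linked at 0 and the kernel mapped it at 0x8001000000, then
   elf_machine_load_address () returns 0x8001000000, and
   elf_machine_dynamic () returns the run-time address of _DYNAMIC minus
   that offset, i.e. the file-relative address recorded at link time.  */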

#define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela


#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
"	.tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif


/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
" .align 2\n" \
" " ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
" " LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader.  */ \
" mr 3,1\n" \
" li 4,0\n" \
" stdu 4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc.  */ \
" bl " DOT_PREFIX "_dl_start\n" \
" nop\n" \
/* Transfer control to _dl_start_user!  */ \
" b " DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__start-" BODY_PREFIX "_start\n" \
" .short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
" .ascii \"_start\"\n" \
".LT__start_name_end:\n" \
" .align 2\n" \
" " END_2(_start) "\n" \
" .pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
" .tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
" .tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
" .tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
" .tc _dl_fini[TC],_dl_fini\n" \
" .popsection\n" \
" " ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!).  */ \
BODY_PREFIX "_dl_start_user:\n" \
" " LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30.  */ \
" mr 30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */ \
" ld 28,.LC__rtld_local@toc(2)\n" \
" ld 29,.LC__dl_argc@toc(2)\n" \
" ld 27,.LC__dl_argv@toc(2)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */ \
" ld 3,0(28)\n" \
" lwa 4,0(29)\n" \
" ld 5,0(27)\n" \
" sldi 6,4,3\n" \
" add 6,5,6\n" \
" addi 6,6,8\n" \
" bl " DOT_PREFIX "_dl_init\n" \
" nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3;  */ \
" lwa 3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4;  */ \
" ld 4,0(27)\n" \
/* Pass argv+argc+1 in r5;  */ \
" sldi 5,3,3\n" \
" add 6,4,5\n" \
" addi 5,6,8\n" \
/* Pass the auxiliary vector in r6.  This is passed to us just after \
   _envp.  */ \
"2: ldu 0,8(6)\n" \
" cmpdi 0,0\n" \
" bne 2b\n" \
" addi 6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7.  */ \
" ld 7,.LC__dl_fini@toc(2)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value.  This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV.  Also clear the next few words of \
   the stack.  */ \
" li 31,0\n" \
" std 31,0(1)\n" \
" mtlr 31\n" \
" std 31,8(1)\n" \
" std 31,16(1)\n" \
" std 31,24(1)\n" \
/* Now, call the start function descriptor at r30...  */ \
" .globl ._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
" " PPC64_LOAD_FUNCPTR(30) "\n" \
" bctr\n" \
".LT__dl_start_user:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
" .short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
" .ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
" .align 2\n" \
" " END_2(_dl_start_user) "\n" \
" .popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif
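
#if 0
/* Illustrative sketch (not compiled): how a reloc class is typically
   consulted.  COPY relocs carry ELF_RTYPE_CLASS_COPY so that their symbol
   lookup is not satisfied by the copy in the executable itself.  The helper
   name is hypothetical.  */
static int
example_needs_copy_class (int r_type)
{
  return (elf_machine_type_class (r_type) & ELF_RTYPE_CLASS_COPY) != 0;
}
#endif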

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_PPC64_JMP_SLOT

/* The PowerPC never uses REL relocations.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
  init_cpu_features (&GLRO(dl_powerpc_cpu_features));
}
#endif

/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000) ? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif
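
#if 0
/* Illustrative sketch (not compiled): word offset of the glink stub for
   lazy PLT entry I, accumulated the same way elf_machine_runtime_setup does
   below.  For ELFv1 the first 32k stubs take 2 words each and later ones 3;
   for ELFv2 every stub is a single word.  The helper name is
   hypothetical.  */
static Elf64_Word
example_glink_word_offset (Elf64_Word i)
{
  Elf64_Word off = GLINK_INITIAL_ENTRY_WORDS;
  for (Elf64_Word j = 0; j < i; j++)
    off += GLINK_ENTRY_WORDS (j);
  return off;
}
#endif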

#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
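
#if 0
/* Illustrative sketch (not compiled): typical use of the macros above.
   After patching an instruction in place, flush the data cache line and
   invalidate the matching instruction cache line, as elf_machine_rela does
   after it edits code.  The helper name is hypothetical.  */
static void
example_patch_insn (Elf64_Word *insn_addr, Elf64_Word insn)
{
  *insn_addr = insn;
  MODIFIED_CODE_NOQUEUE (insn_addr);
}
#endif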

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;

          dlrr = (Elf64_Addr) (profile ? _dl_profile_resolve
                               : _dl_runtime_resolve);
          if (profile && GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), map))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = map;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve.  Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c.  */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet.  */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address.  */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif

          /* Set up the lazy PLT entries.  */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data.  We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more.  Current known line sizes are 16, 32, and 128 bytes.  */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }
  return lazy;
}
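
#if 0
/* Illustrative sketch (not compiled): what the lazy setup above leaves in
   an ELFv2 PLT.  plt[0] holds _dl_runtime_resolve (or _dl_profile_resolve),
   plt[1] holds the link map, and entry I points at its one-word glink stub
   so that the first call through it enters the resolver.  The helper name
   is hypothetical.  */
static void
example_check_lazy_plt_elfv2 (Elf64_Xword *plt, Elf64_Word *glink,
                              Elf64_Word i)
{
  assert (plt[PLT_INITIAL_ENTRY_WORDS + i * PLT_ENTRY_WORDS]
          == (Elf64_Xword) &glink[GLINK_INITIAL_ENTRY_WORDS + i]);
}
#endif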

#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
                                                   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
         are not being satisfied by a non-zero localentry symbol.  */
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
          && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
          && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
          && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
        _dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif
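
/* Worked example (an illustrative assumption): with ELFv2, a function whose
   global entry point begins with the usual two-instruction TOC setup has
   its local entry point 8 bytes later, so PPC64_LOCAL_ENTRY_OFFSET
   (sym->st_other) yields 8 and an intra-object call above resolves to
   value + 8.  */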

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym; set the plt to
     zero.  finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

static inline void __attribute__ ((always_inline))
elf_machine_plt_conflict (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                          const Elf64_Rela *reloc,
                          Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  if (sym_map == NULL)
    finaladdr = 0;

  if (finaladdr == 0)
    rel = &zero_fd;

  plt->fd_func = rel->fd_func;
  plt->fd_aux = rel->fd_aux;
  plt->fd_toc = rel->fd_toc;
  PPC_DCBST (&plt->fd_func);
  PPC_DCBST (&plt->fd_aux);
  PPC_DCBST (&plt->fd_toc);
  PPC_SYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}


/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))
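
/* Worked example for the high-adjusted macros: PPC_HA adds 0x8000 before
   taking the high halfword so that the low halfword, used as a signed
   immediate, reconstructs the full value.  E.g. for v = 0x12348765:
   PPC_HI (v) == 0x1234, PPC_LO (v) == 0x8765 (negative as a signed 16-bit
   immediate), PPC_HA (v) == 0x1235, and
   (0x1235 << 16) + (short) 0x8765 == 0x12348765.  */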

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
                                                 const char *name,
                                                 Elf64_Addr *const reloc_addr,
                                                 const Elf64_Sym *refsym);

auto inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
auto inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
auto inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
#ifndef RESOLVE_CONFLICT_FIND_MAP
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code; force the opd set
         dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}
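
#if 0
/* Illustrative sketch (not compiled): the shape of an ifunc resolver as it
   is invoked above -- it receives AT_HWCAP and returns the address of the
   chosen implementation.  The names and the hwcap bit tested here are
   hypothetical examples, not part of this file.  */
static void *
example_ifunc_resolver (unsigned long int hwcap)
{
  extern void example_generic_impl (void);
  extern void example_vmx_impl (void);
  if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
    return (void *) example_vmx_impl;
  return (void *) example_generic_impl;
}
#endif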

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
auto inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map,
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
      uint64_t u8;
    } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      elf_machine_plt_conflict (map, sym_map, refsym, sym,
                                reloc, reloc_addr, value);
#else
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
                             reloc, reloc_addr, value);
#endif
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic.  */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall thru */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall thru */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found.  */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size"
                            " in shared object,"
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

auto inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}


#endif /* RESOLVE_MAP */