/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)

#if _CALL_ELF != 2
/* A PowerPC64 function descriptor.  The .plt (procedure linkage
   table) and .opd (official procedure descriptor) sections are
   arrays of these.  */
typedef struct
{
  Elf64_Addr fd_func;
  Elf64_Addr fd_toc;
  Elf64_Addr fd_aux;
} Elf64_FuncDesc;
#endif

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}


/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm ("  ld %0,-32768(2)\n"
       "  subf %0,%0,2\n"
       : "=r" (ret));
  return ret;
}

/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm ("  addis %0,2,_DYNAMIC@toc@ha\n"
       "  addi %0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}

#define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela


#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
"  .tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif


/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
"  .align 2\n" \
"  " ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
"  " LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader.  */ \
"  mr 3,1\n" \
"  li 4,0\n" \
"  stdu 4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc.  */ \
"  bl " DOT_PREFIX "_dl_start\n" \
"  nop\n" \
/* Transfer control to _dl_start_user!  */ \
"  b " DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
"  .long 0\n" \
"  .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"  .long .LT__start-" BODY_PREFIX "_start\n" \
"  .short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
"  .ascii \"_start\"\n" \
".LT__start_name_end:\n" \
"  .align 2\n" \
"  " END_2(_start) "\n" \
"  .pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
"  .tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
"  .tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
"  .tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
"  .tc _dl_fini[TC],_dl_fini\n" \
"  .popsection\n" \
"  " ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!).  */ \
BODY_PREFIX "_dl_start_user:\n" \
"  " LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30.  */ \
"  mr 30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */ \
"  ld 28,.LC__rtld_local@toc(2)\n" \
"  ld 29,.LC__dl_argc@toc(2)\n" \
"  ld 27,.LC__dl_argv@toc(2)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */ \
"  ld 3,0(28)\n" \
"  lwa 4,0(29)\n" \
"  ld 5,0(27)\n" \
"  sldi 6,4,3\n" \
"  add 6,5,6\n" \
"  addi 6,6,8\n" \
"  bl " DOT_PREFIX "_dl_init\n" \
"  nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3;  */ \
"  lwa 3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4;  */ \
"  ld 4,0(27)\n" \
/* Pass argv+argc+1 in r5;  */ \
"  sldi 5,3,3\n" \
"  add 6,4,5\n" \
"  addi 5,6,8\n" \
/* Pass the auxiliary vector in r6.  This is passed to us just after \
   _envp.  */ \
"2:  ldu 0,8(6)\n" \
"  cmpdi 0,0\n" \
"  bne 2b\n" \
"  addi 6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7.  */ \
"  ld 7,.LC__dl_fini@toc(2)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value.  This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV.  Also clear the next few words of \
   the stack.  */ \
"  li 31,0\n" \
"  std 31,0(1)\n" \
"  mtlr 31\n" \
"  std 31,8(1)\n" \
"  std 31,16(1)\n" \
"  std 31,24(1)\n" \
/* Now, call the start function descriptor at r30...  */ \
"  .globl ._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
"  " PPC64_LOAD_FUNCPTR(30) "\n" \
"  bctr\n" \
".LT__dl_start_user:\n" \
"  .long 0\n" \
"  .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
"  .long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
"  .short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
"  .ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
"  .align 2\n" \
"  " END_2(_dl_start_user) "\n" \
"  .popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT    R_PPC64_JMP_SLOT

/* The PowerPC never uses REL relocations.  */
#define ELF_MACHINE_NO_REL 1
#define ELF_MACHINE_NO_RELA 0

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#ifdef SHARED
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
}
#endif

/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif

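/* Cache-maintenance and trap primitives used below when PLT entries or
   other code are patched at run time.  */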
#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;

          dlrr = (Elf64_Addr) (profile ? _dl_profile_resolve
                               : _dl_runtime_resolve);
          if (profile && GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), map))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = map;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve.  Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c.  */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet.  */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address.  */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif

          /* Set up the lazy PLT entries.  */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
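          /* Point each PLT entry at its stub in .glink; the stub loads
             the entry's index and branches to the common resolver code
             installed above.  */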
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data.  We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more.  Current known line sizes are 16, 32, and 128 bytes.  */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }
  return lazy;
}

#if _CALL_ELF == 2
/* If the PLT entry whose reloc is 'reloc' resolves to a function in
   the same object, return the target function's local entry point
   offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const Elf64_Rela *reloc)
{
  const Elf64_Sym *symtab;
  const Elf64_Sym *sym;

  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    return 0;

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
  sym = &symtab[ELFW(R_SYM) (reloc->r_info)];

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym, so set the PLT
     entry to zero.  finaladdr should be zero already in this case,
     but guard against invalid PLT relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
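  /* For ELFv2 the PLT entry is a single doubleword holding the target's
     entry address; prefer the function's local entry point when the call
     binds within this same object.  */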
  finaladdr += ppc64_local_entry_offset (map, sym_map, reloc);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

static inline void __attribute__ ((always_inline))
elf_machine_plt_conflict (struct link_map *map, lookup_t sym_map,
                          const Elf64_Rela *reloc,
                          Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  if (sym_map == NULL)
    finaladdr = 0;

  if (finaladdr == 0)
    rel = &zero_fd;

  plt->fd_func = rel->fd_func;
  plt->fd_aux = rel->fd_aux;
  plt->fd_toc = rel->fd_toc;
  PPC_DCBST (&plt->fd_func);
  PPC_DCBST (&plt->fd_aux);
  PPC_DCBST (&plt->fd_toc);
  PPC_SYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, reloc);
  *reloc_addr = finaladdr;
#endif
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}


/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP

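/* Helpers for patching 16-bit relocation fields: PPC_LO, PPC_HI,
   PPC_HIGHER and PPC_HIGHEST extract successive 16-bit chunks of a
   64-bit value; the *A forms add 0x8000 first so that the adjusted
   high part compensates for sign extension of the low part below it.  */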
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
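/* Insert VAL into VAR, changing only the bits covered by MASK.  */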
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))

#define dont_expect(X) __builtin_expect ((X), 0)

extern void _dl_reloc_overflow (struct link_map *map,
                                const char *name,
                                Elf64_Addr *const reloc_addr,
                                const Elf64_Sym *refsym)
  attribute_hidden;

auto inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
auto inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
auto inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
#ifndef RESOLVE_CONFLICT_FIND_MAP
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code, force the opd set
         dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
auto inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map,
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
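  /* Helper type for the R_PPC64_UADDR* relocations, whose targets may
     be unaligned.  */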
  union unaligned
  {
    uint16_t u2;
    uint32_t u4;
    uint64_t u8;
  } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
  Elf64_Addr value = ((sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value)
                      + reloc->r_addend);

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      elf_machine_plt_conflict (map, sym_map, reloc, reloc_addr, value);
#else
      elf_machine_fixup_plt (map, sym_map, reloc, reloc_addr, value);
#endif
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic.  */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
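      /* Fall through.  */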
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
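      /* Fall through.  */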
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found.  */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size"
                            " in shared object,"
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
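            /* BRTAKEN/BRNTAKEN ask for a static branch-prediction hint:
               clear the hint bit in the BO field, set it again for
               BRTAKEN, then set the matching 'a' bit for whichever BO
               encoding (CR-conditional or CTR-decrementing) is used.  */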
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

auto inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}

#endif /* RESOLVE_MAP */