/* Machine-dependent ELF dynamic relocation inline functions.  Sparc64 version.
   Copyright (C) 1997-2006, 2009, 2010, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc64"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <dl-plt.h>

#ifndef VALIDX
# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
		      + DT_EXTRANUM + DT_VALTAGIDX (tag))
#endif

#define ELF64_R_TYPE_ID(info)	((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_SPARCV9;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG) \
do {	Elf64_Addr tmp; \
	__asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
	      "rd %%pc, %0\n\t" \
	      "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t" \
	      "add %0, %1, %0" \
	      : "=r" (PIC_REG), "=r" (tmp)); \
} while (0)
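
/* A note on the -4/+4 biases above: both references to _GLOBAL_OFFSET_TABLE_
   are PC-relative relocations (R_SPARC_PC22 / R_SPARC_PC10), each resolved
   against the address of the instruction that contains it.  The sethi sits
   4 bytes before the `rd %pc' and the add 4 bytes after it, so the -4 and +4
   addends make both halves describe the same quantity, GOT minus the PC
   captured by `rd %pc'; the final add then yields the run-time GOT address.  */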

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  register Elf64_Addr *elf_pic_register __asm__("%l7");

  LOAD_PIC_REG (elf_pic_register);

  return *elf_pic_register;
}

/* Return the run-time load address of the shared object.  */
static inline Elf64_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7");
  register Elf64_Addr *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
	 "call 1f\n\t"
	 " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
	 "call _DYNAMIC\n\t"
	 "call _GLOBAL_OFFSET_TABLE_\n"
	 "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
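  /* Hence, writing D for the link-time _DYNAMIC and G for the link-time
     _GLOBAL_OFFSET_TABLE_: (pc[2] - pc[3]) * 4 == D - G + 4 and
     got - *got == (l_addr + G) - D, so the sum below minus 4 is exactly
     l_addr.  */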
  return (Elf64_Addr) got - *got + (Elf32_Sword) ((pc[2] - pc[3]) * 4) - 4;
}

static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const Elf64_Rela *reloc,
		       Elf64_Addr *reloc_addr, Elf64_Addr value)
{
  sparc64_fixup_plt (map, reloc, reloc_addr, value + reloc->r_addend,
		     reloc->r_addend, 1);
  return value;
}
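
/* sparc64_fixup_plt (from <dl-plt.h>) patches the instructions of the PLT
   entry described by RELOC so that later calls transfer directly to VALUE,
   flushing the instruction cache as needed; the code sequence it writes
   depends on how far the target lies from the entry.  */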

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  /* Don't add addend here, but in elf_machine_fixup_plt instead.
     value + reloc->r_addend is the value which should actually be
     stored into .plt data slot.  */
  return value;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64)) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_SPARC_JMP_SLOT

/* The SPARC never uses Elf64_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* The SPARC overlaps DT_RELA and DT_PLTREL.  */
#define ELF_MACHINE_PLTREL_OVERLAP 1

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      extern void _dl_runtime_resolve_0 (void);
      extern void _dl_runtime_resolve_1 (void);
      extern void _dl_runtime_profile_0 (void);
      extern void _dl_runtime_profile_1 (void);
      Elf64_Addr res0_addr, res1_addr;
      unsigned int *plt = (void *) D_PTR (l, l_info[DT_PLTGOT]);

      if (__builtin_expect (profile, 0))
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_profile_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_profile_1;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    GL(dl_profile_map) = l;
	}
      else
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1;
	}

      /* PLT0 looks like:

	 sethi	%uhi(_dl_runtime_{resolve,profile}_0), %g4
	 sethi	%hi(_dl_runtime_{resolve,profile}_0), %g5
	 or	%g4, %ulo(_dl_runtime_{resolve,profile}_0), %g4
	 or	%g5, %lo(_dl_runtime_{resolve,profile}_0), %g5
	 sllx	%g4, 32, %g4
	 add	%g4, %g5, %g5
	 jmpl	%g5, %g4
	  nop
       */

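      /* The stores below fill in the immediates of that sequence:
	 %uhi(v) is bits 63..42, %ulo(v) is bits 41..32, %hi(v) is
	 bits 31..10 and %lo(v) is bits 9..0 of the resolver address,
	 matching the shifts and masks applied to res0_addr.  */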
      plt[0] = 0x09000000 | (res0_addr >> (64 - 22));
      plt[1] = 0x0b000000 | ((res0_addr >> 10) & 0x003fffff);
      plt[2] = 0x88112000 | ((res0_addr >> 32) & 0x3ff);
      plt[3] = 0x8a116000 | (res0_addr & 0x3ff);
      plt[4] = 0x89293020;
      plt[5] = 0x8a010005;
      plt[6] = 0x89c14000;
      plt[7] = 0x01000000;

      /* PLT1 looks like:

	 sethi	%uhi(_dl_runtime_{resolve,profile}_1), %g4
	 sethi	%hi(_dl_runtime_{resolve,profile}_1), %g5
	 or	%g4, %ulo(_dl_runtime_{resolve,profile}_1), %g4
	 or	%g5, %lo(_dl_runtime_{resolve,profile}_1), %g5
	 sllx	%g4, 32, %g4
	 add	%g4, %g5, %g5
	 jmpl	%g5, %g4
	  nop
       */

      plt[8] = 0x09000000 | (res1_addr >> (64 - 22));
      plt[9] = 0x0b000000 | ((res1_addr >> 10) & 0x003fffff);
      plt[10] = 0x88112000 | ((res1_addr >> 32) & 0x3ff);
      plt[11] = 0x8a116000 | (res1_addr & 0x3ff);
      plt[12] = 0x89293020;
      plt[13] = 0x8a010005;
      plt[14] = 0x89c14000;
      plt[15] = 0x01000000;

      /* Now put the magic cookie at the beginning of .PLT2
	 Entry .PLT3 is unused by this implementation.  */
      *((struct link_map **)(&plt[16])) = l;

      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
	  || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
	{
	  /* Need to reinitialize .plt to undo prelinking.  */
	  Elf64_Rela *rela = (Elf64_Rela *) D_PTR (l, l_info[DT_JMPREL]);
	  Elf64_Rela *relaend
	    = (Elf64_Rela *) ((char *) rela
			      + l->l_info[DT_PLTRELSZ]->d_un.d_val);

	  /* prelink must ensure there are no R_SPARC_NONE relocs left
	     in .rela.plt.  */
	  while (rela < relaend)
	    {
	      if (__builtin_expect (rela->r_addend, 0) != 0)
		{
		  Elf64_Addr slot = ((rela->r_offset + l->l_addr + 0x400
				      - (Elf64_Addr) plt)
				     / 0x1400) * 0x1400
				    + (Elf64_Addr) plt - 0x400;
		  /* ldx [%o7 + X], %g1  */
		  unsigned int first_ldx = *(unsigned int *)(slot + 12);
		  Elf64_Addr ptr = slot + (first_ldx & 0xfff) + 4;

		  *(Elf64_Addr *) (rela->r_offset + l->l_addr)
		    = (Elf64_Addr) plt
		      - (slot + ((rela->r_offset + l->l_addr - ptr) / 8) * 24
			 + 4);
		  ++rela;
		  continue;
		}

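	      /* Restore a near PLT entry to its initial, unresolved form:
		 the first word (0x03000000 | off) is a sethi into %g1 whose
		 imm22 field holds the entry's byte offset from .PLT0, and
		 the second (0x30680000 | disp19) is ba,a,pt %xcc back to
		 the run-time resolver stub at .PLT1.  */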
	      *(unsigned int *) (rela->r_offset + l->l_addr)
		= 0x03000000 | (rela->r_offset + l->l_addr - (Elf64_Addr) plt);
	      *(unsigned int *) (rela->r_offset + l->l_addr + 4)
		= 0x30680000 | ((((Elf64_Addr) plt + 32 - rela->r_offset
				  - l->l_addr - 4) >> 2) & 0x7ffff);
	      __asm __volatile ("flush %0" : : "r" (rela->r_offset
						    + l->l_addr));
	      __asm __volatile ("flush %0+4" : : "r" (rela->r_offset
						      + l->l_addr));
	      ++rela;
	    }
	}
    }

  return lazy;
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

/* Undo the sub %sp, 6*8, %sp; add %sp, STACK_BIAS + 22*8, %o0 below
   (but w/o STACK_BIAS) to get at the value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 8))
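
/* That is: _dl_start received cookie = (%sp - 6*8) + 22*8 (without the
   stack bias), so the original %sp at process entry, which is what
   __libc_stack_end wants, is cookie - (22 - 6) * 8.  */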

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define __S1(x) #x
#define __S(x) __S1(x)

#define RTLD_START __asm__ ( "\n" \
"	.text\n" \
"	.global	_start\n" \
"	.type	_start, @function\n" \
"	.align	32\n" \
"_start:\n" \
"   /* Make room for functions to drop their arguments on the stack.  */\n" \
"	sub	%sp, 6*8, %sp\n" \
"   /* Pass pointer to argument block to _dl_start.  */\n" \
"	call	_dl_start\n" \
"	 add	%sp," __S(STACK_BIAS) "+22*8,%o0\n" \
"	/* FALLTHRU */\n" \
"	.size	_start, .-_start\n" \
"\n" \
"	.global	_dl_start_user\n" \
"	.type	_dl_start_user, @function\n" \
"_dl_start_user:\n" \
"   /* Load the GOT register.  */\n" \
"1:	call	11f\n" \
"	 sethi	%hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"11:	or	%l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"	sethi	%hi(_dl_skip_args), %g5\n" \
"	add	%l7, %o7, %l7\n" \
"	or	%g5, %lo(_dl_skip_args), %g5\n" \
"   /* Save the user entry point address in %l0.  */\n" \
"	mov	%o0, %l0\n" \
"   /* See if we were run as a command with the executable file name as an\n" \
"      extra leading argument.  If so, we must shift things around since we\n" \
"      must keep the stack doubleword aligned.  */\n" \
"	ldx	[%l7 + %g5], %i0\n" \
"	ld	[%i0], %i0\n" \
"	brz,pt	%i0, 2f\n" \
"	 ldx	[%sp + " __S(STACK_BIAS) " + 22*8], %i5\n" \
"   /* Find out how far to shift.  */\n" \
"	sethi	%hi(_dl_argv), %l4\n" \
"	sub	%i5, %i0, %i5\n" \
"	or	%l4, %lo(_dl_argv), %l4\n" \
"	sllx	%i0, 3, %l6\n" \
"	ldx	[%l7 + %l4], %l4\n" \
"	stx	%i5, [%sp + " __S(STACK_BIAS) " + 22*8]\n" \
"	add	%sp, " __S(STACK_BIAS) " + 23*8, %i1\n" \
"	add	%i1, %l6, %i2\n" \
"	ldx	[%l4], %l5\n" \
"   /* Copy down argv.  */\n" \
"12:	ldx	[%i2], %i3\n" \
"	add	%i2, 8, %i2\n" \
"	stx	%i3, [%i1]\n" \
"	brnz,pt	%i3, 12b\n" \
"	 add	%i1, 8, %i1\n" \
"	sub	%l5, %l6, %l5\n" \
"   /* Copy down envp.  */\n" \
"13:	ldx	[%i2], %i3\n" \
"	add	%i2, 8, %i2\n" \
"	stx	%i3, [%i1]\n" \
"	brnz,pt	%i3, 13b\n" \
"	 add	%i1, 8, %i1\n" \
"   /* Copy down auxiliary table.  */\n" \
"14:	ldx	[%i2], %i3\n" \
"	ldx	[%i2 + 8], %i4\n" \
"	add	%i2, 16, %i2\n" \
"	stx	%i3, [%i1]\n" \
"	stx	%i4, [%i1 + 8]\n" \
"	brnz,pt	%i3, 14b\n" \
"	 add	%i1, 16, %i1\n" \
"	stx	%l5, [%l4]\n" \
"   /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n" \
"2:	sethi	%hi(_rtld_local), %o0\n" \
"	add	%sp, " __S(STACK_BIAS) " + 23*8, %o2\n" \
"	orcc	%o0, %lo(_rtld_local), %o0\n" \
"	sllx	%i5, 3, %o3\n" \
"	ldx	[%l7 + %o0], %o0\n" \
"	add	%o3, 8, %o3\n" \
"	mov	%i5, %o1\n" \
"	add	%o2, %o3, %o3\n" \
"	call	_dl_init_internal\n" \
"	 ldx	[%o0], %o0\n" \
"   /* Pass our finalizer function to the user in %g1.  */\n" \
"	sethi	%hi(_dl_fini), %g1\n" \
"	or	%g1, %lo(_dl_fini), %g1\n" \
"	ldx	[%l7 + %g1], %g1\n" \
"	/* Jump to the user's entry point and deallocate the extra stack we got.  */\n" \
"	jmp	%l0\n" \
"	 add	%sp, 6*8, %sp\n" \
"	.size	_dl_start_user, . - _dl_start_user\n" \
"	.previous\n");

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER	sparc64_gnu_pltenter
#define ARCH_LA_PLTEXIT		sparc64_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
		  const Elf64_Sym *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
  const Elf64_Sym *const refsym = sym;
#endif
  Elf64_Addr value;
  const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info);
#if !defined RESOLVE_CONFLICT_FIND_MAP
  struct link_map *sym_map = NULL;
#endif

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__builtin_expect (r_type == R_SPARC_NONE, 0))
    return;

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
	*reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF64_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;	/* Assume copy relocs have zero addend.  */

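  /* If the symbol is an indirect function, VALUE at this point is the
     address of the IFUNC resolver; call it with the hardware capability
     mask to obtain the address of the actual implementation.  */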
  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
	/* This can happen in trace mode if an object could not be
	   found.  */
	break;
      if (sym->st_size > refsym->st_size
	  || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	{
	  const char *strtab;

	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	  _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
			    rtld_progname ?: "<program name unknown>",
			    strtab + refsym->st_name);
	}
      memcpy (reloc_addr_arg, (void *) value,
	      MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_64:
    case R_SPARC_GLOB_DAT:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* Fall thru */
    case R_SPARC_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      /* R_SPARC_JMP_SLOT conflicts against .plt[32768+]
	 relocs should be turned into R_SPARC_64 relocs
	 in .gnu.conflict section.
	 r_addend non-zero does not mean it is a .plt[32768+]
	 reloc, instead it is the actual address of the function
	 to call.  */
      sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0);
#else
      sparc64_fixup_plt (map, reloc, reloc_addr, value, reloc->r_addend, 0);
#endif
      break;
#ifndef RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD64:
      /* Get the information from the link map returned by the
	 resolv function.  */
      if (sym_map != NULL)
	*reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF64:
      /* During relocation all TLS symbols are defined and used.
	 Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF64:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of object the symbol is contained in.
	 It is a negative value which will be added to the
	 thread pointer.  */
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  *reloc_addr = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	}
      break;
# ifndef RTLD_BOOTSTRAP
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  value = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	  if (r_type == R_SPARC_TLS_LE_HIX22)
	    *(unsigned int *)reloc_addr =
	      ((*(unsigned int *)reloc_addr & 0xffc00000)
	       | (((~value) >> 10) & 0x3fffff));
	  else
	    *(unsigned int *)reloc_addr =
	      ((*(unsigned int *)reloc_addr & 0xffffe000) | (value & 0x3ff)
	       | 0x1c00);
	}
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_32:
      *(unsigned int *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_WDISP30:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xc0000000) |
	 (((value - (Elf64_Addr) reloc_addr) >> 2) & 0x3fffffff));
      break;

      /* MEDLOW code model relocs */
    case R_SPARC_LO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 (value & 0x3ff));
      break;
    case R_SPARC_HI22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 10) & 0x3fffff));
      break;
    case R_SPARC_OLO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x1fff) |
	 (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff));
      break;

      /* MEDMID code model relocs */
    case R_SPARC_H44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 22) & 0x3fffff));
      break;
    case R_SPARC_M44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 ((value >> 12) & 0x3ff));
      break;
    case R_SPARC_L44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0xfff) |
	 (value & 0xfff));
      break;

      /* MEDANY code model relocs */
    case R_SPARC_HH22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 (value >> 42));
      break;
    case R_SPARC_HM10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 ((value >> 32) & 0x3ff));
      break;
    case R_SPARC_LM22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 10) & 0x003fffff));
      break;
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
    case R_SPARC_UA64:
      if (! ((long) reloc_addr_arg & 3))
	{
	  /* Common in .eh_frame  */
	  ((unsigned int *) reloc_addr_arg) [0] = value >> 32;
	  ((unsigned int *) reloc_addr_arg) [1] = value;
	  break;
	}
      ((unsigned char *) reloc_addr_arg) [0] = value >> 56;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 48;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 40;
      ((unsigned char *) reloc_addr_arg) [3] = value >> 32;
      ((unsigned char *) reloc_addr_arg) [4] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [5] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [6] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [7] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || !defined NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      Elf64_Addr l_addr, const Elf64_Rela *reloc,
		      int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);

  if (__builtin_expect (r_type == R_SPARC_JMP_SLOT, 1))
    ;
  else if (r_type == R_SPARC_JMP_IREL
	   || r_type == R_SPARC_IRELATIVE)
    {
      Elf64_Addr value = map->l_addr + reloc->r_addend;
      if (__builtin_expect (!skip_ifunc, 1))
	value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      if (r_type == R_SPARC_JMP_IREL)
	{
	  /* 'high' is always zero, for large PLT entries the linker
	     emits an R_SPARC_IRELATIVE.  */
	  sparc64_fixup_plt (map, reloc, reloc_addr, value, 0, 1);
	}
      else
	*reloc_addr = value;
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}
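
/* In the lazy path above, R_SPARC_JMP_SLOT entries are deliberately left
   untouched: the PLT stubs installed by elf_machine_runtime_setup will
   enter _dl_runtime_resolve on the first call.  IFUNC relocations, by
   contrast, must be handled immediately, because their resolver has to run
   to produce the final target address.  */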

#endif /* RESOLVE_MAP */