]> git.ipfire.org Git - thirdparty/glibc.git/blame - sysdeps/powerpc/powerpc32/dl-machine.c
2.5-18.1
[thirdparty/glibc.git] / sysdeps / powerpc / powerpc32 / dl-machine.c
CommitLineData
052b6a6c 1/* Machine-dependent ELF dynamic relocation functions. PowerPC version.
0ecb606c 2 Copyright (C) 1995-2003, 2004, 2005 Free Software Foundation, Inc.
052b6a6c
UD
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
41bdb6e2
AJ
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
052b6a6c
UD
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
41bdb6e2 13 Lesser General Public License for more details.
052b6a6c 14
41bdb6e2
AJ
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
052b6a6c
UD
19
20#include <unistd.h>
21#include <string.h>
22#include <sys/param.h>
23#include <link.h>
a42195db 24#include <ldsodefs.h>
052b6a6c 25#include <elf/dynamic-link.h>
b6299091 26#include <dl-machine.h>
c6e6c9c8 27#include <stdio-common/_itoa.h>
052b6a6c 28
acd262e6
UD
29/* The value __cache_line_size is defined in memset.S and is initialised
30 by _dl_sysdep_start via DL_PLATFORM_INIT. */
31extern int __cache_line_size;
32weak_extern (__cache_line_size)
33
052b6a6c
UD
34/* Because ld.so is now versioned, these functions can be in their own file;
35 no relocations need to be done to call them.
36 Of course, if ld.so is not versioned... */
ab9ba655 37#if defined SHARED && !(DO_VERSIONING - 0)
052b6a6c
UD
38#error This will not work with versioning turned off, sorry.
39#endif
40
41
/* Stuff for the PLT.  */
/* Words reserved at the start of the PLT for the long-branch stub and
   the lazy-resolution trampoline, before the first real entry.  */
#define PLT_INITIAL_ENTRY_WORDS 18
/* Word offset of the long-branch (indirect jump) stub in the PLT.  */
#define PLT_LONGBRANCH_ENTRY_WORDS 0
/* Word offset of the lazy-resolution trampoline in the PLT.  */
#define PLT_TRAMPOLINE_ENTRY_WORDS 6
/* Entries with index >= this need double-size (4-word) slots, because
   the entry index no longer fits the immediate field of `li'.  */
#define PLT_DOUBLE_SIZE (1<<13)
/* Word offset of PLT entry ENTRY_NUMBER: 2 words per entry for the
   first PLT_DOUBLE_SIZE entries, 4 words per entry after that.  */
#define PLT_ENTRY_START_WORDS(entry_number) \
  (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
   + ((entry_number) > PLT_DOUBLE_SIZE \
      ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
      : 0))
/* The table of resolved addresses starts right after the PLT code.  */
#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)

/* Macros to build PowerPC opcode words.  Register and immediate
   operands are placed in the standard D-form/X-form fields.  */
#define OPCODE_ADDI(rd,ra,simm) \
  (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_ADDIS(rd,ra,simm) \
  (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_ADD(rd,ra,rb) \
  (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
/* Relative branch (b) and absolute branch (ba); the target is masked
   to the 24-bit, word-aligned displacement field.  */
#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
#define OPCODE_BCTR() 0x4e800420
#define OPCODE_LWZ(rd,d,ra) \
  (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
#define OPCODE_LWZU(rd,d,ra) \
  (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
  (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)

/* Convenience forms built from the primitives above.  */
#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
/* High half of VALUE, rounded so that a following addi/lwz with the
   sign-extended low half reconstructs VALUE exactly.  */
#define OPCODE_ADDIS_HI(rd,ra,value) \
  OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)


/* Cache-control primitives: flush modified code from the data cache
   and invalidate stale instruction-cache lines.  */
#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
/* Trap-always; used as a "cannot happen" marker.  */
#define PPC_DIE asm volatile ("tweq 0,0")

/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
92
93
/* The idea here is that to conform to the ABI, we are supposed to try
   to load dynamic objects between 0x10000 (we actually use 0x40000 as
   the lower bound, to increase the chance of a memory reference from
   a null pointer giving a segfault) and the program's load address;
   this may allow us to use a branch instruction in the PLT rather
   than a computed jump.  The address is only used as a preference for
   mmap, so if we get it wrong the worst that happens is that it gets
   mapped somewhere else.  */

/* Return a preferred load address for a new mapping of MAPLENGTH bytes.
   If the object itself has a preference (MAPSTARTPREF != 0), honor it.
   Otherwise scan every object in every namespace, shrink the candidate
   window [low, high) around the existing mappings, and return a
   page-aligned address near the top of the remaining gap, or 0 to let
   mmap choose.  LOADER is unused in this implementation.  */
ElfW(Addr)
__elf_preferred_address (struct link_map *loader, size_t maplength,
			 ElfW(Addr) mapstartpref)
{
  ElfW(Addr) low, high;
  struct link_map *l;
  Lmid_t nsid;

  /* If the object has a preference, load it there!  */
  if (mapstartpref != 0)
    return mapstartpref;

  /* Otherwise, quickly look for a suitable gap between 0x3FFFF and
     0x70000000.  0x3FFFF is so that references off NULL pointers will
     cause a segfault, 0x70000000 is just paranoia (it should always
     be superseded by the program's load address).  */
  low = 0x0003FFFF;
  high = 0x70000000;
  for (nsid = 0; nsid < DL_NNS; ++nsid)
    for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
      {
	ElfW(Addr) mapstart, mapend;
	/* Round the mapping out to full pages.  */
	mapstart = l->l_map_start & ~(GLRO(dl_pagesize) - 1);
	mapend = l->l_map_end | (GLRO(dl_pagesize) - 1);
	assert (mapend > mapstart);

	/* Prefer gaps below the main executable, note that l ==
	   _dl_loaded does not work for static binaries loading
	   e.g. libnss_*.so.  */
	if ((mapend >= high || l->l_type == lt_executable)
	    && high >= mapstart)
	  high = mapstart;
	else if (mapend >= low && low >= mapstart)
	  low = mapend;
	else if (high >= mapend && mapstart >= low)
	  {
	    /* The mapping splits the window; keep the larger side.  */
	    if (high - mapend >= mapstart - low)
	      low = mapend;
	    else
	      high = mapstart;
	  }
      }

  high -= 0x10000;  /* Allow some room between objects.  */
  /* Round the request up to a whole number of pages.  */
  maplength = (maplength | (GLRO(dl_pagesize) - 1)) + 1;
  if (high <= low || high - low < maplength )
    return 0;
  return high - maplength;  /* Both high and maplength are page-aligned.  */
}
152
/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.
   Also install a small trampoline to be used by entries that have
   been relocated to an address too far away for a single branch.  */

/* There are many kinds of PLT entries:

   (1)	A direct jump to the actual routine, either a relative or
	absolute branch.  These are set up in __elf_machine_fixup_plt.

   (2)	Short lazy entries.  These cover the first 8192 slots in
	the PLT, and look like (where 'index' goes from 0 to 8191):

	li %r11, index*4
	b  &plt[PLT_TRAMPOLINE_ENTRY_WORDS+1]

   (3)	Short indirect jumps.  These replace (2) when a direct jump
	wouldn't reach.  They look the same except that the branch
	is 'b &plt[PLT_LONGBRANCH_ENTRY_WORDS]'.

   (4)  Long lazy entries.  These cover the slots when a short entry
	won't fit ('index*4' overflows its field), and look like:

	lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
	lwzu %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
	b  &plt[PLT_TRAMPOLINE_ENTRY_WORDS]
	bctr

   (5)	Long indirect jumps.  These replace (4) when a direct jump
	wouldn't reach.  They look like:

	lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
	lwz %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
	mtctr %r12
	bctr

   (6) Long direct jumps.  These are used when thread-safety is not
       required.  They look like:

	lis %r12, %hi(finaladdr)
	addi %r12, %r12, %lo(finaladdr)
	mtctr %r12
	bctr


   The lazy entries, (2) and (4), are set up here in
   __elf_machine_runtime_setup.  (1), (3), and (5) are set up in
   __elf_machine_fixup_plt.  (1), (3), and (6) can also be constructed
   in __process_machine_rela.

   The reason for the somewhat strange construction of the long
   entries, (4) and (5), is that we need to ensure thread-safety.  For
   (1) and (3), this is obvious because only one instruction is
   changed and the PPC architecture guarantees that aligned stores are
   atomic.  For (5), this is more tricky.  When changing (4) to (5),
   the `b' instruction is first changed to `mtctr'; this is safe
   and is why the `lwzu' instruction is not just a simple `addi'.
   Once this is done, and is visible to all processors, the `lwzu' can
   safely be changed to a `lwz'.  */

/* Initialize MAP's PLT.  If LAZY, write lazy stubs (forms (2)/(4))
   for every entry plus the trampoline that funnels them into
   _dl_runtime_resolve (or _dl_prof_resolve when PROFILE); otherwise
   only the 4-word long-branch stub is written.  Finishes by flushing
   the modified words from the data cache and invalidating the
   instruction cache.  Returns LAZY.  */
int
__elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf32_Word i;
      Elf32_Word *plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf32_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
				    / sizeof (Elf32_Rela));
      Elf32_Word rel_offset_words = PLT_DATA_START_WORDS (num_plt_entries);
      /* Address of the PLT data table (resolved-address slots).  */
      Elf32_Word data_words = (Elf32_Word) (plt + rel_offset_words);
      Elf32_Word size_modified;

      extern void _dl_runtime_resolve (void);
      extern void _dl_prof_resolve (void);

      /* Convert the index in r11 into an actual address, and get the
	 word at that address.  */
      plt[PLT_LONGBRANCH_ENTRY_WORDS] = OPCODE_ADDIS_HI (11, 11, data_words);
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 1] = OPCODE_LWZ (11, data_words, 11);

      /* Call the procedure at that address.  */
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 2] = OPCODE_MTCTR (11);
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 3] = OPCODE_BCTR ();

      if (lazy)
	{
	  Elf32_Word *tramp = plt + PLT_TRAMPOLINE_ENTRY_WORDS;
	  Elf32_Word dlrr = (Elf32_Word)(profile
					 ? _dl_prof_resolve
					 : _dl_runtime_resolve);
	  Elf32_Word offset;

	  if (profile && GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), map))
	    /* This is the object we are looking for.  Say that we really
	       want profiling and the timers are started.  */
	    GL(dl_profile_map) = map;

	  /* For the long entries, subtract off data_words.  */
	  tramp[0] = OPCODE_ADDIS_HI (11, 11, -data_words);
	  tramp[1] = OPCODE_ADDI (11, 11, -data_words);

	  /* Multiply index of entry by 3 (in r11): r12 = r11*2,
	     then r11 = r12 + r11.  */
	  tramp[2] = OPCODE_SLWI (12, 11, 1);
	  tramp[3] = OPCODE_ADD (11, 12, 11);
	  if (dlrr <= 0x01fffffc || dlrr >= 0xfe000000)
	    {
	      /* The resolver is reachable by an absolute branch.
		 Load address of link map in r12.  */
	      tramp[4] = OPCODE_LI (12, (Elf32_Word) map);
	      tramp[5] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);

	      /* Call _dl_runtime_resolve.  */
	      tramp[6] = OPCODE_BA (dlrr);
	    }
	  else
	    {
	      /* Get address of _dl_runtime_resolve in CTR.  */
	      tramp[4] = OPCODE_LI (12, dlrr);
	      tramp[5] = OPCODE_ADDIS_HI (12, 12, dlrr);
	      tramp[6] = OPCODE_MTCTR (12);

	      /* Load address of link map in r12.  */
	      tramp[7] = OPCODE_LI (12, (Elf32_Word) map);
	      tramp[8] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);

	      /* Call _dl_runtime_resolve.  */
	      tramp[9] = OPCODE_BCTR ();
	    }

	  /* Set up the lazy PLT entries.  First the 2-word short
	     entries (form (2)), then the 4-word long entries (form
	     (4)) for indexes >= PLT_DOUBLE_SIZE.  */
	  offset = PLT_INITIAL_ENTRY_WORDS;
	  i = 0;
	  while (i < num_plt_entries && i < PLT_DOUBLE_SIZE)
	    {
	      plt[offset  ] = OPCODE_LI (11, i * 4);
	      plt[offset+1] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2
					 - (offset+1))
					* 4);
	      i++;
	      offset += 2;
	    }
	  while (i < num_plt_entries)
	    {
	      plt[offset  ] = OPCODE_LIS_HI (11, i * 4 + data_words);
	      plt[offset+1] = OPCODE_LWZU (12, i * 4 + data_words, 11);
	      plt[offset+2] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS
					 - (offset+2))
					* 4);
	      plt[offset+3] = OPCODE_BCTR ();
	      i++;
	      offset += 4;
	    }
	}

      /* Now, we've modified code.  We need to write the changes from
	 the data cache to a second-level unified cache, then make
	 sure that stale data in the instruction cache is removed.
	 (In a multiprocessor system, the effect is more complex.)
	 Most of the PLT shouldn't be in the instruction cache, but
	 there may be a little overlap at the start and the end.

	 Assumes that dcbst and icbi apply to lines of 16 bytes or
	 more.  Current known line sizes are 16, 32, and 128 bytes.
	 The following gets the __cache_line_size, when available.  */

      /* Default minimum 4 words per cache line.  */
      int line_size_words = 4;

      /* Don't try this until ld.so has relocated itself!
	 __cache_line_size is a weak extern; its address may be NULL.  */
      int *line_size_ptr = &__cache_line_size;
      if (lazy && line_size_ptr != NULL)
	{
	  /* Verify that __cache_line_size is defined and set.  */
	  if (*line_size_ptr != 0)
	    /* Convert bytes to words.  */
	    line_size_words = *line_size_ptr / 4;
	}

      /* Only the 6-word long-branch stub was written when not lazy.  */
      size_modified = lazy ? rel_offset_words : 6;
      for (i = 0; i < size_modified; i += line_size_words)
	PPC_DCBST (plt + i);
      /* Make sure the final, possibly partial, line is flushed too.  */
      PPC_DCBST (plt + size_modified - 1);
      PPC_SYNC;

      for (i = 0; i < size_modified; i += line_size_words)
	PPC_ICBI (plt + i);
      PPC_ICBI (plt + size_modified - 1);
      PPC_ISYNC;
    }

  return lazy;
}
345
/* Rewrite the PLT slot at RELOC_ADDR in MAP so it jumps to FINALADDR.
   Must be safe against another thread executing the slot concurrently,
   so each visible change is a single aligned word store, ordered with
   PPC_SYNC (see the PLT entry taxonomy comment earlier in this file:
   this produces forms (1), (3), or (5)).  Returns FINALADDR.  */
Elf32_Addr
__elf_machine_fixup_plt (struct link_map *map, const Elf32_Rela *reloc,
			 Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
{
  Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
  /* Does the displacement fit the signed 26-bit branch field?  */
  if (delta << 6 >> 6 == delta)
    *reloc_addr = OPCODE_B (delta);
  else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
    *reloc_addr = OPCODE_BA (finaladdr);
  else
    {
      /* Out of branch range: go indirect through the PLT data table.  */
      Elf32_Word *plt, *data_words;
      Elf32_Word index, offset, num_plt_entries;

      num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
			 / sizeof(Elf32_Rela));
      plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
      offset = reloc_addr - plt;
      index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
      data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);

      reloc_addr += 1;

      if (index < PLT_DOUBLE_SIZE)
	{
	  /* Short entry: store the target, then retarget the branch
	     at the long-branch stub (form (3)).  */
	  data_words[index] = finaladdr;
	  PPC_SYNC;
	  *reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1))
				  * 4);
	}
      else
	{
	  /* Long entry: convert form (4) to form (5).  Adjust INDEX
	     for the 4-word slots beyond PLT_DOUBLE_SIZE.  */
	  index -= (index - PLT_DOUBLE_SIZE)/2;

	  data_words[index] = finaladdr;
	  PPC_SYNC;

	  /* First replace the `b' with `mtctr' — safe because the
	     preceding `lwzu' already loaded r12 (see the comment
	     above on thread-safety of (4)->(5)).  */
	  reloc_addr[1] = OPCODE_MTCTR (12);
	  MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
	  PPC_SYNC;

	  /* Only once that is visible can `lwzu' become `lwz'.  */
	  reloc_addr[0] = OPCODE_LWZ (12,
				      (Elf32_Word) (data_words + index), 11);
	}
    }
  MODIFIED_CODE (reloc_addr);
  return finaladdr;
}
394
/* Report a relocation overflow: build a message of the form
   "NAME relocation at 0xADDR [for symbol `SYM'] out of range" and
   raise it via _dl_signal_error.  NAME is the relocation type name;
   REFSYM, if non-NULL, names the symbol being relocated.  */
void
_dl_reloc_overflow (struct link_map *map,
		    const char *name,
		    Elf32_Addr *const reloc_addr,
		    const Elf32_Sym *refsym)
{
  char buffer[128];
  char *t;
  t = stpcpy (buffer, name);
  /* The "0x00000000" placeholder is overwritten in place below:
     _itoa_word writes hex digits backwards, ending just before T, so
     up to 8 digits land exactly over the zeros.  */
  t = stpcpy (t, " relocation at 0x00000000");
  _itoa_word ((unsigned) reloc_addr, t, 16, 0);
  if (refsym)
    {
      const char *strtab;

      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
      t = stpcpy (t, " for symbol `");
      /* NOTE(review): the symbol name length is not bounded here; a
	 very long name could overrun the 128-byte buffer — confirm
	 against callers / consider truncating.  */
      t = stpcpy (t, strtab + refsym->st_name);
      t = stpcpy (t, "'");
    }
  t = stpcpy (t, " out of range");
  _dl_signal_error (0, map->l_name, NULL, buffer);
}
418
052b6a6c
UD
419void
420__process_machine_rela (struct link_map *map,
421 const Elf32_Rela *reloc,
545dbc93 422 struct link_map *sym_map,
052b6a6c
UD
423 const Elf32_Sym *sym,
424 const Elf32_Sym *refsym,
425 Elf32_Addr *const reloc_addr,
426 Elf32_Addr const finaladdr,
427 int rinfo)
428{
429 switch (rinfo)
430 {
431 case R_PPC_NONE:
432 return;
433
434 case R_PPC_ADDR32:
052b6a6c
UD
435 case R_PPC_GLOB_DAT:
436 case R_PPC_RELATIVE:
437 *reloc_addr = finaladdr;
438 return;
439
b51b47f4 440 case R_PPC_UADDR32:
82f3f07d
RM
441 ((char *) reloc_addr)[0] = finaladdr >> 24;
442 ((char *) reloc_addr)[1] = finaladdr >> 16;
443 ((char *) reloc_addr)[2] = finaladdr >> 8;
444 ((char *) reloc_addr)[3] = finaladdr;
b51b47f4
UD
445 break;
446
052b6a6c 447 case R_PPC_ADDR24:
b51b47f4 448 if (__builtin_expect (finaladdr > 0x01fffffc && finaladdr < 0xfe000000, 0))
63c7a7e8 449 _dl_reloc_overflow (map, "R_PPC_ADDR24", reloc_addr, refsym);
118bad87 450 *reloc_addr = (*reloc_addr & 0xfc000003) | (finaladdr & 0x3fffffc);
052b6a6c
UD
451 break;
452
453 case R_PPC_ADDR16:
b51b47f4 454 if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
63c7a7e8 455 _dl_reloc_overflow (map, "R_PPC_ADDR16", reloc_addr, refsym);
052b6a6c
UD
456 *(Elf32_Half*) reloc_addr = finaladdr;
457 break;
458
b51b47f4
UD
459 case R_PPC_UADDR16:
460 if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
63c7a7e8 461 _dl_reloc_overflow (map, "R_PPC_UADDR16", reloc_addr, refsym);
82f3f07d
RM
462 ((char *) reloc_addr)[0] = finaladdr >> 8;
463 ((char *) reloc_addr)[1] = finaladdr;
b51b47f4
UD
464 break;
465
052b6a6c
UD
466 case R_PPC_ADDR16_LO:
467 *(Elf32_Half*) reloc_addr = finaladdr;
468 break;
469
470 case R_PPC_ADDR16_HI:
471 *(Elf32_Half*) reloc_addr = finaladdr >> 16;
472 break;
473
474 case R_PPC_ADDR16_HA:
475 *(Elf32_Half*) reloc_addr = (finaladdr + 0x8000) >> 16;
476 break;
477
478 case R_PPC_ADDR14:
479 case R_PPC_ADDR14_BRTAKEN:
480 case R_PPC_ADDR14_BRNTAKEN:
b51b47f4 481 if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
63c7a7e8 482 _dl_reloc_overflow (map, "R_PPC_ADDR14", reloc_addr, refsym);
118bad87 483 *reloc_addr = (*reloc_addr & 0xffff0003) | (finaladdr & 0xfffc);
052b6a6c 484 if (rinfo != R_PPC_ADDR14)
118bad87
UD
485 *reloc_addr = ((*reloc_addr & 0xffdfffff)
486 | ((rinfo == R_PPC_ADDR14_BRTAKEN)
487 ^ (finaladdr >> 31)) << 21);
052b6a6c
UD
488 break;
489
490 case R_PPC_REL24:
491 {
7137f424 492 Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
052b6a6c 493 if (delta << 6 >> 6 != delta)
63c7a7e8 494 _dl_reloc_overflow (map, "R_PPC_REL24", reloc_addr, refsym);
118bad87 495 *reloc_addr = (*reloc_addr & 0xfc000003) | (delta & 0x3fffffc);
052b6a6c
UD
496 }
497 break;
498
499 case R_PPC_COPY:
500 if (sym == NULL)
501 /* This can happen in trace mode when an object could not be
502 found. */
503 return;
504 if (sym->st_size > refsym->st_size
afdca0f2 505 || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
052b6a6c
UD
506 {
507 const char *strtab;
508
b86120ed 509 strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
35fc382a 510 _dl_error_printf ("\
7b97934b 511%s: Symbol `%s' has different size in shared object, onsider re-linking\n",
e6caf4e1 512 rtld_progname ?: "<program name unknown>",
35fc382a 513 strtab + refsym->st_name);
052b6a6c
UD
514 }
515 memcpy (reloc_addr, (char *) finaladdr, MIN (sym->st_size,
516 refsym->st_size));
517 return;
518
519 case R_PPC_REL32:
7137f424 520 *reloc_addr = finaladdr - (Elf32_Word) reloc_addr;
052b6a6c
UD
521 return;
522
523 case R_PPC_JMP_SLOT:
7137f424
GK
524 /* It used to be that elf_machine_fixup_plt was used here,
525 but that doesn't work when ld.so relocates itself
526 for the second time. On the bright side, there's
527 no need to worry about thread-safety here. */
528 {
529 Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
530 if (delta << 6 >> 6 == delta)
531 *reloc_addr = OPCODE_B (delta);
532 else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
533 *reloc_addr = OPCODE_BA (finaladdr);
534 else
535 {
536 Elf32_Word *plt, *data_words;
537 Elf32_Word index, offset, num_plt_entries;
fb0dd050 538
b86120ed 539 plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
7137f424
GK
540 offset = reloc_addr - plt;
541
542 if (offset < PLT_DOUBLE_SIZE*2 + PLT_INITIAL_ENTRY_WORDS)
543 {
544 index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
545 num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
546 / sizeof(Elf32_Rela));
547 data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
548 data_words[index] = finaladdr;
549 reloc_addr[0] = OPCODE_LI (11, index * 4);
fb0dd050
UD
550 reloc_addr[1] = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS
551 - (offset+1))
7137f424
GK
552 * 4);
553 MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
554 }
555 else
556 {
557 reloc_addr[0] = OPCODE_LIS_HI (12, finaladdr);
558 reloc_addr[1] = OPCODE_ADDI (12, 12, finaladdr);
559 reloc_addr[2] = OPCODE_MTCTR (12);
560 reloc_addr[3] = OPCODE_BCTR ();
561 MODIFIED_CODE_NOQUEUE (reloc_addr + 3);
562 }
563 }
564 }
565 break;
052b6a6c 566
545dbc93
RM
567#ifdef USE_TLS
568#define CHECK_STATIC_TLS(map, sym_map) \
569 do { \
570 if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0)) \
571 _dl_allocate_static_tls (sym_map); \
572 } while (0)
573# define DO_TLS_RELOC(suffix) \
574 case R_PPC_DTPREL##suffix: \
575 /* During relocation all TLS symbols are defined and used. \
576 Therefore the offset is already correct. */ \
577 if (sym_map != NULL) \
578 do_reloc##suffix ("R_PPC_DTPREL"#suffix, \
579 TLS_DTPREL_VALUE (sym, reloc)); \
580 break; \
581 case R_PPC_TPREL##suffix: \
582 if (sym_map != NULL) \
583 { \
584 CHECK_STATIC_TLS (map, sym_map); \
585 do_reloc##suffix ("R_PPC_TPREL"#suffix, \
586 TLS_TPREL_VALUE (sym_map, sym, reloc)); \
587 } \
588 break;
589
590 inline void do_reloc16 (const char *r_name, Elf32_Addr value)
591 {
592 if (__builtin_expect (value > 0x7fff && value < 0xffff8000, 0))
63c7a7e8 593 _dl_reloc_overflow (map, r_name, reloc_addr, refsym);
545dbc93
RM
594 *(Elf32_Half *) reloc_addr = value;
595 }
596 inline void do_reloc16_LO (const char *r_name, Elf32_Addr value)
597 {
598 *(Elf32_Half *) reloc_addr = value;
599 }
600 inline void do_reloc16_HI (const char *r_name, Elf32_Addr value)
601 {
602 *(Elf32_Half *) reloc_addr = value >> 16;
603 }
604 inline void do_reloc16_HA (const char *r_name, Elf32_Addr value)
605 {
606 *(Elf32_Half *) reloc_addr = (value + 0x8000) >> 16;
607 }
608 DO_TLS_RELOC (16)
609 DO_TLS_RELOC (16_LO)
610 DO_TLS_RELOC (16_HI)
611 DO_TLS_RELOC (16_HA)
612#endif
613
052b6a6c 614 default:
421c80d2 615 _dl_reloc_bad_type (map, rinfo, 0);
052b6a6c
UD
616 return;
617 }
618
619 MODIFIED_CODE_NOQUEUE (reloc_addr);
620}