/* Machine-dependent ELF dynamic relocation functions.  PowerPC version.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <unistd.h>
#include <string.h>
#include <sys/param.h>
#include <link.h>
#include <ldsodefs.h>
#include <elf/dynamic-link.h>
#include <dl-machine.h>
#include <_itoa.h>

/* Stuff for the PLT.  */
#define PLT_INITIAL_ENTRY_WORDS 18
#define PLT_LONGBRANCH_ENTRY_WORDS 0
#define PLT_TRAMPOLINE_ENTRY_WORDS 6
#define PLT_DOUBLE_SIZE (1<<13)
#define PLT_ENTRY_START_WORDS(entry_number) \
  (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
   + ((entry_number) > PLT_DOUBLE_SIZE \
      ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
      : 0))
#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
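
/* Worked example of the layout above: the first PLT_DOUBLE_SIZE (8192)
   entries occupy two code words each and later entries four, so
   PLT_ENTRY_START_WORDS (3) == 18 + 3*2 == 24,
   PLT_ENTRY_START_WORDS (8192) == 18 + 8192*2 == 16402, and
   PLT_ENTRY_START_WORDS (8200) == 18 + 8200*2 + 8*2 == 16434.
   PLT_DATA_START_WORDS (n) is therefore the word index just past the
   last code slot, where the table of resolved addresses begins.  */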

/* Macros to build PowerPC opcode words.  */
#define OPCODE_ADDI(rd,ra,simm) \
  (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_ADDIS(rd,ra,simm) \
  (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_ADD(rd,ra,rb) \
  (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
#define OPCODE_BCTR() 0x4e800420
#define OPCODE_LWZ(rd,d,ra) \
  (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
#define OPCODE_LWZU(rd,d,ra) \
  (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
  (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)

#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
#define OPCODE_ADDIS_HI(rd,ra,value) \
  OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
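
/* For example, OPCODE_LI (11, 4) expands to OPCODE_ADDI (11, 0, 4)
   == (0x38000000 | 11 << 21 | 4) == 0x39600004, i.e. `li %r11, 4'.
   The *_HI variants add 0x8000 before taking the high half so that a
   following addi or lwz, whose 16-bit field is sign-extended, rebuilds
   the full value: for 0x20018000 the high part is 0x2002 and the low
   part -0x8000, and 0x20020000 - 0x8000 == 0x20018000 again.  */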


#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")

/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is). */
#define MODIFIED_CODE_NOQUEUE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue. */
#define MODIFIED_CODE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
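
/* Typical use later in this file: after patching a single branch in
   place, e.g. *reloc_addr = OPCODE_B (delta), the write is flushed
   with MODIFIED_CODE (reloc_addr) so that it reaches the instruction
   stream.  */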


/* The idea here is that to conform to the ABI, we are supposed to try
   to load dynamic objects between 0x10000 (we actually use 0x40000 as
   the lower bound, to increase the chance of a memory reference from
   a null pointer giving a segfault) and the program's load address;
   this may allow us to use a branch instruction in the PLT rather
   than a computed jump.  The address is only used as a preference for
   mmap, so if we get it wrong the worst that happens is that it gets
   mapped somewhere else.  */

ElfW(Addr)
__elf_preferred_address (struct link_map *loader, size_t maplength,
                         ElfW(Addr) mapstartpref)
{
  ElfW(Addr) low, high;
  struct link_map *l;
  Lmid_t nsid;

  /* If the object has a preference, load it there!  */
  if (mapstartpref != 0)
    return mapstartpref;

  /* Otherwise, quickly look for a suitable gap between 0x3FFFF and
     0x70000000.  0x3FFFF is so that references off NULL pointers will
     cause a segfault, 0x70000000 is just paranoia (it should always
     be superseded by the program's load address).  */
  low = 0x0003FFFF;
  high = 0x70000000;
  for (nsid = 0; nsid < DL_NNS; ++nsid)
    for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
      {
        ElfW(Addr) mapstart, mapend;
        mapstart = l->l_map_start & ~(GLRO(dl_pagesize) - 1);
        mapend = l->l_map_end | (GLRO(dl_pagesize) - 1);
        assert (mapend > mapstart);

        /* Prefer gaps below the main executable, note that l ==
           _dl_loaded does not work for static binaries loading
           e.g. libnss_*.so.  */
        if ((mapend >= high || l->l_type == lt_executable)
            && high >= mapstart)
          high = mapstart;
        else if (mapend >= low && low >= mapstart)
          low = mapend;
        else if (high >= mapend && mapstart >= low)
          {
            if (high - mapend >= mapstart - low)
              low = mapend;
            else
              high = mapstart;
          }
      }

  high -= 0x10000;  /* Allow some room between objects.  */
  maplength = (maplength | (GLRO(dl_pagesize) - 1)) + 1;
  if (high <= low || high - low < maplength)
    return 0;
  return high - maplength;  /* Both high and maplength are page-aligned.  */
}
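
/* Illustration with made-up numbers, assuming 4 KiB pages: if the only
   relevant mapping is the executable at 0x10000000, the loop leaves
   low == 0x3FFFF and lowers high to 0x10000000; the margin brings high
   down to 0x0fff0000, a maplength of 0x20ab4 rounds up to 0x21000, and
   the suggested address is 0x0fff0000 - 0x21000 == 0x0ffcf000.  */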

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.
   Also install a small trampoline to be used by entries that have
   been relocated to an address too far away for a single branch.  */

/* There are many kinds of PLT entries:

   (1) A direct jump to the actual routine, either a relative or
       absolute branch.  These are set up in __elf_machine_fixup_plt.

   (2) Short lazy entries.  These cover the first 8192 slots in
       the PLT, and look like (where 'index' goes from 0 to 8191):

       li %r11, index*4
       b &plt[PLT_TRAMPOLINE_ENTRY_WORDS+2]

   (3) Short indirect jumps.  These replace (2) when a direct jump
       wouldn't reach.  They look the same except that the branch
       is 'b &plt[PLT_LONGBRANCH_ENTRY_WORDS]'.

   (4) Long lazy entries.  These cover the slots when a short entry
       won't fit ('index*4' overflows its field), and look like:

       lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
       lwzu %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
       b &plt[PLT_TRAMPOLINE_ENTRY_WORDS]
       bctr

   (5) Long indirect jumps.  These replace (4) when a direct jump
       wouldn't reach.  They look like:

       lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
       lwz %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
       mtctr %r12
       bctr

   (6) Long direct jumps.  These are used when thread-safety is not
       required.  They look like:

       lis %r12, %hi(finaladdr)
       addi %r12, %r12, %lo(finaladdr)
       mtctr %r12
       bctr


   The lazy entries, (2) and (4), are set up here in
   __elf_machine_runtime_setup.  (1), (3), and (5) are set up in
   __elf_machine_fixup_plt.  (1), (3), and (6) can also be constructed
   in __process_machine_rela.

   The reason for the somewhat strange construction of the long
   entries, (4) and (5), is that we need to ensure thread-safety.  For
   (1) and (3), this is obvious because only one instruction is
   changed and the PPC architecture guarantees that aligned stores are
   atomic.  For (5), this is more tricky.  When changing (4) to (5),
   the `b' instruction is first changed to `mtctr'; this is safe
   and is why the `lwzu' instruction is not just a simple `addi'.
   Once this is done, and is visible to all processors, the `lwzu' can
   safely be changed to a `lwz'.  */
int
__elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf32_Word i;
      Elf32_Word *plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf32_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf32_Rela));
      Elf32_Word rel_offset_words = PLT_DATA_START_WORDS (num_plt_entries);
      Elf32_Word data_words = (Elf32_Word) (plt + rel_offset_words);
      Elf32_Word size_modified;

      extern void _dl_runtime_resolve (void);
      extern void _dl_prof_resolve (void);

      /* Convert the index in r11 into an actual address, and get the
         word at that address.  */
      plt[PLT_LONGBRANCH_ENTRY_WORDS] = OPCODE_ADDIS_HI (11, 11, data_words);
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 1] = OPCODE_LWZ (11, data_words, 11);

      /* Call the procedure at that address.  */
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 2] = OPCODE_MTCTR (11);
      plt[PLT_LONGBRANCH_ENTRY_WORDS + 3] = OPCODE_BCTR ();

      if (lazy)
        {
          Elf32_Word *tramp = plt + PLT_TRAMPOLINE_ENTRY_WORDS;
          Elf32_Word dlrr;
          Elf32_Word offset;

#ifndef PROF
          dlrr = (Elf32_Word) (profile
                               ? _dl_prof_resolve
                               : _dl_runtime_resolve);
          if (profile && GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), map))
            /* This is the object we are looking for.  Say that we really
               want profiling and the timers are started.  */
            GL(dl_profile_map) = map;
#else
          dlrr = (Elf32_Word) _dl_runtime_resolve;
#endif

          /* For the long entries, subtract off data_words.  */
          tramp[0] = OPCODE_ADDIS_HI (11, 11, -data_words);
          tramp[1] = OPCODE_ADDI (11, 11, -data_words);

          /* Multiply index of entry by 3 (in r11).  */
          tramp[2] = OPCODE_SLWI (12, 11, 1);
          tramp[3] = OPCODE_ADD (11, 12, 11);
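          /* On entry r11 holds index*4 (loaded directly by a short
             entry or recovered by the subtraction above), so the shift
             and add leave index*12 in r11, the byte offset of the
             matching Elf32_Rela that the resolver expects.  */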
          if (dlrr <= 0x01fffffc || dlrr >= 0xfe000000)
            {
              /* Load address of link map in r12.  */
              tramp[4] = OPCODE_LI (12, (Elf32_Word) map);
              tramp[5] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);

              /* Call _dl_runtime_resolve.  */
              tramp[6] = OPCODE_BA (dlrr);
            }
          else
            {
              /* Get address of _dl_runtime_resolve in CTR.  */
              tramp[4] = OPCODE_LI (12, dlrr);
              tramp[5] = OPCODE_ADDIS_HI (12, 12, dlrr);
              tramp[6] = OPCODE_MTCTR (12);

              /* Load address of link map in r12.  */
              tramp[7] = OPCODE_LI (12, (Elf32_Word) map);
              tramp[8] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);

              /* Call _dl_runtime_resolve.  */
              tramp[9] = OPCODE_BCTR ();
            }

          /* Set up the lazy PLT entries.  */
          offset = PLT_INITIAL_ENTRY_WORDS;
          i = 0;
          while (i < num_plt_entries && i < PLT_DOUBLE_SIZE)
            {
              plt[offset  ] = OPCODE_LI (11, i * 4);
              plt[offset+1] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2
                                         - (offset+1))
                                        * 4);
              i++;
              offset += 2;
            }
          while (i < num_plt_entries)
            {
              plt[offset  ] = OPCODE_LIS_HI (11, i * 4 + data_words);
              plt[offset+1] = OPCODE_LWZU (12, i * 4 + data_words, 11);
              plt[offset+2] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS
                                         - (offset+2))
                                        * 4);
              plt[offset+3] = OPCODE_BCTR ();
              i++;
              offset += 4;
            }
        }

      /* Now, we've modified code.  We need to write the changes from
         the data cache to a second-level unified cache, then make
         sure that stale data in the instruction cache is removed.
         (In a multiprocessor system, the effect is more complex.)
         Most of the PLT shouldn't be in the instruction cache, but
         there may be a little overlap at the start and the end.

         Assumes that dcbst and icbi apply to lines of 16 bytes or
         more.  Current known line sizes are 16, 32, and 128 bytes.
         The following gets the cache line size, when available.  */

      /* Default minimum 4 words per cache line.  */
      int line_size_words = 4;

      if (lazy && GLRO(dl_cache_line_size) != 0)
        /* Convert bytes to words.  */
        line_size_words = GLRO(dl_cache_line_size) / 4;

      size_modified = lazy ? rel_offset_words : 6;
      for (i = 0; i < size_modified; i += line_size_words)
        PPC_DCBST (plt + i);
      PPC_DCBST (plt + size_modified - 1);
      PPC_SYNC;

      for (i = 0; i < size_modified; i += line_size_words)
        PPC_ICBI (plt + i);
      PPC_ICBI (plt + size_modified - 1);
      PPC_ISYNC;
    }

  return lazy;
}

Elf32_Addr
__elf_machine_fixup_plt (struct link_map *map,
                         Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
{
  Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
  if (delta << 6 >> 6 == delta)
    *reloc_addr = OPCODE_B (delta);
  else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
    *reloc_addr = OPCODE_BA (finaladdr);
  else
    {
      Elf32_Word *plt, *data_words;
      Elf32_Word index, offset, num_plt_entries;

      num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                         / sizeof (Elf32_Rela));
      plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
      offset = reloc_addr - plt;
      index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
      data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);

      reloc_addr += 1;

      if (index < PLT_DOUBLE_SIZE)
        {
          data_words[index] = finaladdr;
          PPC_SYNC;
          *reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1))
                                  * 4);
        }
      else
        {
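          /* Convert the long lazy entry (4) into a long indirect jump
             (5), in the order described above: publish the target
             address, turn the `b' into `mtctr', and only then replace
             the `lwzu' with a plain `lwz'.  */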
          index -= (index - PLT_DOUBLE_SIZE)/2;

          data_words[index] = finaladdr;
          PPC_SYNC;

          reloc_addr[1] = OPCODE_MTCTR (12);
          MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
          PPC_SYNC;

          reloc_addr[0] = OPCODE_LWZ (12,
                                      (Elf32_Word) (data_words + index), 11);
        }
    }
  MODIFIED_CODE (reloc_addr);
  return finaladdr;
}

void
_dl_reloc_overflow (struct link_map *map,
                    const char *name,
                    Elf32_Addr *const reloc_addr,
                    const Elf32_Sym *refsym)
{
  char buffer[128];
  char *t;
  t = stpcpy (buffer, name);
  t = stpcpy (t, " relocation at 0x00000000");
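  /* The "0x00000000" above is a placeholder: _itoa_word writes the hex
     digits of reloc_addr backwards, ending just before T, so they land
     right-aligned over the zeros and shorter values stay zero-padded.  */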
  _itoa_word ((unsigned) reloc_addr, t, 16, 0);
  if (refsym)
    {
      const char *strtab;

      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
      t = stpcpy (t, " for symbol `");
      t = stpcpy (t, strtab + refsym->st_name);
      t = stpcpy (t, "'");
    }
  t = stpcpy (t, " out of range");
  _dl_signal_error (0, map->l_name, NULL, buffer);
}

void
__process_machine_rela (struct link_map *map,
                        const Elf32_Rela *reloc,
                        struct link_map *sym_map,
                        const Elf32_Sym *sym,
                        const Elf32_Sym *refsym,
                        Elf32_Addr *const reloc_addr,
                        Elf32_Addr const finaladdr,
                        int rinfo, bool skip_ifunc)
{
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
    } __attribute__((__packed__));

  switch (rinfo)
    {
    case R_PPC_NONE:
      return;

    case R_PPC_ADDR32:
    case R_PPC_GLOB_DAT:
    case R_PPC_RELATIVE:
      *reloc_addr = finaladdr;
      return;

    case R_PPC_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        *reloc_addr = ((Elf32_Addr (*) (void)) finaladdr) ();
      return;

    case R_PPC_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = finaladdr;
      break;

    case R_PPC_ADDR24:
      if (__glibc_unlikely (finaladdr > 0x01fffffc && finaladdr < 0xfe000000))
        _dl_reloc_overflow (map, "R_PPC_ADDR24", reloc_addr, refsym);
      *reloc_addr = (*reloc_addr & 0xfc000003) | (finaladdr & 0x3fffffc);
      break;

    case R_PPC_ADDR16:
      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
        _dl_reloc_overflow (map, "R_PPC_ADDR16", reloc_addr, refsym);
      *(Elf32_Half*) reloc_addr = finaladdr;
      break;

    case R_PPC_UADDR16:
      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
        _dl_reloc_overflow (map, "R_PPC_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = finaladdr;
      break;

    case R_PPC_ADDR16_LO:
      *(Elf32_Half*) reloc_addr = finaladdr;
      break;

    case R_PPC_ADDR16_HI:
      *(Elf32_Half*) reloc_addr = finaladdr >> 16;
      break;

    case R_PPC_ADDR16_HA:
      *(Elf32_Half*) reloc_addr = (finaladdr + 0x8000) >> 16;
      break;

    case R_PPC_ADDR14:
    case R_PPC_ADDR14_BRTAKEN:
    case R_PPC_ADDR14_BRNTAKEN:
      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
        _dl_reloc_overflow (map, "R_PPC_ADDR14", reloc_addr, refsym);
      *reloc_addr = (*reloc_addr & 0xffff0003) | (finaladdr & 0xfffc);
      if (rinfo != R_PPC_ADDR14)
        *reloc_addr = ((*reloc_addr & 0xffdfffff)
                       | ((rinfo == R_PPC_ADDR14_BRTAKEN)
                          ^ (finaladdr >> 31)) << 21);
      break;

    case R_PPC_REL24:
      {
        Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
        if (delta << 6 >> 6 != delta)
          _dl_reloc_overflow (map, "R_PPC_REL24", reloc_addr, refsym);
        *reloc_addr = (*reloc_addr & 0xfc000003) | (delta & 0x3fffffc);
      }
      break;

    case R_PPC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode when an object could not be
           found.  */
        return;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr, (char *) finaladdr, MIN (sym->st_size,
                                                   refsym->st_size));
      return;

    case R_PPC_REL32:
      *reloc_addr = finaladdr - (Elf32_Word) reloc_addr;
      return;

    case R_PPC_JMP_SLOT:
      /* It used to be that elf_machine_fixup_plt was used here,
         but that doesn't work when ld.so relocates itself
         for the second time.  On the bright side, there's
         no need to worry about thread-safety here.  */
      {
        Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
        if (delta << 6 >> 6 == delta)
          *reloc_addr = OPCODE_B (delta);
        else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
          *reloc_addr = OPCODE_BA (finaladdr);
        else
          {
            Elf32_Word *plt, *data_words;
            Elf32_Word index, offset, num_plt_entries;

            plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
            offset = reloc_addr - plt;

            if (offset < PLT_DOUBLE_SIZE*2 + PLT_INITIAL_ENTRY_WORDS)
              {
                index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
                num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                   / sizeof (Elf32_Rela));
                data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
                data_words[index] = finaladdr;
                reloc_addr[0] = OPCODE_LI (11, index * 4);
                reloc_addr[1] = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS
                                           - (offset+1))
                                          * 4);
                MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
              }
            else
              {
                reloc_addr[0] = OPCODE_LIS_HI (12, finaladdr);
                reloc_addr[1] = OPCODE_ADDI (12, 12, finaladdr);
                reloc_addr[2] = OPCODE_MTCTR (12);
                reloc_addr[3] = OPCODE_BCTR ();
                MODIFIED_CODE_NOQUEUE (reloc_addr + 3);
              }
          }
      }
      break;

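/* The macro below expands into the R_PPC_DTPREL* and R_PPC_TPREL*
   cases of this switch; the nested do_reloc16* helpers defined after
   it supply the plain 16-bit, _LO, _HI and _HA store variants.  */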
#define DO_TLS_RELOC(suffix) \
    case R_PPC_DTPREL##suffix: \
      /* During relocation all TLS symbols are defined and used. \
         Therefore the offset is already correct.  */ \
      if (sym_map != NULL) \
        do_reloc##suffix ("R_PPC_DTPREL"#suffix, \
                          TLS_DTPREL_VALUE (sym, reloc)); \
      break; \
    case R_PPC_TPREL##suffix: \
      if (sym_map != NULL) \
        { \
          CHECK_STATIC_TLS (map, sym_map); \
          do_reloc##suffix ("R_PPC_TPREL"#suffix, \
                            TLS_TPREL_VALUE (sym_map, sym, reloc)); \
        } \
      break;

      inline void do_reloc16 (const char *r_name, Elf32_Addr value)
        {
          if (__glibc_unlikely (value > 0x7fff && value < 0xffff8000))
            _dl_reloc_overflow (map, r_name, reloc_addr, refsym);
          *(Elf32_Half *) reloc_addr = value;
        }
      inline void do_reloc16_LO (const char *r_name, Elf32_Addr value)
        {
          *(Elf32_Half *) reloc_addr = value;
        }
      inline void do_reloc16_HI (const char *r_name, Elf32_Addr value)
        {
          *(Elf32_Half *) reloc_addr = value >> 16;
        }
      inline void do_reloc16_HA (const char *r_name, Elf32_Addr value)
        {
          *(Elf32_Half *) reloc_addr = (value + 0x8000) >> 16;
        }
      DO_TLS_RELOC (16)
      DO_TLS_RELOC (16_LO)
      DO_TLS_RELOC (16_HI)
      DO_TLS_RELOC (16_HA)

    default:
      _dl_reloc_bad_type (map, rinfo, 0);
      return;
    }

  MODIFIED_CODE_NOQUEUE (reloc_addr);
}