1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2021 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25
26 #include "opcode/i386.h"
27 #include "elf/x86-64.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
39 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
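/* A note on why that is safe (a reading of the ELF layouts, not text from
   the psABI): the ELFCLASS32 r_info packs an 8-bit relocation type and the
   ELFCLASS64 r_info a 32-bit one, but every R_X86_64_* value fits in the
   low 8 bits, so ELF32_R_TYPE recovers the type from either encoding.  */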
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 3, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 2, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 2, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 2, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 2, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 2, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 1, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 2, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 2, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 4, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 4, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 2, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 4, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 3, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173
174 /* We have a gap in the reloc numbers here.
175 R_X86_64_standard counts the number up to this point, and
176 R_X86_64_vt_offset is the value to subtract from a reloc type of
177 R_X86_64_GNU_VT* to form an index into this table. */
178 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
179 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
180
181 /* GNU extension to record C++ vtable hierarchy. */
182 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
183 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
184
185 /* GNU extension to record C++ vtable member usage. */
186 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
187 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
188 false),
189
190 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
191 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_bitfield,
192 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
193 false)
194 };
195
196 #define X86_PCREL_TYPE_P(TYPE) \
197 ( ((TYPE) == R_X86_64_PC8) \
198 || ((TYPE) == R_X86_64_PC16) \
199 || ((TYPE) == R_X86_64_PC32) \
200 || ((TYPE) == R_X86_64_PC32_BND) \
201 || ((TYPE) == R_X86_64_PC64))
202
203 #define X86_SIZE_TYPE_P(TYPE) \
204 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
205
206 /* Map BFD relocs to the x86_64 elf relocs. */
207 struct elf_reloc_map
208 {
209 bfd_reloc_code_real_type bfd_reloc_val;
210 unsigned char elf_reloc_val;
211 };
212
213 static const struct elf_reloc_map x86_64_reloc_map[] =
214 {
215 { BFD_RELOC_NONE, R_X86_64_NONE, },
216 { BFD_RELOC_64, R_X86_64_64, },
217 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
218 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
219 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
220 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
221 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
222 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
223 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
224 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
225 { BFD_RELOC_32, R_X86_64_32, },
226 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
227 { BFD_RELOC_16, R_X86_64_16, },
228 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
229 { BFD_RELOC_8, R_X86_64_8, },
230 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
231 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
232 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
233 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
234 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
235 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
236 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
237 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
238 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
239 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
240 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
241 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
242 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
243 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
244 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
245 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
246 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
247 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
248 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
249 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
250 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
251 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
252 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
253 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
254 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
255 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
256 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
257 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
258 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
259 };
260
261 static reloc_howto_type *
262 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
263 {
264 unsigned i;
265
266 if (r_type == (unsigned int) R_X86_64_32)
267 {
268 if (ABI_64_P (abfd))
269 i = r_type;
270 else
271 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
272 }
273 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
274 || r_type >= (unsigned int) R_X86_64_max)
275 {
276 if (r_type >= (unsigned int) R_X86_64_standard)
277 {
278 /* xgettext:c-format */
279 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
280 abfd, r_type);
281 bfd_set_error (bfd_error_bad_value);
282 return NULL;
283 }
284 i = r_type;
285 }
286 else
287 i = r_type - (unsigned int) R_X86_64_vt_offset;
288 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
289 return &x86_64_elf_howto_table[i];
290 }
291
292 /* Given a BFD reloc type, return a HOWTO structure. */
293 static reloc_howto_type *
294 elf_x86_64_reloc_type_lookup (bfd *abfd,
295 bfd_reloc_code_real_type code)
296 {
297 unsigned int i;
298
299 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
300 i++)
301 {
302 if (x86_64_reloc_map[i].bfd_reloc_val == code)
303 return elf_x86_64_rtype_to_howto (abfd,
304 x86_64_reloc_map[i].elf_reloc_val);
305 }
306 return NULL;
307 }
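/* For example (a sketch of the generic BFD path, not code from this file):
   a caller that does
       reloc_howto_type *howto
         = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
   lands in the linear scan above and gets back the R_X86_64_PC32 howto.  */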
308
309 static reloc_howto_type *
310 elf_x86_64_reloc_name_lookup (bfd *abfd,
311 const char *r_name)
312 {
313 unsigned int i;
314
315 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
316 {
317 /* Get x32 R_X86_64_32. */
318 reloc_howto_type *reloc
319 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
320 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
321 return reloc;
322 }
323
324 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
325 if (x86_64_elf_howto_table[i].name != NULL
326 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
327 return &x86_64_elf_howto_table[i];
328
329 return NULL;
330 }
331
332 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
333
334 static bool
335 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
336 Elf_Internal_Rela *dst)
337 {
338 unsigned r_type;
339
340 r_type = ELF32_R_TYPE (dst->r_info);
341 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
342 if (cache_ptr->howto == NULL)
343 return false;
344 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
345 return true;
346 }
347 \f
348 /* Support for core dump NOTE sections. */
349 static bool
350 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
351 {
352 int offset;
353 size_t size;
354
355 switch (note->descsz)
356 {
357 default:
358 return false;
359
360 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
361 /* pr_cursig */
362 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
363
364 /* pr_pid */
365 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
366
367 /* pr_reg */
368 offset = 72;
369 size = 216;
370
371 break;
372
373 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
374 /* pr_cursig */
375 elf_tdata (abfd)->core->signal
376 = bfd_get_16 (abfd, note->descdata + 12);
377
378 /* pr_pid */
379 elf_tdata (abfd)->core->lwpid
380 = bfd_get_32 (abfd, note->descdata + 32);
381
382 /* pr_reg */
383 offset = 112;
384 size = 216;
385
386 break;
387 }
388
389 /* Make a ".reg/999" section. */
390 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
391 size, note->descpos + offset);
392 }
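/* The magic numbers above follow the Linux note layouts: the descriptor
   size (296 for x32, 336 for x86-64) identifies which struct elf_prstatus
   was dumped, and the 216-byte pr_reg blob presumably corresponds to the
   27 eight-byte fields of the x86-64 user_regs_struct.  */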
393
394 static bool
395 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
396 {
397 switch (note->descsz)
398 {
399 default:
400 return false;
401
402 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
403 elf_tdata (abfd)->core->pid
404 = bfd_get_32 (abfd, note->descdata + 12);
405 elf_tdata (abfd)->core->program
406 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
407 elf_tdata (abfd)->core->command
408 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
409 break;
410
411 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
412 elf_tdata (abfd)->core->pid
413 = bfd_get_32 (abfd, note->descdata + 24);
414 elf_tdata (abfd)->core->program
415 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
416 elf_tdata (abfd)->core->command
417 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
418 }
419
420 /* Note that for some reason, a spurious space is tacked
421 onto the end of the args in some (at least one anyway)
422 implementations, so strip it off if it exists. */
423
424 {
425 char *command = elf_tdata (abfd)->core->command;
426 int n = strlen (command);
427
428 if (0 < n && command[n - 1] == ' ')
429 command[n - 1] = '\0';
430 }
431
432 return true;
433 }
434
435 #ifdef CORE_HEADER
436 # if GCC_VERSION >= 8000
437 # pragma GCC diagnostic push
438 # pragma GCC diagnostic ignored "-Wstringop-truncation"
439 # endif
440 static char *
441 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
442 int note_type, ...)
443 {
444 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
445 va_list ap;
446 const char *fname, *psargs;
447 long pid;
448 int cursig;
449 const void *gregs;
450
451 switch (note_type)
452 {
453 default:
454 return NULL;
455
456 case NT_PRPSINFO:
457 va_start (ap, note_type);
458 fname = va_arg (ap, const char *);
459 psargs = va_arg (ap, const char *);
460 va_end (ap);
461
462 if (bed->s->elfclass == ELFCLASS32)
463 {
464 prpsinfo32_t data;
465 memset (&data, 0, sizeof (data));
466 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
467 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
468 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
469 &data, sizeof (data));
470 }
471 else
472 {
473 prpsinfo64_t data;
474 memset (&data, 0, sizeof (data));
475 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
476 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
477 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
478 &data, sizeof (data));
479 }
480 /* NOTREACHED */
481
482 case NT_PRSTATUS:
483 va_start (ap, note_type);
484 pid = va_arg (ap, long);
485 cursig = va_arg (ap, int);
486 gregs = va_arg (ap, const void *);
487 va_end (ap);
488
489 if (bed->s->elfclass == ELFCLASS32)
490 {
491 if (bed->elf_machine_code == EM_X86_64)
492 {
493 prstatusx32_t prstat;
494 memset (&prstat, 0, sizeof (prstat));
495 prstat.pr_pid = pid;
496 prstat.pr_cursig = cursig;
497 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
498 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
499 &prstat, sizeof (prstat));
500 }
501 else
502 {
503 prstatus32_t prstat;
504 memset (&prstat, 0, sizeof (prstat));
505 prstat.pr_pid = pid;
506 prstat.pr_cursig = cursig;
507 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
508 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
509 &prstat, sizeof (prstat));
510 }
511 }
512 else
513 {
514 prstatus64_t prstat;
515 memset (&prstat, 0, sizeof (prstat));
516 prstat.pr_pid = pid;
517 prstat.pr_cursig = cursig;
518 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
519 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
520 &prstat, sizeof (prstat));
521 }
522 }
523 /* NOTREACHED */
524 }
525 # if GCC_VERSION >= 8000
526 # pragma GCC diagnostic pop
527 # endif
528 #endif
529 \f
530 /* Functions for the x86-64 ELF linker. */
531
532 /* The size in bytes of an entry in the global offset table. */
533
534 #define GOT_ENTRY_SIZE 8
535
536 /* The size in bytes of an entry in the lazy procedure linkage table. */
537
538 #define LAZY_PLT_ENTRY_SIZE 16
539
540 /* The size in bytes of an entry in the non-lazy procedure linkage
541 table. */
542
543 #define NON_LAZY_PLT_ENTRY_SIZE 8
544
545 /* The first entry in a lazy procedure linkage table looks like this.
546 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
547 works. */
548
549 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
550 {
551 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
552 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
553 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
554 };
555
556 /* Subsequent entries in a lazy procedure linkage table look like this. */
557
558 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
559 {
560 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
561 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
562 0x68, /* pushq immediate */
563 0, 0, 0, 0, /* replaced with index into relocation table. */
564 0xe9, /* jmp relative */
565 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
566 };
567
568 /* The first entry in a lazy procedure linkage table with BND prefix
569 looks like this. */
570
571 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
572 {
573 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
574 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
575 0x0f, 0x1f, 0 /* nopl (%rax) */
576 };
577
578 /* Subsequent entries for branches with BND prefix in a lazy procedure
579 linkage table look like this. */
580
581 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
582 {
583 0x68, 0, 0, 0, 0, /* pushq immediate */
584 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
585 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
586 };
587
588 /* The first entry in the IBT-enabled lazy procedure linkage table is
589 the same as the lazy PLT with BND prefix so that bound registers are
590 preserved when control is passed to the dynamic linker. Subsequent
591 entries for an IBT-enabled lazy procedure linkage table look like
592 this. */
593
594 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
595 {
596 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x90 /* nop */
600 };
601
602 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
603 is the same as the normal lazy PLT. Subsequent entries for an
604 x32 IBT-enabled lazy procedure linkage table look like this. */
605
606 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
607 {
608 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
609 0x68, 0, 0, 0, 0, /* pushq immediate */
610 0xe9, 0, 0, 0, 0, /* jmpq relative */
611 0x66, 0x90 /* xchg %ax,%ax */
612 };
613
614 /* Entries in the non-lazy procedure linkage table look like this. */
615
616 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 {
618 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
619 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
620 0x66, 0x90 /* xchg %ax,%ax */
621 };
622
623 /* Entries for branches with BND prefix in the non-lazy procedure
624 linkage table look like this. */
625
626 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
627 {
628 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
629 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
630 0x90 /* nop */
631 };
632
633 /* Entries for branches with IBT enabled in the non-lazy procedure
634 linkage table look like this. They have the same size as the lazy
635 PLT entry. */
636
637 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
638 {
639 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
640 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
641 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
642 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
643 };
644
645 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
646 linkage table look like this. They have the same size as the lazy
647 PLT entry. */
648
649 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
650 {
651 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
652 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
653 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
654 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
655 };
656
657 /* The TLSDESC entry in a lazy procedure linkage table. */
658 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
659 {
660 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
661 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
662 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
663 };
664
665 /* .eh_frame covering the lazy .plt section. */
666
667 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
668 {
669 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
670 0, 0, 0, 0, /* CIE ID */
671 1, /* CIE version */
672 'z', 'R', 0, /* Augmentation string */
673 1, /* Code alignment factor */
674 0x78, /* Data alignment factor */
675 16, /* Return address column */
676 1, /* Augmentation size */
677 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
678 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
679 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
680 DW_CFA_nop, DW_CFA_nop,
681
682 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
683 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
684 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
685 0, 0, 0, 0, /* .plt size goes here */
686 0, /* Augmentation size */
687 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
688 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
689 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
690 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
691 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
692 11, /* Block length */
693 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
694 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
695 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
696 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
697 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
698 };
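/* An informal reading of the DW_CFA_def_cfa_expression above: it computes
       CFA = %rsp + 8 + (((%rip & 15) >= 11) << 3)
   i.e. once execution within a 16-byte lazy PLT entry has passed the
   pushq of the relocation index, the CFA is 8 bytes further from %rsp.
   The BND, IBT and x32 IBT variants below appear to differ only in the
   DW_OP_lit threshold (5, 10 and 9 respectively).  */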
699
700 /* .eh_frame covering the lazy BND .plt section. */
701
702 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
703 {
704 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
705 0, 0, 0, 0, /* CIE ID */
706 1, /* CIE version */
707 'z', 'R', 0, /* Augmentation string */
708 1, /* Code alignment factor */
709 0x78, /* Data alignment factor */
710 16, /* Return address column */
711 1, /* Augmentation size */
712 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
713 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
714 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
715 DW_CFA_nop, DW_CFA_nop,
716
717 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
718 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
719 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
720 0, 0, 0, 0, /* .plt size goes here */
721 0, /* Augmentation size */
722 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
723 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
724 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
725 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
726 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
727 11, /* Block length */
728 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
729 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
730 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
731 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
732 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
733 };
734
735 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
736
737 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
738 {
739 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
740 0, 0, 0, 0, /* CIE ID */
741 1, /* CIE version */
742 'z', 'R', 0, /* Augmentation string */
743 1, /* Code alignment factor */
744 0x78, /* Data alignment factor */
745 16, /* Return address column */
746 1, /* Augmentation size */
747 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
748 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
749 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
750 DW_CFA_nop, DW_CFA_nop,
751
752 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
753 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
754 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
755 0, 0, 0, 0, /* .plt size goes here */
756 0, /* Augmentation size */
757 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
758 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
759 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
760 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
761 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
762 11, /* Block length */
763 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
764 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
765 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
766 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
767 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
768 };
769
770 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
771
772 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
773 {
774 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
775 0, 0, 0, 0, /* CIE ID */
776 1, /* CIE version */
777 'z', 'R', 0, /* Augmentation string */
778 1, /* Code alignment factor */
779 0x78, /* Data alignment factor */
780 16, /* Return address column */
781 1, /* Augmentation size */
782 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
783 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
784 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
785 DW_CFA_nop, DW_CFA_nop,
786
787 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
788 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
789 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
790 0, 0, 0, 0, /* .plt size goes here */
791 0, /* Augmentation size */
792 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
793 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
794 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
795 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
796 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
797 11, /* Block length */
798 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
799 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
800 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
801 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
802 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
803 };
804
805 /* .eh_frame covering the non-lazy .plt section. */
806
807 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
808 {
809 #define PLT_GOT_FDE_LENGTH 20
810 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
811 0, 0, 0, 0, /* CIE ID */
812 1, /* CIE version */
813 'z', 'R', 0, /* Augmentation string */
814 1, /* Code alignment factor */
815 0x78, /* Data alignment factor */
816 16, /* Return address column */
817 1, /* Augmentation size */
818 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
819 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
820 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
821 DW_CFA_nop, DW_CFA_nop,
822
823 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
824 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
825 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
826 0, 0, 0, 0, /* non-lazy .plt size goes here */
827 0, /* Augmentation size */
828 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
829 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
830 };
831
832 /* These are the standard parameters. */
833 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
834 {
835 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
836 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
837 elf_x86_64_lazy_plt_entry, /* plt_entry */
838 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
839 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
840 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
841 6, /* plt_tlsdesc_got1_offset */
842 12, /* plt_tlsdesc_got2_offset */
843 10, /* plt_tlsdesc_got1_insn_end */
844 16, /* plt_tlsdesc_got2_insn_end */
845 2, /* plt0_got1_offset */
846 8, /* plt0_got2_offset */
847 12, /* plt0_got2_insn_end */
848 2, /* plt_got_offset */
849 7, /* plt_reloc_offset */
850 12, /* plt_plt_offset */
851 6, /* plt_got_insn_size */
852 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
853 6, /* plt_lazy_offset */
854 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
855 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
856 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
857 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
858 };
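/* A gloss on how those offsets line up with elf_x86_64_lazy_plt_entry
   (derived from the byte arrays above rather than from a spec):
   plt_got_offset 2 is the rel32 after the ff 25 jmp, plt_reloc_offset 7
   is the imm32 after the 68 push, and plt_plt_offset 12 is the rel32
   after the e9 jmp back to .plt0; the *_insn_end / *_insn_size fields
   give the end or length of the instruction owning each field, which is
   what the PC-relative values are computed against.  */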
859
860 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
861 {
862 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
863 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
864 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
865 2, /* plt_got_offset */
866 6, /* plt_got_insn_size */
867 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
868 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
869 };
870
871 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
872 {
873 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
874 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
875 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
876 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
877 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
878 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
879 6, /* plt_tlsdesc_got1_offset */
880 12, /* plt_tlsdesc_got2_offset */
881 10, /* plt_tlsdesc_got1_insn_end */
882 16, /* plt_tlsdesc_got2_insn_end */
883 2, /* plt0_got1_offset */
884 1+8, /* plt0_got2_offset */
885 1+12, /* plt0_got2_insn_end */
886 1+2, /* plt_got_offset */
887 1, /* plt_reloc_offset */
888 7, /* plt_plt_offset */
889 1+6, /* plt_got_insn_size */
890 11, /* plt_plt_insn_end */
891 0, /* plt_lazy_offset */
892 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
893 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
894 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
895 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
896 };
897
898 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
899 {
900 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
901 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
902 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
903 1+2, /* plt_got_offset */
904 1+6, /* plt_got_insn_size */
905 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
916 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
917 6, /* plt_tlsdesc_got1_offset */
918 12, /* plt_tlsdesc_got2_offset */
919 10, /* plt_tlsdesc_got1_insn_end */
920 16, /* plt_tlsdesc_got2_insn_end */
921 2, /* plt0_got1_offset */
922 1+8, /* plt0_got2_offset */
923 1+12, /* plt0_got2_insn_end */
924 4+1+2, /* plt_got_offset */
925 4+1, /* plt_reloc_offset */
926 4+1+6, /* plt_plt_offset */
927 4+1+6, /* plt_got_insn_size */
928 4+1+5+5, /* plt_plt_insn_end */
929 0, /* plt_lazy_offset */
930 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
931 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
932 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
933 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
934 };
935
936 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
937 {
938 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
939 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
940 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
941 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
942 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
943 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
944 6, /* plt_tlsdesc_got1_offset */
945 12, /* plt_tlsdesc_got2_offset */
946 10, /* plt_tlsdesc_got1_insn_end */
947 16, /* plt_tlsdesc_got2_insn_end */
948 2, /* plt0_got1_offset */
949 8, /* plt0_got2_offset */
950 12, /* plt0_got2_insn_end */
951 4+2, /* plt_got_offset */
952 4+1, /* plt_reloc_offset */
953 4+6, /* plt_plt_offset */
954 4+6, /* plt_got_insn_size */
955 4+5+5, /* plt_plt_insn_end */
956 0, /* plt_lazy_offset */
957 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
958 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
959 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
960 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
961 };
962
963 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
964 {
965 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
966 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
967 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
968 4+1+2, /* plt_got_offset */
969 4+1+6, /* plt_got_insn_size */
970 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
971 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
972 };
973
974 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
975 {
976 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
977 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
978 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
979 4+2, /* plt_got_offset */
980 4+6, /* plt_got_insn_size */
981 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
982 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
983 };
984
985
986 static bool
987 elf64_x86_64_elf_object_p (bfd *abfd)
988 {
989 /* Set the right machine number for an x86-64 elf64 file. */
990 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
991 return true;
992 }
993
994 static bool
995 elf32_x86_64_elf_object_p (bfd *abfd)
996 {
997 /* Set the right machine number for an x86-64 elf32 file. */
998 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
999 return true;
1000 }
1001
1002 /* Return TRUE if the TLS access code sequence supports transition
1003 from R_TYPE. */
1004
1005 static bool
1006 elf_x86_64_check_tls_transition (bfd *abfd,
1007 struct bfd_link_info *info,
1008 asection *sec,
1009 bfd_byte *contents,
1010 Elf_Internal_Shdr *symtab_hdr,
1011 struct elf_link_hash_entry **sym_hashes,
1012 unsigned int r_type,
1013 const Elf_Internal_Rela *rel,
1014 const Elf_Internal_Rela *relend)
1015 {
1016 unsigned int val;
1017 unsigned long r_symndx;
1018 bool largepic = false;
1019 struct elf_link_hash_entry *h;
1020 bfd_vma offset;
1021 struct elf_x86_link_hash_table *htab;
1022 bfd_byte *call;
1023 bool indirect_call;
1024
1025 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1026 offset = rel->r_offset;
1027 switch (r_type)
1028 {
1029 case R_X86_64_TLSGD:
1030 case R_X86_64_TLSLD:
1031 if ((rel + 1) >= relend)
1032 return false;
1033
1034 if (r_type == R_X86_64_TLSGD)
1035 {
1036 /* Check transition from GD access model. For 64bit, only
1037 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1038 .word 0x6666; rex64; call __tls_get_addr@PLT
1039 or
1040 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1041 .byte 0x66; rex64
1042 call *__tls_get_addr@GOTPCREL(%rip)
1043 which may be converted to
1044 addr32 call __tls_get_addr
1045 can transit to a different access model. For 32bit, only
1046 leaq foo@tlsgd(%rip), %rdi
1047 .word 0x6666; rex64; call __tls_get_addr@PLT
1048 or
1049 leaq foo@tlsgd(%rip), %rdi
1050 .byte 0x66; rex64
1051 call *__tls_get_addr@GOTPCREL(%rip)
1052 which may be converted to
1053 addr32 call __tls_get_addr
1054 can transit to a different access model. For largepic,
1055 we also support:
1056 leaq foo@tlsgd(%rip), %rdi
1057 movabsq $__tls_get_addr@pltoff, %rax
1058 addq $r15, %rax
1059 call *%rax
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 movabsq $__tls_get_addr@pltoff, %rax
1063 addq $rbx, %rax
1064 call *%rax */
1065
1066 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1067
1068 if ((offset + 12) > sec->size)
1069 return false;
1070
1071 call = contents + offset + 4;
1072 if (call[0] != 0x66
1073 || !((call[1] == 0x48
1074 && call[2] == 0xff
1075 && call[3] == 0x15)
1076 || (call[1] == 0x48
1077 && call[2] == 0x67
1078 && call[3] == 0xe8)
1079 || (call[1] == 0x66
1080 && call[2] == 0x48
1081 && call[3] == 0xe8)))
1082 {
1083 if (!ABI_64_P (abfd)
1084 || (offset + 19) > sec->size
1085 || offset < 3
1086 || memcmp (call - 7, leaq + 1, 3) != 0
1087 || memcmp (call, "\x48\xb8", 2) != 0
1088 || call[11] != 0x01
1089 || call[13] != 0xff
1090 || call[14] != 0xd0
1091 || !((call[10] == 0x48 && call[12] == 0xd8)
1092 || (call[10] == 0x4c && call[12] == 0xf8)))
1093 return false;
1094 largepic = true;
1095 }
1096 else if (ABI_64_P (abfd))
1097 {
1098 if (offset < 4
1099 || memcmp (contents + offset - 4, leaq, 4) != 0)
1100 return false;
1101 }
1102 else
1103 {
1104 if (offset < 3
1105 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1106 return false;
1107 }
1108 indirect_call = call[2] == 0xff;
1109 }
1110 else
1111 {
1112 /* Check transition from LD access model. Only
1113 leaq foo@tlsld(%rip), %rdi;
1114 call __tls_get_addr@PLT
1115 or
1116 leaq foo@tlsld(%rip), %rdi;
1117 call *__tls_get_addr@GOTPCREL(%rip)
1118 which may be converted to
1119 addr32 call __tls_get_addr
1120 can transit to a different access model. For largepic,
1121 we also support:
1122 leaq foo@tlsld(%rip), %rdi
1123 movabsq $__tls_get_addr@pltoff, %rax
1124 addq $r15, %rax
1125 call *%rax
1126 or
1127 leaq foo@tlsld(%rip), %rdi
1128 movabsq $__tls_get_addr@pltoff, %rax
1129 addq $rbx, %rax
1130 call *%rax */
1131
1132 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1133
1134 if (offset < 3 || (offset + 9) > sec->size)
1135 return false;
1136
1137 if (memcmp (contents + offset - 3, lea, 3) != 0)
1138 return false;
1139
1140 call = contents + offset + 4;
1141 if (!(call[0] == 0xe8
1142 || (call[0] == 0xff && call[1] == 0x15)
1143 || (call[0] == 0x67 && call[1] == 0xe8)))
1144 {
1145 if (!ABI_64_P (abfd)
1146 || (offset + 19) > sec->size
1147 || memcmp (call, "\x48\xb8", 2) != 0
1148 || call[11] != 0x01
1149 || call[13] != 0xff
1150 || call[14] != 0xd0
1151 || !((call[10] == 0x48 && call[12] == 0xd8)
1152 || (call[10] == 0x4c && call[12] == 0xf8)))
1153 return false;
1154 largepic = true;
1155 }
1156 indirect_call = call[0] == 0xff;
1157 }
1158
1159 r_symndx = htab->r_sym (rel[1].r_info);
1160 if (r_symndx < symtab_hdr->sh_info)
1161 return false;
1162
1163 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1164 if (h == NULL
1165 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1166 return false;
1167 else
1168 {
1169 r_type = (ELF32_R_TYPE (rel[1].r_info)
1170 & ~R_X86_64_converted_reloc_bit);
1171 if (largepic)
1172 return r_type == R_X86_64_PLTOFF64;
1173 else if (indirect_call)
1174 return r_type == R_X86_64_GOTPCRELX;
1175 else
1176 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1177 }
1178
1179 case R_X86_64_GOTTPOFF:
1180 /* Check transition from IE access model:
1181 mov foo@gottpoff(%rip), %reg
1182 add foo@gottpoff(%rip), %reg
1183 */
1184
1185 /* Check REX prefix first. */
1186 if (offset >= 3 && (offset + 4) <= sec->size)
1187 {
1188 val = bfd_get_8 (abfd, contents + offset - 3);
1189 if (val != 0x48 && val != 0x4c)
1190 {
1191 /* X32 may have 0x44 REX prefix or no REX prefix. */
1192 if (ABI_64_P (abfd))
1193 return false;
1194 }
1195 }
1196 else
1197 {
1198 /* X32 may not have any REX prefix. */
1199 if (ABI_64_P (abfd))
1200 return false;
1201 if (offset < 2 || (offset + 3) > sec->size)
1202 return false;
1203 }
1204
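/* What the two byte tests below match (an informal decoding note):
   0x8b is mov r/m,reg and 0x03 is add r/m,reg, and a ModRM byte with
   mod=00, r/m=101 -- i.e. (modrm & 0xc7) == 0x05 -- selects the
   RIP-relative disp32 form, so together they accept
   mov/add foo@gottpoff(%rip), %reg with any destination register.  */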
1205 val = bfd_get_8 (abfd, contents + offset - 2);
1206 if (val != 0x8b && val != 0x03)
1207 return false;
1208
1209 val = bfd_get_8 (abfd, contents + offset - 1);
1210 return (val & 0xc7) == 5;
1211
1212 case R_X86_64_GOTPC32_TLSDESC:
1213 /* Check transition from GDesc access model:
1214 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1215 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1216
1217 Make sure it's a leaq adding rip to a 32-bit offset
1218 into any register, although it's probably almost always
1219 going to be rax. */
1220
1221 if (offset < 3 || (offset + 4) > sec->size)
1222 return false;
1223
1224 val = bfd_get_8 (abfd, contents + offset - 3);
1225 val &= 0xfb;
1226 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1227 return false;
1228
1229 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1230 return false;
1231
1232 val = bfd_get_8 (abfd, contents + offset - 1);
1233 return (val & 0xc7) == 0x05;
1234
1235 case R_X86_64_TLSDESC_CALL:
1236 /* Check transition from GDesc access model:
1237 call *x@tlsdesc(%rax) <--- LP64 mode.
1238 call *x@tlsdesc(%eax) <--- X32 mode.
1239 */
1240 if (offset + 2 <= sec->size)
1241 {
1242 unsigned int prefix;
1243 call = contents + offset;
1244 prefix = 0;
1245 if (!ABI_64_P (abfd))
1246 {
1247 /* Check for call *x@tlsdesc(%eax). */
1248 if (call[0] == 0x67)
1249 {
1250 prefix = 1;
1251 if (offset + 3 > sec->size)
1252 return false;
1253 }
1254 }
1255 /* Make sure that it's a call *x@tlsdesc(%rax). */
1256 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1257 }
1258
1259 return false;
1260
1261 default:
1262 abort ();
1263 }
1264 }
1265
1266 /* Return TRUE if the TLS access transition is OK or no transition
1267 will be performed. Update R_TYPE if there is a transition. */
1268
1269 static bool
1270 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1271 asection *sec, bfd_byte *contents,
1272 Elf_Internal_Shdr *symtab_hdr,
1273 struct elf_link_hash_entry **sym_hashes,
1274 unsigned int *r_type, int tls_type,
1275 const Elf_Internal_Rela *rel,
1276 const Elf_Internal_Rela *relend,
1277 struct elf_link_hash_entry *h,
1278 unsigned long r_symndx,
1279 bool from_relocate_section)
1280 {
1281 unsigned int from_type = *r_type;
1282 unsigned int to_type = from_type;
1283 bool check = true;
1284
1285 /* Skip TLS transition for functions. */
1286 if (h != NULL
1287 && (h->type == STT_FUNC
1288 || h->type == STT_GNU_IFUNC))
1289 return true;
1290
1291 switch (from_type)
1292 {
1293 case R_X86_64_TLSGD:
1294 case R_X86_64_GOTPC32_TLSDESC:
1295 case R_X86_64_TLSDESC_CALL:
1296 case R_X86_64_GOTTPOFF:
1297 if (bfd_link_executable (info))
1298 {
1299 if (h == NULL)
1300 to_type = R_X86_64_TPOFF32;
1301 else
1302 to_type = R_X86_64_GOTTPOFF;
1303 }
1304
1305 /* When we are called from elf_x86_64_relocate_section, there may
1306 be additional transitions based on TLS_TYPE. */
1307 if (from_relocate_section)
1308 {
1309 unsigned int new_to_type = to_type;
1310
1311 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1312 new_to_type = R_X86_64_TPOFF32;
1313
1314 if (to_type == R_X86_64_TLSGD
1315 || to_type == R_X86_64_GOTPC32_TLSDESC
1316 || to_type == R_X86_64_TLSDESC_CALL)
1317 {
1318 if (tls_type == GOT_TLS_IE)
1319 new_to_type = R_X86_64_GOTTPOFF;
1320 }
1321
1322 /* We checked the transition before when we were called from
1323 elf_x86_64_check_relocs. We only want to check the new
1324 transition which hasn't been checked before. */
1325 check = new_to_type != to_type && from_type == to_type;
1326 to_type = new_to_type;
1327 }
1328
1329 break;
1330
1331 case R_X86_64_TLSLD:
1332 if (bfd_link_executable (info))
1333 to_type = R_X86_64_TPOFF32;
1334 break;
1335
1336 default:
1337 return true;
1338 }
1339
1340 /* Return TRUE if there is no transition. */
1341 if (from_type == to_type)
1342 return true;
1343
1344 /* Check if the transition can be performed. */
1345 if (check
1346 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1347 symtab_hdr, sym_hashes,
1348 from_type, rel, relend))
1349 {
1350 reloc_howto_type *from, *to;
1351 const char *name;
1352
1353 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1354 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1355
1356 if (from == NULL || to == NULL)
1357 return false;
1358
1359 if (h)
1360 name = h->root.root.string;
1361 else
1362 {
1363 struct elf_x86_link_hash_table *htab;
1364
1365 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1366 if (htab == NULL)
1367 name = "*unknown*";
1368 else
1369 {
1370 Elf_Internal_Sym *isym;
1371
1372 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1373 abfd, r_symndx);
1374 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1375 }
1376 }
1377
1378 _bfd_error_handler
1379 /* xgettext:c-format */
1380 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1381 " in section `%pA' failed"),
1382 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1383 bfd_set_error (bfd_error_bad_value);
1384 return false;
1385 }
1386
1387 *r_type = to_type;
1388 return true;
1389 }
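/* In rough summary (a condensation of the switch above, not an exhaustive
   table), when bfd_link_executable is true:
     R_X86_64_TLSGD / GOTPC32_TLSDESC / TLSDESC_CALL / GOTTPOFF
       -> R_X86_64_TPOFF32 for local symbols (h == NULL),
       -> R_X86_64_GOTTPOFF otherwise;
     R_X86_64_TLSLD -> R_X86_64_TPOFF32;
   and, when called from elf_x86_64_relocate_section, IE may further relax
   to LE and GD/GDesc may drop to IE depending on TLS_TYPE.  */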
1390
1391 /* Rename some of the generic section flags to better document how they
1392 are used here. */
1393 #define check_relocs_failed sec_flg0
1394
1395 static bool
1396 elf_x86_64_need_pic (struct bfd_link_info *info,
1397 bfd *input_bfd, asection *sec,
1398 struct elf_link_hash_entry *h,
1399 Elf_Internal_Shdr *symtab_hdr,
1400 Elf_Internal_Sym *isym,
1401 reloc_howto_type *howto)
1402 {
1403 const char *v = "";
1404 const char *und = "";
1405 const char *pic = "";
1406 const char *object;
1407
1408 const char *name;
1409 if (h)
1410 {
1411 name = h->root.root.string;
1412 switch (ELF_ST_VISIBILITY (h->other))
1413 {
1414 case STV_HIDDEN:
1415 v = _("hidden symbol ");
1416 break;
1417 case STV_INTERNAL:
1418 v = _("internal symbol ");
1419 break;
1420 case STV_PROTECTED:
1421 v = _("protected symbol ");
1422 break;
1423 default:
1424 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1425 v = _("protected symbol ");
1426 else
1427 v = _("symbol ");
1428 pic = NULL;
1429 break;
1430 }
1431
1432 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1433 und = _("undefined ");
1434 }
1435 else
1436 {
1437 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1438 pic = NULL;
1439 }
1440
1441 if (bfd_link_dll (info))
1442 {
1443 object = _("a shared object");
1444 if (!pic)
1445 pic = _("; recompile with -fPIC");
1446 }
1447 else
1448 {
1449 if (bfd_link_pie (info))
1450 object = _("a PIE object");
1451 else
1452 object = _("a PDE object");
1453 if (!pic)
1454 pic = _("; recompile with -fPIE");
1455 }
1456
1457 /* xgettext:c-format */
1458 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1459 "not be used when making %s%s"),
1460 input_bfd, howto->name, und, v, name,
1461 object, pic);
1462 bfd_set_error (bfd_error_bad_value);
1463 sec->check_relocs_failed = 1;
1464 return false;
1465 }
1466
1467 /* With the local symbol, foo, we convert
1468 mov foo@GOTPCREL(%rip), %reg
1469 to
1470 lea foo(%rip), %reg
1471 and convert
1472 call/jmp *foo@GOTPCREL(%rip)
1473 to
1474 nop call foo/jmp foo nop
1475 When PIC is false, convert
1476 test %reg, foo@GOTPCREL(%rip)
1477 to
1478 test $foo, %reg
1479 and convert
1480 binop foo@GOTPCREL(%rip), %reg
1481 to
1482 binop $foo, %reg
1483 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1484 instructions. */
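/* For instance (byte patterns sketched from the conversion code below,
   shown here with %rax; not an exhaustive list):
     48 8b 05 <rel32>  mov foo@GOTPCREL(%rip), %rax
       -> 48 8d 05 <rel32>  lea foo(%rip), %rax
     ff 25 <rel32>     jmp *foo@GOTPCREL(%rip)
       -> e9 <rel32> 90     jmp foo; nop
     ff 15 <rel32>     call *foo@GOTPCREL(%rip)
       -> <nop> e8 <rel32>  (or e8 <rel32> <nop>, per the -z call-nop
                             setting), keeping each sequence 6 bytes.  */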
1485
1486 static bool
1487 elf_x86_64_convert_load_reloc (bfd *abfd,
1488 bfd_byte *contents,
1489 unsigned int *r_type_p,
1490 Elf_Internal_Rela *irel,
1491 struct elf_link_hash_entry *h,
1492 bool *converted,
1493 struct bfd_link_info *link_info)
1494 {
1495 struct elf_x86_link_hash_table *htab;
1496 bool is_pic;
1497 bool no_overflow;
1498 bool relocx;
1499 bool to_reloc_pc32;
1500 bool abs_symbol;
1501 bool local_ref;
1502 asection *tsec;
1503 bfd_signed_vma raddend;
1504 unsigned int opcode;
1505 unsigned int modrm;
1506 unsigned int r_type = *r_type_p;
1507 unsigned int r_symndx;
1508 bfd_vma roff = irel->r_offset;
1509 bfd_vma abs_relocation;
1510
1511 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1512 return true;
1513
1514 raddend = irel->r_addend;
1515 /* Addend for 32-bit PC-relative relocation must be -4. */
1516 if (raddend != -4)
1517 return true;
1518
1519 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1520 is_pic = bfd_link_pic (link_info);
1521
1522 relocx = (r_type == R_X86_64_GOTPCRELX
1523 || r_type == R_X86_64_REX_GOTPCRELX);
1524
1525 /* TRUE if --no-relax is used. */
1526 no_overflow = link_info->disable_target_specific_optimizations > 1;
1527
1528 r_symndx = htab->r_sym (irel->r_info);
1529
1530 opcode = bfd_get_8 (abfd, contents + roff - 2);
1531
1532 /* mov is converted to lea even for R_X86_64_GOTPCREL, since that
conversion has been done for a while. */
1533 if (opcode != 0x8b)
1534 {
1535 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1536 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1537 test, xor instructions. */
1538 if (!relocx)
1539 return true;
1540 }
1541
1542 /* We convert only to R_X86_64_PC32:
1543 1. Branch.
1544 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1545 3. no_overflow is true.
1546 4. PIC.
1547 */
1548 to_reloc_pc32 = (opcode == 0xff
1549 || !relocx
1550 || no_overflow
1551 || is_pic);
1552
1553 abs_symbol = false;
1554 abs_relocation = 0;
1555
1556 /* Get the symbol referred to by the reloc. */
1557 if (h == NULL)
1558 {
1559 Elf_Internal_Sym *isym
1560 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1561
1562 /* Skip relocation against undefined symbols. */
1563 if (isym->st_shndx == SHN_UNDEF)
1564 return true;
1565
1566 local_ref = true;
1567 if (isym->st_shndx == SHN_ABS)
1568 {
1569 tsec = bfd_abs_section_ptr;
1570 abs_symbol = true;
1571 abs_relocation = isym->st_value;
1572 }
1573 else if (isym->st_shndx == SHN_COMMON)
1574 tsec = bfd_com_section_ptr;
1575 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1576 tsec = &_bfd_elf_large_com_section;
1577 else
1578 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1579 }
1580 else
1581 {
1582 /* An undefined weak symbol is only bound locally in an executable
1583 and its reference is resolved as 0 without relocation
1584 overflow. We can only perform this optimization for
1585 GOTPCRELX relocations since we need to modify the REX byte.
1586 It is OK to convert mov with R_X86_64_GOTPCREL to
1587 R_X86_64_PC32. */
1588 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1589
1590 abs_symbol = ABS_SYMBOL_P (h);
1591 abs_relocation = h->root.u.def.value;
1592
1593 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1594 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1595 if ((relocx || opcode == 0x8b)
1596 && (h->root.type == bfd_link_hash_undefweak
1597 && !eh->linker_def
1598 && local_ref))
1599 {
1600 if (opcode == 0xff)
1601 {
1602 /* Skip for branch instructions since R_X86_64_PC32
1603 may overflow. */
1604 if (no_overflow)
1605 return true;
1606 }
1607 else if (relocx)
1608 {
1609 /* For non-branch instructions, we can convert to
1610 R_X86_64_32/R_X86_64_32S since we know if there
1611 is a REX byte. */
1612 to_reloc_pc32 = false;
1613 }
1614
1615 /* Since we don't know the current PC when PIC is true,
1616 we can't convert to R_X86_64_PC32. */
1617 if (to_reloc_pc32 && is_pic)
1618 return true;
1619
1620 goto convert;
1621 }
1622 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1623 ld.so may use its link-time address. */
1624 else if (h->start_stop
1625 || eh->linker_def
1626 || ((h->def_regular
1627 || h->root.type == bfd_link_hash_defined
1628 || h->root.type == bfd_link_hash_defweak)
1629 && h != htab->elf.hdynamic
1630 && local_ref))
1631 {
1632 /* bfd_link_hash_new or bfd_link_hash_undefined is
1633 set by an assignment in a linker script in
1634 bfd_elf_record_link_assignment. start_stop is set
1635 on __start_SECNAME/__stop_SECNAME which mark section
1636 SECNAME. */
1637 if (h->start_stop
1638 || eh->linker_def
1639 || (h->def_regular
1640 && (h->root.type == bfd_link_hash_new
1641 || h->root.type == bfd_link_hash_undefined
1642 || ((h->root.type == bfd_link_hash_defined
1643 || h->root.type == bfd_link_hash_defweak)
1644 && h->root.u.def.section == bfd_und_section_ptr))))
1645 {
1646 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1647 if (no_overflow)
1648 return true;
1649 goto convert;
1650 }
1651 tsec = h->root.u.def.section;
1652 }
1653 else
1654 return true;
1655 }
1656
1657 /* Don't convert GOTPCREL relocation against large section. */
1658 if (elf_section_data (tsec) != NULL
1659 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1660 return true;
1661
1662 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1663 if (no_overflow)
1664 return true;
1665
1666 convert:
1667 if (opcode == 0xff)
1668 {
1669 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1670 unsigned int nop;
1671 unsigned int disp;
1672 bfd_vma nop_offset;
1673
1674 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1675 R_X86_64_PC32. */
1676 modrm = bfd_get_8 (abfd, contents + roff - 1);
1677 if (modrm == 0x25)
1678 {
1679 /* Convert to "jmp foo nop". */
1680 modrm = 0xe9;
1681 nop = NOP_OPCODE;
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 {
1689 struct elf_x86_link_hash_entry *eh
1690 = (struct elf_x86_link_hash_entry *) h;
1691
1692 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1693 is a nop prefix. */
1694 modrm = 0xe8;
1695 /* To support TLS optimization, always use addr32 prefix for
1696 "call *__tls_get_addr@GOTPCREL(%rip)". */
1697 if (eh && eh->tls_get_addr)
1698 {
1699 nop = 0x67;
1700 nop_offset = irel->r_offset - 2;
1701 }
1702 else
1703 {
1704 nop = htab->params->call_nop_byte;
1705 if (htab->params->call_nop_as_suffix)
1706 {
1707 nop_offset = irel->r_offset + 3;
1708 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1709 irel->r_offset -= 1;
1710 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1711 }
1712 else
1713 nop_offset = irel->r_offset - 2;
1714 }
1715 }
1716 bfd_put_8 (abfd, nop, contents + nop_offset);
1717 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1718 r_type = R_X86_64_PC32;
1719 }
1720 else
1721 {
1722 unsigned int rex;
1723 unsigned int rex_mask = REX_R;
1724
1725 if (r_type == R_X86_64_REX_GOTPCRELX)
1726 rex = bfd_get_8 (abfd, contents + roff - 3);
1727 else
1728 rex = 0;
1729
1730 if (opcode == 0x8b)
1731 {
1732 if (abs_symbol && local_ref && relocx)
1733 to_reloc_pc32 = false;
1734
1735 if (to_reloc_pc32)
1736 {
1737 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1738 "lea foo(%rip), %reg". */
1739 opcode = 0x8d;
1740 r_type = R_X86_64_PC32;
1741 }
1742 else
1743 {
1744 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1745 "mov $foo, %reg". */
1746 opcode = 0xc7;
1747 modrm = bfd_get_8 (abfd, contents + roff - 1);
1748 modrm = 0xc0 | (modrm & 0x38) >> 3;
1749 if ((rex & REX_W) != 0
1750 && ABI_64_P (link_info->output_bfd))
1751 {
1752 /* Keep the REX_W bit in REX byte for LP64. */
1753 r_type = R_X86_64_32S;
1754 goto rewrite_modrm_rex;
1755 }
1756 else
1757 {
1758 /* If the REX_W bit in REX byte isn't needed,
1759 use R_X86_64_32 and clear the W bit to avoid
1760 sign-extend imm32 to imm64. */
1761 r_type = R_X86_64_32;
1762 /* Clear the W bit in REX byte. */
1763 rex_mask |= REX_W;
1764 goto rewrite_modrm_rex;
1765 }
1766 }
1767 }
1768 else
1769 {
1770 /* R_X86_64_PC32 isn't supported. */
1771 if (to_reloc_pc32)
1772 return true;
1773
1774 modrm = bfd_get_8 (abfd, contents + roff - 1);
1775 if (opcode == 0x85)
1776 {
1777 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1778 "test $foo, %reg". */
1779 modrm = 0xc0 | (modrm & 0x38) >> 3;
1780 opcode = 0xf7;
1781 }
1782 else
1783 {
1784 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1785 "binop $foo, %reg". */
1786 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1787 opcode = 0x81;
1788 }
1789
1790 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1791 overflow when sign-extending imm32 to imm64. */
1792 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1793
1794 rewrite_modrm_rex:
1795 if (abs_relocation)
1796 {
1797 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
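/* R_X86_64_32S requires the value to fit in a sign-extended
   32-bit immediate, i.e. [-0x80000000, 0x7fffffff];
   R_X86_64_32 requires it to fit in a zero-extended one,
   i.e. [0, 0xffffffff].  */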
1798 if (r_type == R_X86_64_32S)
1799 {
1800 if ((abs_relocation + 0x80000000) > 0xffffffff)
1801 return true;
1802 }
1803 else
1804 {
1805 if (abs_relocation > 0xffffffff)
1806 return true;
1807 }
1808 }
1809
1810 bfd_put_8 (abfd, modrm, contents + roff - 1);
1811
1812 if (rex)
1813 {
1814 /* Move the R bit to the B bit in REX byte. */
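/* REX.R extends the ModRM reg field and REX.B the rm field;
   since the destination register now lives in the rm field of
   the register-direct ModRM byte, its extension bit must move
   with it.  */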
1815 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1816 bfd_put_8 (abfd, rex, contents + roff - 3);
1817 }
1818
1819 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1820 irel->r_addend = 0;
1821 }
1822
1823 bfd_put_8 (abfd, opcode, contents + roff - 2);
1824 }
1825
1826 *r_type_p = r_type;
1827 irel->r_info = htab->r_info (r_symndx,
1828 r_type | R_X86_64_converted_reloc_bit);
1829
1830 *converted = true;
1831
1832 return true;
1833 }
1834
1835 /* Look through the relocs for a section during the first phase, and
1836 calculate needed space in the global offset table, procedure
1837 linkage table, and dynamic reloc sections. */
1838
1839 static bool
1840 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1841 asection *sec,
1842 const Elf_Internal_Rela *relocs)
1843 {
1844 struct elf_x86_link_hash_table *htab;
1845 Elf_Internal_Shdr *symtab_hdr;
1846 struct elf_link_hash_entry **sym_hashes;
1847 const Elf_Internal_Rela *rel;
1848 const Elf_Internal_Rela *rel_end;
1849 asection *sreloc;
1850 bfd_byte *contents;
1851 bool converted;
1852
1853 if (bfd_link_relocatable (info))
1854 return true;
1855
1856 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1857 if (htab == NULL)
1858 {
1859 sec->check_relocs_failed = 1;
1860 return false;
1861 }
1862
1863 BFD_ASSERT (is_x86_elf (abfd, htab));
1864
1865 /* Get the section contents. */
1866 if (elf_section_data (sec)->this_hdr.contents != NULL)
1867 contents = elf_section_data (sec)->this_hdr.contents;
1868 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1869 {
1870 sec->check_relocs_failed = 1;
1871 return false;
1872 }
1873
1874 symtab_hdr = &elf_symtab_hdr (abfd);
1875 sym_hashes = elf_sym_hashes (abfd);
1876
1877 converted = false;
1878
1879 sreloc = NULL;
1880
1881 rel_end = relocs + sec->reloc_count;
1882 for (rel = relocs; rel < rel_end; rel++)
1883 {
1884 unsigned int r_type;
1885 unsigned int r_symndx;
1886 struct elf_link_hash_entry *h;
1887 struct elf_x86_link_hash_entry *eh;
1888 Elf_Internal_Sym *isym;
1889 const char *name;
1890 bool size_reloc;
1891 bool converted_reloc;
1892 bool no_dynreloc;
1893
1894 r_symndx = htab->r_sym (rel->r_info);
1895 r_type = ELF32_R_TYPE (rel->r_info);
1896
1897 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1898 {
1899 /* xgettext:c-format */
1900 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1901 abfd, r_symndx);
1902 goto error_return;
1903 }
1904
1905 if (r_symndx < symtab_hdr->sh_info)
1906 {
1907 /* A local symbol. */
1908 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1909 abfd, r_symndx);
1910 if (isym == NULL)
1911 goto error_return;
1912
1913 /* Check relocation against local STT_GNU_IFUNC symbol. */
1914 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1915 {
1916 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1917 true);
1918 if (h == NULL)
1919 goto error_return;
1920
1921 /* Fake a STT_GNU_IFUNC symbol. */
1922 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1923 isym, NULL);
1924 h->type = STT_GNU_IFUNC;
1925 h->def_regular = 1;
1926 h->ref_regular = 1;
1927 h->forced_local = 1;
1928 h->root.type = bfd_link_hash_defined;
1929 }
1930 else
1931 h = NULL;
1932 }
1933 else
1934 {
1935 isym = NULL;
1936 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1937 while (h->root.type == bfd_link_hash_indirect
1938 || h->root.type == bfd_link_hash_warning)
1939 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1940 }
1941
1942 /* Check invalid x32 relocations. */
1943 if (!ABI_64_P (abfd))
1944 switch (r_type)
1945 {
1946 default:
1947 break;
1948
1949 case R_X86_64_DTPOFF64:
1950 case R_X86_64_TPOFF64:
1951 case R_X86_64_PC64:
1952 case R_X86_64_GOTOFF64:
1953 case R_X86_64_GOT64:
1954 case R_X86_64_GOTPCREL64:
1955 case R_X86_64_GOTPC64:
1956 case R_X86_64_GOTPLT64:
1957 case R_X86_64_PLTOFF64:
1958 {
1959 if (h)
1960 name = h->root.root.string;
1961 else
1962 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1963 NULL);
1964 _bfd_error_handler
1965 /* xgettext:c-format */
1966 (_("%pB: relocation %s against symbol `%s' isn't "
1967 "supported in x32 mode"), abfd,
1968 x86_64_elf_howto_table[r_type].name, name);
1969 bfd_set_error (bfd_error_bad_value);
1970 goto error_return;
1971 }
1972 break;
1973 }
1974
1975 if (h != NULL)
1976 {
1977 /* It is referenced by a non-shared object. */
1978 h->ref_regular = 1;
1979 }
1980
1981 converted_reloc = false;
1982 if ((r_type == R_X86_64_GOTPCREL
1983 || r_type == R_X86_64_GOTPCRELX
1984 || r_type == R_X86_64_REX_GOTPCRELX)
1985 && (h == NULL || h->type != STT_GNU_IFUNC))
1986 {
1987 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1988 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1989 irel, h, &converted_reloc,
1990 info))
1991 goto error_return;
1992
1993 if (converted_reloc)
1994 converted = true;
1995 }
1996
1997 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
1998 symtab_hdr, &no_dynreloc))
1999 return false;
2000
2001 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2002 symtab_hdr, sym_hashes,
2003 &r_type, GOT_UNKNOWN,
2004 rel, rel_end, h, r_symndx, false))
2005 goto error_return;
2006
2007 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2008 if (h == htab->elf.hgot)
2009 htab->got_referenced = true;
2010
2011 eh = (struct elf_x86_link_hash_entry *) h;
2012 switch (r_type)
2013 {
2014 case R_X86_64_TLSLD:
2015 htab->tls_ld_or_ldm_got.refcount = 1;
2016 goto create_got;
2017
2018 case R_X86_64_TPOFF32:
2019 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2020 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2021 &x86_64_elf_howto_table[r_type]);
2022 if (eh != NULL)
2023 eh->zero_undefweak &= 0x2;
2024 break;
2025
2026 case R_X86_64_GOTTPOFF:
2027 if (!bfd_link_executable (info))
2028 info->flags |= DF_STATIC_TLS;
2029 /* Fall through */
2030
2031 case R_X86_64_GOT32:
2032 case R_X86_64_GOTPCREL:
2033 case R_X86_64_GOTPCRELX:
2034 case R_X86_64_REX_GOTPCRELX:
2035 case R_X86_64_TLSGD:
2036 case R_X86_64_GOT64:
2037 case R_X86_64_GOTPCREL64:
2038 case R_X86_64_GOTPLT64:
2039 case R_X86_64_GOTPC32_TLSDESC:
2040 case R_X86_64_TLSDESC_CALL:
2041 /* This symbol requires a global offset table entry. */
2042 {
2043 int tls_type, old_tls_type;
2044
2045 switch (r_type)
2046 {
2047 default:
2048 tls_type = GOT_NORMAL;
2049 if (h)
2050 {
2051 if (ABS_SYMBOL_P (h))
2052 tls_type = GOT_ABS;
2053 }
2054 else if (isym->st_shndx == SHN_ABS)
2055 tls_type = GOT_ABS;
2056 break;
2057 case R_X86_64_TLSGD:
2058 tls_type = GOT_TLS_GD;
2059 break;
2060 case R_X86_64_GOTTPOFF:
2061 tls_type = GOT_TLS_IE;
2062 break;
2063 case R_X86_64_GOTPC32_TLSDESC:
2064 case R_X86_64_TLSDESC_CALL:
2065 tls_type = GOT_TLS_GDESC;
2066 break;
2067 }
2068
2069 if (h != NULL)
2070 {
2071 h->got.refcount = 1;
2072 old_tls_type = eh->tls_type;
2073 }
2074 else
2075 {
2076 bfd_signed_vma *local_got_refcounts;
2077
2078 /* This is a global offset table entry for a local symbol. */
2079 local_got_refcounts = elf_local_got_refcounts (abfd);
2080 if (local_got_refcounts == NULL)
2081 {
2082 bfd_size_type size;
2083
2084 size = symtab_hdr->sh_info;
2085 size *= sizeof (bfd_signed_vma)
2086 + sizeof (bfd_vma) + sizeof (char);
2087 local_got_refcounts = ((bfd_signed_vma *)
2088 bfd_zalloc (abfd, size));
2089 if (local_got_refcounts == NULL)
2090 goto error_return;
2091 elf_local_got_refcounts (abfd) = local_got_refcounts;
2092 elf_x86_local_tlsdesc_gotent (abfd)
2093 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2094 elf_x86_local_got_tls_type (abfd)
2095 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
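/* The single allocation above is sliced into three arrays of
   sh_info elements each: GOT reference counts, then TLSDESC GOT
   offsets, then per-symbol TLS type bytes.  */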
2096 }
2097 local_got_refcounts[r_symndx] = 1;
2098 old_tls_type
2099 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2100 }
2101
2102 /* If a TLS symbol is accessed using IE at least once,
2103 there is no point in using a dynamic model for it. */
2104 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2105 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2106 || tls_type != GOT_TLS_IE))
2107 {
2108 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2109 tls_type = old_tls_type;
2110 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2111 && GOT_TLS_GD_ANY_P (tls_type))
2112 tls_type |= old_tls_type;
2113 else
2114 {
2115 if (h)
2116 name = h->root.root.string;
2117 else
2118 name = bfd_elf_sym_name (abfd, symtab_hdr,
2119 isym, NULL);
2120 _bfd_error_handler
2121 /* xgettext:c-format */
2122 (_("%pB: '%s' accessed both as normal and"
2123 " thread local symbol"),
2124 abfd, name);
2125 bfd_set_error (bfd_error_bad_value);
2126 goto error_return;
2127 }
2128 }
2129
2130 if (old_tls_type != tls_type)
2131 {
2132 if (eh != NULL)
2133 eh->tls_type = tls_type;
2134 else
2135 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2136 }
2137 }
2138 /* Fall through */
2139
2140 case R_X86_64_GOTOFF64:
2141 case R_X86_64_GOTPC32:
2142 case R_X86_64_GOTPC64:
2143 create_got:
2144 if (eh != NULL)
2145 eh->zero_undefweak &= 0x2;
2146 break;
2147
2148 case R_X86_64_PLT32:
2149 case R_X86_64_PLT32_BND:
2150 /* This symbol requires a procedure linkage table entry. We
2151 actually build the entry in adjust_dynamic_symbol,
2152 because this might be a case of linking PIC code which is
2153 never referenced by a dynamic object, in which case we
2154 don't need to generate a procedure linkage table entry
2155 after all. */
2156
2157 /* If this is a local symbol, we resolve it directly without
2158 creating a procedure linkage table entry. */
2159 if (h == NULL)
2160 continue;
2161
2162 eh->zero_undefweak &= 0x2;
2163 h->needs_plt = 1;
2164 h->plt.refcount = 1;
2165 break;
2166
2167 case R_X86_64_PLTOFF64:
2168 /* This tries to form the 'address' of a function relative
2169 to GOT. For global symbols we need a PLT entry. */
2170 if (h != NULL)
2171 {
2172 h->needs_plt = 1;
2173 h->plt.refcount = 1;
2174 }
2175 goto create_got;
2176
2177 case R_X86_64_SIZE32:
2178 case R_X86_64_SIZE64:
2179 size_reloc = true;
2180 goto do_size;
2181
2182 case R_X86_64_32:
2183 if (!ABI_64_P (abfd))
2184 goto pointer;
2185 /* Fall through. */
2186 case R_X86_64_8:
2187 case R_X86_64_16:
2188 case R_X86_64_32S:
2189 /* Check relocation overflow as these relocs may lead to
2190 run-time relocation overflow. Don't error out for
2191 sections we don't care about, such as debug sections or
2192 when relocation overflow check is disabled. */
2193 if (!htab->params->no_reloc_overflow_check
2194 && !converted_reloc
2195 && (bfd_link_pic (info)
2196 || (bfd_link_executable (info)
2197 && h != NULL
2198 && !h->def_regular
2199 && h->def_dynamic
2200 && (sec->flags & SEC_READONLY) == 0)))
2201 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2202 &x86_64_elf_howto_table[r_type]);
2203 /* Fall through. */
2204
2205 case R_X86_64_PC8:
2206 case R_X86_64_PC16:
2207 case R_X86_64_PC32:
2208 case R_X86_64_PC32_BND:
2209 case R_X86_64_PC64:
2210 case R_X86_64_64:
2211 pointer:
2212 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2213 eh->zero_undefweak |= 0x2;
2214 /* We are called after all symbols have been resolved. Only
2215 relocation against STT_GNU_IFUNC symbol must go through
2216 PLT. */
2217 if (h != NULL
2218 && (bfd_link_executable (info)
2219 || h->type == STT_GNU_IFUNC))
2220 {
2221 bool func_pointer_ref = false;
2222
2223 if (r_type == R_X86_64_PC32)
2224 {
2225 /* Since something like ".long foo - ." may be used
2226 as a pointer, make sure that the PLT is used if foo is
2227 a function defined in a shared library. */
2228 if ((sec->flags & SEC_CODE) == 0)
2229 {
2230 h->pointer_equality_needed = 1;
2231 if (bfd_link_pie (info)
2232 && h->type == STT_FUNC
2233 && !h->def_regular
2234 && h->def_dynamic)
2235 {
2236 h->needs_plt = 1;
2237 h->plt.refcount = 1;
2238 }
2239 }
2240 }
2241 else if (r_type != R_X86_64_PC32_BND
2242 && r_type != R_X86_64_PC64)
2243 {
2244 h->pointer_equality_needed = 1;
2245 /* At run-time, R_X86_64_64 can be resolved for both
2246 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2247 can only be resolved for x32. */
2248 if ((sec->flags & SEC_READONLY) == 0
2249 && (r_type == R_X86_64_64
2250 || (!ABI_64_P (abfd)
2251 && (r_type == R_X86_64_32
2252 || r_type == R_X86_64_32S))))
2253 func_pointer_ref = true;
2254 }
2255
2256 if (!func_pointer_ref)
2257 {
2258 /* If this reloc is in a read-only section, we might
2259 need a copy reloc. We can't check reliably at this
2260 stage whether the section is read-only, as input
2261 sections have not yet been mapped to output sections.
2262 Tentatively set the flag for now, and correct in
2263 adjust_dynamic_symbol. */
2264 h->non_got_ref = 1;
2265
2266 /* We may need a .plt entry if the symbol is a function
2267 defined in a shared lib or is a function referenced
2268 from the code or read-only section. */
2269 if (!h->def_regular
2270 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2271 h->plt.refcount = 1;
2272 }
2273 }
2274
2275 size_reloc = false;
2276 do_size:
2277 if (!no_dynreloc
2278 && NEED_DYNAMIC_RELOCATION_P (info, true, h, sec, r_type,
2279 htab->pointer_r_type))
2280 {
2281 struct elf_dyn_relocs *p;
2282 struct elf_dyn_relocs **head;
2283
2284 /* We must copy these reloc types into the output file.
2285 Create a reloc section in dynobj and make room for
2286 this reloc. */
2287 if (sreloc == NULL)
2288 {
2289 sreloc = _bfd_elf_make_dynamic_reloc_section
2290 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2291 abfd, /*rela?*/ true);
2292
2293 if (sreloc == NULL)
2294 goto error_return;
2295 }
2296
2297 /* If this is a global symbol, we count the number of
2298 relocations we need for this symbol. */
2299 if (h != NULL)
2300 head = &h->dyn_relocs;
2301 else
2302 {
2303 /* Track dynamic relocs needed for local syms too.
2304 We really need local syms available to do this
2305 easily. Oh well. */
2306 asection *s;
2307 void **vpp;
2308
2309 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2310 abfd, r_symndx);
2311 if (isym == NULL)
2312 goto error_return;
2313
2314 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2315 if (s == NULL)
2316 s = sec;
2317
2318 /* Beware of type punned pointers vs strict aliasing
2319 rules. */
2320 vpp = &(elf_section_data (s)->local_dynrel);
2321 head = (struct elf_dyn_relocs **)vpp;
2322 }
2323
2324 p = *head;
2325 if (p == NULL || p->sec != sec)
2326 {
2327 size_t amt = sizeof *p;
2328
2329 p = ((struct elf_dyn_relocs *)
2330 bfd_alloc (htab->elf.dynobj, amt));
2331 if (p == NULL)
2332 goto error_return;
2333 p->next = *head;
2334 *head = p;
2335 p->sec = sec;
2336 p->count = 0;
2337 p->pc_count = 0;
2338 }
2339
2340 p->count += 1;
2341 /* Count size relocation as PC-relative relocation. */
2342 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2343 p->pc_count += 1;
2344 }
2345 break;
2346
2347 /* This relocation describes the C++ object vtable hierarchy.
2348 Reconstruct it for later use during GC. */
2349 case R_X86_64_GNU_VTINHERIT:
2350 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2351 goto error_return;
2352 break;
2353
2354 /* This relocation describes which C++ vtable entries are actually
2355 used. Record for later use during GC. */
2356 case R_X86_64_GNU_VTENTRY:
2357 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2358 goto error_return;
2359 break;
2360
2361 default:
2362 break;
2363 }
2364 }
2365
2366 if (elf_section_data (sec)->this_hdr.contents != contents)
2367 {
2368 if (!converted && !_bfd_link_keep_memory (info))
2369 free (contents);
2370 else
2371 {
2372 /* Cache the section contents for elf_link_input_bfd if any
2373 load is converted or --no-keep-memory isn't used. */
2374 elf_section_data (sec)->this_hdr.contents = contents;
2375 info->cache_size += sec->size;
2376 }
2377 }
2378
2379 /* Cache relocations if any load is converted. */
2380 if (elf_section_data (sec)->relocs != relocs && converted)
2381 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2382
2383 return true;
2384
2385 error_return:
2386 if (elf_section_data (sec)->this_hdr.contents != contents)
2387 free (contents);
2388 sec->check_relocs_failed = 1;
2389 return false;
2390 }
2391
2392 /* Return the relocation value for @tpoff relocation
2393 if STT_TLS virtual address is ADDRESS. */
2394
2395 static bfd_vma
2396 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2397 {
2398 struct elf_link_hash_table *htab = elf_hash_table (info);
2399 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2400 bfd_vma static_tls_size;
2401
2402 /* If tls_sec is NULL, we should have signalled an error already. */
2403 if (htab->tls_sec == NULL)
2404 return 0;
2405
2406 /* Consider special static TLS alignment requirements. */
2407 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
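  /* x86-64 uses TLS variant II: the thread pointer (%fs:0) points
     just past the end of the static TLS block, so @tpoff values are
     negative.  E.g. with tls_sec->vma == 0x1000, tls_size == 0x10
     and 8-byte alignment, a variable at address 0x1004 yields
     0x1004 - 0x10 - 0x1000 = -0xc and is addressed as %fs:-0xc at
     run time.  */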
2408 return address - static_tls_size - htab->tls_sec->vma;
2409 }
2410
2411 /* Relocate an x86_64 ELF section. */
2412
2413 static int
2414 elf_x86_64_relocate_section (bfd *output_bfd,
2415 struct bfd_link_info *info,
2416 bfd *input_bfd,
2417 asection *input_section,
2418 bfd_byte *contents,
2419 Elf_Internal_Rela *relocs,
2420 Elf_Internal_Sym *local_syms,
2421 asection **local_sections)
2422 {
2423 struct elf_x86_link_hash_table *htab;
2424 Elf_Internal_Shdr *symtab_hdr;
2425 struct elf_link_hash_entry **sym_hashes;
2426 bfd_vma *local_got_offsets;
2427 bfd_vma *local_tlsdesc_gotents;
2428 Elf_Internal_Rela *rel;
2429 Elf_Internal_Rela *wrel;
2430 Elf_Internal_Rela *relend;
2431 unsigned int plt_entry_size;
2432 bool status;
2433
2434 /* Skip if check_relocs failed. */
2435 if (input_section->check_relocs_failed)
2436 return false;
2437
2438 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2439 if (htab == NULL)
2440 return false;
2441
2442 if (!is_x86_elf (input_bfd, htab))
2443 {
2444 bfd_set_error (bfd_error_wrong_format);
2445 return false;
2446 }
2447
2448 plt_entry_size = htab->plt.plt_entry_size;
2449 symtab_hdr = &elf_symtab_hdr (input_bfd);
2450 sym_hashes = elf_sym_hashes (input_bfd);
2451 local_got_offsets = elf_local_got_offsets (input_bfd);
2452 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2453
2454 _bfd_x86_elf_set_tls_module_base (info);
2455
2456 status = true;
2457 rel = wrel = relocs;
2458 relend = relocs + input_section->reloc_count;
2459 for (; rel < relend; wrel++, rel++)
2460 {
2461 unsigned int r_type, r_type_tls;
2462 reloc_howto_type *howto;
2463 unsigned long r_symndx;
2464 struct elf_link_hash_entry *h;
2465 struct elf_x86_link_hash_entry *eh;
2466 Elf_Internal_Sym *sym;
2467 asection *sec;
2468 bfd_vma off, offplt, plt_offset;
2469 bfd_vma relocation;
2470 bool unresolved_reloc;
2471 bfd_reloc_status_type r;
2472 int tls_type;
2473 asection *base_got, *resolved_plt;
2474 bfd_vma st_size;
2475 bool resolved_to_zero;
2476 bool relative_reloc;
2477 bool converted_reloc;
2478 bool need_copy_reloc_in_pie;
2479 bool no_copyreloc_p;
2480
2481 r_type = ELF32_R_TYPE (rel->r_info);
2482 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2483 || r_type == (int) R_X86_64_GNU_VTENTRY)
2484 {
2485 if (wrel != rel)
2486 *wrel = *rel;
2487 continue;
2488 }
2489
2490 r_symndx = htab->r_sym (rel->r_info);
2491 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2492 if (converted_reloc)
2493 {
2494 r_type &= ~R_X86_64_converted_reloc_bit;
2495 rel->r_info = htab->r_info (r_symndx, r_type);
2496 }
2497
2498 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2499 if (howto == NULL)
2500 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2501
2502 h = NULL;
2503 sym = NULL;
2504 sec = NULL;
2505 unresolved_reloc = false;
2506 if (r_symndx < symtab_hdr->sh_info)
2507 {
2508 sym = local_syms + r_symndx;
2509 sec = local_sections[r_symndx];
2510
2511 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2512 &sec, rel);
2513 st_size = sym->st_size;
2514
2515 /* Relocate against local STT_GNU_IFUNC symbol. */
2516 if (!bfd_link_relocatable (info)
2517 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2518 {
2519 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2520 rel, false);
2521 if (h == NULL)
2522 abort ();
2523
2524 /* Set STT_GNU_IFUNC symbol value. */
2525 h->root.u.def.value = sym->st_value;
2526 h->root.u.def.section = sec;
2527 }
2528 }
2529 else
2530 {
2531 bool warned ATTRIBUTE_UNUSED;
2532 bool ignored ATTRIBUTE_UNUSED;
2533
2534 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2535 r_symndx, symtab_hdr, sym_hashes,
2536 h, sec, relocation,
2537 unresolved_reloc, warned, ignored);
2538 st_size = h->size;
2539 }
2540
2541 if (sec != NULL && discarded_section (sec))
2542 {
2543 _bfd_clear_contents (howto, input_bfd, input_section,
2544 contents, rel->r_offset);
2545 wrel->r_offset = rel->r_offset;
2546 wrel->r_info = 0;
2547 wrel->r_addend = 0;
2548
2549 /* For ld -r, remove relocations in debug sections against
2550 sections defined in discarded sections. Not done for
2551 eh_frame, whose editing code expects them to be present. */
2552 if (bfd_link_relocatable (info)
2553 && (input_section->flags & SEC_DEBUGGING))
2554 wrel--;
2555
2556 continue;
2557 }
2558
2559 if (bfd_link_relocatable (info))
2560 {
2561 if (wrel != rel)
2562 *wrel = *rel;
2563 continue;
2564 }
2565
2566 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2567 {
2568 if (r_type == R_X86_64_64)
2569 {
2570 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2571 zero-extend it to 64 bits if the addend is zero. */
2572 r_type = R_X86_64_32;
2573 memset (contents + rel->r_offset + 4, 0, 4);
2574 }
2575 else if (r_type == R_X86_64_SIZE64)
2576 {
2577 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2578 zero-extend it to 64 bits if the addend is zero. */
2579 r_type = R_X86_64_SIZE32;
2580 memset (contents + rel->r_offset + 4, 0, 4);
2581 }
2582 }
2583
2584 eh = (struct elf_x86_link_hash_entry *) h;
2585
2586 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2587 it here if it is defined in a non-shared object. */
2588 if (h != NULL
2589 && h->type == STT_GNU_IFUNC
2590 && h->def_regular)
2591 {
2592 bfd_vma plt_index;
2593 const char *name;
2594
2595 if ((input_section->flags & SEC_ALLOC) == 0)
2596 {
2597 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2598 STT_GNU_IFUNC symbol as STT_FUNC. */
2599 if (elf_section_type (input_section) == SHT_NOTE)
2600 goto skip_ifunc;
2601 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2602 sections because such sections are not SEC_ALLOC and
2603 thus ld.so will not process them. */
2604 if ((input_section->flags & SEC_DEBUGGING) != 0)
2605 continue;
2606 abort ();
2607 }
2608
2609 switch (r_type)
2610 {
2611 default:
2612 break;
2613
2614 case R_X86_64_GOTPCREL:
2615 case R_X86_64_GOTPCRELX:
2616 case R_X86_64_REX_GOTPCRELX:
2617 case R_X86_64_GOTPCREL64:
2618 base_got = htab->elf.sgot;
2619 off = h->got.offset;
2620
2621 if (base_got == NULL)
2622 abort ();
2623
2624 if (off == (bfd_vma) -1)
2625 {
2626 /* We can't use h->got.offset here to save state, or
2627 even just remember the offset, as finish_dynamic_symbol
2628 would use that as offset into .got. */
2629
2630 if (h->plt.offset == (bfd_vma) -1)
2631 abort ();
2632
2633 if (htab->elf.splt != NULL)
2634 {
2635 plt_index = (h->plt.offset / plt_entry_size
2636 - htab->plt.has_plt0);
2637 off = (plt_index + 3) * GOT_ENTRY_SIZE;
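/* The first three .got.plt slots are reserved (the address of
   _DYNAMIC, the link map pointer and the lazy-resolver entry
   filled in by ld.so), hence the "+ 3" above.  */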
2638 base_got = htab->elf.sgotplt;
2639 }
2640 else
2641 {
2642 plt_index = h->plt.offset / plt_entry_size;
2643 off = plt_index * GOT_ENTRY_SIZE;
2644 base_got = htab->elf.igotplt;
2645 }
2646
2647 if (h->dynindx == -1
2648 || h->forced_local
2649 || info->symbolic)
2650 {
2651 /* This references the local definition. We must
2652 initialize this entry in the global offset table.
2653 Since the offset must always be a multiple of 8,
2654 we use the least significant bit to record
2655 whether we have initialized it already.
2656
2657 When doing a dynamic link, we create a .rela.got
2658 relocation entry to initialize the value. This
2659 is done in the finish_dynamic_symbol routine. */
2660 if ((off & 1) != 0)
2661 off &= ~1;
2662 else
2663 {
2664 bfd_put_64 (output_bfd, relocation,
2665 base_got->contents + off);
2666 /* Note that this is harmless for the GOTPLT64
2667 case, as -1 | 1 still is -1. */
2668 h->got.offset |= 1;
2669 }
2670 }
2671 }
2672
2673 relocation = (base_got->output_section->vma
2674 + base_got->output_offset + off);
2675
2676 goto do_relocation;
2677 }
2678
2679 if (h->plt.offset == (bfd_vma) -1)
2680 {
2681 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2682 if (r_type == htab->pointer_r_type
2683 && (input_section->flags & SEC_CODE) == 0)
2684 goto do_ifunc_pointer;
2685 goto bad_ifunc_reloc;
2686 }
2687
2688 /* STT_GNU_IFUNC symbol must go through PLT. */
2689 if (htab->elf.splt != NULL)
2690 {
2691 if (htab->plt_second != NULL)
2692 {
2693 resolved_plt = htab->plt_second;
2694 plt_offset = eh->plt_second.offset;
2695 }
2696 else
2697 {
2698 resolved_plt = htab->elf.splt;
2699 plt_offset = h->plt.offset;
2700 }
2701 }
2702 else
2703 {
2704 resolved_plt = htab->elf.iplt;
2705 plt_offset = h->plt.offset;
2706 }
2707
2708 relocation = (resolved_plt->output_section->vma
2709 + resolved_plt->output_offset + plt_offset);
2710
2711 switch (r_type)
2712 {
2713 default:
2714 bad_ifunc_reloc:
2715 if (h->root.root.string)
2716 name = h->root.root.string;
2717 else
2718 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2719 NULL);
2720 _bfd_error_handler
2721 /* xgettext:c-format */
2722 (_("%pB: relocation %s against STT_GNU_IFUNC "
2723 "symbol `%s' isn't supported"), input_bfd,
2724 howto->name, name);
2725 bfd_set_error (bfd_error_bad_value);
2726 return false;
2727
2728 case R_X86_64_32S:
2729 if (bfd_link_pic (info))
2730 abort ();
2731 goto do_relocation;
2732
2733 case R_X86_64_32:
2734 if (ABI_64_P (output_bfd))
2735 goto do_relocation;
2736 /* FALLTHROUGH */
2737 case R_X86_64_64:
2738 do_ifunc_pointer:
2739 if (rel->r_addend != 0)
2740 {
2741 if (h->root.root.string)
2742 name = h->root.root.string;
2743 else
2744 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2745 sym, NULL);
2746 _bfd_error_handler
2747 /* xgettext:c-format */
2748 (_("%pB: relocation %s against STT_GNU_IFUNC "
2749 "symbol `%s' has non-zero addend: %" PRId64),
2750 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2751 bfd_set_error (bfd_error_bad_value);
2752 return false;
2753 }
2754
2755 /* Generate a dynamic relocation only when there is a
2756 non-GOT reference in a shared object or there is no
2757 PLT. */
2758 if ((bfd_link_pic (info) && h->non_got_ref)
2759 || h->plt.offset == (bfd_vma) -1)
2760 {
2761 Elf_Internal_Rela outrel;
2762 asection *sreloc;
2763
2764 /* Need a dynamic relocation to get the real function
2765 address. */
2766 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2767 info,
2768 input_section,
2769 rel->r_offset);
2770 if (outrel.r_offset == (bfd_vma) -1
2771 || outrel.r_offset == (bfd_vma) -2)
2772 abort ();
2773
2774 outrel.r_offset += (input_section->output_section->vma
2775 + input_section->output_offset);
2776
2777 if (POINTER_LOCAL_IFUNC_P (info, h))
2778 {
2779 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2780 h->root.root.string,
2781 h->root.u.def.section->owner);
2782
2783 /* This symbol is resolved locally. */
2784 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2785 outrel.r_addend = (h->root.u.def.value
2786 + h->root.u.def.section->output_section->vma
2787 + h->root.u.def.section->output_offset);
2788
2789 if (htab->params->report_relative_reloc)
2790 _bfd_x86_elf_link_report_relative_reloc
2791 (info, input_section, h, sym,
2792 "R_X86_64_IRELATIVE", &outrel);
2793 }
2794 else
2795 {
2796 outrel.r_info = htab->r_info (h->dynindx, r_type);
2797 outrel.r_addend = 0;
2798 }
2799
2800 /* Dynamic relocations are stored in
2801 1. .rela.ifunc section in PIC object.
2802 2. .rela.got section in dynamic executable.
2803 3. .rela.iplt section in static executable. */
2804 if (bfd_link_pic (info))
2805 sreloc = htab->elf.irelifunc;
2806 else if (htab->elf.splt != NULL)
2807 sreloc = htab->elf.srelgot;
2808 else
2809 sreloc = htab->elf.irelplt;
2810 elf_append_rela (output_bfd, sreloc, &outrel);
2811
2812 /* If this reloc is against an external symbol, we
2813 do not want to fiddle with the addend. Otherwise,
2814 we need to include the symbol value so that it
2815 becomes an addend for the dynamic reloc. For an
2816 internal symbol, we have already updated the addend. */
2817 continue;
2818 }
2819 /* FALLTHROUGH */
2820 case R_X86_64_PC32:
2821 case R_X86_64_PC32_BND:
2822 case R_X86_64_PC64:
2823 case R_X86_64_PLT32:
2824 case R_X86_64_PLT32_BND:
2825 goto do_relocation;
2826 }
2827 }
2828
2829 skip_ifunc:
2830 resolved_to_zero = (eh != NULL
2831 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2832
2833 /* When generating a shared object, the relocations handled here are
2834 copied into the output file to be resolved at run time. */
2835 switch (r_type)
2836 {
2837 case R_X86_64_GOT32:
2838 case R_X86_64_GOT64:
2839 /* Relocation is to the entry for this symbol in the global
2840 offset table. */
2841 case R_X86_64_GOTPCREL:
2842 case R_X86_64_GOTPCRELX:
2843 case R_X86_64_REX_GOTPCRELX:
2844 case R_X86_64_GOTPCREL64:
2845 /* Use global offset table entry as symbol value. */
2846 case R_X86_64_GOTPLT64:
2847 /* This is obsolete and treated the same as GOT64. */
2848 base_got = htab->elf.sgot;
2849
2850 if (htab->elf.sgot == NULL)
2851 abort ();
2852
2853 relative_reloc = false;
2854 if (h != NULL)
2855 {
2856 off = h->got.offset;
2857 if (h->needs_plt
2858 && h->plt.offset != (bfd_vma)-1
2859 && off == (bfd_vma)-1)
2860 {
2861 /* We can't use h->got.offset here to save
2862 state, or even just remember the offset, as
2863 finish_dynamic_symbol would use that as offset into
2864 .got. */
2865 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2866 - htab->plt.has_plt0);
2867 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2868 base_got = htab->elf.sgotplt;
2869 }
2870
2871 if (RESOLVED_LOCALLY_P (info, h, htab))
2872 {
2873 /* We must initialize this entry in the global offset
2874 table. Since the offset must always be a multiple
2875 of 8, we use the least significant bit to record
2876 whether we have initialized it already.
2877
2878 When doing a dynamic link, we create a .rela.got
2879 relocation entry to initialize the value. This is
2880 done in the finish_dynamic_symbol routine. */
2881 if ((off & 1) != 0)
2882 off &= ~1;
2883 else
2884 {
2885 bfd_put_64 (output_bfd, relocation,
2886 base_got->contents + off);
2887 /* Note that this is harmless for the GOTPLT64 case,
2888 as -1 | 1 still is -1. */
2889 h->got.offset |= 1;
2890
2891 if (GENERATE_RELATIVE_RELOC_P (info, h))
2892 {
2893 /* If this symbol isn't dynamic in PIC,
2894 generate R_X86_64_RELATIVE here. */
2895 eh->no_finish_dynamic_symbol = 1;
2896 relative_reloc = true;
2897 }
2898 }
2899 }
2900 else
2901 unresolved_reloc = false;
2902 }
2903 else
2904 {
2905 if (local_got_offsets == NULL)
2906 abort ();
2907
2908 off = local_got_offsets[r_symndx];
2909
2910 /* The offset must always be a multiple of 8. We use
2911 the least significant bit to record whether we have
2912 already generated the necessary reloc. */
2913 if ((off & 1) != 0)
2914 off &= ~1;
2915 else
2916 {
2917 bfd_put_64 (output_bfd, relocation,
2918 base_got->contents + off);
2919 local_got_offsets[r_symndx] |= 1;
2920
2921 /* NB: GOTPCREL relocations against local absolute
2922 symbol store relocation value in the GOT slot
2923 without relative relocation. */
2924 if (bfd_link_pic (info)
2925 && !(sym->st_shndx == SHN_ABS
2926 && (r_type == R_X86_64_GOTPCREL
2927 || r_type == R_X86_64_GOTPCRELX
2928 || r_type == R_X86_64_REX_GOTPCRELX)))
2929 relative_reloc = true;
2930 }
2931 }
2932
2933 if (relative_reloc)
2934 {
2935 asection *s;
2936 Elf_Internal_Rela outrel;
2937
2938 /* We need to generate a R_X86_64_RELATIVE reloc
2939 for the dynamic linker. */
2940 s = htab->elf.srelgot;
2941 if (s == NULL)
2942 abort ();
2943
2944 outrel.r_offset = (base_got->output_section->vma
2945 + base_got->output_offset
2946 + off);
2947 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2948 outrel.r_addend = relocation;
2949
2950 if (htab->params->report_relative_reloc)
2951 _bfd_x86_elf_link_report_relative_reloc
2952 (info, input_section, h, sym, "R_X86_64_RELATIVE",
2953 &outrel);
2954
2955 elf_append_rela (output_bfd, s, &outrel);
2956 }
2957
2958 if (off >= (bfd_vma) -2)
2959 abort ();
2960
2961 relocation = base_got->output_section->vma
2962 + base_got->output_offset + off;
2963 if (r_type != R_X86_64_GOTPCREL
2964 && r_type != R_X86_64_GOTPCRELX
2965 && r_type != R_X86_64_REX_GOTPCRELX
2966 && r_type != R_X86_64_GOTPCREL64)
2967 relocation -= htab->elf.sgotplt->output_section->vma
2968 - htab->elf.sgotplt->output_offset;
2969
2970 break;
2971
2972 case R_X86_64_GOTOFF64:
2973 /* Relocation is relative to the start of the global offset
2974 table. */
2975
2976 /* Check to make sure it isn't a protected function or data
2977 symbol for a shared library, since it may not be local when
2978 used as a function address or with a copy relocation. We also
2979 need to make sure that the symbol is referenced locally. */
2980 if (bfd_link_pic (info) && h)
2981 {
2982 if (!h->def_regular)
2983 {
2984 const char *v;
2985
2986 switch (ELF_ST_VISIBILITY (h->other))
2987 {
2988 case STV_HIDDEN:
2989 v = _("hidden symbol");
2990 break;
2991 case STV_INTERNAL:
2992 v = _("internal symbol");
2993 break;
2994 case STV_PROTECTED:
2995 v = _("protected symbol");
2996 break;
2997 default:
2998 v = _("symbol");
2999 break;
3000 }
3001
3002 _bfd_error_handler
3003 /* xgettext:c-format */
3004 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3005 " `%s' can not be used when making a shared object"),
3006 input_bfd, v, h->root.root.string);
3007 bfd_set_error (bfd_error_bad_value);
3008 return false;
3009 }
3010 else if (!bfd_link_executable (info)
3011 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3012 && (h->type == STT_FUNC
3013 || h->type == STT_OBJECT)
3014 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3015 {
3016 _bfd_error_handler
3017 /* xgettext:c-format */
3018 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3019 " `%s' can not be used when making a shared object"),
3020 input_bfd,
3021 h->type == STT_FUNC ? "function" : "data",
3022 h->root.root.string);
3023 bfd_set_error (bfd_error_bad_value);
3024 return false;
3025 }
3026 }
3027
3028 /* Note that sgot is not involved in this
3029 calculation. We always want the start of .got.plt. If we
3030 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3031 permitted by the ABI, we might have to change this
3032 calculation. */
3033 relocation -= htab->elf.sgotplt->output_section->vma
3034 + htab->elf.sgotplt->output_offset;
3035 break;
3036
3037 case R_X86_64_GOTPC32:
3038 case R_X86_64_GOTPC64:
3039 /* Use global offset table as symbol value. */
3040 relocation = htab->elf.sgotplt->output_section->vma
3041 + htab->elf.sgotplt->output_offset;
3042 unresolved_reloc = false;
3043 break;
3044
3045 case R_X86_64_PLTOFF64:
3046 /* Relocation is PLT entry relative to GOT. For local
3047 symbols it's the symbol itself relative to GOT. */
3048 if (h != NULL
3049 /* See PLT32 handling. */
3050 && (h->plt.offset != (bfd_vma) -1
3051 || eh->plt_got.offset != (bfd_vma) -1)
3052 && htab->elf.splt != NULL)
3053 {
3054 if (eh->plt_got.offset != (bfd_vma) -1)
3055 {
3056 /* Use the GOT PLT. */
3057 resolved_plt = htab->plt_got;
3058 plt_offset = eh->plt_got.offset;
3059 }
3060 else if (htab->plt_second != NULL)
3061 {
3062 resolved_plt = htab->plt_second;
3063 plt_offset = eh->plt_second.offset;
3064 }
3065 else
3066 {
3067 resolved_plt = htab->elf.splt;
3068 plt_offset = h->plt.offset;
3069 }
3070
3071 relocation = (resolved_plt->output_section->vma
3072 + resolved_plt->output_offset
3073 + plt_offset);
3074 unresolved_reloc = false;
3075 }
3076
3077 relocation -= htab->elf.sgotplt->output_section->vma
3078 + htab->elf.sgotplt->output_offset;
3079 break;
3080
3081 case R_X86_64_PLT32:
3082 case R_X86_64_PLT32_BND:
3083 /* Relocation is to the entry for this symbol in the
3084 procedure linkage table. */
3085
3086 /* Resolve a PLT32 reloc against a local symbol directly,
3087 without using the procedure linkage table. */
3088 if (h == NULL)
3089 break;
3090
3091 if ((h->plt.offset == (bfd_vma) -1
3092 && eh->plt_got.offset == (bfd_vma) -1)
3093 || htab->elf.splt == NULL)
3094 {
3095 /* We didn't make a PLT entry for this symbol. This
3096 happens when statically linking PIC code, or when
3097 using -Bsymbolic. */
3098 break;
3099 }
3100
3101 use_plt:
3102 if (h->plt.offset != (bfd_vma) -1)
3103 {
3104 if (htab->plt_second != NULL)
3105 {
3106 resolved_plt = htab->plt_second;
3107 plt_offset = eh->plt_second.offset;
3108 }
3109 else
3110 {
3111 resolved_plt = htab->elf.splt;
3112 plt_offset = h->plt.offset;
3113 }
3114 }
3115 else
3116 {
3117 /* Use the GOT PLT. */
3118 resolved_plt = htab->plt_got;
3119 plt_offset = eh->plt_got.offset;
3120 }
3121
3122 relocation = (resolved_plt->output_section->vma
3123 + resolved_plt->output_offset
3124 + plt_offset);
3125 unresolved_reloc = false;
3126 break;
3127
3128 case R_X86_64_SIZE32:
3129 case R_X86_64_SIZE64:
3130 /* Set to symbol size. */
3131 relocation = st_size;
3132 goto direct;
3133
3134 case R_X86_64_PC8:
3135 case R_X86_64_PC16:
3136 case R_X86_64_PC32:
3137 case R_X86_64_PC32_BND:
3138 /* Don't complain about -fPIC if the symbol is undefined when
3139 building an executable, unless it is an unresolved weak symbol,
3140 references a dynamic definition in a PIE, or -z nocopyreloc
3141 is used. */
3142 no_copyreloc_p
3143 = (info->nocopyreloc
3144 || (h != NULL
3145 && !h->root.linker_def
3146 && !h->root.ldscript_def
3147 && eh->def_protected
3148 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3149
3150 if ((input_section->flags & SEC_ALLOC) != 0
3151 && (input_section->flags & SEC_READONLY) != 0
3152 && h != NULL
3153 && ((bfd_link_executable (info)
3154 && ((h->root.type == bfd_link_hash_undefweak
3155 && (eh == NULL
3156 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3157 eh)))
3158 || (bfd_link_pie (info)
3159 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3160 && h->def_dynamic)
3161 || (no_copyreloc_p
3162 && h->def_dynamic
3163 && !(h->root.u.def.section->flags & SEC_CODE))))
3164 || bfd_link_dll (info)))
3165 {
3166 bool fail = false;
3167 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3168 {
3169 /* Symbol is referenced locally. Make sure it is
3170 defined locally. */
3171 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3172 }
3173 else if (bfd_link_pie (info))
3174 {
3175 /* We can only use PC-relative relocations in PIE
3176 from non-code sections. */
3177 if (h->type == STT_FUNC
3178 && (sec->flags & SEC_CODE) != 0)
3179 fail = true;
3180 }
3181 else if (no_copyreloc_p || bfd_link_dll (info))
3182 {
3183 /* Symbol doesn't need copy reloc and isn't
3184 referenced locally. Don't allow PC-relative
3185 relocations against default and protected
3186 symbols since address of protected function
3187 and location of protected data may not be in
3188 the shared object. */
3189 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3190 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3191 }
3192
3193 if (fail)
3194 return elf_x86_64_need_pic (info, input_bfd, input_section,
3195 h, NULL, NULL, howto);
3196 }
3197 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3198 as function address. */
3199 else if (h != NULL
3200 && (input_section->flags & SEC_CODE) == 0
3201 && bfd_link_pie (info)
3202 && h->type == STT_FUNC
3203 && !h->def_regular
3204 && h->def_dynamic)
3205 goto use_plt;
3206 /* Fall through. */
3207
3208 case R_X86_64_8:
3209 case R_X86_64_16:
3210 case R_X86_64_32:
3211 case R_X86_64_PC64:
3212 case R_X86_64_64:
3213 /* FIXME: The ABI says the linker should make sure the value is
3214 the same when it's zero-extended to 64 bits. */
3215
3216 direct:
3217 if ((input_section->flags & SEC_ALLOC) == 0)
3218 break;
3219
3220 need_copy_reloc_in_pie = (bfd_link_pie (info)
3221 && h != NULL
3222 && (h->needs_copy
3223 || eh->needs_copy
3224 || (h->root.type
3225 == bfd_link_hash_undefined))
3226 && (X86_PCREL_TYPE_P (r_type)
3227 || X86_SIZE_TYPE_P (r_type)));
3228
3229 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3230 need_copy_reloc_in_pie,
3231 resolved_to_zero, false))
3232 {
3233 Elf_Internal_Rela outrel;
3234 bool skip, relocate;
3235 asection *sreloc;
3236 const char *relative_reloc_name = NULL;
3237
3238 /* When generating a shared object, these relocations
3239 are copied into the output file to be resolved at run
3240 time. */
3241 skip = false;
3242 relocate = false;
3243
3244 outrel.r_offset =
3245 _bfd_elf_section_offset (output_bfd, info, input_section,
3246 rel->r_offset);
3247 if (outrel.r_offset == (bfd_vma) -1)
3248 skip = true;
3249 else if (outrel.r_offset == (bfd_vma) -2)
3250 skip = true, relocate = true;
3251
3252 outrel.r_offset += (input_section->output_section->vma
3253 + input_section->output_offset);
3254
3255 if (skip)
3256 memset (&outrel, 0, sizeof outrel);
3257
3258 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3259 {
3260 outrel.r_info = htab->r_info (h->dynindx, r_type);
3261 outrel.r_addend = rel->r_addend;
3262 }
3263 else
3264 {
3265 /* This symbol is local, or marked to become local.
3266 When relocation overflow check is disabled, we
3267 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3268 if (r_type == htab->pointer_r_type
3269 || (r_type == R_X86_64_32
3270 && htab->params->no_reloc_overflow_check))
3271 {
3272 relocate = true;
3273 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3274 outrel.r_addend = relocation + rel->r_addend;
3275 relative_reloc_name = "R_X86_64_RELATIVE";
3276 }
3277 else if (r_type == R_X86_64_64
3278 && !ABI_64_P (output_bfd))
3279 {
3280 relocate = true;
3281 outrel.r_info = htab->r_info (0,
3282 R_X86_64_RELATIVE64);
3283 outrel.r_addend = relocation + rel->r_addend;
3284 relative_reloc_name = "R_X86_64_RELATIVE64";
3285 /* Check addend overflow. */
3286 if ((outrel.r_addend & 0x80000000)
3287 != (rel->r_addend & 0x80000000))
3288 {
3289 const char *name;
3290 int addend = rel->r_addend;
3291 if (h && h->root.root.string)
3292 name = h->root.root.string;
3293 else
3294 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3295 sym, NULL);
3296 _bfd_error_handler
3297 /* xgettext:c-format */
3298 (_("%pB: addend %s%#x in relocation %s against "
3299 "symbol `%s' at %#" PRIx64
3300 " in section `%pA' is out of range"),
3301 input_bfd, addend < 0 ? "-" : "", addend,
3302 howto->name, name, (uint64_t) rel->r_offset,
3303 input_section);
3304 bfd_set_error (bfd_error_bad_value);
3305 return false;
3306 }
3307 }
3308 else
3309 {
3310 long sindx;
3311
3312 if (bfd_is_abs_section (sec))
3313 sindx = 0;
3314 else if (sec == NULL || sec->owner == NULL)
3315 {
3316 bfd_set_error (bfd_error_bad_value);
3317 return false;
3318 }
3319 else
3320 {
3321 asection *osec;
3322
3323 /* We are turning this relocation into one
3324 against a section symbol. It would be
3325 proper to subtract the symbol's value,
3326 osec->vma, from the emitted reloc addend,
3327 but ld.so expects buggy relocs. */
3328 osec = sec->output_section;
3329 sindx = elf_section_data (osec)->dynindx;
3330 if (sindx == 0)
3331 {
3332 asection *oi = htab->elf.text_index_section;
3333 sindx = elf_section_data (oi)->dynindx;
3334 }
3335 BFD_ASSERT (sindx != 0);
3336 }
3337
3338 outrel.r_info = htab->r_info (sindx, r_type);
3339 outrel.r_addend = relocation + rel->r_addend;
3340 }
3341 }
3342
3343 sreloc = elf_section_data (input_section)->sreloc;
3344
3345 if (sreloc == NULL || sreloc->contents == NULL)
3346 {
3347 r = bfd_reloc_notsupported;
3348 goto check_relocation_error;
3349 }
3350
3351 if (relative_reloc_name
3352 && htab->params->report_relative_reloc)
3353 _bfd_x86_elf_link_report_relative_reloc
3354 (info, input_section, h, sym, relative_reloc_name,
3355 &outrel);
3356
3357 elf_append_rela (output_bfd, sreloc, &outrel);
3358
3359 /* If this reloc is against an external symbol, we do
3360 not want to fiddle with the addend. Otherwise, we
3361 need to include the symbol value so that it becomes
3362 an addend for the dynamic reloc. */
3363 if (! relocate)
3364 continue;
3365 }
3366
3367 break;
3368
3369 case R_X86_64_TLSGD:
3370 case R_X86_64_GOTPC32_TLSDESC:
3371 case R_X86_64_TLSDESC_CALL:
3372 case R_X86_64_GOTTPOFF:
3373 tls_type = GOT_UNKNOWN;
3374 if (h == NULL && local_got_offsets)
3375 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3376 else if (h != NULL)
3377 tls_type = elf_x86_hash_entry (h)->tls_type;
3378
3379 r_type_tls = r_type;
3380 if (! elf_x86_64_tls_transition (info, input_bfd,
3381 input_section, contents,
3382 symtab_hdr, sym_hashes,
3383 &r_type_tls, tls_type, rel,
3384 relend, h, r_symndx, true))
3385 return false;
3386
3387 if (r_type_tls == R_X86_64_TPOFF32)
3388 {
3389 bfd_vma roff = rel->r_offset;
3390
3391 BFD_ASSERT (! unresolved_reloc);
3392
3393 if (r_type == R_X86_64_TLSGD)
3394 {
3395 /* GD->LE transition. For 64bit, change
3396 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3397 .word 0x6666; rex64; call __tls_get_addr@PLT
3398 or
3399 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3400 .byte 0x66; rex64
3401 call *__tls_get_addr@GOTPCREL(%rip)
3402 which may be converted to
3403 addr32 call __tls_get_addr
3404 into:
3405 movq %fs:0, %rax
3406 leaq foo@tpoff(%rax), %rax
3407 For 32bit, change
3408 leaq foo@tlsgd(%rip), %rdi
3409 .word 0x6666; rex64; call __tls_get_addr@PLT
3410 or
3411 leaq foo@tlsgd(%rip), %rdi
3412 .byte 0x66; rex64
3413 call *__tls_get_addr@GOTPCREL(%rip)
3414 which may be converted to
3415 addr32 call __tls_get_addr
3416 into:
3417 movl %fs:0, %eax
3418 leaq foo@tpoff(%rax), %rax
3419 For largepic, change:
3420 leaq foo@tlsgd(%rip), %rdi
3421 movabsq $__tls_get_addr@pltoff, %rax
3422 addq %r15, %rax
3423 call *%rax
3424 into:
3425 movq %fs:0, %rax
3426 leaq foo@tpoff(%rax), %rax
3427 nopw 0x0(%rax,%rax,1) */
3428 int largepic = 0;
3429 if (ABI_64_P (output_bfd))
3430 {
3431 if (contents[roff + 5] == 0xb8)
3432 {
3433 if (roff < 3
3434 || (roff - 3 + 22) > input_section->size)
3435 {
3436 corrupt_input:
3437 info->callbacks->einfo
3438 (_("%F%P: corrupt input: %pB\n"),
3439 input_bfd);
3440 return false;
3441 }
3442 memcpy (contents + roff - 3,
3443 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3444 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3445 largepic = 1;
3446 }
3447 else
3448 {
3449 if (roff < 4
3450 || (roff - 4 + 16) > input_section->size)
3451 goto corrupt_input;
3452 memcpy (contents + roff - 4,
3453 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3454 16);
3455 }
3456 }
3457 else
3458 {
3459 if (roff < 3
3460 || (roff - 3 + 15) > input_section->size)
3461 goto corrupt_input;
3462 memcpy (contents + roff - 3,
3463 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3464 15);
3465 }
3466 bfd_put_32 (output_bfd,
3467 elf_x86_64_tpoff (info, relocation),
3468 contents + roff + 8 + largepic);
3469 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3470 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3471 rel++;
3472 wrel++;
3473 continue;
3474 }
3475 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3476 {
3477 /* GDesc -> LE transition.
3478 It's originally something like:
3479 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3480 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3481
3482 Change it to:
3483 movq $x@tpoff, %rax <--- LP64 mode.
3484 rex movl $x@tpoff, %eax <--- X32 mode.
3485 */
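/* As with the GOTPCRELX load conversion above, the lea opcode
   (8d) becomes mov $imm32, %reg (c7 /0): REX.W is preserved,
   REX.R migrates to REX.B, and the reg field of the old ModRM
   byte becomes the rm field of the new register-direct one.  */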
3486
3487 unsigned int val, type;
3488
3489 if (roff < 3)
3490 goto corrupt_input;
3491 type = bfd_get_8 (input_bfd, contents + roff - 3);
3492 val = bfd_get_8 (input_bfd, contents + roff - 1);
3493 bfd_put_8 (output_bfd,
3494 (type & 0x48) | ((type >> 2) & 1),
3495 contents + roff - 3);
3496 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3497 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3498 contents + roff - 1);
3499 bfd_put_32 (output_bfd,
3500 elf_x86_64_tpoff (info, relocation),
3501 contents + roff);
3502 continue;
3503 }
3504 else if (r_type == R_X86_64_TLSDESC_CALL)
3505 {
3506 /* GDesc -> LE transition.
3507 It's originally:
3508 call *(%rax) <--- LP64 mode.
3509 call *(%eax) <--- X32 mode.
3510 Turn it into:
3511 xchg %ax,%ax <-- LP64 mode.
3512 nopl (%rax) <-- X32 mode.
3513 */
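/* "call *(%rax)" is ff 10, two bytes, the same size as
   xchg %ax,%ax (66 90); with the 67 address-size prefix used in
   X32 mode it is three bytes, matched by nopl (%rax)
   (0f 1f 00).  */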
3514 unsigned int prefix = 0;
3515 if (!ABI_64_P (input_bfd))
3516 {
3517 /* Check for call *x@tlsdesc(%eax). */
3518 if (contents[roff] == 0x67)
3519 prefix = 1;
3520 }
3521 if (prefix)
3522 {
3523 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3524 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3525 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3526 }
3527 else
3528 {
3529 bfd_put_8 (output_bfd, 0x66, contents + roff);
3530 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3531 }
3532 continue;
3533 }
3534 else if (r_type == R_X86_64_GOTTPOFF)
3535 {
3536 /* IE->LE transition:
3537 For 64bit, originally it can be one of:
3538 movq foo@gottpoff(%rip), %reg
3539 addq foo@gottpoff(%rip), %reg
3540 We change it into:
3541 movq $foo, %reg
3542 leaq foo(%reg), %reg
3543 addq $foo, %reg.
3544 For 32bit, originally it can be one of:
3545 movq foo@gottpoff(%rip), %reg
3546 addl foo@gottpoff(%rip), %reg
3547 We change it into:
3548 movq $foo, %reg
3549 leal foo(%reg), %reg
3550 addl $foo, %reg. */
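/* ROFF points at the 4-byte @gottpoff displacement, so ROFF - 3
   is the REX prefix (if any), ROFF - 2 the opcode (8b for mov,
   03 for add) and ROFF - 1 the ModRM byte whose reg field names
   the destination register.  */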
3551
3552 unsigned int val, type, reg;
3553
3554 if (roff >= 3)
3555 val = bfd_get_8 (input_bfd, contents + roff - 3);
3556 else
3557 {
3558 if (roff < 2)
3559 goto corrupt_input;
3560 val = 0;
3561 }
3562 type = bfd_get_8 (input_bfd, contents + roff - 2);
3563 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3564 reg >>= 3;
3565 if (type == 0x8b)
3566 {
3567 /* movq */
3568 if (val == 0x4c)
3569 {
3570 if (roff < 3)
3571 goto corrupt_input;
3572 bfd_put_8 (output_bfd, 0x49,
3573 contents + roff - 3);
3574 }
3575 else if (!ABI_64_P (output_bfd) && val == 0x44)
3576 {
3577 if (roff < 3)
3578 goto corrupt_input;
3579 bfd_put_8 (output_bfd, 0x41,
3580 contents + roff - 3);
3581 }
3582 bfd_put_8 (output_bfd, 0xc7,
3583 contents + roff - 2);
3584 bfd_put_8 (output_bfd, 0xc0 | reg,
3585 contents + roff - 1);
3586 }
3587 else if (reg == 4)
3588 {
3589 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3590 is special */
3591 if (val == 0x4c)
3592 {
3593 if (roff < 3)
3594 goto corrupt_input;
3595 bfd_put_8 (output_bfd, 0x49,
3596 contents + roff - 3);
3597 }
3598 else if (!ABI_64_P (output_bfd) && val == 0x44)
3599 {
3600 if (roff < 3)
3601 goto corrupt_input;
3602 bfd_put_8 (output_bfd, 0x41,
3603 contents + roff - 3);
3604 }
3605 bfd_put_8 (output_bfd, 0x81,
3606 contents + roff - 2);
3607 bfd_put_8 (output_bfd, 0xc0 | reg,
3608 contents + roff - 1);
3609 }
3610 else
3611 {
3612 /* addq/addl -> leaq/leal */
3613 if (val == 0x4c)
3614 {
3615 if (roff < 3)
3616 goto corrupt_input;
3617 bfd_put_8 (output_bfd, 0x4d,
3618 contents + roff - 3);
3619 }
3620 else if (!ABI_64_P (output_bfd) && val == 0x44)
3621 {
3622 if (roff < 3)
3623 goto corrupt_input;
3624 bfd_put_8 (output_bfd, 0x45,
3625 contents + roff - 3);
3626 }
3627 bfd_put_8 (output_bfd, 0x8d,
3628 contents + roff - 2);
3629 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3630 contents + roff - 1);
3631 }
3632 bfd_put_32 (output_bfd,
3633 elf_x86_64_tpoff (info, relocation),
3634 contents + roff);
3635 continue;
3636 }
3637 else
3638 BFD_ASSERT (false);
3639 }
3640
3641 if (htab->elf.sgot == NULL)
3642 abort ();
3643
3644 if (h != NULL)
3645 {
3646 off = h->got.offset;
3647 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3648 }
3649 else
3650 {
3651 if (local_got_offsets == NULL)
3652 abort ();
3653
3654 off = local_got_offsets[r_symndx];
3655 offplt = local_tlsdesc_gotents[r_symndx];
3656 }
3657
3658 if ((off & 1) != 0)
3659 off &= ~1;
3660 else
3661 {
3662 Elf_Internal_Rela outrel;
3663 int dr_type, indx;
3664 asection *sreloc;
3665
3666 if (htab->elf.srelgot == NULL)
3667 abort ();
3668
3669 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3670
3671 if (GOT_TLS_GDESC_P (tls_type))
3672 {
3673 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3674 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3675 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3676 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3677 + htab->elf.sgotplt->output_offset
3678 + offplt
3679 + htab->sgotplt_jump_table_size);
3680 sreloc = htab->elf.srelplt;
3681 if (indx == 0)
3682 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3683 else
3684 outrel.r_addend = 0;
3685 elf_append_rela (output_bfd, sreloc, &outrel);
3686 }
3687
3688 sreloc = htab->elf.srelgot;
3689
3690 outrel.r_offset = (htab->elf.sgot->output_section->vma
3691 + htab->elf.sgot->output_offset + off);
3692
3693 if (GOT_TLS_GD_P (tls_type))
3694 dr_type = R_X86_64_DTPMOD64;
3695 else if (GOT_TLS_GDESC_P (tls_type))
3696 goto dr_done;
3697 else
3698 dr_type = R_X86_64_TPOFF64;
3699
3700 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3701 outrel.r_addend = 0;
3702 if ((dr_type == R_X86_64_TPOFF64
3703 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3704 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3705 outrel.r_info = htab->r_info (indx, dr_type);
3706
3707 elf_append_rela (output_bfd, sreloc, &outrel);
3708
3709 if (GOT_TLS_GD_P (tls_type))
3710 {
3711 if (indx == 0)
3712 {
3713 BFD_ASSERT (! unresolved_reloc);
3714 bfd_put_64 (output_bfd,
3715 relocation - _bfd_x86_elf_dtpoff_base (info),
3716 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3717 }
3718 else
3719 {
3720 bfd_put_64 (output_bfd, 0,
3721 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3722 outrel.r_info = htab->r_info (indx,
3723 R_X86_64_DTPOFF64);
3724 outrel.r_offset += GOT_ENTRY_SIZE;
3725 elf_append_rela (output_bfd, sreloc,
3726 &outrel);
3727 }
3728 }
3729
3730 dr_done:
3731 if (h != NULL)
3732 h->got.offset |= 1;
3733 else
3734 local_got_offsets[r_symndx] |= 1;
3735 }
3736
3737 if (off >= (bfd_vma) -2
3738 && ! GOT_TLS_GDESC_P (tls_type))
3739 abort ();
3740 if (r_type_tls == r_type)
3741 {
3742 if (r_type == R_X86_64_GOTPC32_TLSDESC
3743 || r_type == R_X86_64_TLSDESC_CALL)
3744 relocation = htab->elf.sgotplt->output_section->vma
3745 + htab->elf.sgotplt->output_offset
3746 + offplt + htab->sgotplt_jump_table_size;
3747 else
3748 relocation = htab->elf.sgot->output_section->vma
3749 + htab->elf.sgot->output_offset + off;
3750 unresolved_reloc = false;
3751 }
3752 else
3753 {
3754 bfd_vma roff = rel->r_offset;
3755
3756 if (r_type == R_X86_64_TLSGD)
3757 {
3758 /* GD->IE transition. For 64bit, change
3759 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3760 .word 0x6666; rex64; call __tls_get_addr@PLT
3761 or
3762 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3763 .byte 0x66; rex64
3764 call *__tls_get_addr@GOTPCREL(%rip)
3765 which may be converted to
3766 addr32 call __tls_get_addr
3767 into:
3768 movq %fs:0, %rax
3769 addq foo@gottpoff(%rip), %rax
3770 For 32bit, change
3771 leaq foo@tlsgd(%rip), %rdi
3772 .word 0x6666; rex64; call __tls_get_addr@PLT
3773 or
3774 leaq foo@tlsgd(%rip), %rdi
3775 .byte 0x66; rex64;
3776 call *__tls_get_addr@GOTPCREL(%rip)
3777 which may be converted to
3778 addr32 call __tls_get_addr
3779 into:
3780 movl %fs:0, %eax
3781 addq foo@gottpoff(%rip), %rax
3782 For largepic, change:
3783 leaq foo@tlsgd(%rip), %rdi
3784 movabsq $__tls_get_addr@pltoff, %rax
3785 addq %r15, %rax
3786 call *%rax
3787 into:
3788 movq %fs:0, %rax
3789 addq foo@gottpoff(%rip), %rax
3790 nopw 0x0(%rax,%rax,1) */
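/* Worked example (addresses purely illustrative): after the 64-bit,
   non-largepic rewrite the addq's 4-byte displacement field sits at
   roff + 8 and the instruction ends at roff + 12, so the value stored
   below is the GOT entry address minus the end-of-instruction address,
   e.g. 0x404018 - 0x401040 = 0x2fd8.  */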
3791 int largepic = 0;
3792 if (ABI_64_P (output_bfd))
3793 {
3794 if (contents[roff + 5] == 0xb8)
3795 {
3796 if (roff < 3
3797 || (roff - 3 + 22) > input_section->size)
3798 goto corrupt_input;
3799 memcpy (contents + roff - 3,
3800 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3801 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3802 largepic = 1;
3803 }
3804 else
3805 {
3806 if (roff < 4
3807 || (roff - 4 + 16) > input_section->size)
3808 goto corrupt_input;
3809 memcpy (contents + roff - 4,
3810 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3811 16);
3812 }
3813 }
3814 else
3815 {
3816 if (roff < 3
3817 || (roff - 3 + 15) > input_section->size)
3818 goto corrupt_input;
3819 memcpy (contents + roff - 3,
3820 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3821 15);
3822 }
3823
3824 relocation = (htab->elf.sgot->output_section->vma
3825 + htab->elf.sgot->output_offset + off
3826 - roff
3827 - largepic
3828 - input_section->output_section->vma
3829 - input_section->output_offset
3830 - 12);
3831 bfd_put_32 (output_bfd, relocation,
3832 contents + roff + 8 + largepic);
3833 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3834 rel++;
3835 wrel++;
3836 continue;
3837 }
3838 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3839 {
3840 /* GDesc -> IE transition.
3841 It's originally something like:
3842 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3843 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3844
3845 Change it to:
3846 # before xchg %ax,%ax in LP64 mode.
3847 movq x@gottpoff(%rip), %rax
3848 # before nopl (%rax) in X32 mode.
3849 rex movl x@gottpoff(%rip), %eax
3850 */
3851
3852 /* Now modify the instruction as appropriate. To
3853 turn a lea into a mov in the form we use it, it
3854 suffices to change the second byte from 0x8d to
3855 0x8b. */
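/* For example (illustrative): "leaq x@tlsdesc(%rip), %rax" is encoded
   48 8d 05 <disp32>; rewriting the opcode byte to 0x8b yields
   "movq x@gottpoff(%rip), %rax", i.e. 48 8b 05 <disp32>, with the
   displacement patched below.  */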
3856 if (roff < 2)
3857 goto corrupt_input;
3858 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3859
3860 bfd_put_32 (output_bfd,
3861 htab->elf.sgot->output_section->vma
3862 + htab->elf.sgot->output_offset + off
3863 - rel->r_offset
3864 - input_section->output_section->vma
3865 - input_section->output_offset
3866 - 4,
3867 contents + roff);
3868 continue;
3869 }
3870 else if (r_type == R_X86_64_TLSDESC_CALL)
3871 {
3872 /* GDesc -> IE transition.
3873 It's originally:
3874 call *(%rax) <--- LP64 mode.
3875 call *(%eax) <--- X32 mode.
3876
3877 Change it to:
3878 xchg %ax, %ax <-- LP64 mode.
3879 nopl (%rax) <-- X32 mode.
3880 */
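/* For example (illustrative): the two-byte "call *(%rax)" (ff 10)
   becomes "xchg %ax, %ax" (66 90); with the 0x67 address-size prefix,
   the three-byte "call *(%eax)" (67 ff 10) becomes "nopl (%rax)"
   (0f 1f 00), so the instruction length is preserved in both cases.  */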
3881
3882 unsigned int prefix = 0;
3883 if (!ABI_64_P (input_bfd))
3884 {
3885 /* Check for call *x@tlsdesc(%eax). */
3886 if (contents[roff] == 0x67)
3887 prefix = 1;
3888 }
3889 if (prefix)
3890 {
3891 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3892 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3893 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3894 }
3895 else
3896 {
3897 bfd_put_8 (output_bfd, 0x66, contents + roff);
3898 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3899 }
3900 continue;
3901 }
3902 else
3903 BFD_ASSERT (false);
3904 }
3905 break;
3906
3907 case R_X86_64_TLSLD:
3908 if (! elf_x86_64_tls_transition (info, input_bfd,
3909 input_section, contents,
3910 symtab_hdr, sym_hashes,
3911 &r_type, GOT_UNKNOWN, rel,
3912 relend, h, r_symndx, true))
3913 return false;
3914
3915 if (r_type != R_X86_64_TLSLD)
3916 {
3917 /* LD->LE transition:
3918 leaq foo@tlsld(%rip), %rdi
3919 call __tls_get_addr@PLT
3920 For 64bit, we change it into:
3921 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3922 For 32bit, we change it into:
3923 nopl 0x0(%rax); movl %fs:0, %eax
3924 Or
3925 leaq foo@tlsld(%rip), %rdi;
3926 call *__tls_get_addr@GOTPCREL(%rip)
3927 which may be converted to
3928 addr32 call __tls_get_addr
3929 For 64bit, we change it into:
3930 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3931 For 32bit, we change it into:
3932 nopw 0x0(%rax); movl %fs:0, %eax
3933 For largepic, change:
3934 leaq foo@tlsld(%rip), %rdi
3935 movabsq $__tls_get_addr@pltoff, %rax
3936 addq %rbx, %rax
3937 call *%rax
3938 into
3939 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3940 movq %fs:0, %rax */
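/* For example (illustrative, 64-bit, direct call case): the 12 bytes of
   "leaq foo@tlsld(%rip), %rdi; call __tls_get_addr@PLT" starting at
   r_offset - 3 are overwritten with 66 66 66 64 48 8b 04 25 00 00 00 00,
   i.e. "movq %fs:0, %rax" padded with 0x66 prefixes so that no stray
   bytes are left behind.  */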
3941
3942 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3943 if (ABI_64_P (output_bfd))
3944 {
3945 if ((rel->r_offset + 5) >= input_section->size)
3946 goto corrupt_input;
3947 if (contents[rel->r_offset + 5] == 0xb8)
3948 {
3949 if (rel->r_offset < 3
3950 || (rel->r_offset - 3 + 22) > input_section->size)
3951 goto corrupt_input;
3952 memcpy (contents + rel->r_offset - 3,
3953 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3954 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3955 }
3956 else if (contents[rel->r_offset + 4] == 0xff
3957 || contents[rel->r_offset + 4] == 0x67)
3958 {
3959 if (rel->r_offset < 3
3960 || (rel->r_offset - 3 + 13) > input_section->size)
3961 goto corrupt_input;
3962 memcpy (contents + rel->r_offset - 3,
3963 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3964 13);
3965
3966 }
3967 else
3968 {
3969 if (rel->r_offset < 3
3970 || (rel->r_offset - 3 + 12) > input_section->size)
3971 goto corrupt_input;
3972 memcpy (contents + rel->r_offset - 3,
3973 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3974 }
3975 }
3976 else
3977 {
3978 if ((rel->r_offset + 4) >= input_section->size)
3979 goto corrupt_input;
3980 if (contents[rel->r_offset + 4] == 0xff)
3981 {
3982 if (rel->r_offset < 3
3983 || (rel->r_offset - 3 + 13) > input_section->size)
3984 goto corrupt_input;
3985 memcpy (contents + rel->r_offset - 3,
3986 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3987 13);
3988 }
3989 else
3990 {
3991 if (rel->r_offset < 3
3992 || (rel->r_offset - 3 + 12) > input_section->size)
3993 goto corrupt_input;
3994 memcpy (contents + rel->r_offset - 3,
3995 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3996 }
3997 }
3998 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3999 and R_X86_64_PLTOFF64. */
4000 rel++;
4001 wrel++;
4002 continue;
4003 }
4004
4005 if (htab->elf.sgot == NULL)
4006 abort ();
4007
4008 off = htab->tls_ld_or_ldm_got.offset;
4009 if (off & 1)
4010 off &= ~1;
4011 else
4012 {
4013 Elf_Internal_Rela outrel;
4014
4015 if (htab->elf.srelgot == NULL)
4016 abort ();
4017
4018 outrel.r_offset = (htab->elf.sgot->output_section->vma
4019 + htab->elf.sgot->output_offset + off);
4020
4021 bfd_put_64 (output_bfd, 0,
4022 htab->elf.sgot->contents + off);
4023 bfd_put_64 (output_bfd, 0,
4024 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4025 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4026 outrel.r_addend = 0;
4027 elf_append_rela (output_bfd, htab->elf.srelgot,
4028 &outrel);
4029 htab->tls_ld_or_ldm_got.offset |= 1;
4030 }
4031 relocation = htab->elf.sgot->output_section->vma
4032 + htab->elf.sgot->output_offset + off;
4033 unresolved_reloc = false;
4034 break;
4035
4036 case R_X86_64_DTPOFF32:
4037 if (!bfd_link_executable (info)
4038 || (input_section->flags & SEC_CODE) == 0)
4039 relocation -= _bfd_x86_elf_dtpoff_base (info);
4040 else
4041 relocation = elf_x86_64_tpoff (info, relocation);
4042 break;
4043
4044 case R_X86_64_TPOFF32:
4045 case R_X86_64_TPOFF64:
4046 BFD_ASSERT (bfd_link_executable (info));
4047 relocation = elf_x86_64_tpoff (info, relocation);
4048 break;
4049
4050 case R_X86_64_DTPOFF64:
4051 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4052 relocation -= _bfd_x86_elf_dtpoff_base (info);
4053 break;
4054
4055 default:
4056 break;
4057 }
4058
4059 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4060 because such sections are not SEC_ALLOC and thus ld.so will
4061 not process them. */
4062 if (unresolved_reloc
4063 && !((input_section->flags & SEC_DEBUGGING) != 0
4064 && h->def_dynamic)
4065 && _bfd_elf_section_offset (output_bfd, info, input_section,
4066 rel->r_offset) != (bfd_vma) -1)
4067 {
4068 switch (r_type)
4069 {
4070 case R_X86_64_32S:
4071 sec = h->root.u.def.section;
4072 if ((info->nocopyreloc
4073 || (eh->def_protected
4074 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4075 && !(h->root.u.def.section->flags & SEC_CODE))
4076 return elf_x86_64_need_pic (info, input_bfd, input_section,
4077 h, NULL, NULL, howto);
4078 /* Fall through. */
4079
4080 default:
4081 _bfd_error_handler
4082 /* xgettext:c-format */
4083 (_("%pB(%pA+%#" PRIx64 "): "
4084 "unresolvable %s relocation against symbol `%s'"),
4085 input_bfd,
4086 input_section,
4087 (uint64_t) rel->r_offset,
4088 howto->name,
4089 h->root.root.string);
4090 return false;
4091 }
4092 }
4093
4094 do_relocation:
4095 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4096 contents, rel->r_offset,
4097 relocation, rel->r_addend);
4098
4099 check_relocation_error:
4100 if (r != bfd_reloc_ok)
4101 {
4102 const char *name;
4103
4104 if (h != NULL)
4105 name = h->root.root.string;
4106 else
4107 {
4108 name = bfd_elf_string_from_elf_section (input_bfd,
4109 symtab_hdr->sh_link,
4110 sym->st_name);
4111 if (name == NULL)
4112 return false;
4113 if (*name == '\0')
4114 name = bfd_section_name (sec);
4115 }
4116
4117 if (r == bfd_reloc_overflow)
4118 {
4119 if (converted_reloc)
4120 {
4121 info->callbacks->einfo
4122 ("%X%H:", input_bfd, input_section, rel->r_offset);
4123 info->callbacks->einfo
4124 (_(" failed to convert GOTPCREL relocation against "
4125 "'%s'; relink with --no-relax\n"),
4126 name);
4127 status = false;
4128 continue;
4129 }
4130 (*info->callbacks->reloc_overflow)
4131 (info, (h ? &h->root : NULL), name, howto->name,
4132 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4133 }
4134 else
4135 {
4136 _bfd_error_handler
4137 /* xgettext:c-format */
4138 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4139 input_bfd, input_section,
4140 (uint64_t) rel->r_offset, name, (int) r);
4141 return false;
4142 }
4143 }
4144
4145 if (wrel != rel)
4146 *wrel = *rel;
4147 }
4148
4149 if (wrel != rel)
4150 {
4151 Elf_Internal_Shdr *rel_hdr;
4152 size_t deleted = rel - wrel;
4153
4154 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4155 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4156 if (rel_hdr->sh_size == 0)
4157 {
4158 /* It is too late to remove an empty reloc section. Leave
4159 one NONE reloc.
4160 ??? What is wrong with an empty section??? */
4161 rel_hdr->sh_size = rel_hdr->sh_entsize;
4162 deleted -= 1;
4163 }
4164 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4165 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4166 input_section->reloc_count -= deleted;
4167 }
4168
4169 return status;
4170 }
4171
4172 /* Finish up dynamic symbol handling. We set the contents of various
4173 dynamic sections here. */
4174
4175 static bool
4176 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4177 struct bfd_link_info *info,
4178 struct elf_link_hash_entry *h,
4179 Elf_Internal_Sym *sym)
4180 {
4181 struct elf_x86_link_hash_table *htab;
4182 bool use_plt_second;
4183 struct elf_x86_link_hash_entry *eh;
4184 bool local_undefweak;
4185
4186 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4187 if (htab == NULL)
4188 return false;
4189
4190 /* Use the second PLT section only if there is a .plt section. */
4191 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4192
4193 eh = (struct elf_x86_link_hash_entry *) h;
4194 if (eh->no_finish_dynamic_symbol)
4195 abort ();
4196
4197 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4198 resolved undefined weak symbols in executable so that their
4199 references have value 0 at run-time. */
4200 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4201
4202 if (h->plt.offset != (bfd_vma) -1)
4203 {
4204 bfd_vma plt_index;
4205 bfd_vma got_offset, plt_offset;
4206 Elf_Internal_Rela rela;
4207 bfd_byte *loc;
4208 asection *plt, *gotplt, *relplt, *resolved_plt;
4209 const struct elf_backend_data *bed;
4210 bfd_vma plt_got_pcrel_offset;
4211
4212 /* When building a static executable, use .iplt, .igot.plt and
4213 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4214 if (htab->elf.splt != NULL)
4215 {
4216 plt = htab->elf.splt;
4217 gotplt = htab->elf.sgotplt;
4218 relplt = htab->elf.srelplt;
4219 }
4220 else
4221 {
4222 plt = htab->elf.iplt;
4223 gotplt = htab->elf.igotplt;
4224 relplt = htab->elf.irelplt;
4225 }
4226
4227 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4228
4229 /* Get the index in the procedure linkage table which
4230 corresponds to this symbol. This is the index of this symbol
4231 in all the symbols for which we are making plt entries. The
4232 first entry in the procedure linkage table is reserved.
4233
4234 Get the offset into the .got table of the entry that
4235 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4236 bytes. The first three are reserved for the dynamic linker.
4237
4238 For static executables, we don't reserve anything. */
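/* Worked example (sizes illustrative): with 16-byte lazy PLT entries
   and PLT0 present, a symbol whose h->plt.offset is 0x20 occupies PLT
   slot 0x20 / 16 - 1 = 1, so its .got.plt entry lands at offset
   (1 + 3) * GOT_ENTRY_SIZE = 0x20, just past the three reserved
   slots.  */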
4239
4240 if (plt == htab->elf.splt)
4241 {
4242 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4243 - htab->plt.has_plt0);
4244 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4245 }
4246 else
4247 {
4248 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4249 got_offset = got_offset * GOT_ENTRY_SIZE;
4250 }
4251
4252 /* Fill in the entry in the procedure linkage table. */
4253 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4254 htab->plt.plt_entry_size);
4255 if (use_plt_second)
4256 {
4257 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4258 htab->non_lazy_plt->plt_entry,
4259 htab->non_lazy_plt->plt_entry_size);
4260
4261 resolved_plt = htab->plt_second;
4262 plt_offset = eh->plt_second.offset;
4263 }
4264 else
4265 {
4266 resolved_plt = plt;
4267 plt_offset = h->plt.offset;
4268 }
4269
4270 /* Insert the relocation positions of the plt section. */
4271
4272 /* Store the offset to the GOT entry in the PC-relative instruction that
4273 references it, subtracting the size of that instruction. */
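/* Worked example (addresses purely illustrative): if the .got.plt slot
   is at 0x404020 and the GOT-referencing instruction in the PLT entry
   ends at 0x401036, the 32-bit value stored below is
   0x404020 - 0x401036 = 0x2fea.  */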
4274 plt_got_pcrel_offset = (gotplt->output_section->vma
4275 + gotplt->output_offset
4276 + got_offset
4277 - resolved_plt->output_section->vma
4278 - resolved_plt->output_offset
4279 - plt_offset
4280 - htab->plt.plt_got_insn_size);
4281
4282 /* Check PC-relative offset overflow in PLT entry. */
4283 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4284 /* xgettext:c-format */
4285 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4286 output_bfd, h->root.root.string);
4287
4288 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4289 (resolved_plt->contents + plt_offset
4290 + htab->plt.plt_got_offset));
4291
4292 /* Fill in the entry in the global offset table; initially this
4293 points to the second part of the PLT entry. Leave the entry
4294 as zero for an undefined weak symbol in PIE, since no PLT
4295 relocation is emitted against such a symbol. */
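/* Sketch of the lazy case (illustrative): the GOT entry written below
   initially holds the address plt_lazy_offset bytes into this PLT
   entry, so the first call falls through to the push/jmp pair and into
   PLT0, which enters the dynamic linker; the resolver then replaces
   the GOT entry with the real function address.  */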
4296 if (!local_undefweak)
4297 {
4298 if (htab->plt.has_plt0)
4299 bfd_put_64 (output_bfd, (plt->output_section->vma
4300 + plt->output_offset
4301 + h->plt.offset
4302 + htab->lazy_plt->plt_lazy_offset),
4303 gotplt->contents + got_offset);
4304
4305 /* Fill in the entry in the .rela.plt section. */
4306 rela.r_offset = (gotplt->output_section->vma
4307 + gotplt->output_offset
4308 + got_offset);
4309 if (PLT_LOCAL_IFUNC_P (info, h))
4310 {
4311 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4312 h->root.root.string,
4313 h->root.u.def.section->owner);
4314
4315 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4316 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4317 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4318 rela.r_addend = (h->root.u.def.value
4319 + h->root.u.def.section->output_section->vma
4320 + h->root.u.def.section->output_offset);
4321
4322 if (htab->params->report_relative_reloc)
4323 _bfd_x86_elf_link_report_relative_reloc
4324 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4325
4326 /* R_X86_64_IRELATIVE comes last. */
4327 plt_index = htab->next_irelative_index--;
4328 }
4329 else
4330 {
4331 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4332 rela.r_addend = 0;
4333 plt_index = htab->next_jump_slot_index++;
4334 }
4335
4336 /* Don't fill in the second and third slots of the PLT entry for
4337 static executables or when PLT0 is not used. */
4338 if (plt == htab->elf.splt && htab->plt.has_plt0)
4339 {
4340 bfd_vma plt0_offset
4341 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4342
4343 /* Put relocation index. */
4344 bfd_put_32 (output_bfd, plt_index,
4345 (plt->contents + h->plt.offset
4346 + htab->lazy_plt->plt_reloc_offset));
4347
4348 /* Put offset for jmp .PLT0 and check for overflow. We don't
4349 check relocation index for overflow since branch displacement
4350 will overflow first. */
4351 if (plt0_offset > 0x80000000)
4352 /* xgettext:c-format */
4353 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4354 output_bfd, h->root.root.string);
4355 bfd_put_32 (output_bfd, - plt0_offset,
4356 (plt->contents + h->plt.offset
4357 + htab->lazy_plt->plt_plt_offset));
4358 }
4359
4360 bed = get_elf_backend_data (output_bfd);
4361 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4362 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4363 }
4364 }
4365 else if (eh->plt_got.offset != (bfd_vma) -1)
4366 {
4367 bfd_vma got_offset, plt_offset;
4368 asection *plt, *got;
4369 bool got_after_plt;
4370 int32_t got_pcrel_offset;
4371
4372 /* Set the entry in the GOT procedure linkage table. */
4373 plt = htab->plt_got;
4374 got = htab->elf.sgot;
4375 got_offset = h->got.offset;
4376
4377 if (got_offset == (bfd_vma) -1
4378 || (h->type == STT_GNU_IFUNC && h->def_regular)
4379 || plt == NULL
4380 || got == NULL)
4381 abort ();
4382
4383 /* Use the non-lazy PLT entry template for the GOT PLT since they
4384 are identical. */
4385 /* Fill in the entry in the GOT procedure linkage table. */
4386 plt_offset = eh->plt_got.offset;
4387 memcpy (plt->contents + plt_offset,
4388 htab->non_lazy_plt->plt_entry,
4389 htab->non_lazy_plt->plt_entry_size);
4390
4391 /* Put offset the PC-relative instruction referring to the GOT
4392 entry, subtracting the size of that instruction. */
4393 got_pcrel_offset = (got->output_section->vma
4394 + got->output_offset
4395 + got_offset
4396 - plt->output_section->vma
4397 - plt->output_offset
4398 - plt_offset
4399 - htab->non_lazy_plt->plt_got_insn_size);
4400
4401 /* Check PC-relative offset overflow in GOT PLT entry. */
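/* The difference was truncated to 32 bits above, so the sign serves as
   the overflow test: e.g. if .got lies above .plt the true difference
   is positive, and a negative got_pcrel_offset can only mean the value
   did not fit in 32 bits.  */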
4402 got_after_plt = got->output_section->vma > plt->output_section->vma;
4403 if ((got_after_plt && got_pcrel_offset < 0)
4404 || (!got_after_plt && got_pcrel_offset > 0))
4405 /* xgettext:c-format */
4406 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4407 output_bfd, h->root.root.string);
4408
4409 bfd_put_32 (output_bfd, got_pcrel_offset,
4410 (plt->contents + plt_offset
4411 + htab->non_lazy_plt->plt_got_offset));
4412 }
4413
4414 if (!local_undefweak
4415 && !h->def_regular
4416 && (h->plt.offset != (bfd_vma) -1
4417 || eh->plt_got.offset != (bfd_vma) -1))
4418 {
4419 /* Mark the symbol as undefined, rather than as defined in
4420 the .plt section. Leave the value if there were any
4421 relocations where pointer equality matters (this is a clue
4422 for the dynamic linker, to make function pointer
4423 comparisons work between an application and shared
4424 library), otherwise set it to zero. If a function is only
4425 called from a binary, there is no need to slow down
4426 shared libraries because of that. */
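/* Illustration (hypothetical user code):
     extern void foo (void);
     int is_foo (void (*p) (void)) { return p == &foo; }
   The executable and a shared library must agree on the value of &foo,
   which is why st_value keeps the PLT entry address here.  */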
4427 sym->st_shndx = SHN_UNDEF;
4428 if (!h->pointer_equality_needed)
4429 sym->st_value = 0;
4430 }
4431
4432 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4433
4434 /* Don't generate dynamic GOT relocation against undefined weak
4435 symbol in executable. */
4436 if (h->got.offset != (bfd_vma) -1
4437 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4438 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4439 && !local_undefweak)
4440 {
4441 Elf_Internal_Rela rela;
4442 asection *relgot = htab->elf.srelgot;
4443 const char *relative_reloc_name = NULL;
4444
4445 /* This symbol has an entry in the global offset table. Set it
4446 up. */
4447 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4448 abort ();
4449
4450 rela.r_offset = (htab->elf.sgot->output_section->vma
4451 + htab->elf.sgot->output_offset
4452 + (h->got.offset &~ (bfd_vma) 1));
4453
4454 /* If this is a static link, or it is a -Bsymbolic link and the
4455 symbol is defined locally or was forced to be local because
4456 of a version file, we just want to emit a RELATIVE reloc.
4457 The entry in the global offset table will already have been
4458 initialized in the relocate_section function. */
4459 if (h->def_regular
4460 && h->type == STT_GNU_IFUNC)
4461 {
4462 if (h->plt.offset == (bfd_vma) -1)
4463 {
4464 /* STT_GNU_IFUNC is referenced without PLT. */
4465 if (htab->elf.splt == NULL)
4466 {
4467 /* Use the .rel[a].iplt section to store .got relocations
4468 in a static executable. */
4469 relgot = htab->elf.irelplt;
4470 }
4471 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4472 {
4473 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4474 h->root.root.string,
4475 h->root.u.def.section->owner);
4476
4477 rela.r_info = htab->r_info (0,
4478 R_X86_64_IRELATIVE);
4479 rela.r_addend = (h->root.u.def.value
4480 + h->root.u.def.section->output_section->vma
4481 + h->root.u.def.section->output_offset);
4482 relative_reloc_name = "R_X86_64_IRELATIVE";
4483 }
4484 else
4485 goto do_glob_dat;
4486 }
4487 else if (bfd_link_pic (info))
4488 {
4489 /* Generate R_X86_64_GLOB_DAT. */
4490 goto do_glob_dat;
4491 }
4492 else
4493 {
4494 asection *plt;
4495 bfd_vma plt_offset;
4496
4497 if (!h->pointer_equality_needed)
4498 abort ();
4499
4500 /* For a non-shared object, we can't use .got.plt, which
4501 contains the real function address, if we need pointer
4502 equality. We load the GOT entry with the PLT entry address. */
4503 if (htab->plt_second != NULL)
4504 {
4505 plt = htab->plt_second;
4506 plt_offset = eh->plt_second.offset;
4507 }
4508 else
4509 {
4510 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4511 plt_offset = h->plt.offset;
4512 }
4513 bfd_put_64 (output_bfd, (plt->output_section->vma
4514 + plt->output_offset
4515 + plt_offset),
4516 htab->elf.sgot->contents + h->got.offset);
4517 return true;
4518 }
4519 }
4520 else if (bfd_link_pic (info)
4521 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4522 {
4523 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4524 return false;
4525 BFD_ASSERT((h->got.offset & 1) != 0);
4526 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4527 rela.r_addend = (h->root.u.def.value
4528 + h->root.u.def.section->output_section->vma
4529 + h->root.u.def.section->output_offset);
4530 relative_reloc_name = "R_X86_64_RELATIVE";
4531 }
4532 else
4533 {
4534 BFD_ASSERT((h->got.offset & 1) == 0);
4535 do_glob_dat:
4536 bfd_put_64 (output_bfd, (bfd_vma) 0,
4537 htab->elf.sgot->contents + h->got.offset);
4538 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4539 rela.r_addend = 0;
4540 }
4541
4542 if (relative_reloc_name != NULL
4543 && htab->params->report_relative_reloc)
4544 _bfd_x86_elf_link_report_relative_reloc
4545 (info, relgot, h, sym, relative_reloc_name, &rela);
4546
4547 elf_append_rela (output_bfd, relgot, &rela);
4548 }
4549
4550 if (h->needs_copy)
4551 {
4552 Elf_Internal_Rela rela;
4553 asection *s;
4554
4555 /* This symbol needs a copy reloc. Set it up. */
4556 VERIFY_COPY_RELOC (h, htab)
4557
4558 rela.r_offset = (h->root.u.def.value
4559 + h->root.u.def.section->output_section->vma
4560 + h->root.u.def.section->output_offset);
4561 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4562 rela.r_addend = 0;
4563 if (h->root.u.def.section == htab->elf.sdynrelro)
4564 s = htab->elf.sreldynrelro;
4565 else
4566 s = htab->elf.srelbss;
4567 elf_append_rela (output_bfd, s, &rela);
4568 }
4569
4570 return true;
4571 }
4572
4573 /* Finish up local dynamic symbol handling. We set the contents of
4574 various dynamic sections here. */
4575
4576 static int
4577 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4578 {
4579 struct elf_link_hash_entry *h
4580 = (struct elf_link_hash_entry *) *slot;
4581 struct bfd_link_info *info
4582 = (struct bfd_link_info *) inf;
4583
4584 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4585 info, h, NULL);
4586 }
4587
4588 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4589 here since an undefined weak symbol may not be dynamic and
4590 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4591
4592 static bool
4593 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4594 void *inf)
4595 {
4596 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4597 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4598
4599 if (h->root.type != bfd_link_hash_undefweak
4600 || h->dynindx != -1)
4601 return true;
4602
4603 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4604 info, h, NULL);
4605 }
4606
4607 /* Used to decide how to sort relocs in an optimal manner for the
4608 dynamic linker, before writing them out. */
4609
4610 static enum elf_reloc_type_class
4611 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4612 const asection *rel_sec ATTRIBUTE_UNUSED,
4613 const Elf_Internal_Rela *rela)
4614 {
4615 bfd *abfd = info->output_bfd;
4616 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4617 struct elf_x86_link_hash_table *htab
4618 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4619
4620 if (htab->elf.dynsym != NULL
4621 && htab->elf.dynsym->contents != NULL)
4622 {
4623 /* Check relocation against STT_GNU_IFUNC symbol if there are
4624 dynamic symbols. */
4625 unsigned long r_symndx = htab->r_sym (rela->r_info);
4626 if (r_symndx != STN_UNDEF)
4627 {
4628 Elf_Internal_Sym sym;
4629 if (!bed->s->swap_symbol_in (abfd,
4630 (htab->elf.dynsym->contents
4631 + r_symndx * bed->s->sizeof_sym),
4632 0, &sym))
4633 abort ();
4634
4635 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4636 return reloc_class_ifunc;
4637 }
4638 }
4639
4640 switch ((int) ELF32_R_TYPE (rela->r_info))
4641 {
4642 case R_X86_64_IRELATIVE:
4643 return reloc_class_ifunc;
4644 case R_X86_64_RELATIVE:
4645 case R_X86_64_RELATIVE64:
4646 return reloc_class_relative;
4647 case R_X86_64_JUMP_SLOT:
4648 return reloc_class_plt;
4649 case R_X86_64_COPY:
4650 return reloc_class_copy;
4651 default:
4652 return reloc_class_normal;
4653 }
4654 }
4655
4656 /* Finish up the dynamic sections. */
4657
4658 static bool
4659 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4660 struct bfd_link_info *info)
4661 {
4662 struct elf_x86_link_hash_table *htab;
4663
4664 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4665 if (htab == NULL)
4666 return false;
4667
4668 if (! htab->elf.dynamic_sections_created)
4669 return true;
4670
4671 if (htab->elf.splt && htab->elf.splt->size > 0)
4672 {
4673 elf_section_data (htab->elf.splt->output_section)
4674 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4675
4676 if (htab->plt.has_plt0)
4677 {
4678 /* Fill in the special first entry in the procedure linkage
4679 table. */
4680 memcpy (htab->elf.splt->contents,
4681 htab->lazy_plt->plt0_entry,
4682 htab->lazy_plt->plt0_entry_size);
4683 /* Add the offset for pushq GOT+8(%rip); since the instruction
4684 uses 6 bytes, subtract this value. */
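/* Worked example (addresses purely illustrative): with .plt at 0x401020
   and .got.plt at 0x404000, the pushq targets GOT+8 = 0x404008 and the
   6-byte instruction ends at 0x401026, so the stored displacement is
   0x404008 - 0x401026 = 0x2fe2.  */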
4685 bfd_put_32 (output_bfd,
4686 (htab->elf.sgotplt->output_section->vma
4687 + htab->elf.sgotplt->output_offset
4688 + 8
4689 - htab->elf.splt->output_section->vma
4690 - htab->elf.splt->output_offset
4691 - 6),
4692 (htab->elf.splt->contents
4693 + htab->lazy_plt->plt0_got1_offset));
4694 /* Add offset for the PC-relative instruction accessing
4695 GOT+16, subtracting the offset to the end of that
4696 instruction. */
4697 bfd_put_32 (output_bfd,
4698 (htab->elf.sgotplt->output_section->vma
4699 + htab->elf.sgotplt->output_offset
4700 + 16
4701 - htab->elf.splt->output_section->vma
4702 - htab->elf.splt->output_offset
4703 - htab->lazy_plt->plt0_got2_insn_end),
4704 (htab->elf.splt->contents
4705 + htab->lazy_plt->plt0_got2_offset));
4706 }
4707
4708 if (htab->elf.tlsdesc_plt)
4709 {
4710 bfd_put_64 (output_bfd, (bfd_vma) 0,
4711 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4712
4713 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4714 htab->lazy_plt->plt_tlsdesc_entry,
4715 htab->lazy_plt->plt_tlsdesc_entry_size);
4716
4717 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4718 bytes and the instruction uses 6 bytes, subtract these
4719 values. */
4720 bfd_put_32 (output_bfd,
4721 (htab->elf.sgotplt->output_section->vma
4722 + htab->elf.sgotplt->output_offset
4723 + 8
4724 - htab->elf.splt->output_section->vma
4725 - htab->elf.splt->output_offset
4726 - htab->elf.tlsdesc_plt
4727 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4728 (htab->elf.splt->contents
4729 + htab->elf.tlsdesc_plt
4730 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4731 /* Add offset for indirect branch via GOT+TDG, where TDG
4732 stands for htab->tlsdesc_got, subtracting the offset
4733 to the end of that instruction. */
4734 bfd_put_32 (output_bfd,
4735 (htab->elf.sgot->output_section->vma
4736 + htab->elf.sgot->output_offset
4737 + htab->elf.tlsdesc_got
4738 - htab->elf.splt->output_section->vma
4739 - htab->elf.splt->output_offset
4740 - htab->elf.tlsdesc_plt
4741 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4742 (htab->elf.splt->contents
4743 + htab->elf.tlsdesc_plt
4744 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4745 }
4746 }
4747
4748 /* Fill PLT entries for undefined weak symbols in PIE. */
4749 if (bfd_link_pie (info))
4750 bfd_hash_traverse (&info->hash->table,
4751 elf_x86_64_pie_finish_undefweak_symbol,
4752 info);
4753
4754 return true;
4755 }
4756
4757 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4758 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4759 It has to be done before elf_link_sort_relocs is called so that
4760 dynamic relocations are properly sorted. */
4761
4762 static bool
4763 elf_x86_64_output_arch_local_syms
4764 (bfd *output_bfd ATTRIBUTE_UNUSED,
4765 struct bfd_link_info *info,
4766 void *flaginfo ATTRIBUTE_UNUSED,
4767 int (*func) (void *, const char *,
4768 Elf_Internal_Sym *,
4769 asection *,
4770 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4771 {
4772 struct elf_x86_link_hash_table *htab
4773 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4774 if (htab == NULL)
4775 return false;
4776
4777 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4778 htab_traverse (htab->loc_hash_table,
4779 elf_x86_64_finish_local_dynamic_symbol,
4780 info);
4781
4782 return true;
4783 }
4784
4785 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4786 dynamic relocations. */
4787
4788 static long
4789 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4790 long symcount ATTRIBUTE_UNUSED,
4791 asymbol **syms ATTRIBUTE_UNUSED,
4792 long dynsymcount,
4793 asymbol **dynsyms,
4794 asymbol **ret)
4795 {
4796 long count, i, n;
4797 int j;
4798 bfd_byte *plt_contents;
4799 long relsize;
4800 const struct elf_x86_lazy_plt_layout *lazy_plt;
4801 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4802 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4803 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4804 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4805 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4806 asection *plt;
4807 enum elf_x86_plt_type plt_type;
4808 struct elf_x86_plt plts[] =
4809 {
4810 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4811 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4812 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4813 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4814 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4815 };
4816
4817 *ret = NULL;
4818
4819 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4820 return 0;
4821
4822 if (dynsymcount <= 0)
4823 return 0;
4824
4825 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4826 if (relsize <= 0)
4827 return -1;
4828
4829 lazy_plt = &elf_x86_64_lazy_plt;
4830 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4831 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4832 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4833 if (ABI_64_P (abfd))
4834 {
4835 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4836 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4837 }
4838 else
4839 {
4840 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4841 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4842 }
4843
4844 count = 0;
4845 for (j = 0; plts[j].name != NULL; j++)
4846 {
4847 plt = bfd_get_section_by_name (abfd, plts[j].name);
4848 if (plt == NULL || plt->size == 0)
4849 continue;
4850
4851 /* Get the PLT section contents. */
4852 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4853 break;
4854
4855 /* Check what kind of PLT it is. */
4856 plt_type = plt_unknown;
4857 if (plts[j].type == plt_unknown
4858 && (plt->size >= (lazy_plt->plt_entry_size
4859 + lazy_plt->plt_entry_size)))
4860 {
4861 /* Match lazy PLT first. Need to check the first two
4862 instructions. */
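/* For reference (byte patterns illustrative): a traditional lazy PLT0
   starts with ff 35 <disp32> (pushq GOT+8(%rip)) followed by
   ff 25 <disp32> (jmp *GOT+16(%rip)), while the BND variant uses
   f2 ff 25 at offset 6; hence the comparison of 2 or 3 bytes after the
   first displacement below.  */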
4863 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4864 lazy_plt->plt0_got1_offset) == 0)
4865 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4866 2) == 0))
4867 plt_type = plt_lazy;
4868 else if (lazy_bnd_plt != NULL
4869 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4870 lazy_bnd_plt->plt0_got1_offset) == 0)
4871 && (memcmp (plt_contents + 6,
4872 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4873 {
4874 plt_type = plt_lazy | plt_second;
4875 /* The first entry in the lazy IBT PLT is the same as in the
4876 lazy BND PLT. */
4877 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4878 lazy_ibt_plt->plt_entry,
4879 lazy_ibt_plt->plt_got_offset) == 0))
4880 lazy_plt = lazy_ibt_plt;
4881 else
4882 lazy_plt = lazy_bnd_plt;
4883 }
4884 }
4885
4886 if (non_lazy_plt != NULL
4887 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4888 && plt->size >= non_lazy_plt->plt_entry_size)
4889 {
4890 /* Match non-lazy PLT. */
4891 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4892 non_lazy_plt->plt_got_offset) == 0)
4893 plt_type = plt_non_lazy;
4894 }
4895
4896 if (plt_type == plt_unknown || plt_type == plt_second)
4897 {
4898 if (non_lazy_bnd_plt != NULL
4899 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4900 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4901 non_lazy_bnd_plt->plt_got_offset) == 0))
4902 {
4903 /* Match BND PLT. */
4904 plt_type = plt_second;
4905 non_lazy_plt = non_lazy_bnd_plt;
4906 }
4907 else if (non_lazy_ibt_plt != NULL
4908 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4909 && (memcmp (plt_contents,
4910 non_lazy_ibt_plt->plt_entry,
4911 non_lazy_ibt_plt->plt_got_offset) == 0))
4912 {
4913 /* Match IBT PLT. */
4914 plt_type = plt_second;
4915 non_lazy_plt = non_lazy_ibt_plt;
4916 }
4917 }
4918
4919 if (plt_type == plt_unknown)
4920 {
4921 free (plt_contents);
4922 continue;
4923 }
4924
4925 plts[j].sec = plt;
4926 plts[j].type = plt_type;
4927
4928 if ((plt_type & plt_lazy))
4929 {
4930 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4931 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4932 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4933 /* Skip PLT0 in lazy PLT. */
4934 i = 1;
4935 }
4936 else
4937 {
4938 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4939 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4940 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4941 i = 0;
4942 }
4943
4944 /* Skip lazy PLT when the second PLT is used. */
4945 if (plt_type == (plt_lazy | plt_second))
4946 plts[j].count = 0;
4947 else
4948 {
4949 n = plt->size / plts[j].plt_entry_size;
4950 plts[j].count = n;
4951 count += n - i;
4952 }
4953
4954 plts[j].contents = plt_contents;
4955 }
4956
4957 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4958 (bfd_vma) 0, plts, dynsyms,
4959 ret);
4960 }
4961
4962 /* Handle an x86-64 specific section when reading an object file. This
4963 is called when elfcode.h finds a section with an unknown type. */
4964
4965 static bool
4966 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4967 const char *name, int shindex)
4968 {
4969 if (hdr->sh_type != SHT_X86_64_UNWIND)
4970 return false;
4971
4972 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4973 return false;
4974
4975 return true;
4976 }
4977
4978 /* Hook called by the linker routine which adds symbols from an object
4979 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4980 of .bss. */
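/* A large common symbol typically comes from the medium or large code
   model, e.g. (hypothetical compile command) something like
     gcc -mcmodel=medium -mlarge-data-threshold=65536 -c big.c
   where big.c contains a sizable uninitialized object such as
     char buf[1 << 20];
   the assembler then records it with st_shndx == SHN_X86_64_LCOMMON
   rather than SHN_COMMON.  */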
4981
4982 static bool
4983 elf_x86_64_add_symbol_hook (bfd *abfd,
4984 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4985 Elf_Internal_Sym *sym,
4986 const char **namep ATTRIBUTE_UNUSED,
4987 flagword *flagsp ATTRIBUTE_UNUSED,
4988 asection **secp,
4989 bfd_vma *valp)
4990 {
4991 asection *lcomm;
4992
4993 switch (sym->st_shndx)
4994 {
4995 case SHN_X86_64_LCOMMON:
4996 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4997 if (lcomm == NULL)
4998 {
4999 lcomm = bfd_make_section_with_flags (abfd,
5000 "LARGE_COMMON",
5001 (SEC_ALLOC
5002 | SEC_IS_COMMON
5003 | SEC_LINKER_CREATED));
5004 if (lcomm == NULL)
5005 return false;
5006 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5007 }
5008 *secp = lcomm;
5009 *valp = sym->st_size;
5010 return true;
5011 }
5012
5013 return true;
5014 }
5015
5016
5017 /* Given a BFD section, try to locate the corresponding ELF section
5018 index. */
5019
5020 static bool
5021 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5022 asection *sec, int *index_return)
5023 {
5024 if (sec == &_bfd_elf_large_com_section)
5025 {
5026 *index_return = SHN_X86_64_LCOMMON;
5027 return true;
5028 }
5029 return false;
5030 }
5031
5032 /* Process a symbol. */
5033
5034 static void
5035 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5036 asymbol *asym)
5037 {
5038 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5039
5040 switch (elfsym->internal_elf_sym.st_shndx)
5041 {
5042 case SHN_X86_64_LCOMMON:
5043 asym->section = &_bfd_elf_large_com_section;
5044 asym->value = elfsym->internal_elf_sym.st_size;
5045 /* Common symbol doesn't set BSF_GLOBAL. */
5046 asym->flags &= ~BSF_GLOBAL;
5047 break;
5048 }
5049 }
5050
5051 static bool
5052 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5053 {
5054 return (sym->st_shndx == SHN_COMMON
5055 || sym->st_shndx == SHN_X86_64_LCOMMON);
5056 }
5057
5058 static unsigned int
5059 elf_x86_64_common_section_index (asection *sec)
5060 {
5061 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5062 return SHN_COMMON;
5063 else
5064 return SHN_X86_64_LCOMMON;
5065 }
5066
5067 static asection *
5068 elf_x86_64_common_section (asection *sec)
5069 {
5070 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5071 return bfd_com_section_ptr;
5072 else
5073 return &_bfd_elf_large_com_section;
5074 }
5075
5076 static bool
5077 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5078 const Elf_Internal_Sym *sym,
5079 asection **psec,
5080 bool newdef,
5081 bool olddef,
5082 bfd *oldbfd,
5083 const asection *oldsec)
5084 {
5085 /* A normal common symbol and a large common symbol result in a
5086 normal common symbol. We turn the large common symbol into a
5087 normal one. */
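/* Illustration (hypothetical): one object supplies "int shared_buf[4];"
   as a normal common (SHN_COMMON) while another supplies
   "int shared_buf[1 << 16];" as a large common (SHN_X86_64_LCOMMON);
   after the merge the symbol is treated as a normal common and is
   allocated in the ordinary COMMON area rather than .lbss.  */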
5088 if (!olddef
5089 && h->root.type == bfd_link_hash_common
5090 && !newdef
5091 && bfd_is_com_section (*psec)
5092 && oldsec != *psec)
5093 {
5094 if (sym->st_shndx == SHN_COMMON
5095 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5096 {
5097 h->root.u.c.p->section
5098 = bfd_make_section_old_way (oldbfd, "COMMON");
5099 h->root.u.c.p->section->flags = SEC_ALLOC;
5100 }
5101 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5102 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5103 *psec = bfd_com_section_ptr;
5104 }
5105
5106 return true;
5107 }
5108
5109 static int
5110 elf_x86_64_additional_program_headers (bfd *abfd,
5111 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5112 {
5113 asection *s;
5114 int count = 0;
5115
5116 /* Check to see if we need a large readonly segment. */
5117 s = bfd_get_section_by_name (abfd, ".lrodata");
5118 if (s && (s->flags & SEC_LOAD))
5119 count++;
5120
5121 /* Check to see if we need a large data segment. Since the .lbss section
5122 is placed right after the .bss section, there should be no need for
5123 a large data segment just because of .lbss. */
5124 s = bfd_get_section_by_name (abfd, ".ldata");
5125 if (s && (s->flags & SEC_LOAD))
5126 count++;
5127
5128 return count;
5129 }
5130
5131 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5132
5133 static bool
5134 elf_x86_64_relocs_compatible (const bfd_target *input,
5135 const bfd_target *output)
5136 {
5137 return ((xvec_get_elf_backend_data (input)->s->elfclass
5138 == xvec_get_elf_backend_data (output)->s->elfclass)
5139 && _bfd_elf_relocs_compatible (input, output));
5140 }
5141
5142 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5143 with GNU properties if found. Otherwise, return NULL. */
5144
5145 static bfd *
5146 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5147 {
5148 struct elf_x86_init_table init_table;
5149 const struct elf_backend_data *bed;
5150 struct elf_x86_link_hash_table *htab;
5151
5152 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5153 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5154 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5155 != (int) R_X86_64_GNU_VTINHERIT)
5156 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5157 != (int) R_X86_64_GNU_VTENTRY))
5158 abort ();
5159
5160 /* This is unused for x86-64. */
5161 init_table.plt0_pad_byte = 0x90;
5162
5163 bed = get_elf_backend_data (info->output_bfd);
5164 htab = elf_x86_hash_table (info, bed->target_id);
5165 if (!htab)
5166 abort ();
5167 if (htab->params->bndplt)
5168 {
5169 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5170 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5171 }
5172 else
5173 {
5174 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5175 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5176 }
5177
5178 if (ABI_64_P (info->output_bfd))
5179 {
5180 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5181 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5182 }
5183 else
5184 {
5185 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5186 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5187 }
5188
5189 if (ABI_64_P (info->output_bfd))
5190 {
5191 init_table.r_info = elf64_r_info;
5192 init_table.r_sym = elf64_r_sym;
5193 }
5194 else
5195 {
5196 init_table.r_info = elf32_r_info;
5197 init_table.r_sym = elf32_r_sym;
5198 }
5199
5200 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5201 }
5202
5203 static const struct bfd_elf_special_section
5204 elf_x86_64_special_sections[]=
5205 {
5206 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5207 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5208 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5209 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5210 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5211 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5212 { NULL, 0, 0, 0, 0 }
5213 };
5214
5215 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5216 #define TARGET_LITTLE_NAME "elf64-x86-64"
5217 #define ELF_ARCH bfd_arch_i386
5218 #define ELF_TARGET_ID X86_64_ELF_DATA
5219 #define ELF_MACHINE_CODE EM_X86_64
5220 #if DEFAULT_LD_Z_SEPARATE_CODE
5221 # define ELF_MAXPAGESIZE 0x1000
5222 #else
5223 # define ELF_MAXPAGESIZE 0x200000
5224 #endif
5225 #define ELF_MINPAGESIZE 0x1000
5226 #define ELF_COMMONPAGESIZE 0x1000
5227
5228 #define elf_backend_can_gc_sections 1
5229 #define elf_backend_can_refcount 1
5230 #define elf_backend_want_got_plt 1
5231 #define elf_backend_plt_readonly 1
5232 #define elf_backend_want_plt_sym 0
5233 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5234 #define elf_backend_rela_normal 1
5235 #define elf_backend_plt_alignment 4
5236 #define elf_backend_extern_protected_data 1
5237 #define elf_backend_caches_rawsize 1
5238 #define elf_backend_dtrel_excludes_plt 1
5239 #define elf_backend_want_dynrelro 1
5240
5241 #define elf_info_to_howto elf_x86_64_info_to_howto
5242
5243 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5244 #define bfd_elf64_bfd_reloc_name_lookup \
5245 elf_x86_64_reloc_name_lookup
5246
5247 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5248 #define elf_backend_check_relocs elf_x86_64_check_relocs
5249 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5250 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5251 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5252 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5253 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5254 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5255 #ifdef CORE_HEADER
5256 #define elf_backend_write_core_note elf_x86_64_write_core_note
5257 #endif
5258 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5259 #define elf_backend_relocate_section elf_x86_64_relocate_section
5260 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5261 #define elf_backend_object_p elf64_x86_64_elf_object_p
5262 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5263
5264 #define elf_backend_section_from_shdr \
5265 elf_x86_64_section_from_shdr
5266
5267 #define elf_backend_section_from_bfd_section \
5268 elf_x86_64_elf_section_from_bfd_section
5269 #define elf_backend_add_symbol_hook \
5270 elf_x86_64_add_symbol_hook
5271 #define elf_backend_symbol_processing \
5272 elf_x86_64_symbol_processing
5273 #define elf_backend_common_section_index \
5274 elf_x86_64_common_section_index
5275 #define elf_backend_common_section \
5276 elf_x86_64_common_section
5277 #define elf_backend_common_definition \
5278 elf_x86_64_common_definition
5279 #define elf_backend_merge_symbol \
5280 elf_x86_64_merge_symbol
5281 #define elf_backend_special_sections \
5282 elf_x86_64_special_sections
5283 #define elf_backend_additional_program_headers \
5284 elf_x86_64_additional_program_headers
5285 #define elf_backend_setup_gnu_properties \
5286 elf_x86_64_link_setup_gnu_properties
5287 #define elf_backend_hide_symbol \
5288 _bfd_x86_elf_hide_symbol
5289
5290 #undef elf64_bed
5291 #define elf64_bed elf64_x86_64_bed
5292
5293 #include "elf64-target.h"
5294
5295 /* CloudABI support. */
5296
5297 #undef TARGET_LITTLE_SYM
5298 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5299 #undef TARGET_LITTLE_NAME
5300 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5301
5302 #undef ELF_OSABI
5303 #define ELF_OSABI ELFOSABI_CLOUDABI
5304
5305 #undef elf64_bed
5306 #define elf64_bed elf64_x86_64_cloudabi_bed
5307
5308 #include "elf64-target.h"
5309
5310 /* FreeBSD support. */
5311
5312 #undef TARGET_LITTLE_SYM
5313 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5314 #undef TARGET_LITTLE_NAME
5315 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5316
5317 #undef ELF_OSABI
5318 #define ELF_OSABI ELFOSABI_FREEBSD
5319
5320 #undef elf64_bed
5321 #define elf64_bed elf64_x86_64_fbsd_bed
5322
5323 #include "elf64-target.h"
5324
5325 /* Solaris 2 support. */
5326
5327 #undef TARGET_LITTLE_SYM
5328 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5329 #undef TARGET_LITTLE_NAME
5330 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5331
5332 #undef ELF_TARGET_OS
5333 #define ELF_TARGET_OS is_solaris
5334
5335 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5336 objects won't be recognized. */
5337 #undef ELF_OSABI
5338
5339 #undef elf64_bed
5340 #define elf64_bed elf64_x86_64_sol2_bed
5341
5342 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5343 boundary. */
5344 #undef elf_backend_static_tls_alignment
5345 #define elf_backend_static_tls_alignment 16
5346
5347 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5348
5349 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5350 File, p.63. */
5351 #undef elf_backend_want_plt_sym
5352 #define elf_backend_want_plt_sym 1
5353
5354 #undef elf_backend_strtab_flags
5355 #define elf_backend_strtab_flags SHF_STRINGS
5356
5357 static bool
5358 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5359 bfd *obfd ATTRIBUTE_UNUSED,
5360 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5361 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5362 {
5363 /* PR 19938: FIXME: Need to add code for setting the sh_info
5364 and sh_link fields of Solaris specific section types. */
5365 return false;
5366 }
5367
5368 #undef elf_backend_copy_special_section_fields
5369 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5370
5371 #include "elf64-target.h"
5372
5373 /* Restore defaults. */
5374 #undef ELF_OSABI
5375 #undef elf_backend_static_tls_alignment
5376 #undef elf_backend_want_plt_sym
5377 #define elf_backend_want_plt_sym 0
5378 #undef elf_backend_strtab_flags
5379 #undef elf_backend_copy_special_section_fields
5380
5381 /* Intel L1OM support. */
5382
5383 static bool
5384 elf64_l1om_elf_object_p (bfd *abfd)
5385 {
5386 /* Set the right machine number for an L1OM elf64 file. */
5387 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5388 return true;
5389 }
5390
5391 #undef TARGET_LITTLE_SYM
5392 #define TARGET_LITTLE_SYM l1om_elf64_vec
5393 #undef TARGET_LITTLE_NAME
5394 #define TARGET_LITTLE_NAME "elf64-l1om"
5395 #undef ELF_ARCH
5396 #define ELF_ARCH bfd_arch_l1om
5397
5398 #undef ELF_MACHINE_CODE
5399 #define ELF_MACHINE_CODE EM_L1OM
5400
5401 #undef ELF_OSABI
5402
5403 #undef elf64_bed
5404 #define elf64_bed elf64_l1om_bed
5405
5406 #undef elf_backend_object_p
5407 #define elf_backend_object_p elf64_l1om_elf_object_p
5408
5409 /* Restore defaults. */
5410 #undef ELF_MAXPAGESIZE
5411 #undef ELF_MINPAGESIZE
5412 #undef ELF_COMMONPAGESIZE
5413 #if DEFAULT_LD_Z_SEPARATE_CODE
5414 # define ELF_MAXPAGESIZE 0x1000
5415 #else
5416 # define ELF_MAXPAGESIZE 0x200000
5417 #endif
5418 #define ELF_MINPAGESIZE 0x1000
5419 #define ELF_COMMONPAGESIZE 0x1000
5420 #undef elf_backend_plt_alignment
5421 #define elf_backend_plt_alignment 4
5422 #undef ELF_TARGET_OS
5423
5424 #include "elf64-target.h"
5425
5426 /* FreeBSD L1OM support. */
5427
5428 #undef TARGET_LITTLE_SYM
5429 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5430 #undef TARGET_LITTLE_NAME
5431 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5432
5433 #undef ELF_OSABI
5434 #define ELF_OSABI ELFOSABI_FREEBSD
5435
5436 #undef elf64_bed
5437 #define elf64_bed elf64_l1om_fbsd_bed
5438
5439 #include "elf64-target.h"
5440
5441 /* Intel K1OM support. */
5442
5443 static bool
5444 elf64_k1om_elf_object_p (bfd *abfd)
5445 {
5446 /* Set the right machine number for a K1OM elf64 file. */
5447 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5448 return true;
5449 }
5450
5451 #undef TARGET_LITTLE_SYM
5452 #define TARGET_LITTLE_SYM k1om_elf64_vec
5453 #undef TARGET_LITTLE_NAME
5454 #define TARGET_LITTLE_NAME "elf64-k1om"
5455 #undef ELF_ARCH
5456 #define ELF_ARCH bfd_arch_k1om
5457
5458 #undef ELF_MACHINE_CODE
5459 #define ELF_MACHINE_CODE EM_K1OM
5460
5461 #undef ELF_OSABI
5462
5463 #undef elf64_bed
5464 #define elf64_bed elf64_k1om_bed
5465
5466 #undef elf_backend_object_p
5467 #define elf_backend_object_p elf64_k1om_elf_object_p
5468
5469 #include "elf64-target.h"
5470
5471 /* FreeBSD K1OM support. */
5472
5473 #undef TARGET_LITTLE_SYM
5474 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5475 #undef TARGET_LITTLE_NAME
5476 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5477
5478 #undef ELF_OSABI
5479 #define ELF_OSABI ELFOSABI_FREEBSD
5480
5481 #undef elf64_bed
5482 #define elf64_bed elf64_k1om_fbsd_bed
5483
5484 #include "elf64-target.h"
5485
5486 /* 32bit x86-64 support. */
5487
5488 #undef TARGET_LITTLE_SYM
5489 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5490 #undef TARGET_LITTLE_NAME
5491 #define TARGET_LITTLE_NAME "elf32-x86-64"
5492 #undef elf32_bed
5493 #define elf32_bed elf32_x86_64_bed
5494
5495 #undef ELF_ARCH
5496 #define ELF_ARCH bfd_arch_i386
5497
5498 #undef ELF_MACHINE_CODE
5499 #define ELF_MACHINE_CODE EM_X86_64
5500
5501 #undef ELF_OSABI
5502
5503 #define bfd_elf32_bfd_reloc_type_lookup \
5504 elf_x86_64_reloc_type_lookup
5505 #define bfd_elf32_bfd_reloc_name_lookup \
5506 elf_x86_64_reloc_name_lookup
5507 #define bfd_elf32_get_synthetic_symtab \
5508 elf_x86_64_get_synthetic_symtab
5509
5510 #undef elf_backend_object_p
5511 #define elf_backend_object_p \
5512 elf32_x86_64_elf_object_p
5513
5514 #undef elf_backend_bfd_from_remote_memory
5515 #define elf_backend_bfd_from_remote_memory \
5516 _bfd_elf32_bfd_from_remote_memory
5517
5518 #undef elf_backend_size_info
5519 #define elf_backend_size_info \
5520 _bfd_elf32_size_info
5521
5522 #include "elf32-target.h"