1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2023 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25 #include "sframe.h"
26
27 #include "opcode/i386.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
38    identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
39    the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
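/* Illustrative sketch only (it mirrors elf_x86_64_info_to_howto further
   down): a consumer of the table below typically does

     unsigned int r_type = ELF32_R_TYPE (rel->r_info);
     reloc_howto_type *howto = elf_x86_64_rtype_to_howto (abfd, r_type);

   and the 32-bit accessor is sufficient for the reason given above.  */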
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 8, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 8, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 8, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 2, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 2, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 1, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 1, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 8, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 4, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 4, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 8, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 4, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 8, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 8, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 8, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 8, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 8, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 4, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 8, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 8, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 8, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173
174 /* We have a gap in the reloc numbers here.
175 R_X86_64_standard counts the number up to this point, and
176 R_X86_64_vt_offset is the value to subtract from a reloc type of
177 R_X86_64_GNU_VT* to form an index into this table. */
178 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
179 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
180
181 /* GNU extension to record C++ vtable hierarchy. */
182 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 8, 0, false, 0, complain_overflow_dont,
183 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
184
185 /* GNU extension to record C++ vtable member usage. */
186 HOWTO (R_X86_64_GNU_VTENTRY, 0, 8, 0, false, 0, complain_overflow_dont,
187 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
188 false),
189
190 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
191 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_bitfield,
192 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
193 false)
194 };
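/* Worked example (informational, not used by the code): the R_X86_64_PC32
   entry above describes a 4-byte, pc-relative field with signed overflow
   checking, pcrel_offset set and dst_mask 0xffffffff, so the final link
   stores the low 32 bits of S + A - P at the relocated location, where S
   is the symbol value, A the addend and P the address of the field.  */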
195
196 /* Map BFD relocs to the x86_64 elf relocs. */
197 struct elf_reloc_map
198 {
199 bfd_reloc_code_real_type bfd_reloc_val;
200 unsigned char elf_reloc_val;
201 };
202
203 static const struct elf_reloc_map x86_64_reloc_map[] =
204 {
205 { BFD_RELOC_NONE, R_X86_64_NONE, },
206 { BFD_RELOC_64, R_X86_64_64, },
207 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
208 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
209 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
210 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
211 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
212 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
213 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
214 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
215 { BFD_RELOC_32, R_X86_64_32, },
216 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
217 { BFD_RELOC_16, R_X86_64_16, },
218 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
219 { BFD_RELOC_8, R_X86_64_8, },
220 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
221 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
222 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
223 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
224 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
225 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
226 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
227 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
228 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
229 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
230 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
231 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
232 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
233 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
234 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
235 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
236 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
237 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
238 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
239 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
240 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
241 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
242 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
243 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
244 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
245 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
246 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
247 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
248 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
249 };
250
251 static reloc_howto_type *
252 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
253 {
254 unsigned i;
255
256 if (r_type == (unsigned int) R_X86_64_32)
257 {
258 if (ABI_64_P (abfd))
259 i = r_type;
260 else
261 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
262 }
263 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
264 || r_type >= (unsigned int) R_X86_64_max)
265 {
266 if (r_type >= (unsigned int) R_X86_64_standard)
267 {
268 /* xgettext:c-format */
269 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
270 abfd, r_type);
271 bfd_set_error (bfd_error_bad_value);
272 return NULL;
273 }
274 i = r_type;
275 }
276 else
277 i = r_type - (unsigned int) R_X86_64_vt_offset;
278 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
279 return &x86_64_elf_howto_table[i];
280 }
281
282 /* Given a BFD reloc type, return a HOWTO structure. */
283 static reloc_howto_type *
284 elf_x86_64_reloc_type_lookup (bfd *abfd,
285 bfd_reloc_code_real_type code)
286 {
287 unsigned int i;
288
289 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
290 i++)
291 {
292 if (x86_64_reloc_map[i].bfd_reloc_val == code)
293 return elf_x86_64_rtype_to_howto (abfd,
294 x86_64_reloc_map[i].elf_reloc_val);
295 }
296 return NULL;
297 }
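/* Example of a hypothetical caller: generic BFD code reaches the lookup
   above through the target vector, e.g.

     reloc_howto_type *howto
       = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);

   which for an x86-64 BFD returns the R_X86_64_PC32 howto.  */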
298
299 static reloc_howto_type *
300 elf_x86_64_reloc_name_lookup (bfd *abfd,
301 const char *r_name)
302 {
303 unsigned int i;
304
305 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
306 {
307 /* Get x32 R_X86_64_32. */
308 reloc_howto_type *reloc
309 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
310 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
311 return reloc;
312 }
313
314 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
315 if (x86_64_elf_howto_table[i].name != NULL
316 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
317 return &x86_64_elf_howto_table[i];
318
319 return NULL;
320 }
321
322 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
323
324 static bool
325 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
326 Elf_Internal_Rela *dst)
327 {
328 unsigned r_type;
329
330 r_type = ELF32_R_TYPE (dst->r_info);
331 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
332 if (cache_ptr->howto == NULL)
333 return false;
334 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
335 return true;
336 }
337 \f
338 /* Support for core dump NOTE sections. */
339 static bool
340 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
341 {
342 int offset;
343 size_t size;
344
345 switch (note->descsz)
346 {
347 default:
348 return false;
349
350       case 296:		/* sizeof(struct elf_prstatus) on Linux/x32 */
351 /* pr_cursig */
352 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
353
354 /* pr_pid */
355 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
356
357 /* pr_reg */
358 offset = 72;
359 size = 216;
360
361 break;
362
363       case 336:		/* sizeof(struct elf_prstatus) on Linux/x86_64 */
364 /* pr_cursig */
365 elf_tdata (abfd)->core->signal
366 = bfd_get_16 (abfd, note->descdata + 12);
367
368 /* pr_pid */
369 elf_tdata (abfd)->core->lwpid
370 = bfd_get_32 (abfd, note->descdata + 32);
371
372 /* pr_reg */
373 offset = 112;
374 size = 216;
375
376 break;
377 }
378
379 /* Make a ".reg/999" section. */
380 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
381 size, note->descpos + offset);
382 }
383
384 static bool
385 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
386 {
387 switch (note->descsz)
388 {
389 default:
390 return false;
391
392 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
393 elf_tdata (abfd)->core->pid
394 = bfd_get_32 (abfd, note->descdata + 12);
395 elf_tdata (abfd)->core->program
396 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
397 elf_tdata (abfd)->core->command
398 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
399 break;
400
401 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
402 elf_tdata (abfd)->core->pid
403 = bfd_get_32 (abfd, note->descdata + 24);
404 elf_tdata (abfd)->core->program
405 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
406 elf_tdata (abfd)->core->command
407 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
408 }
409
410 /* Note that for some reason, a spurious space is tacked
411      onto the end of the args in some implementations (at least
412      one, anyway), so strip it off if it exists.  */
413
414 {
415 char *command = elf_tdata (abfd)->core->command;
416 int n = strlen (command);
417
418 if (0 < n && command[n - 1] == ' ')
419 command[n - 1] = '\0';
420 }
421
422 return true;
423 }
424
425 #ifdef CORE_HEADER
426 # if GCC_VERSION >= 8000
427 # pragma GCC diagnostic push
428 # pragma GCC diagnostic ignored "-Wstringop-truncation"
429 # endif
430 static char *
431 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
432 int note_type, ...)
433 {
434 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
435 va_list ap;
436 const char *fname, *psargs;
437 long pid;
438 int cursig;
439 const void *gregs;
440
441 switch (note_type)
442 {
443 default:
444 return NULL;
445
446 case NT_PRPSINFO:
447 va_start (ap, note_type);
448 fname = va_arg (ap, const char *);
449 psargs = va_arg (ap, const char *);
450 va_end (ap);
451
452 if (bed->s->elfclass == ELFCLASS32)
453 {
454 prpsinfo32_t data;
455 memset (&data, 0, sizeof (data));
456 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
457 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
458 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
459 &data, sizeof (data));
460 }
461 else
462 {
463 prpsinfo64_t data;
464 memset (&data, 0, sizeof (data));
465 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
466 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
467 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
468 &data, sizeof (data));
469 }
470 /* NOTREACHED */
471
472 case NT_PRSTATUS:
473 va_start (ap, note_type);
474 pid = va_arg (ap, long);
475 cursig = va_arg (ap, int);
476 gregs = va_arg (ap, const void *);
477 va_end (ap);
478
479 if (bed->s->elfclass == ELFCLASS32)
480 {
481 if (bed->elf_machine_code == EM_X86_64)
482 {
483 prstatusx32_t prstat;
484 memset (&prstat, 0, sizeof (prstat));
485 prstat.pr_pid = pid;
486 prstat.pr_cursig = cursig;
487 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
488 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
489 &prstat, sizeof (prstat));
490 }
491 else
492 {
493 prstatus32_t prstat;
494 memset (&prstat, 0, sizeof (prstat));
495 prstat.pr_pid = pid;
496 prstat.pr_cursig = cursig;
497 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
498 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
499 &prstat, sizeof (prstat));
500 }
501 }
502 else
503 {
504 prstatus64_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 /* NOTREACHED */
514 }
515 # if GCC_VERSION >= 8000
516 # pragma GCC diagnostic pop
517 # endif
518 #endif
519 \f
520 /* Functions for the x86-64 ELF linker. */
521
522 /* The size in bytes of an entry in the global offset table. */
523
524 #define GOT_ENTRY_SIZE 8
525
526 /* The size in bytes of an entry in the lazy procedure linkage table. */
527
528 #define LAZY_PLT_ENTRY_SIZE 16
529
530 /* The size in bytes of an entry in the non-lazy procedure linkage
531 table. */
532
533 #define NON_LAZY_PLT_ENTRY_SIZE 8
534
535 /* The first entry in a lazy procedure linkage table looks like this.
536 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
537 works. */
538
539 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
540 {
541 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
542 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
543 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
544 };
545
546 /* Subsequent entries in a lazy procedure linkage table look like this. */
547
548 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
551 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
552 0x68, /* pushq immediate */
553 0, 0, 0, 0, /* replaced with index into relocation table. */
554 0xe9, /* jmp relative */
555 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
556 };
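/* Sketch of the lazy-binding flow implied by the two templates above
   (the zero placeholders are filled in at final link time): the first
   call enters an entry at its jmpq, whose GOT slot initially points at
   the following pushq; the pushq pushes the relocation index and the
   trailing jmp reaches PLT0, which pushes GOT+8 and jumps through
   GOT+16 to the dynamic linker's resolver.  Subsequent calls go
   straight to the resolved target via the patched GOT slot.  */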
557
558 /* The first entry in a lazy procedure linkage table with BND prefix
559    looks like this.  */
560
561 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
562 {
563 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
564 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
565 0x0f, 0x1f, 0 /* nopl (%rax) */
566 };
567
568 /* Subsequent entries for branches with BND prefix in a lazy procedure
569 linkage table look like this. */
570
571 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
572 {
573 0x68, 0, 0, 0, 0, /* pushq immediate */
574 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
575 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
576 };
577
578 /* The first entry in the IBT-enabled lazy procedure linkage table is
579    the same as the lazy PLT with BND prefix so that bound registers are
580    preserved when control is passed to the dynamic linker.  Subsequent
581    entries for an IBT-enabled lazy procedure linkage table look like
582 this. */
583
584 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
585 {
586 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
589 0x90 /* nop */
590 };
591
592 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
593 is the same as the normal lazy PLT. Subsequent entries for an
594 x32 IBT-enabled lazy procedure linkage table look like this. */
595
596 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
597 {
598 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
599 0x68, 0, 0, 0, 0, /* pushq immediate */
600 0xe9, 0, 0, 0, 0, /* jmpq relative */
601 0x66, 0x90 /* xchg %ax,%ax */
602 };
603
604 /* Entries in the non-lazy procedure linkage table look like this.  */
605
606 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
607 {
608 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
613 /* Entries for branches with BND prefix in the non-lazy procedure
614 linkage table look like this. */
615
616 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 {
618 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
619 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
620 0x90 /* nop */
621 };
622
623 /* Entries for branches with IBT enabled in the non-lazy procedure
624 linkage table look like this. They have the same size as the lazy
625 PLT entry. */
626
627 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
628 {
629 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
630 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
631 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
633 };
634
635 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
636 linkage table look like this. They have the same size as the lazy
637 PLT entry. */
638
639 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
640 {
641 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
642 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
645 };
646
647 /* The TLSDESC entry in a lazy procedure linkage table. */
648 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
652 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
653 };
654
655 /* .eh_frame covering the lazy .plt section. */
656
657 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
658 {
659 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
660 0, 0, 0, 0, /* CIE ID */
661 1, /* CIE version */
662 'z', 'R', 0, /* Augmentation string */
663 1, /* Code alignment factor */
664 0x78, /* Data alignment factor */
665 16, /* Return address column */
666 1, /* Augmentation size */
667 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
668 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
669 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
670 DW_CFA_nop, DW_CFA_nop,
671
672 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
673 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
674 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
675 0, 0, 0, 0, /* .plt size goes here */
676 0, /* Augmentation size */
677 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
678 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
679 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
680 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
681 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
682 11, /* Block length */
683 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
684 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
685 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
686 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
687 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
688 };
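/* Informal reading of the DW_CFA_def_cfa_expression block above: the CFA
   is %rsp + 8 + 8 * ((%rip & 15) >= 11), i.e. one extra 8-byte slot is
   assumed once execution within a 16-byte lazy PLT entry has passed the
   pushq (the jmp back to PLT0 starts at offset 11).  The BND, IBT and
   x32 IBT variants below differ only in that threshold literal.  */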
689
690 /* .eh_frame covering the lazy BND .plt section. */
691
692 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
693 {
694 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
695 0, 0, 0, 0, /* CIE ID */
696 1, /* CIE version */
697 'z', 'R', 0, /* Augmentation string */
698 1, /* Code alignment factor */
699 0x78, /* Data alignment factor */
700 16, /* Return address column */
701 1, /* Augmentation size */
702 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
703 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
704 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
705 DW_CFA_nop, DW_CFA_nop,
706
707 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
708 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
709 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
710 0, 0, 0, 0, /* .plt size goes here */
711 0, /* Augmentation size */
712 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
713 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
714 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
715 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
716 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
717 11, /* Block length */
718 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
719 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
720 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
721 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
722 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
723 };
724
725 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
726
727 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
728 {
729 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
730 0, 0, 0, 0, /* CIE ID */
731 1, /* CIE version */
732 'z', 'R', 0, /* Augmentation string */
733 1, /* Code alignment factor */
734 0x78, /* Data alignment factor */
735 16, /* Return address column */
736 1, /* Augmentation size */
737 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
738 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
739 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
740 DW_CFA_nop, DW_CFA_nop,
741
742 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
743 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
744 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
745 0, 0, 0, 0, /* .plt size goes here */
746 0, /* Augmentation size */
747 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
748 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
749 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
750 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
751 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
752 11, /* Block length */
753 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
754 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
755 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
756 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
757 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
758 };
759
760 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
761
762 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
763 {
764 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
765 0, 0, 0, 0, /* CIE ID */
766 1, /* CIE version */
767 'z', 'R', 0, /* Augmentation string */
768 1, /* Code alignment factor */
769 0x78, /* Data alignment factor */
770 16, /* Return address column */
771 1, /* Augmentation size */
772 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
773 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
774 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
775 DW_CFA_nop, DW_CFA_nop,
776
777 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
778 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
779 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
780 0, 0, 0, 0, /* .plt size goes here */
781 0, /* Augmentation size */
782 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
783 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
784 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
785 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
786 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
787 11, /* Block length */
788 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
789 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
790 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
791 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
792 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
793 };
794
795 /* .eh_frame covering the non-lazy .plt section. */
796
797 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
798 {
799 #define PLT_GOT_FDE_LENGTH 20
800 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
801 0, 0, 0, 0, /* CIE ID */
802 1, /* CIE version */
803 'z', 'R', 0, /* Augmentation string */
804 1, /* Code alignment factor */
805 0x78, /* Data alignment factor */
806 16, /* Return address column */
807 1, /* Augmentation size */
808 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
809 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
810 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
811 DW_CFA_nop, DW_CFA_nop,
812
813 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
814 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
815 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
816 0, 0, 0, 0, /* non-lazy .plt size goes here */
817 0, /* Augmentation size */
818 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
820 };
821
822 static const sframe_frame_row_entry elf_x86_64_sframe_null_fre =
823 {
824 0,
825 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
826 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
827 };
828
829 /* .sframe FRE covering the .plt section entry. */
830 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 =
831 {
832 0, /* SFrame FRE start address. */
833 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
834 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
835 };
836
837 /* .sframe FRE covering the .plt section entry. */
838 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 =
839 {
840 6, /* SFrame FRE start address. */
841 {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
842 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
843 };
844
845 /* .sframe FRE covering the .plt section entry. */
846 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 =
847 {
848 0, /* SFrame FRE start address. */
849 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
850 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
851 };
852
853 /* .sframe FRE covering the .plt section entry. */
854 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 =
855 {
856 11, /* SFrame FRE start address. */
857 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
858 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
859 };
860
861 /* .sframe FRE covering the second .plt section entry. */
862 static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 =
863 {
864 0, /* SFrame FRE start address. */
865 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
866 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
867 };
868
869 /* SFrame helper object for non-lazy PLT. Also used for IBT enabled PLT. */
870 static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt =
871 {
872 LAZY_PLT_ENTRY_SIZE,
873 2, /* Number of FREs for PLT0. */
874 /* Array of SFrame FREs for plt0. */
875 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
876 LAZY_PLT_ENTRY_SIZE,
877 1, /* Number of FREs for PLTn. */
878 /* Array of SFrame FREs for plt. */
879 { &elf_x86_64_sframe_sec_pltn_fre1, &elf_x86_64_sframe_null_fre },
880 0,
881 0, /* There is no second PLT necessary. */
882 { &elf_x86_64_sframe_null_fre }
883 };
884
885 /* SFrame helper object for lazy PLT. Also used for IBT enabled PLT. */
886 static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt =
887 {
888 LAZY_PLT_ENTRY_SIZE,
889 2, /* Number of FREs for PLT0. */
890 /* Array of SFrame FREs for plt0. */
891 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
892 LAZY_PLT_ENTRY_SIZE,
893 2, /* Number of FREs for PLTn. */
894 /* Array of SFrame FREs for plt. */
895 { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 },
896 NON_LAZY_PLT_ENTRY_SIZE,
897 1, /* Number of FREs for PLTn for second PLT. */
898 /* FREs for second plt (stack trace info for .plt.got is
899 identical). Used when IBT or non-lazy PLT is in effect. */
900 { &elf_x86_64_sframe_sec_pltn_fre1 }
901 };
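/* Reading guide (informational): each FRE above pairs an offset within a
   PLT entry with a CFA rule.  For instance elf_x86_64_sframe_pltn_fre1
   and _fre2 state that inside a lazy PLT entry the CFA is SP + 8 until
   the pushq has executed at offset 11, and SP + 16 afterwards, matching
   the .eh_frame expressions earlier in this file.  */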
902
903 /* These are the standard parameters. */
904 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
905 {
906 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
907 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
908 elf_x86_64_lazy_plt_entry, /* plt_entry */
909 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
910 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
911 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
912 6, /* plt_tlsdesc_got1_offset */
913 12, /* plt_tlsdesc_got2_offset */
914 10, /* plt_tlsdesc_got1_insn_end */
915 16, /* plt_tlsdesc_got2_insn_end */
916 2, /* plt0_got1_offset */
917 8, /* plt0_got2_offset */
918 12, /* plt0_got2_insn_end */
919 2, /* plt_got_offset */
920 7, /* plt_reloc_offset */
921 12, /* plt_plt_offset */
922 6, /* plt_got_insn_size */
923 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
924 6, /* plt_lazy_offset */
925 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
926 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
927 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
928 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
929 };
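/* Reading guide (informational): the small offsets above index into the
   byte templates.  For example plt0_got1_offset 2 and plt0_got2_offset 8
   locate the displacement fields of "pushq GOT+8(%rip)" and
   "jmpq *GOT+16(%rip)" in elf_x86_64_lazy_plt0_entry, and plt_got_offset
   2 locates the displacement of "jmpq *name@GOTPC(%rip)" in
   elf_x86_64_lazy_plt_entry.  */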
930
931 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
932 {
933 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
934 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
935 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
936 2, /* plt_got_offset */
937 6, /* plt_got_insn_size */
938 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
943 {
944 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 1+8, /* plt0_got2_offset */
956 1+12, /* plt0_got2_insn_end */
957 1+2, /* plt_got_offset */
958 1, /* plt_reloc_offset */
959 7, /* plt_plt_offset */
960 1+6, /* plt_got_insn_size */
961 11, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
964 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
965 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
966 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
970 {
971 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
973 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 1+2, /* plt_got_offset */
975 1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
981 {
982 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
983 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
984 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
985 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
986 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
987 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
988 6, /* plt_tlsdesc_got1_offset */
989 12, /* plt_tlsdesc_got2_offset */
990 10, /* plt_tlsdesc_got1_insn_end */
991 16, /* plt_tlsdesc_got2_insn_end */
992 2, /* plt0_got1_offset */
993 1+8, /* plt0_got2_offset */
994 1+12, /* plt0_got2_insn_end */
995 4+1+2, /* plt_got_offset */
996 4+1, /* plt_reloc_offset */
997 4+1+6, /* plt_plt_offset */
998 4+1+6, /* plt_got_insn_size */
999 4+1+5+5, /* plt_plt_insn_end */
1000 0, /* plt_lazy_offset */
1001 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
1002 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
1003 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1004 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1005 };
1006
1007 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
1008 {
1009 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
1010 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
1011 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
1012 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1013 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
1014 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
1015 6, /* plt_tlsdesc_got1_offset */
1016 12, /* plt_tlsdesc_got2_offset */
1017 10, /* plt_tlsdesc_got1_insn_end */
1018 16, /* plt_tlsdesc_got2_insn_end */
1019 2, /* plt0_got1_offset */
1020 8, /* plt0_got2_offset */
1021 12, /* plt0_got2_insn_end */
1022 4+2, /* plt_got_offset */
1023 4+1, /* plt_reloc_offset */
1024 4+6, /* plt_plt_offset */
1025 4+6, /* plt_got_insn_size */
1026 4+5+5, /* plt_plt_insn_end */
1027 0, /* plt_lazy_offset */
1028 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
1029 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
1030 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1031 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1032 };
1033
1034 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
1035 {
1036 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
1037 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1038 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1039 4+1+2, /* plt_got_offset */
1040 4+1+6, /* plt_got_insn_size */
1041 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1042 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1043 };
1044
1045 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
1046 {
1047 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
1048 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1049 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1050 4+2, /* plt_got_offset */
1051 4+6, /* plt_got_insn_size */
1052 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1053 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1054 };
1055
1056 static bool
1057 elf64_x86_64_elf_object_p (bfd *abfd)
1058 {
1059 /* Set the right machine number for an x86-64 elf64 file. */
1060 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1061 return true;
1062 }
1063
1064 static bool
1065 elf32_x86_64_elf_object_p (bfd *abfd)
1066 {
1067 /* Set the right machine number for an x86-64 elf32 file. */
1068 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1069 return true;
1070 }
1071
1072 /* Return TRUE if the TLS access code sequence supports transition
1073 from R_TYPE. */
1074
1075 static bool
1076 elf_x86_64_check_tls_transition (bfd *abfd,
1077 struct bfd_link_info *info,
1078 asection *sec,
1079 bfd_byte *contents,
1080 Elf_Internal_Shdr *symtab_hdr,
1081 struct elf_link_hash_entry **sym_hashes,
1082 unsigned int r_type,
1083 const Elf_Internal_Rela *rel,
1084 const Elf_Internal_Rela *relend)
1085 {
1086 unsigned int val;
1087 unsigned long r_symndx;
1088 bool largepic = false;
1089 struct elf_link_hash_entry *h;
1090 bfd_vma offset;
1091 struct elf_x86_link_hash_table *htab;
1092 bfd_byte *call;
1093 bool indirect_call;
1094
1095 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1096 offset = rel->r_offset;
1097 switch (r_type)
1098 {
1099 case R_X86_64_TLSGD:
1100 case R_X86_64_TLSLD:
1101 if ((rel + 1) >= relend)
1102 return false;
1103
1104 if (r_type == R_X86_64_TLSGD)
1105 {
1106 /* Check transition from GD access model. For 64bit, only
1107 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1108 .word 0x6666; rex64; call __tls_get_addr@PLT
1109 or
1110 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1111 .byte 0x66; rex64
1112 call *__tls_get_addr@GOTPCREL(%rip)
1113 which may be converted to
1114 addr32 call __tls_get_addr
1115 	     can transition to a different access model.  For 32bit, only
1116 leaq foo@tlsgd(%rip), %rdi
1117 .word 0x6666; rex64; call __tls_get_addr@PLT
1118 or
1119 leaq foo@tlsgd(%rip), %rdi
1120 .byte 0x66; rex64
1121 call *__tls_get_addr@GOTPCREL(%rip)
1122 which may be converted to
1123 addr32 call __tls_get_addr
1124 	     can transition to a different access model.  For largepic,
1125 we also support:
1126 leaq foo@tlsgd(%rip), %rdi
1127 movabsq $__tls_get_addr@pltoff, %rax
1128 	     addq %r15, %rax
1129 call *%rax
1130 or
1131 leaq foo@tlsgd(%rip), %rdi
1132 movabsq $__tls_get_addr@pltoff, %rax
1133 	     addq %rbx, %rax
1134 call *%rax */
1135
1136 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1137
1138 if ((offset + 12) > sec->size)
1139 return false;
1140
1141 call = contents + offset + 4;
1142 if (call[0] != 0x66
1143 || !((call[1] == 0x48
1144 && call[2] == 0xff
1145 && call[3] == 0x15)
1146 || (call[1] == 0x48
1147 && call[2] == 0x67
1148 && call[3] == 0xe8)
1149 || (call[1] == 0x66
1150 && call[2] == 0x48
1151 && call[3] == 0xe8)))
1152 {
1153 if (!ABI_64_P (abfd)
1154 || (offset + 19) > sec->size
1155 || offset < 3
1156 || memcmp (call - 7, leaq + 1, 3) != 0
1157 || memcmp (call, "\x48\xb8", 2) != 0
1158 || call[11] != 0x01
1159 || call[13] != 0xff
1160 || call[14] != 0xd0
1161 || !((call[10] == 0x48 && call[12] == 0xd8)
1162 || (call[10] == 0x4c && call[12] == 0xf8)))
1163 return false;
1164 largepic = true;
1165 }
1166 else if (ABI_64_P (abfd))
1167 {
1168 if (offset < 4
1169 || memcmp (contents + offset - 4, leaq, 4) != 0)
1170 return false;
1171 }
1172 else
1173 {
1174 if (offset < 3
1175 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1176 return false;
1177 }
1178 indirect_call = call[2] == 0xff;
1179 }
1180 else
1181 {
1182 /* Check transition from LD access model. Only
1183 leaq foo@tlsld(%rip), %rdi;
1184 call __tls_get_addr@PLT
1185 or
1186 leaq foo@tlsld(%rip), %rdi;
1187 call *__tls_get_addr@GOTPCREL(%rip)
1188 which may be converted to
1189 addr32 call __tls_get_addr
1190 	     can transition to a different access model.  For largepic
1191 we also support:
1192 leaq foo@tlsld(%rip), %rdi
1193 movabsq $__tls_get_addr@pltoff, %rax
1194 	     addq %r15, %rax
1195 call *%rax
1196 or
1197 leaq foo@tlsld(%rip), %rdi
1198 movabsq $__tls_get_addr@pltoff, %rax
1199 	     addq %rbx, %rax
1200 call *%rax */
1201
1202 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1203
1204 if (offset < 3 || (offset + 9) > sec->size)
1205 return false;
1206
1207 if (memcmp (contents + offset - 3, lea, 3) != 0)
1208 return false;
1209
1210 call = contents + offset + 4;
1211 if (!(call[0] == 0xe8
1212 || (call[0] == 0xff && call[1] == 0x15)
1213 || (call[0] == 0x67 && call[1] == 0xe8)))
1214 {
1215 if (!ABI_64_P (abfd)
1216 || (offset + 19) > sec->size
1217 || memcmp (call, "\x48\xb8", 2) != 0
1218 || call[11] != 0x01
1219 || call[13] != 0xff
1220 || call[14] != 0xd0
1221 || !((call[10] == 0x48 && call[12] == 0xd8)
1222 || (call[10] == 0x4c && call[12] == 0xf8)))
1223 return false;
1224 largepic = true;
1225 }
1226 indirect_call = call[0] == 0xff;
1227 }
1228
1229 r_symndx = htab->r_sym (rel[1].r_info);
1230 if (r_symndx < symtab_hdr->sh_info)
1231 return false;
1232
1233 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1234 if (h == NULL
1235 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1236 return false;
1237 else
1238 {
1239 r_type = (ELF32_R_TYPE (rel[1].r_info)
1240 & ~R_X86_64_converted_reloc_bit);
1241 if (largepic)
1242 return r_type == R_X86_64_PLTOFF64;
1243 else if (indirect_call)
1244 return r_type == R_X86_64_GOTPCRELX;
1245 else
1246 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1247 }
1248
1249 case R_X86_64_GOTTPOFF:
1250 /* Check transition from IE access model:
1251 mov foo@gottpoff(%rip), %reg
1252 add foo@gottpoff(%rip), %reg
1253 */
1254
1255 /* Check REX prefix first. */
1256 if (offset >= 3 && (offset + 4) <= sec->size)
1257 {
1258 val = bfd_get_8 (abfd, contents + offset - 3);
1259 if (val != 0x48 && val != 0x4c)
1260 {
1261 /* X32 may have 0x44 REX prefix or no REX prefix. */
1262 if (ABI_64_P (abfd))
1263 return false;
1264 }
1265 }
1266 else
1267 {
1268 /* X32 may not have any REX prefix. */
1269 if (ABI_64_P (abfd))
1270 return false;
1271 if (offset < 2 || (offset + 3) > sec->size)
1272 return false;
1273 }
1274
1275 val = bfd_get_8 (abfd, contents + offset - 2);
1276 if (val != 0x8b && val != 0x03)
1277 return false;
1278
1279 val = bfd_get_8 (abfd, contents + offset - 1);
1280 return (val & 0xc7) == 5;
1281
1282 case R_X86_64_GOTPC32_TLSDESC:
1283 /* Check transition from GDesc access model:
1284 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1285 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1286
1287 Make sure it's a leaq adding rip to a 32-bit offset
1288 into any register, although it's probably almost always
1289 going to be rax. */
1290
1291 if (offset < 3 || (offset + 4) > sec->size)
1292 return false;
1293
1294 val = bfd_get_8 (abfd, contents + offset - 3);
1295 val &= 0xfb;
1296 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1297 return false;
1298
1299 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1300 return false;
1301
1302 val = bfd_get_8 (abfd, contents + offset - 1);
1303 return (val & 0xc7) == 0x05;
1304
1305 case R_X86_64_TLSDESC_CALL:
1306 /* Check transition from GDesc access model:
1307 call *x@tlsdesc(%rax) <--- LP64 mode.
1308 call *x@tlsdesc(%eax) <--- X32 mode.
1309 */
1310 if (offset + 2 <= sec->size)
1311 {
1312 unsigned int prefix;
1313 call = contents + offset;
1314 prefix = 0;
1315 if (!ABI_64_P (abfd))
1316 {
1317 /* Check for call *x@tlsdesc(%eax). */
1318 if (call[0] == 0x67)
1319 {
1320 prefix = 1;
1321 if (offset + 3 > sec->size)
1322 return false;
1323 }
1324 }
1325 /* Make sure that it's a call *x@tlsdesc(%rax). */
1326 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1327 }
1328
1329 return false;
1330
1331 default:
1332 abort ();
1333 }
1334 }
1335
1336 /* Return TRUE if the TLS access transition is OK or no transition
1337 will be performed. Update R_TYPE if there is a transition. */
1338
1339 static bool
1340 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1341 asection *sec, bfd_byte *contents,
1342 Elf_Internal_Shdr *symtab_hdr,
1343 struct elf_link_hash_entry **sym_hashes,
1344 unsigned int *r_type, int tls_type,
1345 const Elf_Internal_Rela *rel,
1346 const Elf_Internal_Rela *relend,
1347 struct elf_link_hash_entry *h,
1348 unsigned long r_symndx,
1349 bool from_relocate_section)
1350 {
1351 unsigned int from_type = *r_type;
1352 unsigned int to_type = from_type;
1353 bool check = true;
1354
1355 /* Skip TLS transition for functions. */
1356 if (h != NULL
1357 && (h->type == STT_FUNC
1358 || h->type == STT_GNU_IFUNC))
1359 return true;
1360
1361 switch (from_type)
1362 {
1363 case R_X86_64_TLSGD:
1364 case R_X86_64_GOTPC32_TLSDESC:
1365 case R_X86_64_TLSDESC_CALL:
1366 case R_X86_64_GOTTPOFF:
1367 if (bfd_link_executable (info))
1368 {
1369 if (h == NULL)
1370 to_type = R_X86_64_TPOFF32;
1371 else
1372 to_type = R_X86_64_GOTTPOFF;
1373 }
1374
1375 /* When we are called from elf_x86_64_relocate_section, there may
1376 be additional transitions based on TLS_TYPE. */
1377 if (from_relocate_section)
1378 {
1379 unsigned int new_to_type = to_type;
1380
1381 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1382 new_to_type = R_X86_64_TPOFF32;
1383
1384 if (to_type == R_X86_64_TLSGD
1385 || to_type == R_X86_64_GOTPC32_TLSDESC
1386 || to_type == R_X86_64_TLSDESC_CALL)
1387 {
1388 if (tls_type == GOT_TLS_IE)
1389 new_to_type = R_X86_64_GOTTPOFF;
1390 }
1391
1392 /* We checked the transition before when we were called from
1393 elf_x86_64_scan_relocs. We only want to check the new
1394 transition which hasn't been checked before. */
1395 check = new_to_type != to_type && from_type == to_type;
1396 to_type = new_to_type;
1397 }
1398
1399 break;
1400
1401 case R_X86_64_TLSLD:
1402 if (bfd_link_executable (info))
1403 to_type = R_X86_64_TPOFF32;
1404 break;
1405
1406 default:
1407 return true;
1408 }
1409
1410 /* Return TRUE if there is no transition. */
1411 if (from_type == to_type)
1412 return true;
1413
1414 /* Check if the transition can be performed. */
1415 if (check
1416 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1417 symtab_hdr, sym_hashes,
1418 from_type, rel, relend))
1419 {
1420 reloc_howto_type *from, *to;
1421 const char *name;
1422
1423 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1424 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1425
1426 if (from == NULL || to == NULL)
1427 return false;
1428
1429 if (h)
1430 name = h->root.root.string;
1431 else
1432 {
1433 struct elf_x86_link_hash_table *htab;
1434
1435 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1436 if (htab == NULL)
1437 name = "*unknown*";
1438 else
1439 {
1440 Elf_Internal_Sym *isym;
1441
1442 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1443 abfd, r_symndx);
1444 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1445 }
1446 }
1447
1448 _bfd_error_handler
1449 /* xgettext:c-format */
1450 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1451 " in section `%pA' failed"),
1452 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1453 bfd_set_error (bfd_error_bad_value);
1454 return false;
1455 }
1456
1457 *r_type = to_type;
1458 return true;
1459 }
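/* Example of the effect of a transition (illustrative; the actual byte
   rewriting is done in elf_x86_64_relocate_section): when linking an
   executable, a GD access to a local symbol such as

     .byte 0x66; leaq foo@tlsgd(%rip), %rdi
     .word 0x6666; rex64; call __tls_get_addr@PLT

   has its R_X86_64_TLSGD relocation retyped to R_X86_64_TPOFF32 here
   and is later rewritten to the LE form

     movq %fs:0, %rax
     leaq foo@tpoff(%rax), %rax  */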
1460
1461 static bool
1462 elf_x86_64_need_pic (struct bfd_link_info *info,
1463 bfd *input_bfd, asection *sec,
1464 struct elf_link_hash_entry *h,
1465 Elf_Internal_Shdr *symtab_hdr,
1466 Elf_Internal_Sym *isym,
1467 reloc_howto_type *howto)
1468 {
1469 const char *v = "";
1470 const char *und = "";
1471 const char *pic = "";
1472 const char *object;
1473
1474 const char *name;
1475 if (h)
1476 {
1477 name = h->root.root.string;
1478 switch (ELF_ST_VISIBILITY (h->other))
1479 {
1480 case STV_HIDDEN:
1481 v = _("hidden symbol ");
1482 break;
1483 case STV_INTERNAL:
1484 v = _("internal symbol ");
1485 break;
1486 case STV_PROTECTED:
1487 v = _("protected symbol ");
1488 break;
1489 default:
1490 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1491 v = _("protected symbol ");
1492 else
1493 v = _("symbol ");
1494 pic = NULL;
1495 break;
1496 }
1497
1498 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1499 und = _("undefined ");
1500 }
1501 else
1502 {
1503 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1504 pic = NULL;
1505 }
1506
1507 if (bfd_link_dll (info))
1508 {
1509 object = _("a shared object");
1510 if (!pic)
1511 pic = _("; recompile with -fPIC");
1512 }
1513 else
1514 {
1515 if (bfd_link_pie (info))
1516 object = _("a PIE object");
1517 else
1518 object = _("a PDE object");
1519 if (!pic)
1520 pic = _("; recompile with -fPIE");
1521 }
1522
1523 /* xgettext:c-format */
1524 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1525 "not be used when making %s%s"),
1526 input_bfd, howto->name, und, v, name,
1527 object, pic);
1528 bfd_set_error (bfd_error_bad_value);
1529 sec->check_relocs_failed = 1;
1530 return false;
1531 }
1532
1533 /* With the local symbol, foo, we convert
1534 mov foo@GOTPCREL(%rip), %reg
1535 to
1536 lea foo(%rip), %reg
1537 and convert
1538 call/jmp *foo@GOTPCREL(%rip)
1539 to
1540 nop call foo/jmp foo nop
1541 When PIC is false, convert
1542 test %reg, foo@GOTPCREL(%rip)
1543 to
1544 test $foo, %reg
1545 and convert
1546 binop foo@GOTPCREL(%rip), %reg
1547 to
1548 binop $foo, %reg
1549 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1550 instructions. */
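/* Byte-level sketch of the mov case above (illustrative only): the REX
   and ModRM bytes are unchanged, so

     48 8b 05 xx xx xx xx    mov  foo@GOTPCREL(%rip), %rax

   becomes

     48 8d 05 xx xx xx xx    lea  foo(%rip), %rax

   with the R_X86_64_REX_GOTPCRELX relocation on the displacement
   replaced by R_X86_64_PC32.  */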
1551
1552 static bool
1553 elf_x86_64_convert_load_reloc (bfd *abfd,
1554 bfd_byte *contents,
1555 unsigned int *r_type_p,
1556 Elf_Internal_Rela *irel,
1557 struct elf_link_hash_entry *h,
1558 bool *converted,
1559 struct bfd_link_info *link_info)
1560 {
1561 struct elf_x86_link_hash_table *htab;
1562 bool is_pic;
1563 bool no_overflow;
1564 bool relocx;
1565 bool to_reloc_pc32;
1566 bool abs_symbol;
1567 bool local_ref;
1568 asection *tsec;
1569 bfd_signed_vma raddend;
1570 unsigned int opcode;
1571 unsigned int modrm;
1572 unsigned int r_type = *r_type_p;
1573 unsigned int r_symndx;
1574 bfd_vma roff = irel->r_offset;
1575 bfd_vma abs_relocation;
1576
1577 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1578 return true;
1579
1580 raddend = irel->r_addend;
1581 /* Addend for 32-bit PC-relative relocation must be -4. */
1582 if (raddend != -4)
1583 return true;
1584
1585 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1586 is_pic = bfd_link_pic (link_info);
1587
1588 relocx = (r_type == R_X86_64_GOTPCRELX
1589 || r_type == R_X86_64_REX_GOTPCRELX);
1590
1591 /* TRUE if --no-relax is used. */
1592 no_overflow = link_info->disable_target_specific_optimizations > 1;
1593
1594 r_symndx = htab->r_sym (irel->r_info);
1595
1596 opcode = bfd_get_8 (abfd, contents + roff - 2);
1597
1598 /* Convert mov to lea since it has been done for a while. */
1599 if (opcode != 0x8b)
1600 {
1601 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1602 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1603 test, xor instructions. */
1604 if (!relocx)
1605 return true;
1606 }
1607
1608 /* We convert only to R_X86_64_PC32:
1609 1. Branch.
1610 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1611 3. no_overflow is true.
1612 4. PIC.
1613 */
1614 to_reloc_pc32 = (opcode == 0xff
1615 || !relocx
1616 || no_overflow
1617 || is_pic);
1618
1619 abs_symbol = false;
1620 abs_relocation = 0;
1621
1622 /* Get the symbol referred to by the reloc. */
1623 if (h == NULL)
1624 {
1625 Elf_Internal_Sym *isym
1626 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1627
1628 /* Skip relocation against undefined symbols. */
1629 if (isym->st_shndx == SHN_UNDEF)
1630 return true;
1631
1632 local_ref = true;
1633 if (isym->st_shndx == SHN_ABS)
1634 {
1635 tsec = bfd_abs_section_ptr;
1636 abs_symbol = true;
1637 abs_relocation = isym->st_value;
1638 }
1639 else if (isym->st_shndx == SHN_COMMON)
1640 tsec = bfd_com_section_ptr;
1641 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1642 tsec = &_bfd_elf_large_com_section;
1643 else
1644 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1645 }
1646 else
1647 {
1648       /* An undefined weak symbol is only bound locally in an executable
1649 and its reference is resolved as 0 without relocation
1650 overflow. We can only perform this optimization for
1651 	 GOTPCRELX relocations since we need to modify the REX byte.
1652 	 It is OK to convert mov with R_X86_64_GOTPCREL to
1653 R_X86_64_PC32. */
1654 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1655
1656 abs_symbol = ABS_SYMBOL_P (h);
1657 abs_relocation = h->root.u.def.value;
1658
1659 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1660 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1661 if ((relocx || opcode == 0x8b)
1662 && (h->root.type == bfd_link_hash_undefweak
1663 && !eh->linker_def
1664 && local_ref))
1665 {
1666 if (opcode == 0xff)
1667 {
1668 /* Skip for branch instructions since R_X86_64_PC32
1669 may overflow. */
1670 if (no_overflow)
1671 return true;
1672 }
1673 else if (relocx)
1674 {
1675 /* For non-branch instructions, we can convert to
1676 R_X86_64_32/R_X86_64_32S since we know if there
1677 is a REX byte. */
1678 to_reloc_pc32 = false;
1679 }
1680
1681 /* Since we don't know the current PC when PIC is true,
1682 we can't convert to R_X86_64_PC32. */
1683 if (to_reloc_pc32 && is_pic)
1684 return true;
1685
1686 goto convert;
1687 }
1688 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1689 ld.so may use its link-time address. */
1690 else if (h->start_stop
1691 || eh->linker_def
1692 || ((h->def_regular
1693 || h->root.type == bfd_link_hash_defined
1694 || h->root.type == bfd_link_hash_defweak)
1695 && h != htab->elf.hdynamic
1696 && local_ref))
1697 {
1698 /* bfd_link_hash_new or bfd_link_hash_undefined is
1699 set by an assignment in a linker script in
1700 bfd_elf_record_link_assignment. start_stop is set
1701 on __start_SECNAME/__stop_SECNAME which mark section
1702 SECNAME. */
1703 if (h->start_stop
1704 || eh->linker_def
1705 || (h->def_regular
1706 && (h->root.type == bfd_link_hash_new
1707 || h->root.type == bfd_link_hash_undefined
1708 || ((h->root.type == bfd_link_hash_defined
1709 || h->root.type == bfd_link_hash_defweak)
1710 && h->root.u.def.section == bfd_und_section_ptr))))
1711 {
1712 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1713 if (no_overflow)
1714 return true;
1715 goto convert;
1716 }
1717 tsec = h->root.u.def.section;
1718 }
1719 else
1720 return true;
1721 }
1722
1723 /* Don't convert GOTPCREL relocation against large section. */
1724 if (elf_section_data (tsec) != NULL
1725 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1726 return true;
1727
1728 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1729 if (no_overflow)
1730 return true;
1731
1732 convert:
1733 if (opcode == 0xff)
1734 {
1735 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1736 unsigned int nop;
1737 unsigned int disp;
1738 bfd_vma nop_offset;
1739
1740 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1741 R_X86_64_PC32. */
1742 modrm = bfd_get_8 (abfd, contents + roff - 1);
1743 if (modrm == 0x25)
1744 {
1745 /* Convert to "jmp foo nop". */
1746 modrm = 0xe9;
1747 nop = NOP_OPCODE;
1748 nop_offset = irel->r_offset + 3;
1749 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1750 irel->r_offset -= 1;
1751 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1752 }
1753 else
1754 {
1755 struct elf_x86_link_hash_entry *eh
1756 = (struct elf_x86_link_hash_entry *) h;
1757
1758 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1759 is a nop prefix. */
1760 modrm = 0xe8;
1761 /* To support TLS optimization, always use addr32 prefix for
1762 "call *__tls_get_addr@GOTPCREL(%rip)". */
1763 if (eh && eh->tls_get_addr)
1764 {
1765 nop = 0x67;
1766 nop_offset = irel->r_offset - 2;
1767 }
1768 else
1769 {
1770 nop = htab->params->call_nop_byte;
1771 if (htab->params->call_nop_as_suffix)
1772 {
1773 nop_offset = irel->r_offset + 3;
1774 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1775 irel->r_offset -= 1;
1776 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1777 }
1778 else
1779 nop_offset = irel->r_offset - 2;
1780 }
1781 }
1782 bfd_put_8 (abfd, nop, contents + nop_offset);
1783 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1784 r_type = R_X86_64_PC32;
1785 }
1786 else
1787 {
1788 unsigned int rex;
1789 unsigned int rex_mask = REX_R;
1790
1791 if (r_type == R_X86_64_REX_GOTPCRELX)
1792 rex = bfd_get_8 (abfd, contents + roff - 3);
1793 else
1794 rex = 0;
1795
1796 if (opcode == 0x8b)
1797 {
1798 if (abs_symbol && local_ref && relocx)
1799 to_reloc_pc32 = false;
1800
1801 if (to_reloc_pc32)
1802 {
1803 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1804 "lea foo(%rip), %reg". */
1805 opcode = 0x8d;
1806 r_type = R_X86_64_PC32;
1807 }
1808 else
1809 {
1810 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1811 "mov $foo, %reg". */
1812 opcode = 0xc7;
1813 modrm = bfd_get_8 (abfd, contents + roff - 1);
1814 modrm = 0xc0 | (modrm & 0x38) >> 3;
1815 if ((rex & REX_W) != 0
1816 && ABI_64_P (link_info->output_bfd))
1817 {
1818 /* Keep the REX_W bit in REX byte for LP64. */
1819 r_type = R_X86_64_32S;
1820 goto rewrite_modrm_rex;
1821 }
1822 else
1823 {
1824 /* If the REX_W bit in REX byte isn't needed,
1825 use R_X86_64_32 and clear the W bit to avoid
1826 sign-extend imm32 to imm64. */
1827 r_type = R_X86_64_32;
1828 /* Clear the W bit in REX byte. */
1829 rex_mask |= REX_W;
1830 goto rewrite_modrm_rex;
1831 }
1832 }
1833 }
1834 else
1835 {
1836 /* R_X86_64_PC32 isn't supported. */
1837 if (to_reloc_pc32)
1838 return true;
1839
1840 modrm = bfd_get_8 (abfd, contents + roff - 1);
1841 if (opcode == 0x85)
1842 {
1843 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1844 "test $foo, %reg". */
1845 modrm = 0xc0 | (modrm & 0x38) >> 3;
1846 opcode = 0xf7;
1847 }
1848 else
1849 {
1850 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1851 "binop $foo, %reg". */
1852 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1853 opcode = 0x81;
1854 }
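	      /* A sketch of the rewrite for one concrete case (not from the
		 original sources; immediate bytes shown as xx):
		   48 03 1d xx xx xx xx   addq foo@GOTPCREL(%rip), %rbx
		 becomes
		   48 81 c3 xx xx xx xx   addq $foo, %rbx
		 The register moves from the reg field into the r/m field of
		 the new ModRM byte, and bits 3..5 of the original opcode
		 select the /digit of the 0x81 immediate group (add, or,
		 adc, sbb, and, sub, xor, cmp).  */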
1855
1856 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1857 overflow when sign-extending imm32 to imm64. */
1858 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1859
1860 rewrite_modrm_rex:
1861 if (abs_relocation)
1862 {
1863 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
1864 if (r_type == R_X86_64_32S)
1865 {
1866 if ((abs_relocation + 0x80000000) > 0xffffffff)
1867 return true;
1868 }
1869 else
1870 {
1871 if (abs_relocation > 0xffffffff)
1872 return true;
1873 }
1874 }
1875
1876 bfd_put_8 (abfd, modrm, contents + roff - 1);
1877
1878 if (rex)
1879 {
1880 /* Move the R bit to the B bit in REX byte. */
1881 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1882 bfd_put_8 (abfd, rex, contents + roff - 3);
1883 }
1884
1885 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1886 irel->r_addend = 0;
1887 }
1888
1889 bfd_put_8 (abfd, opcode, contents + roff - 2);
1890 }
1891
1892 *r_type_p = r_type;
1893 irel->r_info = htab->r_info (r_symndx,
1894 r_type | R_X86_64_converted_reloc_bit);
1895
1896 *converted = true;
1897
1898 return true;
1899 }
1900
1901 /* Look through the relocs for a section during the first phase, and
1902 calculate needed space in the global offset table, and procedure
1903 linkage table. */
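/* NB: in this backend the scan is driven from
   elf_x86_64_always_size_sections below, via
   _bfd_elf_link_iterate_on_relocs, so that it runs after rel_from_abs
   has been set on __ehdr_start.  */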
1904
1905 static bool
1906 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info,
1907 asection *sec,
1908 const Elf_Internal_Rela *relocs)
1909 {
1910 struct elf_x86_link_hash_table *htab;
1911 Elf_Internal_Shdr *symtab_hdr;
1912 struct elf_link_hash_entry **sym_hashes;
1913 const Elf_Internal_Rela *rel;
1914 const Elf_Internal_Rela *rel_end;
1915 bfd_byte *contents;
1916 bool converted;
1917
1918 if (bfd_link_relocatable (info))
1919 return true;
1920
1921 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1922 if (htab == NULL)
1923 {
1924 sec->check_relocs_failed = 1;
1925 return false;
1926 }
1927
1928 BFD_ASSERT (is_x86_elf (abfd, htab));
1929
1930 /* Get the section contents. */
1931 if (elf_section_data (sec)->this_hdr.contents != NULL)
1932 contents = elf_section_data (sec)->this_hdr.contents;
1933 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1934 {
1935 sec->check_relocs_failed = 1;
1936 return false;
1937 }
1938
1939 symtab_hdr = &elf_symtab_hdr (abfd);
1940 sym_hashes = elf_sym_hashes (abfd);
1941
1942 converted = false;
1943
1944 rel_end = relocs + sec->reloc_count;
1945 for (rel = relocs; rel < rel_end; rel++)
1946 {
1947 unsigned int r_type;
1948 unsigned int r_symndx;
1949 struct elf_link_hash_entry *h;
1950 struct elf_x86_link_hash_entry *eh;
1951 Elf_Internal_Sym *isym;
1952 const char *name;
1953 bool size_reloc;
1954 bool converted_reloc;
1955 bool no_dynreloc;
1956
1957 r_symndx = htab->r_sym (rel->r_info);
1958 r_type = ELF32_R_TYPE (rel->r_info);
1959
1960 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1961 {
1962 /* xgettext:c-format */
1963 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1964 abfd, r_symndx);
1965 goto error_return;
1966 }
1967
1968 if (r_symndx < symtab_hdr->sh_info)
1969 {
1970 /* A local symbol. */
1971 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1972 abfd, r_symndx);
1973 if (isym == NULL)
1974 goto error_return;
1975
1976 /* Check relocation against local STT_GNU_IFUNC symbol. */
1977 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1978 {
1979 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1980 true);
1981 if (h == NULL)
1982 goto error_return;
1983
1984 /* Fake a STT_GNU_IFUNC symbol. */
1985 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1986 isym, NULL);
1987 h->type = STT_GNU_IFUNC;
1988 h->def_regular = 1;
1989 h->ref_regular = 1;
1990 h->forced_local = 1;
1991 h->root.type = bfd_link_hash_defined;
1992 }
1993 else
1994 h = NULL;
1995 }
1996 else
1997 {
1998 isym = NULL;
1999 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2000 while (h->root.type == bfd_link_hash_indirect
2001 || h->root.type == bfd_link_hash_warning)
2002 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2003 }
2004
2005 /* Check invalid x32 relocations. */
2006 if (!ABI_64_P (abfd))
2007 switch (r_type)
2008 {
2009 default:
2010 break;
2011
2012 case R_X86_64_DTPOFF64:
2013 case R_X86_64_TPOFF64:
2014 case R_X86_64_PC64:
2015 case R_X86_64_GOTOFF64:
2016 case R_X86_64_GOT64:
2017 case R_X86_64_GOTPCREL64:
2018 case R_X86_64_GOTPC64:
2019 case R_X86_64_GOTPLT64:
2020 case R_X86_64_PLTOFF64:
2021 {
2022 if (h)
2023 name = h->root.root.string;
2024 else
2025 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2026 NULL);
2027 _bfd_error_handler
2028 /* xgettext:c-format */
2029 (_("%pB: relocation %s against symbol `%s' isn't "
2030 "supported in x32 mode"), abfd,
2031 x86_64_elf_howto_table[r_type].name, name);
2032 bfd_set_error (bfd_error_bad_value);
2033 goto error_return;
2034 }
2035 break;
2036 }
2037
2038 eh = (struct elf_x86_link_hash_entry *) h;
2039
2040 if (h != NULL)
2041 {
2042 /* It is referenced by a non-shared object. */
2043 h->ref_regular = 1;
2044 }
2045
2046 converted_reloc = false;
2047 if ((r_type == R_X86_64_GOTPCREL
2048 || r_type == R_X86_64_GOTPCRELX
2049 || r_type == R_X86_64_REX_GOTPCRELX)
2050 && (h == NULL || h->type != STT_GNU_IFUNC))
2051 {
2052 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
2053 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
2054 irel, h, &converted_reloc,
2055 info))
2056 goto error_return;
2057
2058 if (converted_reloc)
2059 converted = true;
2060 }
2061
2062 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2063 symtab_hdr, &no_dynreloc))
2064 return false;
2065
2066 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2067 symtab_hdr, sym_hashes,
2068 &r_type, GOT_UNKNOWN,
2069 rel, rel_end, h, r_symndx, false))
2070 goto error_return;
2071
2072 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2073 if (h == htab->elf.hgot)
2074 htab->got_referenced = true;
2075
2076 switch (r_type)
2077 {
2078 case R_X86_64_TLSLD:
2079 htab->tls_ld_or_ldm_got.refcount = 1;
2080 goto create_got;
2081
2082 case R_X86_64_TPOFF32:
2083 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2084 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2085 &x86_64_elf_howto_table[r_type]);
2086 if (eh != NULL)
2087 eh->zero_undefweak &= 0x2;
2088 break;
2089
2090 case R_X86_64_GOTTPOFF:
2091 if (!bfd_link_executable (info))
2092 info->flags |= DF_STATIC_TLS;
2093 /* Fall through */
2094
2095 case R_X86_64_GOT32:
2096 case R_X86_64_GOTPCREL:
2097 case R_X86_64_GOTPCRELX:
2098 case R_X86_64_REX_GOTPCRELX:
2099 case R_X86_64_TLSGD:
2100 case R_X86_64_GOT64:
2101 case R_X86_64_GOTPCREL64:
2102 case R_X86_64_GOTPLT64:
2103 case R_X86_64_GOTPC32_TLSDESC:
2104 case R_X86_64_TLSDESC_CALL:
2105 /* This symbol requires a global offset table entry. */
2106 {
2107 int tls_type, old_tls_type;
2108
2109 switch (r_type)
2110 {
2111 default:
2112 tls_type = GOT_NORMAL;
2113 if (h)
2114 {
2115 if (ABS_SYMBOL_P (h))
2116 tls_type = GOT_ABS;
2117 }
2118 else if (isym->st_shndx == SHN_ABS)
2119 tls_type = GOT_ABS;
2120 break;
2121 case R_X86_64_TLSGD:
2122 tls_type = GOT_TLS_GD;
2123 break;
2124 case R_X86_64_GOTTPOFF:
2125 tls_type = GOT_TLS_IE;
2126 break;
2127 case R_X86_64_GOTPC32_TLSDESC:
2128 case R_X86_64_TLSDESC_CALL:
2129 tls_type = GOT_TLS_GDESC;
2130 break;
2131 }
2132
2133 if (h != NULL)
2134 {
2135 h->got.refcount = 1;
2136 old_tls_type = eh->tls_type;
2137 }
2138 else
2139 {
2140 bfd_signed_vma *local_got_refcounts;
2141
2142 if (!elf_x86_allocate_local_got_info (abfd,
2143 symtab_hdr->sh_info))
2144 goto error_return;
2145
2146 /* This is a global offset table entry for a local symbol. */
2147 local_got_refcounts = elf_local_got_refcounts (abfd);
2148 local_got_refcounts[r_symndx] = 1;
2149 old_tls_type
2150 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2151 }
2152
2153 /* If a TLS symbol is accessed using IE at least once,
2154 there is no point in using a dynamic model for it. */
2155 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2156 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2157 || tls_type != GOT_TLS_IE))
2158 {
2159 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2160 tls_type = old_tls_type;
2161 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2162 && GOT_TLS_GD_ANY_P (tls_type))
2163 tls_type |= old_tls_type;
2164 else
2165 {
2166 if (h)
2167 name = h->root.root.string;
2168 else
2169 name = bfd_elf_sym_name (abfd, symtab_hdr,
2170 isym, NULL);
2171 _bfd_error_handler
2172 /* xgettext:c-format */
2173 (_("%pB: '%s' accessed both as normal and"
2174 " thread local symbol"),
2175 abfd, name);
2176 bfd_set_error (bfd_error_bad_value);
2177 goto error_return;
2178 }
2179 }
2180
2181 if (old_tls_type != tls_type)
2182 {
2183 if (eh != NULL)
2184 eh->tls_type = tls_type;
2185 else
2186 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2187 }
2188 }
2189 /* Fall through */
2190
2191 case R_X86_64_GOTOFF64:
2192 case R_X86_64_GOTPC32:
2193 case R_X86_64_GOTPC64:
2194 create_got:
2195 if (eh != NULL)
2196 eh->zero_undefweak &= 0x2;
2197 break;
2198
2199 case R_X86_64_PLT32:
2200 /* This symbol requires a procedure linkage table entry. We
2201 actually build the entry in adjust_dynamic_symbol,
2202 because this might be a case of linking PIC code which is
2203 never referenced by a dynamic object, in which case we
2204 don't need to generate a procedure linkage table entry
2205 after all. */
2206
2207 /* If this is a local symbol, we resolve it directly without
2208 creating a procedure linkage table entry. */
2209 if (h == NULL)
2210 continue;
2211
2212 eh->zero_undefweak &= 0x2;
2213 h->needs_plt = 1;
2214 h->plt.refcount = 1;
2215 break;
2216
2217 case R_X86_64_PLTOFF64:
2218 /* This tries to form the 'address' of a function relative
2219 to GOT. For global symbols we need a PLT entry. */
2220 if (h != NULL)
2221 {
2222 h->needs_plt = 1;
2223 h->plt.refcount = 1;
2224 }
2225 goto create_got;
2226
2227 case R_X86_64_SIZE32:
2228 case R_X86_64_SIZE64:
2229 size_reloc = true;
2230 goto do_size;
2231
2232 case R_X86_64_32:
2233 if (!ABI_64_P (abfd))
2234 goto pointer;
2235 /* Fall through. */
2236 case R_X86_64_8:
2237 case R_X86_64_16:
2238 case R_X86_64_32S:
2239 /* Check relocation overflow as these relocs may lead to
2240 run-time relocation overflow. Don't error out for
2241 sections we don't care about, such as debug sections or
2242 when relocation overflow check is disabled. */
2243 if (!htab->params->no_reloc_overflow_check
2244 && !converted_reloc
2245 && (bfd_link_pic (info)
2246 || (bfd_link_executable (info)
2247 && h != NULL
2248 && !h->def_regular
2249 && h->def_dynamic
2250 && (sec->flags & SEC_READONLY) == 0)))
2251 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2252 &x86_64_elf_howto_table[r_type]);
2253 /* Fall through. */
2254
2255 case R_X86_64_PC8:
2256 case R_X86_64_PC16:
2257 case R_X86_64_PC32:
2258 case R_X86_64_PC64:
2259 case R_X86_64_64:
2260 pointer:
2261 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2262 eh->zero_undefweak |= 0x2;
2263 /* We are called after all symbols have been resolved. Only
2264 relocation against STT_GNU_IFUNC symbol must go through
2265 PLT. */
2266 if (h != NULL
2267 && (bfd_link_executable (info)
2268 || h->type == STT_GNU_IFUNC))
2269 {
2270 bool func_pointer_ref = false;
2271
2272 if (r_type == R_X86_64_PC32)
2273 {
2274 /* Since something like ".long foo - ." may be used
2275 as a pointer, make sure that the PLT is used if foo is
2276 a function defined in a shared library. */
2277 if ((sec->flags & SEC_CODE) == 0)
2278 {
2279 h->pointer_equality_needed = 1;
2280 if (bfd_link_pie (info)
2281 && h->type == STT_FUNC
2282 && !h->def_regular
2283 && h->def_dynamic)
2284 {
2285 h->needs_plt = 1;
2286 h->plt.refcount = 1;
2287 }
2288 }
2289 }
2290 else if (r_type != R_X86_64_PC64)
2291 {
2292 /* At run-time, R_X86_64_64 can be resolved for both
2293 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2294 can only be resolved for x32. Function pointer
2295 reference doesn't need PLT for pointer equality. */
2296 if ((sec->flags & SEC_READONLY) == 0
2297 && (r_type == R_X86_64_64
2298 || (!ABI_64_P (abfd)
2299 && (r_type == R_X86_64_32
2300 || r_type == R_X86_64_32S))))
2301 func_pointer_ref = true;
2302
2303 /* IFUNC symbol needs pointer equality in PDE so that
2304 function pointer reference will be resolved to its
2305 PLT entry directly. */
2306 if (!func_pointer_ref
2307 || (bfd_link_pde (info)
2308 && h->type == STT_GNU_IFUNC))
2309 h->pointer_equality_needed = 1;
2310 }
2311
2312 if (!func_pointer_ref)
2313 {
2314 /* If this reloc is in a read-only section, we might
2315 need a copy reloc. We can't check reliably at this
2316 stage whether the section is read-only, as input
2317 sections have not yet been mapped to output sections.
2318 Tentatively set the flag for now, and correct in
2319 adjust_dynamic_symbol. */
2320 h->non_got_ref = 1;
2321
2322 if (!elf_has_indirect_extern_access (sec->owner))
2323 eh->non_got_ref_without_indirect_extern_access = 1;
2324
2325 /* We may need a .plt entry if the symbol is a function
2326 defined in a shared lib or is a function referenced
2327 from the code or read-only section. */
2328 if (!h->def_regular
2329 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2330 h->plt.refcount = 1;
2331
2332 if (htab->elf.target_os != is_solaris
2333 && h->pointer_equality_needed
2334 && h->type == STT_FUNC
2335 && eh->def_protected
2336 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2337 && h->def_dynamic)
2338 {
2339 /* Disallow non-canonical reference to canonical
2340 protected function. */
2341 _bfd_error_handler
2342 /* xgettext:c-format */
2343 (_("%pB: non-canonical reference to canonical "
2344 "protected function `%s' in %pB"),
2345 abfd, h->root.root.string,
2346 h->root.u.def.section->owner);
2347 bfd_set_error (bfd_error_bad_value);
2348 goto error_return;
2349 }
2350 }
2351 }
2352
2353 size_reloc = false;
2354 do_size:
2355 if (!no_dynreloc
2356 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec,
2357 r_type,
2358 htab->pointer_r_type))
2359 {
2360 struct elf_dyn_relocs *p;
2361 struct elf_dyn_relocs **head;
2362
2363 /* If this is a global symbol, we count the number of
2364 relocations we need for this symbol. */
2365 if (h != NULL)
2366 head = &h->dyn_relocs;
2367 else
2368 {
2369 /* Track dynamic relocs needed for local syms too.
2370 We really need local syms available to do this
2371 easily. Oh well. */
2372 asection *s;
2373 void **vpp;
2374
2375 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2376 abfd, r_symndx);
2377 if (isym == NULL)
2378 goto error_return;
2379
2380 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2381 if (s == NULL)
2382 s = sec;
2383
2384 /* Beware of type punned pointers vs strict aliasing
2385 rules. */
2386 vpp = &(elf_section_data (s)->local_dynrel);
2387 head = (struct elf_dyn_relocs **)vpp;
2388 }
2389
2390 p = *head;
2391 if (p == NULL || p->sec != sec)
2392 {
2393 size_t amt = sizeof *p;
2394
2395 p = ((struct elf_dyn_relocs *)
2396 bfd_alloc (htab->elf.dynobj, amt));
2397 if (p == NULL)
2398 goto error_return;
2399 p->next = *head;
2400 *head = p;
2401 p->sec = sec;
2402 p->count = 0;
2403 p->pc_count = 0;
2404 }
2405
2406 p->count += 1;
2407 /* Count size relocation as PC-relative relocation. */
2408 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc)
2409 p->pc_count += 1;
2410 }
2411 break;
2412
2413 /* This relocation describes the C++ object vtable hierarchy.
2414 Reconstruct it for later use during GC. */
2415 case R_X86_64_GNU_VTINHERIT:
2416 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2417 goto error_return;
2418 break;
2419
2420 /* This relocation describes which C++ vtable entries are actually
2421 used. Record for later use during GC. */
2422 case R_X86_64_GNU_VTENTRY:
2423 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2424 goto error_return;
2425 break;
2426
2427 default:
2428 break;
2429 }
2430 }
2431
2432 if (elf_section_data (sec)->this_hdr.contents != contents)
2433 {
2434 if (!converted && !_bfd_link_keep_memory (info))
2435 free (contents);
2436 else
2437 {
2438 /* Cache the section contents for elf_link_input_bfd if any
2439 load is converted or --no-keep-memory isn't used. */
2440 elf_section_data (sec)->this_hdr.contents = contents;
2441 info->cache_size += sec->size;
2442 }
2443 }
2444
2445 /* Cache relocations if any load is converted. */
2446 if (elf_section_data (sec)->relocs != relocs && converted)
2447 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2448
2449 return true;
2450
2451 error_return:
2452 if (elf_section_data (sec)->this_hdr.contents != contents)
2453 free (contents);
2454 sec->check_relocs_failed = 1;
2455 return false;
2456 }
2457
2458 static bool
2459 elf_x86_64_always_size_sections (bfd *output_bfd,
2460 struct bfd_link_info *info)
2461 {
2462 bfd *abfd;
2463
2464 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */
2465 for (abfd = info->input_bfds;
2466 abfd != (bfd *) NULL;
2467 abfd = abfd->link.next)
2468 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
2469 && !_bfd_elf_link_iterate_on_relocs (abfd, info,
2470 elf_x86_64_scan_relocs))
2471 return false;
2472
2473 return _bfd_x86_elf_always_size_sections (output_bfd, info);
2474 }
2475
2476 /* Return the relocation value for @tpoff relocation
2477 if STT_TLS virtual address is ADDRESS. */
2478
2479 static bfd_vma
2480 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2481 {
2482 struct elf_link_hash_table *htab = elf_hash_table (info);
2483 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2484 bfd_vma static_tls_size;
2485
2486 /* If tls_segment is NULL, we should have signalled an error already. */
2487 if (htab->tls_sec == NULL)
2488 return 0;
2489
2490 /* Consider special static TLS alignment requirements. */
2491 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2492 return address - static_tls_size - htab->tls_sec->vma;
2493 }
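/* A worked example of the calculation above (illustrative only): with a
   static TLS block of 0x20 bytes after alignment, a variable placed 0x8
   bytes into the TLS segment gets the @tpoff value 0x8 - 0x20 = -0x18.
   x86-64 uses TLS variant 2, where the thread pointer (%fs:0) points
   just past the static TLS block, so the variable is addressed as
   %fs:-0x18 at run time.  */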
2494
2495 /* Relocate an x86_64 ELF section. */
2496
2497 static int
2498 elf_x86_64_relocate_section (bfd *output_bfd,
2499 struct bfd_link_info *info,
2500 bfd *input_bfd,
2501 asection *input_section,
2502 bfd_byte *contents,
2503 Elf_Internal_Rela *relocs,
2504 Elf_Internal_Sym *local_syms,
2505 asection **local_sections)
2506 {
2507 struct elf_x86_link_hash_table *htab;
2508 Elf_Internal_Shdr *symtab_hdr;
2509 struct elf_link_hash_entry **sym_hashes;
2510 bfd_vma *local_got_offsets;
2511 bfd_vma *local_tlsdesc_gotents;
2512 Elf_Internal_Rela *rel;
2513 Elf_Internal_Rela *wrel;
2514 Elf_Internal_Rela *relend;
2515 unsigned int plt_entry_size;
2516 bool status;
2517
2518 /* Skip if check_relocs or scan_relocs failed. */
2519 if (input_section->check_relocs_failed)
2520 return false;
2521
2522 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2523 if (htab == NULL)
2524 return false;
2525
2526 if (!is_x86_elf (input_bfd, htab))
2527 {
2528 bfd_set_error (bfd_error_wrong_format);
2529 return false;
2530 }
2531
2532 plt_entry_size = htab->plt.plt_entry_size;
2533 symtab_hdr = &elf_symtab_hdr (input_bfd);
2534 sym_hashes = elf_sym_hashes (input_bfd);
2535 local_got_offsets = elf_local_got_offsets (input_bfd);
2536 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2537
2538 _bfd_x86_elf_set_tls_module_base (info);
2539
2540 status = true;
2541 rel = wrel = relocs;
2542 relend = relocs + input_section->reloc_count;
2543 for (; rel < relend; wrel++, rel++)
2544 {
2545 unsigned int r_type, r_type_tls;
2546 reloc_howto_type *howto;
2547 unsigned long r_symndx;
2548 struct elf_link_hash_entry *h;
2549 struct elf_x86_link_hash_entry *eh;
2550 Elf_Internal_Sym *sym;
2551 asection *sec;
2552 bfd_vma off, offplt, plt_offset;
2553 bfd_vma relocation;
2554 bool unresolved_reloc;
2555 bfd_reloc_status_type r;
2556 int tls_type;
2557 asection *base_got, *resolved_plt;
2558 bfd_vma st_size;
2559 bool resolved_to_zero;
2560 bool relative_reloc;
2561 bool converted_reloc;
2562 bool need_copy_reloc_in_pie;
2563 bool no_copyreloc_p;
2564
2565 r_type = ELF32_R_TYPE (rel->r_info);
2566 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2567 || r_type == (int) R_X86_64_GNU_VTENTRY)
2568 {
2569 if (wrel != rel)
2570 *wrel = *rel;
2571 continue;
2572 }
2573
2574 r_symndx = htab->r_sym (rel->r_info);
2575 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2576 if (converted_reloc)
2577 {
2578 r_type &= ~R_X86_64_converted_reloc_bit;
2579 rel->r_info = htab->r_info (r_symndx, r_type);
2580 }
2581
2582 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2583 if (howto == NULL)
2584 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2585
2586 h = NULL;
2587 sym = NULL;
2588 sec = NULL;
2589 unresolved_reloc = false;
2590 if (r_symndx < symtab_hdr->sh_info)
2591 {
2592 sym = local_syms + r_symndx;
2593 sec = local_sections[r_symndx];
2594
2595 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2596 &sec, rel);
2597 st_size = sym->st_size;
2598
2599 /* Relocate against local STT_GNU_IFUNC symbol. */
2600 if (!bfd_link_relocatable (info)
2601 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2602 {
2603 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2604 rel, false);
2605 if (h == NULL)
2606 abort ();
2607
2608 /* Set STT_GNU_IFUNC symbol value. */
2609 h->root.u.def.value = sym->st_value;
2610 h->root.u.def.section = sec;
2611 }
2612 }
2613 else
2614 {
2615 bool warned ATTRIBUTE_UNUSED;
2616 bool ignored ATTRIBUTE_UNUSED;
2617
2618 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2619 r_symndx, symtab_hdr, sym_hashes,
2620 h, sec, relocation,
2621 unresolved_reloc, warned, ignored);
2622 st_size = h->size;
2623 }
2624
2625 if (sec != NULL && discarded_section (sec))
2626 {
2627 _bfd_clear_contents (howto, input_bfd, input_section,
2628 contents, rel->r_offset);
2629 wrel->r_offset = rel->r_offset;
2630 wrel->r_info = 0;
2631 wrel->r_addend = 0;
2632
2633 /* For ld -r, remove relocations in debug sections against
2634 sections defined in discarded sections. Not done for
2635 eh_frame, because the eh_frame editing code expects them to be present. */
2636 if (bfd_link_relocatable (info)
2637 && (input_section->flags & SEC_DEBUGGING))
2638 wrel--;
2639
2640 continue;
2641 }
2642
2643 if (bfd_link_relocatable (info))
2644 {
2645 if (wrel != rel)
2646 *wrel = *rel;
2647 continue;
2648 }
2649
2650 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2651 {
2652 if (r_type == R_X86_64_64)
2653 {
2654 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2655 zero-extend it to 64bit if addend is zero. */
2656 r_type = R_X86_64_32;
2657 memset (contents + rel->r_offset + 4, 0, 4);
2658 }
2659 else if (r_type == R_X86_64_SIZE64)
2660 {
2661 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2662 zero-extend it to 64bit if addend is zero. */
2663 r_type = R_X86_64_SIZE32;
2664 memset (contents + rel->r_offset + 4, 0, 4);
2665 }
2666 }
2667
2668 eh = (struct elf_x86_link_hash_entry *) h;
2669
2670 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2671 it here if it is defined in a non-shared object. */
2672 if (h != NULL
2673 && h->type == STT_GNU_IFUNC
2674 && h->def_regular)
2675 {
2676 bfd_vma plt_index;
2677 const char *name;
2678
2679 if ((input_section->flags & SEC_ALLOC) == 0)
2680 {
2681 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2682 STT_GNU_IFUNC symbol as STT_FUNC. */
2683 if (elf_section_type (input_section) == SHT_NOTE)
2684 goto skip_ifunc;
2685 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2686 sections because such sections are not SEC_ALLOC and
2687 thus ld.so will not process them. */
2688 if ((input_section->flags & SEC_DEBUGGING) != 0)
2689 continue;
2690 abort ();
2691 }
2692
2693 switch (r_type)
2694 {
2695 default:
2696 break;
2697
2698 case R_X86_64_GOTPCREL:
2699 case R_X86_64_GOTPCRELX:
2700 case R_X86_64_REX_GOTPCRELX:
2701 case R_X86_64_GOTPCREL64:
2702 base_got = htab->elf.sgot;
2703 off = h->got.offset;
2704
2705 if (base_got == NULL)
2706 abort ();
2707
2708 if (off == (bfd_vma) -1)
2709 {
2710 /* We can't use h->got.offset here to save state, or
2711 even just remember the offset, as finish_dynamic_symbol
2712 would use that as offset into .got. */
2713
2714 if (h->plt.offset == (bfd_vma) -1)
2715 abort ();
2716
2717 if (htab->elf.splt != NULL)
2718 {
2719 plt_index = (h->plt.offset / plt_entry_size
2720 - htab->plt.has_plt0);
2721 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2722 base_got = htab->elf.sgotplt;
2723 }
2724 else
2725 {
2726 plt_index = h->plt.offset / plt_entry_size;
2727 off = plt_index * GOT_ENTRY_SIZE;
2728 base_got = htab->elf.igotplt;
2729 }
2730
2731 if (h->dynindx == -1
2732 || h->forced_local
2733 || info->symbolic)
2734 {
2735 /* This references the local definition. We must
2736 initialize this entry in the global offset table.
2737 Since the offset must always be a multiple of 8,
2738 we use the least significant bit to record
2739 whether we have initialized it already.
2740
2741 When doing a dynamic link, we create a .rela.got
2742 relocation entry to initialize the value. This
2743 is done in the finish_dynamic_symbol routine. */
2744 if ((off & 1) != 0)
2745 off &= ~1;
2746 else
2747 {
2748 bfd_put_64 (output_bfd, relocation,
2749 base_got->contents + off);
2750 /* Note that this is harmless for the GOTPLT64
2751 case, as -1 | 1 still is -1. */
2752 h->got.offset |= 1;
2753 }
2754 }
2755 }
2756
2757 relocation = (base_got->output_section->vma
2758 + base_got->output_offset + off);
2759
2760 goto do_relocation;
2761 }
2762
2763 if (h->plt.offset == (bfd_vma) -1)
2764 {
2765 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2766 if (r_type == htab->pointer_r_type
2767 && (input_section->flags & SEC_CODE) == 0)
2768 goto do_ifunc_pointer;
2769 goto bad_ifunc_reloc;
2770 }
2771
2772 /* STT_GNU_IFUNC symbol must go through PLT. */
2773 if (htab->elf.splt != NULL)
2774 {
2775 if (htab->plt_second != NULL)
2776 {
2777 resolved_plt = htab->plt_second;
2778 plt_offset = eh->plt_second.offset;
2779 }
2780 else
2781 {
2782 resolved_plt = htab->elf.splt;
2783 plt_offset = h->plt.offset;
2784 }
2785 }
2786 else
2787 {
2788 resolved_plt = htab->elf.iplt;
2789 plt_offset = h->plt.offset;
2790 }
2791
2792 relocation = (resolved_plt->output_section->vma
2793 + resolved_plt->output_offset + plt_offset);
2794
2795 switch (r_type)
2796 {
2797 default:
2798 bad_ifunc_reloc:
2799 if (h->root.root.string)
2800 name = h->root.root.string;
2801 else
2802 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2803 NULL);
2804 _bfd_error_handler
2805 /* xgettext:c-format */
2806 (_("%pB: relocation %s against STT_GNU_IFUNC "
2807 "symbol `%s' isn't supported"), input_bfd,
2808 howto->name, name);
2809 bfd_set_error (bfd_error_bad_value);
2810 return false;
2811
2812 case R_X86_64_32S:
2813 if (bfd_link_pic (info))
2814 abort ();
2815 goto do_relocation;
2816
2817 case R_X86_64_32:
2818 if (ABI_64_P (output_bfd))
2819 goto do_relocation;
2820 /* FALLTHROUGH */
2821 case R_X86_64_64:
2822 do_ifunc_pointer:
2823 if (rel->r_addend != 0)
2824 {
2825 if (h->root.root.string)
2826 name = h->root.root.string;
2827 else
2828 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2829 sym, NULL);
2830 _bfd_error_handler
2831 /* xgettext:c-format */
2832 (_("%pB: relocation %s against STT_GNU_IFUNC "
2833 "symbol `%s' has non-zero addend: %" PRId64),
2834 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2835 bfd_set_error (bfd_error_bad_value);
2836 return false;
2837 }
2838
2839 /* Generate a dynamic relocation only when there is a
2840 non-GOT reference in a shared object or there is no
2841 PLT. */
2842 if ((bfd_link_pic (info) && h->non_got_ref)
2843 || h->plt.offset == (bfd_vma) -1)
2844 {
2845 Elf_Internal_Rela outrel;
2846 asection *sreloc;
2847
2848 /* Need a dynamic relocation to get the real function
2849 address. */
2850 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2851 info,
2852 input_section,
2853 rel->r_offset);
2854 if (outrel.r_offset == (bfd_vma) -1
2855 || outrel.r_offset == (bfd_vma) -2)
2856 abort ();
2857
2858 outrel.r_offset += (input_section->output_section->vma
2859 + input_section->output_offset);
2860
2861 if (POINTER_LOCAL_IFUNC_P (info, h))
2862 {
2863 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2864 h->root.root.string,
2865 h->root.u.def.section->owner);
2866
2867 /* This symbol is resolved locally. */
2868 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2869 outrel.r_addend = (h->root.u.def.value
2870 + h->root.u.def.section->output_section->vma
2871 + h->root.u.def.section->output_offset);
2872
2873 if (htab->params->report_relative_reloc)
2874 _bfd_x86_elf_link_report_relative_reloc
2875 (info, input_section, h, sym,
2876 "R_X86_64_IRELATIVE", &outrel);
2877 }
2878 else
2879 {
2880 outrel.r_info = htab->r_info (h->dynindx, r_type);
2881 outrel.r_addend = 0;
2882 }
2883
2884 /* Dynamic relocations are stored in
2885 1. .rela.ifunc section in PIC object.
2886 2. .rela.got section in dynamic executable.
2887 3. .rela.iplt section in static executable. */
2888 if (bfd_link_pic (info))
2889 sreloc = htab->elf.irelifunc;
2890 else if (htab->elf.splt != NULL)
2891 sreloc = htab->elf.srelgot;
2892 else
2893 sreloc = htab->elf.irelplt;
2894 elf_append_rela (output_bfd, sreloc, &outrel);
2895
2896 /* If this reloc is against an external symbol, we
2897 do not want to fiddle with the addend. Otherwise,
2898 we need to include the symbol value so that it
2899 becomes an addend for the dynamic reloc. For an
2900 internal symbol, we have already updated the addend. */
2901 continue;
2902 }
2903 /* FALLTHROUGH */
2904 case R_X86_64_PC32:
2905 case R_X86_64_PC64:
2906 case R_X86_64_PLT32:
2907 goto do_relocation;
2908 }
2909 }
2910
2911 skip_ifunc:
2912 resolved_to_zero = (eh != NULL
2913 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2914
2915 /* When generating a shared object, the relocations handled here are
2916 copied into the output file to be resolved at run time. */
2917 switch (r_type)
2918 {
2919 case R_X86_64_GOT32:
2920 case R_X86_64_GOT64:
2921 /* Relocation is to the entry for this symbol in the global
2922 offset table. */
2923 case R_X86_64_GOTPCREL:
2924 case R_X86_64_GOTPCRELX:
2925 case R_X86_64_REX_GOTPCRELX:
2926 case R_X86_64_GOTPCREL64:
2927 /* Use global offset table entry as symbol value. */
2928 case R_X86_64_GOTPLT64:
2929 /* This is obsolete and treated the same as GOT64. */
2930 base_got = htab->elf.sgot;
2931
2932 if (htab->elf.sgot == NULL)
2933 abort ();
2934
2935 relative_reloc = false;
2936 if (h != NULL)
2937 {
2938 off = h->got.offset;
2939 if (h->needs_plt
2940 && h->plt.offset != (bfd_vma)-1
2941 && off == (bfd_vma)-1)
2942 {
2943 /* We can't use h->got.offset here to save
2944 state, or even just remember the offset, as
2945 finish_dynamic_symbol would use that as offset into
2946 .got. */
2947 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2948 - htab->plt.has_plt0);
2949 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2950 base_got = htab->elf.sgotplt;
2951 }
2952
2953 if (RESOLVED_LOCALLY_P (info, h, htab))
2954 {
2955 /* We must initialize this entry in the global offset
2956 table. Since the offset must always be a multiple
2957 of 8, we use the least significant bit to record
2958 whether we have initialized it already.
2959
2960 When doing a dynamic link, we create a .rela.got
2961 relocation entry to initialize the value. This is
2962 done in the finish_dynamic_symbol routine. */
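		      /* For example, an off value of 0x19 here means the GOT
			 entry at offset 0x18 was already initialized by an
			 earlier relocation against the same symbol.  */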
2963 if ((off & 1) != 0)
2964 off &= ~1;
2965 else
2966 {
2967 bfd_put_64 (output_bfd, relocation,
2968 base_got->contents + off);
2969 /* Note that this is harmless for the GOTPLT64 case,
2970 as -1 | 1 still is -1. */
2971 h->got.offset |= 1;
2972
2973 /* NB: Don't generate relative relocation here if
2974 it has been generated by DT_RELR. */
2975 if (!info->enable_dt_relr
2976 && GENERATE_RELATIVE_RELOC_P (info, h))
2977 {
2978 /* If this symbol isn't dynamic in PIC,
2979 generate R_X86_64_RELATIVE here. */
2980 eh->no_finish_dynamic_symbol = 1;
2981 relative_reloc = true;
2982 }
2983 }
2984 }
2985 else
2986 unresolved_reloc = false;
2987 }
2988 else
2989 {
2990 if (local_got_offsets == NULL)
2991 abort ();
2992
2993 off = local_got_offsets[r_symndx];
2994
2995 /* The offset must always be a multiple of 8. We use
2996 the least significant bit to record whether we have
2997 already generated the necessary reloc. */
2998 if ((off & 1) != 0)
2999 off &= ~1;
3000 else
3001 {
3002 bfd_put_64 (output_bfd, relocation,
3003 base_got->contents + off);
3004 local_got_offsets[r_symndx] |= 1;
3005
3006 /* NB: GOTPCREL relocations against local absolute
3007 symbol store relocation value in the GOT slot
3008 without relative relocation. Don't generate
3009 relative relocation here if it has been generated
3010 by DT_RELR. */
3011 if (!info->enable_dt_relr
3012 && bfd_link_pic (info)
3013 && !(sym->st_shndx == SHN_ABS
3014 && (r_type == R_X86_64_GOTPCREL
3015 || r_type == R_X86_64_GOTPCRELX
3016 || r_type == R_X86_64_REX_GOTPCRELX)))
3017 relative_reloc = true;
3018 }
3019 }
3020
3021 if (relative_reloc)
3022 {
3023 asection *s;
3024 Elf_Internal_Rela outrel;
3025
3026 /* We need to generate a R_X86_64_RELATIVE reloc
3027 for the dynamic linker. */
3028 s = htab->elf.srelgot;
3029 if (s == NULL)
3030 abort ();
3031
3032 outrel.r_offset = (base_got->output_section->vma
3033 + base_got->output_offset
3034 + off);
3035 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3036 outrel.r_addend = relocation;
3037
3038 if (htab->params->report_relative_reloc)
3039 _bfd_x86_elf_link_report_relative_reloc
3040 (info, input_section, h, sym, "R_X86_64_RELATIVE",
3041 &outrel);
3042
3043 elf_append_rela (output_bfd, s, &outrel);
3044 }
3045
3046 if (off >= (bfd_vma) -2)
3047 abort ();
3048
3049 relocation = base_got->output_section->vma
3050 + base_got->output_offset + off;
3051 if (r_type != R_X86_64_GOTPCREL
3052 && r_type != R_X86_64_GOTPCRELX
3053 && r_type != R_X86_64_REX_GOTPCRELX
3054 && r_type != R_X86_64_GOTPCREL64)
3055 relocation -= htab->elf.sgotplt->output_section->vma
3056 - htab->elf.sgotplt->output_offset;
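	  /* NB: for the PC-relative GOTPCREL flavours the value computed
	     above is the address of the GOT entry itself; the remaining
	     GOT relocation types are instead biased to be relative to
	     _GLOBAL_OFFSET_TABLE_, the start of .got.plt.  */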
3057
3058 break;
3059
3060 case R_X86_64_GOTOFF64:
3061 /* Relocation is relative to the start of the global offset
3062 table. */
3063
3064 /* Check to make sure it isn't a protected function or data
3065 symbol for shared library since it may not be local when
3066 used as function address or with copy relocation. We also
3067 need to make sure that a symbol is referenced locally. */
3068 if (bfd_link_pic (info) && h)
3069 {
3070 if (!h->def_regular)
3071 {
3072 const char *v;
3073
3074 switch (ELF_ST_VISIBILITY (h->other))
3075 {
3076 case STV_HIDDEN:
3077 v = _("hidden symbol");
3078 break;
3079 case STV_INTERNAL:
3080 v = _("internal symbol");
3081 break;
3082 case STV_PROTECTED:
3083 v = _("protected symbol");
3084 break;
3085 default:
3086 v = _("symbol");
3087 break;
3088 }
3089
3090 _bfd_error_handler
3091 /* xgettext:c-format */
3092 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3093 " `%s' can not be used when making a shared object"),
3094 input_bfd, v, h->root.root.string);
3095 bfd_set_error (bfd_error_bad_value);
3096 return false;
3097 }
3098 else if (!bfd_link_executable (info)
3099 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3100 && (h->type == STT_FUNC
3101 || h->type == STT_OBJECT)
3102 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3103 {
3104 _bfd_error_handler
3105 /* xgettext:c-format */
3106 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3107 " `%s' can not be used when making a shared object"),
3108 input_bfd,
3109 h->type == STT_FUNC ? "function" : "data",
3110 h->root.root.string);
3111 bfd_set_error (bfd_error_bad_value);
3112 return false;
3113 }
3114 }
3115
3116 /* Note that sgot is not involved in this
3117 calculation. We always want the start of .got.plt. If we
3118 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3119 permitted by the ABI, we might have to change this
3120 calculation. */
3121 relocation -= htab->elf.sgotplt->output_section->vma
3122 + htab->elf.sgotplt->output_offset;
3123 break;
3124
3125 case R_X86_64_GOTPC32:
3126 case R_X86_64_GOTPC64:
3127 /* Use global offset table as symbol value. */
3128 relocation = htab->elf.sgotplt->output_section->vma
3129 + htab->elf.sgotplt->output_offset;
3130 unresolved_reloc = false;
3131 break;
3132
3133 case R_X86_64_PLTOFF64:
3134 /* Relocation is PLT entry relative to GOT. For local
3135 symbols it's the symbol itself relative to GOT. */
3136 if (h != NULL
3137 /* See PLT32 handling. */
3138 && (h->plt.offset != (bfd_vma) -1
3139 || eh->plt_got.offset != (bfd_vma) -1)
3140 && htab->elf.splt != NULL)
3141 {
3142 if (eh->plt_got.offset != (bfd_vma) -1)
3143 {
3144 /* Use the GOT PLT. */
3145 resolved_plt = htab->plt_got;
3146 plt_offset = eh->plt_got.offset;
3147 }
3148 else if (htab->plt_second != NULL)
3149 {
3150 resolved_plt = htab->plt_second;
3151 plt_offset = eh->plt_second.offset;
3152 }
3153 else
3154 {
3155 resolved_plt = htab->elf.splt;
3156 plt_offset = h->plt.offset;
3157 }
3158
3159 relocation = (resolved_plt->output_section->vma
3160 + resolved_plt->output_offset
3161 + plt_offset);
3162 unresolved_reloc = false;
3163 }
3164
3165 relocation -= htab->elf.sgotplt->output_section->vma
3166 + htab->elf.sgotplt->output_offset;
3167 break;
3168
3169 case R_X86_64_PLT32:
3170 /* Relocation is to the entry for this symbol in the
3171 procedure linkage table. */
3172
3173 /* Resolve a PLT32 reloc against a local symbol directly,
3174 without using the procedure linkage table. */
3175 if (h == NULL)
3176 break;
3177
3178 if ((h->plt.offset == (bfd_vma) -1
3179 && eh->plt_got.offset == (bfd_vma) -1)
3180 || htab->elf.splt == NULL)
3181 {
3182 /* We didn't make a PLT entry for this symbol. This
3183 happens when statically linking PIC code, or when
3184 using -Bsymbolic. */
3185 break;
3186 }
3187
3188 use_plt:
3189 if (h->plt.offset != (bfd_vma) -1)
3190 {
3191 if (htab->plt_second != NULL)
3192 {
3193 resolved_plt = htab->plt_second;
3194 plt_offset = eh->plt_second.offset;
3195 }
3196 else
3197 {
3198 resolved_plt = htab->elf.splt;
3199 plt_offset = h->plt.offset;
3200 }
3201 }
3202 else
3203 {
3204 /* Use the GOT PLT. */
3205 resolved_plt = htab->plt_got;
3206 plt_offset = eh->plt_got.offset;
3207 }
3208
3209 relocation = (resolved_plt->output_section->vma
3210 + resolved_plt->output_offset
3211 + plt_offset);
3212 unresolved_reloc = false;
3213 break;
3214
3215 case R_X86_64_SIZE32:
3216 case R_X86_64_SIZE64:
3217 /* Set to symbol size. */
3218 relocation = st_size;
3219 goto direct;
3220
3221 case R_X86_64_PC8:
3222 case R_X86_64_PC16:
3223 case R_X86_64_PC32:
3224 /* Don't complain about -fPIC if the symbol is undefined when
3225 building an executable, unless it is an unresolved weak symbol,
3226 references a dynamic definition in a PIE, or -z nocopyreloc
3227 is used. */
3228 no_copyreloc_p
3229 = (info->nocopyreloc
3230 || (h != NULL
3231 && !h->root.linker_def
3232 && !h->root.ldscript_def
3233 && eh->def_protected));
3234
3235 if ((input_section->flags & SEC_ALLOC) != 0
3236 && (input_section->flags & SEC_READONLY) != 0
3237 && h != NULL
3238 && ((bfd_link_executable (info)
3239 && ((h->root.type == bfd_link_hash_undefweak
3240 && (eh == NULL
3241 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3242 eh)))
3243 || (bfd_link_pie (info)
3244 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3245 && h->def_dynamic)
3246 || (no_copyreloc_p
3247 && h->def_dynamic
3248 && !(h->root.u.def.section->flags & SEC_CODE))))
3249 || (bfd_link_pie (info)
3250 && h->root.type == bfd_link_hash_undefweak)
3251 || bfd_link_dll (info)))
3252 {
3253 bool fail = false;
3254 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3255 {
3256 /* Symbol is referenced locally. Make sure it is
3257 defined locally. */
3258 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3259 }
3260 else if (bfd_link_pie (info))
3261 {
3262 /* We can only use PC-relative relocations in PIE
3263 from non-code sections. */
3264 if (h->root.type == bfd_link_hash_undefweak
3265 || (h->type == STT_FUNC
3266 && (sec->flags & SEC_CODE) != 0))
3267 fail = true;
3268 }
3269 else if (no_copyreloc_p || bfd_link_dll (info))
3270 {
3271 /* Symbol doesn't need copy reloc and isn't
3272 referenced locally. Don't allow PC-relative
3273 relocations against default and protected
3274 symbols since address of protected function
3275 and location of protected data may not be in
3276 the shared object. */
3277 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3278 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3279 }
3280
3281 if (fail)
3282 return elf_x86_64_need_pic (info, input_bfd, input_section,
3283 h, NULL, NULL, howto);
3284 }
3285 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3286 as function address. */
3287 else if (h != NULL
3288 && (input_section->flags & SEC_CODE) == 0
3289 && bfd_link_pie (info)
3290 && h->type == STT_FUNC
3291 && !h->def_regular
3292 && h->def_dynamic)
3293 goto use_plt;
3294 /* Fall through. */
3295
3296 case R_X86_64_8:
3297 case R_X86_64_16:
3298 case R_X86_64_32:
3299 case R_X86_64_PC64:
3300 case R_X86_64_64:
3301 /* FIXME: The ABI says the linker should make sure the value is
3302 the same when it's zero-extended to 64 bits. */
3303
3304 direct:
3305 if ((input_section->flags & SEC_ALLOC) == 0)
3306 break;
3307
3308 need_copy_reloc_in_pie = (bfd_link_pie (info)
3309 && h != NULL
3310 && (h->needs_copy
3311 || eh->needs_copy
3312 || (h->root.type
3313 == bfd_link_hash_undefined))
3314 && (X86_PCREL_TYPE_P (true, r_type)
3315 || X86_SIZE_TYPE_P (true,
3316 r_type)));
3317
3318 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec,
3319 need_copy_reloc_in_pie,
3320 resolved_to_zero, false))
3321 {
3322 Elf_Internal_Rela outrel;
3323 bool skip, relocate;
3324 bool generate_dynamic_reloc = true;
3325 asection *sreloc;
3326 const char *relative_reloc_name = NULL;
3327
3328 /* When generating a shared object, these relocations
3329 are copied into the output file to be resolved at run
3330 time. */
3331 skip = false;
3332 relocate = false;
3333
3334 outrel.r_offset =
3335 _bfd_elf_section_offset (output_bfd, info, input_section,
3336 rel->r_offset);
3337 if (outrel.r_offset == (bfd_vma) -1)
3338 skip = true;
3339 else if (outrel.r_offset == (bfd_vma) -2)
3340 skip = true, relocate = true;
3341
3342 outrel.r_offset += (input_section->output_section->vma
3343 + input_section->output_offset);
3344
3345 if (skip)
3346 memset (&outrel, 0, sizeof outrel);
3347
3348 else if (COPY_INPUT_RELOC_P (true, info, h, r_type))
3349 {
3350 outrel.r_info = htab->r_info (h->dynindx, r_type);
3351 outrel.r_addend = rel->r_addend;
3352 }
3353 else
3354 {
3355 /* This symbol is local, or marked to become local.
3356 When relocation overflow check is disabled, we
3357 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3358 if (r_type == htab->pointer_r_type
3359 || (r_type == R_X86_64_32
3360 && htab->params->no_reloc_overflow_check))
3361 {
3362 relocate = true;
3363 /* NB: Don't generate relative relocation here if
3364 it has been generated by DT_RELR. */
3365 if (info->enable_dt_relr)
3366 generate_dynamic_reloc = false;
3367 else
3368 {
3369 outrel.r_info =
3370 htab->r_info (0, R_X86_64_RELATIVE);
3371 outrel.r_addend = relocation + rel->r_addend;
3372 relative_reloc_name = "R_X86_64_RELATIVE";
3373 }
3374 }
3375 else if (r_type == R_X86_64_64
3376 && !ABI_64_P (output_bfd))
3377 {
3378 relocate = true;
3379 outrel.r_info = htab->r_info (0,
3380 R_X86_64_RELATIVE64);
3381 outrel.r_addend = relocation + rel->r_addend;
3382 relative_reloc_name = "R_X86_64_RELATIVE64";
3383 /* Check addend overflow. */
3384 if ((outrel.r_addend & 0x80000000)
3385 != (rel->r_addend & 0x80000000))
3386 {
3387 const char *name;
3388 int addend = rel->r_addend;
3389 if (h && h->root.root.string)
3390 name = h->root.root.string;
3391 else
3392 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3393 sym, NULL);
3394 _bfd_error_handler
3395 /* xgettext:c-format */
3396 (_("%pB: addend %s%#x in relocation %s against "
3397 "symbol `%s' at %#" PRIx64
3398 " in section `%pA' is out of range"),
3399 input_bfd, addend < 0 ? "-" : "", addend,
3400 howto->name, name, (uint64_t) rel->r_offset,
3401 input_section);
3402 bfd_set_error (bfd_error_bad_value);
3403 return false;
3404 }
3405 }
3406 else
3407 {
3408 long sindx;
3409
3410 if (bfd_is_abs_section (sec))
3411 sindx = 0;
3412 else if (sec == NULL || sec->owner == NULL)
3413 {
3414 bfd_set_error (bfd_error_bad_value);
3415 return false;
3416 }
3417 else
3418 {
3419 asection *osec;
3420
3421 /* We are turning this relocation into one
3422 against a section symbol. It would be
3423 proper to subtract the symbol's value,
3424 osec->vma, from the emitted reloc addend,
3425 but ld.so expects buggy relocs. */
3426 osec = sec->output_section;
3427 sindx = elf_section_data (osec)->dynindx;
3428 if (sindx == 0)
3429 {
3430 asection *oi = htab->elf.text_index_section;
3431 sindx = elf_section_data (oi)->dynindx;
3432 }
3433 BFD_ASSERT (sindx != 0);
3434 }
3435
3436 outrel.r_info = htab->r_info (sindx, r_type);
3437 outrel.r_addend = relocation + rel->r_addend;
3438 }
3439 }
3440
3441 if (generate_dynamic_reloc)
3442 {
3443 sreloc = elf_section_data (input_section)->sreloc;
3444
3445 if (sreloc == NULL || sreloc->contents == NULL)
3446 {
3447 r = bfd_reloc_notsupported;
3448 goto check_relocation_error;
3449 }
3450
3451 if (relative_reloc_name
3452 && htab->params->report_relative_reloc)
3453 _bfd_x86_elf_link_report_relative_reloc
3454 (info, input_section, h, sym,
3455 relative_reloc_name, &outrel);
3456
3457 elf_append_rela (output_bfd, sreloc, &outrel);
3458 }
3459
3460 /* If this reloc is against an external symbol, we do
3461 not want to fiddle with the addend. Otherwise, we
3462 need to include the symbol value so that it becomes
3463 an addend for the dynamic reloc. */
3464 if (! relocate)
3465 continue;
3466 }
3467
3468 break;
3469
3470 case R_X86_64_TLSGD:
3471 case R_X86_64_GOTPC32_TLSDESC:
3472 case R_X86_64_TLSDESC_CALL:
3473 case R_X86_64_GOTTPOFF:
3474 tls_type = GOT_UNKNOWN;
3475 if (h == NULL && local_got_offsets)
3476 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3477 else if (h != NULL)
3478 tls_type = elf_x86_hash_entry (h)->tls_type;
3479
3480 r_type_tls = r_type;
3481 if (! elf_x86_64_tls_transition (info, input_bfd,
3482 input_section, contents,
3483 symtab_hdr, sym_hashes,
3484 &r_type_tls, tls_type, rel,
3485 relend, h, r_symndx, true))
3486 return false;
3487
3488 if (r_type_tls == R_X86_64_TPOFF32)
3489 {
3490 bfd_vma roff = rel->r_offset;
3491
3492 BFD_ASSERT (! unresolved_reloc);
3493
3494 if (r_type == R_X86_64_TLSGD)
3495 {
3496 /* GD->LE transition. For 64bit, change
3497 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3498 .word 0x6666; rex64; call __tls_get_addr@PLT
3499 or
3500 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3501 .byte 0x66; rex64
3502 call *__tls_get_addr@GOTPCREL(%rip)
3503 which may be converted to
3504 addr32 call __tls_get_addr
3505 into:
3506 movq %fs:0, %rax
3507 leaq foo@tpoff(%rax), %rax
3508 For 32bit, change
3509 leaq foo@tlsgd(%rip), %rdi
3510 .word 0x6666; rex64; call __tls_get_addr@PLT
3511 or
3512 leaq foo@tlsgd(%rip), %rdi
3513 .byte 0x66; rex64
3514 call *__tls_get_addr@GOTPCREL(%rip)
3515 which may be converted to
3516 addr32 call __tls_get_addr
3517 into:
3518 movl %fs:0, %eax
3519 leaq foo@tpoff(%rax), %rax
3520 For largepic, change:
3521 leaq foo@tlsgd(%rip), %rdi
3522 movabsq $__tls_get_addr@pltoff, %rax
3523 addq %r15, %rax
3524 call *%rax
3525 into:
3526 movq %fs:0, %rax
3527 leaq foo@tpoff(%rax), %rax
3528 nopw 0x0(%rax,%rax,1) */
3529 int largepic = 0;
3530 if (ABI_64_P (output_bfd))
3531 {
3532 if (contents[roff + 5] == 0xb8)
3533 {
3534 if (roff < 3
3535 || (roff - 3 + 22) > input_section->size)
3536 {
3537 corrupt_input:
3538 info->callbacks->einfo
3539 (_("%F%P: corrupt input: %pB\n"),
3540 input_bfd);
3541 return false;
3542 }
3543 memcpy (contents + roff - 3,
3544 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3545 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3546 largepic = 1;
3547 }
3548 else
3549 {
3550 if (roff < 4
3551 || (roff - 4 + 16) > input_section->size)
3552 goto corrupt_input;
3553 memcpy (contents + roff - 4,
3554 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3555 16);
3556 }
3557 }
3558 else
3559 {
3560 if (roff < 3
3561 || (roff - 3 + 15) > input_section->size)
3562 goto corrupt_input;
3563 memcpy (contents + roff - 3,
3564 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3565 15);
3566 }
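		      /* NB (sketch, not in the original sources): the
			 15/16/22-byte strings copied above are just the
			 encodings of the "movq/movl %fs:0, %rax/%eax" plus
			 "leaq x@tpoff(%rax), %rax" sequences from the
			 comment; the 4 @tpoff displacement bytes are stored
			 just below, and the largepic variant ends with a
			 6-byte nopw to fill the leftover bytes.  */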
3567 bfd_put_32 (output_bfd,
3568 elf_x86_64_tpoff (info, relocation),
3569 contents + roff + 8 + largepic);
3570 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3571 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3572 rel++;
3573 wrel++;
3574 continue;
3575 }
3576 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3577 {
3578 /* GDesc -> LE transition.
3579 It's originally something like:
3580 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3581 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3582
3583 Change it to:
3584 movq $x@tpoff, %rax <--- LP64 mode.
3585 rex movl $x@tpoff, %eax <--- X32 mode.
3586 */
3587
3588 unsigned int val, type;
3589
3590 if (roff < 3)
3591 goto corrupt_input;
3592 type = bfd_get_8 (input_bfd, contents + roff - 3);
3593 val = bfd_get_8 (input_bfd, contents + roff - 1);
3594 bfd_put_8 (output_bfd,
3595 (type & 0x48) | ((type >> 2) & 1),
3596 contents + roff - 3);
3597 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3598 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3599 contents + roff - 1);
3600 bfd_put_32 (output_bfd,
3601 elf_x86_64_tpoff (info, relocation),
3602 contents + roff);
3603 continue;
3604 }
3605 else if (r_type == R_X86_64_TLSDESC_CALL)
3606 {
3607 /* GDesc -> LE transition.
3608 It's originally:
3609 call *(%rax) <--- LP64 mode.
3610 call *(%eax) <--- X32 mode.
3611 Turn it into:
3612 xchg %ax,%ax <-- LP64 mode.
3613 nopl (%rax) <-- X32 mode.
3614 */
3615 unsigned int prefix = 0;
3616 if (!ABI_64_P (input_bfd))
3617 {
3618 /* Check for call *x@tlsdesc(%eax). */
3619 if (contents[roff] == 0x67)
3620 prefix = 1;
3621 }
3622 if (prefix)
3623 {
3624 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3625 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3626 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3627 }
3628 else
3629 {
3630 bfd_put_8 (output_bfd, 0x66, contents + roff);
3631 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3632 }
3633 continue;
3634 }
3635 else if (r_type == R_X86_64_GOTTPOFF)
3636 {
3637 /* IE->LE transition:
3638 For 64bit, originally it can be one of:
3639 movq foo@gottpoff(%rip), %reg
3640 addq foo@gottpoff(%rip), %reg
3641 We change it into:
3642 movq $foo, %reg
3643 leaq foo(%reg), %reg
3644 addq $foo, %reg.
3645 For 32bit, originally it can be one of:
3646 movq foo@gottpoff(%rip), %reg
3647 addl foo@gottpoff(%rip), %reg
3648 We change it into:
3649 movq $foo, %reg
3650 leal foo(%reg), %reg
3651 addl $foo, %reg. */
3652
3653 unsigned int val, type, reg;
3654
3655 if (roff >= 3)
3656 val = bfd_get_8 (input_bfd, contents + roff - 3);
3657 else
3658 {
3659 if (roff < 2)
3660 goto corrupt_input;
3661 val = 0;
3662 }
3663 type = bfd_get_8 (input_bfd, contents + roff - 2);
3664 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3665 reg >>= 3;
3666 if (type == 0x8b)
3667 {
3668 /* movq */
3669 if (val == 0x4c)
3670 {
3671 if (roff < 3)
3672 goto corrupt_input;
3673 bfd_put_8 (output_bfd, 0x49,
3674 contents + roff - 3);
3675 }
3676 else if (!ABI_64_P (output_bfd) && val == 0x44)
3677 {
3678 if (roff < 3)
3679 goto corrupt_input;
3680 bfd_put_8 (output_bfd, 0x41,
3681 contents + roff - 3);
3682 }
3683 bfd_put_8 (output_bfd, 0xc7,
3684 contents + roff - 2);
3685 bfd_put_8 (output_bfd, 0xc0 | reg,
3686 contents + roff - 1);
3687 }
3688 else if (reg == 4)
3689 {
3690 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3691 is special */
3692 if (val == 0x4c)
3693 {
3694 if (roff < 3)
3695 goto corrupt_input;
3696 bfd_put_8 (output_bfd, 0x49,
3697 contents + roff - 3);
3698 }
3699 else if (!ABI_64_P (output_bfd) && val == 0x44)
3700 {
3701 if (roff < 3)
3702 goto corrupt_input;
3703 bfd_put_8 (output_bfd, 0x41,
3704 contents + roff - 3);
3705 }
3706 bfd_put_8 (output_bfd, 0x81,
3707 contents + roff - 2);
3708 bfd_put_8 (output_bfd, 0xc0 | reg,
3709 contents + roff - 1);
3710 }
3711 else
3712 {
3713 /* addq/addl -> leaq/leal */
3714 if (val == 0x4c)
3715 {
3716 if (roff < 3)
3717 goto corrupt_input;
3718 bfd_put_8 (output_bfd, 0x4d,
3719 contents + roff - 3);
3720 }
3721 else if (!ABI_64_P (output_bfd) && val == 0x44)
3722 {
3723 if (roff < 3)
3724 goto corrupt_input;
3725 bfd_put_8 (output_bfd, 0x45,
3726 contents + roff - 3);
3727 }
3728 bfd_put_8 (output_bfd, 0x8d,
3729 contents + roff - 2);
3730 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3731 contents + roff - 1);
3732 }
3733 bfd_put_32 (output_bfd,
3734 elf_x86_64_tpoff (info, relocation),
3735 contents + roff);
3736 continue;
3737 }
3738 else
3739 BFD_ASSERT (false);
3740 }
3741
3742 if (htab->elf.sgot == NULL)
3743 abort ();
3744
3745 if (h != NULL)
3746 {
3747 off = h->got.offset;
3748 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3749 }
3750 else
3751 {
3752 if (local_got_offsets == NULL)
3753 abort ();
3754
3755 off = local_got_offsets[r_symndx];
3756 offplt = local_tlsdesc_gotents[r_symndx];
3757 }
3758
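/* Editorial note (added comment): the low bit of a saved GOT offset is
   used as a flag recording that the GOT entry and its dynamic relocation
   have already been created, so the work below is done only once per
   entry.  */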
3759 if ((off & 1) != 0)
3760 off &= ~1;
3761 else
3762 {
3763 Elf_Internal_Rela outrel;
3764 int dr_type, indx;
3765 asection *sreloc;
3766
3767 if (htab->elf.srelgot == NULL)
3768 abort ();
3769
3770 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3771
3772 if (GOT_TLS_GDESC_P (tls_type))
3773 {
3774 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3775 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3776 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3777 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3778 + htab->elf.sgotplt->output_offset
3779 + offplt
3780 + htab->sgotplt_jump_table_size);
3781 sreloc = htab->elf.srelplt;
3782 if (indx == 0)
3783 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3784 else
3785 outrel.r_addend = 0;
3786 elf_append_rela (output_bfd, sreloc, &outrel);
3787 }
3788
3789 sreloc = htab->elf.srelgot;
3790
3791 outrel.r_offset = (htab->elf.sgot->output_section->vma
3792 + htab->elf.sgot->output_offset + off);
3793
3794 if (GOT_TLS_GD_P (tls_type))
3795 dr_type = R_X86_64_DTPMOD64;
3796 else if (GOT_TLS_GDESC_P (tls_type))
3797 goto dr_done;
3798 else
3799 dr_type = R_X86_64_TPOFF64;
3800
3801 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3802 outrel.r_addend = 0;
3803 if ((dr_type == R_X86_64_TPOFF64
3804 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3805 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3806 outrel.r_info = htab->r_info (indx, dr_type);
3807
3808 elf_append_rela (output_bfd, sreloc, &outrel);
3809
3810 if (GOT_TLS_GD_P (tls_type))
3811 {
3812 if (indx == 0)
3813 {
3814 BFD_ASSERT (! unresolved_reloc);
3815 bfd_put_64 (output_bfd,
3816 relocation - _bfd_x86_elf_dtpoff_base (info),
3817 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3818 }
3819 else
3820 {
3821 bfd_put_64 (output_bfd, 0,
3822 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3823 outrel.r_info = htab->r_info (indx,
3824 R_X86_64_DTPOFF64);
3825 outrel.r_offset += GOT_ENTRY_SIZE;
3826 elf_append_rela (output_bfd, sreloc,
3827 &outrel);
3828 }
3829 }
3830
3831 dr_done:
3832 if (h != NULL)
3833 h->got.offset |= 1;
3834 else
3835 local_got_offsets[r_symndx] |= 1;
3836 }
3837
3838 if (off >= (bfd_vma) -2
3839 && ! GOT_TLS_GDESC_P (tls_type))
3840 abort ();
3841 if (r_type_tls == r_type)
3842 {
3843 if (r_type == R_X86_64_GOTPC32_TLSDESC
3844 || r_type == R_X86_64_TLSDESC_CALL)
3845 relocation = htab->elf.sgotplt->output_section->vma
3846 + htab->elf.sgotplt->output_offset
3847 + offplt + htab->sgotplt_jump_table_size;
3848 else
3849 relocation = htab->elf.sgot->output_section->vma
3850 + htab->elf.sgot->output_offset + off;
3851 unresolved_reloc = false;
3852 }
3853 else
3854 {
3855 bfd_vma roff = rel->r_offset;
3856
3857 if (r_type == R_X86_64_TLSGD)
3858 {
3859 /* GD->IE transition. For 64bit, change
3860 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3861 .word 0x6666; rex64; call __tls_get_addr@PLT
3862 or
3863 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3864 .byte 0x66; rex64
3865 call *__tls_get_addr@GOTPCREL(%rip)
3866 which may be converted to
3867 addr32 call __tls_get_addr
3868 into:
3869 movq %fs:0, %rax
3870 addq foo@gottpoff(%rip), %rax
3871 For 32bit, change
3872 leaq foo@tlsgd(%rip), %rdi
3873 .word 0x6666; rex64; call __tls_get_addr@PLT
3874 or
3875 leaq foo@tlsgd(%rip), %rdi
3876 .byte 0x66; rex64;
3877 call *__tls_get_addr@GOTPCREL(%rip)
3878 which may be converted to
3879 addr32 call __tls_get_addr
3880 into:
3881 movl %fs:0, %eax
3882 addq foo@gottpoff(%rip), %rax
3883 For largepic, change:
3884 leaq foo@tlsgd(%rip), %rdi
3885 movabsq $__tls_get_addr@pltoff, %rax
3886 addq %r15, %rax
3887 call *%rax
3888 into:
3889 movq %fs:0, %rax
3890 addq foo@gottpoff(%rip), %rax
3891 nopw 0x0(%rax,%rax,1) */
3892 int largepic = 0;
3893 if (ABI_64_P (output_bfd))
3894 {
3895 if (contents[roff + 5] == 0xb8)
3896 {
3897 if (roff < 3
3898 || (roff - 3 + 22) > input_section->size)
3899 goto corrupt_input;
3900 memcpy (contents + roff - 3,
3901 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3902 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3903 largepic = 1;
3904 }
3905 else
3906 {
3907 if (roff < 4
3908 || (roff - 4 + 16) > input_section->size)
3909 goto corrupt_input;
3910 memcpy (contents + roff - 4,
3911 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3912 16);
3913 }
3914 }
3915 else
3916 {
3917 if (roff < 3
3918 || (roff - 3 + 15) > input_section->size)
3919 goto corrupt_input;
3920 memcpy (contents + roff - 3,
3921 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3922 15);
3923 }
3924
3925 relocation = (htab->elf.sgot->output_section->vma
3926 + htab->elf.sgot->output_offset + off
3927 - roff
3928 - largepic
3929 - input_section->output_section->vma
3930 - input_section->output_offset
3931 - 12);
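/* Editorial note (added comment): the 32-bit displacement of the new
   "addq foo@gottpoff(%rip), %rax" is stored at roff + 8 + largepic and
   is relative to the end of that instruction, four bytes further on,
   which is where the constant 12 (8 + 4) in the expression above comes
   from.  */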
3932 bfd_put_32 (output_bfd, relocation,
3933 contents + roff + 8 + largepic);
3934 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3935 rel++;
3936 wrel++;
3937 continue;
3938 }
3939 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3940 {
3941 /* GDesc -> IE transition.
3942 It's originally something like:
3943 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3944 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3945
3946 Change it to:
3947 # before xchg %ax,%ax in LP64 mode.
3948 movq x@gottpoff(%rip), %rax
3949 # before nopl (%rax) in X32 mode.
3950 rex movl x@gottpoff(%rip), %eax
3951 */
3952
3953 /* Now modify the instruction as appropriate. To
3954 turn the lea into a mov in the form we use it, it
3955 suffices to change the opcode byte from 0x8d to
3956 0x8b. */
3957 if (roff < 2)
3958 goto corrupt_input;
3959 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3960
3961 bfd_put_32 (output_bfd,
3962 htab->elf.sgot->output_section->vma
3963 + htab->elf.sgot->output_offset + off
3964 - rel->r_offset
3965 - input_section->output_section->vma
3966 - input_section->output_offset
3967 - 4,
3968 contents + roff);
3969 continue;
3970 }
3971 else if (r_type == R_X86_64_TLSDESC_CALL)
3972 {
3973 /* GDesc -> IE transition.
3974 It's originally:
3975 call *(%rax) <--- LP64 mode.
3976 call *(%eax) <--- X32 mode.
3977
3978 Change it to:
3979 xchg %ax, %ax <-- LP64 mode.
3980 nopl (%rax) <-- X32 mode.
3981 */
3982
3983 unsigned int prefix = 0;
3984 if (!ABI_64_P (input_bfd))
3985 {
3986 /* Check for call *x@tlsdesc(%eax). */
3987 if (contents[roff] == 0x67)
3988 prefix = 1;
3989 }
3990 if (prefix)
3991 {
3992 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3993 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3994 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3995 }
3996 else
3997 {
3998 bfd_put_8 (output_bfd, 0x66, contents + roff);
3999 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4000 }
4001 continue;
4002 }
4003 else
4004 BFD_ASSERT (false);
4005 }
4006 break;
4007
4008 case R_X86_64_TLSLD:
4009 if (! elf_x86_64_tls_transition (info, input_bfd,
4010 input_section, contents,
4011 symtab_hdr, sym_hashes,
4012 &r_type, GOT_UNKNOWN, rel,
4013 relend, h, r_symndx, true))
4014 return false;
4015
4016 if (r_type != R_X86_64_TLSLD)
4017 {
4018 /* LD->LE transition:
4019 leaq foo@tlsld(%rip), %rdi
4020 call __tls_get_addr@PLT
4021 For 64bit, we change it into:
4022 .word 0x6666; .byte 0x66; movq %fs:0, %rax
4023 For 32bit, we change it into:
4024 nopl 0x0(%rax); movl %fs:0, %eax
4025 Or
4026 leaq foo@tlsld(%rip), %rdi;
4027 call *__tls_get_addr@GOTPCREL(%rip)
4028 which may be converted to
4029 addr32 call __tls_get_addr
4030 For 64bit, we change it into:
4031 .word 0x6666; .word 0x6666; movq %fs:0, %rax
4032 For 32bit, we change it into:
4033 nopw 0x0(%rax); movl %fs:0, %eax
4034 For largepic, change:
4035 leaq foo@tlsld(%rip), %rdi
4036 movabsq $__tls_get_addr@pltoff, %rax
4037 addq %rbx, %rax
4038 call *%rax
4039 into
4040 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
4041 movq %fs:0, %rax */
4042
4043 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4044 if (ABI_64_P (output_bfd))
4045 {
4046 if ((rel->r_offset + 5) >= input_section->size)
4047 goto corrupt_input;
4048 if (contents[rel->r_offset + 5] == 0xb8)
4049 {
4050 if (rel->r_offset < 3
4051 || (rel->r_offset - 3 + 22) > input_section->size)
4052 goto corrupt_input;
4053 memcpy (contents + rel->r_offset - 3,
4054 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4055 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4056 }
4057 else if (contents[rel->r_offset + 4] == 0xff
4058 || contents[rel->r_offset + 4] == 0x67)
4059 {
4060 if (rel->r_offset < 3
4061 || (rel->r_offset - 3 + 13) > input_section->size)
4062 goto corrupt_input;
4063 memcpy (contents + rel->r_offset - 3,
4064 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
4065 13);
4066
4067 }
4068 else
4069 {
4070 if (rel->r_offset < 3
4071 || (rel->r_offset - 3 + 12) > input_section->size)
4072 goto corrupt_input;
4073 memcpy (contents + rel->r_offset - 3,
4074 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4075 }
4076 }
4077 else
4078 {
4079 if ((rel->r_offset + 4) >= input_section->size)
4080 goto corrupt_input;
4081 if (contents[rel->r_offset + 4] == 0xff)
4082 {
4083 if (rel->r_offset < 3
4084 || (rel->r_offset - 3 + 13) > input_section->size)
4085 goto corrupt_input;
4086 memcpy (contents + rel->r_offset - 3,
4087 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
4088 13);
4089 }
4090 else
4091 {
4092 if (rel->r_offset < 3
4093 || (rel->r_offset - 3 + 12) > input_section->size)
4094 goto corrupt_input;
4095 memcpy (contents + rel->r_offset - 3,
4096 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4097 }
4098 }
4099 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4100 and R_X86_64_PLTOFF64. */
4101 rel++;
4102 wrel++;
4103 continue;
4104 }
4105
4106 if (htab->elf.sgot == NULL)
4107 abort ();
4108
4109 off = htab->tls_ld_or_ldm_got.offset;
4110 if (off & 1)
4111 off &= ~1;
4112 else
4113 {
4114 Elf_Internal_Rela outrel;
4115
4116 if (htab->elf.srelgot == NULL)
4117 abort ();
4118
4119 outrel.r_offset = (htab->elf.sgot->output_section->vma
4120 + htab->elf.sgot->output_offset + off);
4121
4122 bfd_put_64 (output_bfd, 0,
4123 htab->elf.sgot->contents + off);
4124 bfd_put_64 (output_bfd, 0,
4125 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4126 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4127 outrel.r_addend = 0;
4128 elf_append_rela (output_bfd, htab->elf.srelgot,
4129 &outrel);
4130 htab->tls_ld_or_ldm_got.offset |= 1;
4131 }
4132 relocation = htab->elf.sgot->output_section->vma
4133 + htab->elf.sgot->output_offset + off;
4134 unresolved_reloc = false;
4135 break;
4136
4137 case R_X86_64_DTPOFF32:
4138 if (!bfd_link_executable (info)
4139 || (input_section->flags & SEC_CODE) == 0)
4140 relocation -= _bfd_x86_elf_dtpoff_base (info);
4141 else
4142 relocation = elf_x86_64_tpoff (info, relocation);
4143 break;
4144
4145 case R_X86_64_TPOFF32:
4146 case R_X86_64_TPOFF64:
4147 BFD_ASSERT (bfd_link_executable (info));
4148 relocation = elf_x86_64_tpoff (info, relocation);
4149 break;
4150
4151 case R_X86_64_DTPOFF64:
4152 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4153 relocation -= _bfd_x86_elf_dtpoff_base (info);
4154 break;
4155
4156 default:
4157 break;
4158 }
4159
4160 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4161 because such sections are not SEC_ALLOC and thus ld.so will
4162 not process them. */
4163 if (unresolved_reloc
4164 && !((input_section->flags & SEC_DEBUGGING) != 0
4165 && h->def_dynamic)
4166 && _bfd_elf_section_offset (output_bfd, info, input_section,
4167 rel->r_offset) != (bfd_vma) -1)
4168 {
4169 switch (r_type)
4170 {
4171 case R_X86_64_32S:
4172 sec = h->root.u.def.section;
4173 if ((info->nocopyreloc || eh->def_protected)
4174 && !(h->root.u.def.section->flags & SEC_CODE))
4175 return elf_x86_64_need_pic (info, input_bfd, input_section,
4176 h, NULL, NULL, howto);
4177 /* Fall through. */
4178
4179 default:
4180 _bfd_error_handler
4181 /* xgettext:c-format */
4182 (_("%pB(%pA+%#" PRIx64 "): "
4183 "unresolvable %s relocation against symbol `%s'"),
4184 input_bfd,
4185 input_section,
4186 (uint64_t) rel->r_offset,
4187 howto->name,
4188 h->root.root.string);
4189 return false;
4190 }
4191 }
4192
4193 do_relocation:
4194 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4195 contents, rel->r_offset,
4196 relocation, rel->r_addend);
4197
4198 check_relocation_error:
4199 if (r != bfd_reloc_ok)
4200 {
4201 const char *name;
4202
4203 if (h != NULL)
4204 name = h->root.root.string;
4205 else
4206 {
4207 name = bfd_elf_string_from_elf_section (input_bfd,
4208 symtab_hdr->sh_link,
4209 sym->st_name);
4210 if (name == NULL)
4211 return false;
4212 if (*name == '\0')
4213 name = bfd_section_name (sec);
4214 }
4215
4216 if (r == bfd_reloc_overflow)
4217 {
4218 if (converted_reloc)
4219 {
4220 info->callbacks->einfo
4221 ("%X%H:", input_bfd, input_section, rel->r_offset);
4222 info->callbacks->einfo
4223 (_(" failed to convert GOTPCREL relocation against "
4224 "'%s'; relink with --no-relax\n"),
4225 name);
4226 status = false;
4227 continue;
4228 }
4229 (*info->callbacks->reloc_overflow)
4230 (info, (h ? &h->root : NULL), name, howto->name,
4231 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4232 }
4233 else
4234 {
4235 _bfd_error_handler
4236 /* xgettext:c-format */
4237 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4238 input_bfd, input_section,
4239 (uint64_t) rel->r_offset, name, (int) r);
4240 return false;
4241 }
4242 }
4243
4244 if (wrel != rel)
4245 *wrel = *rel;
4246 }
4247
4248 if (wrel != rel)
4249 {
4250 Elf_Internal_Shdr *rel_hdr;
4251 size_t deleted = rel - wrel;
4252
4253 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4254 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4255 if (rel_hdr->sh_size == 0)
4256 {
4257 /* It is too late to remove an empty reloc section. Leave
4258 one NONE reloc.
4259 ??? What is wrong with an empty section??? */
4260 rel_hdr->sh_size = rel_hdr->sh_entsize;
4261 deleted -= 1;
4262 }
4263 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4264 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4265 input_section->reloc_count -= deleted;
4266 }
4267
4268 return status;
4269 }
4270
4271 /* Finish up dynamic symbol handling. We set the contents of various
4272 dynamic sections here. */
4273
4274 static bool
4275 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4276 struct bfd_link_info *info,
4277 struct elf_link_hash_entry *h,
4278 Elf_Internal_Sym *sym)
4279 {
4280 struct elf_x86_link_hash_table *htab;
4281 bool use_plt_second;
4282 struct elf_x86_link_hash_entry *eh;
4283 bool local_undefweak;
4284
4285 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4286 if (htab == NULL)
4287 return false;
4288
4289 /* Use the second PLT section only if there is .plt section. */
4290 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4291
4292 eh = (struct elf_x86_link_hash_entry *) h;
4293 if (eh->no_finish_dynamic_symbol)
4294 abort ();
4295
4296 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4297 resolved undefined weak symbols in an executable so that their
4298 references have value 0 at run-time. */
4299 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4300
4301 if (h->plt.offset != (bfd_vma) -1)
4302 {
4303 bfd_vma plt_index;
4304 bfd_vma got_offset, plt_offset;
4305 Elf_Internal_Rela rela;
4306 bfd_byte *loc;
4307 asection *plt, *gotplt, *relplt, *resolved_plt;
4308 const struct elf_backend_data *bed;
4309 bfd_vma plt_got_pcrel_offset;
4310
4311 /* When building a static executable, use .iplt, .igot.plt and
4312 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4313 if (htab->elf.splt != NULL)
4314 {
4315 plt = htab->elf.splt;
4316 gotplt = htab->elf.sgotplt;
4317 relplt = htab->elf.srelplt;
4318 }
4319 else
4320 {
4321 plt = htab->elf.iplt;
4322 gotplt = htab->elf.igotplt;
4323 relplt = htab->elf.irelplt;
4324 }
4325
4326 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4327
4328 /* Get the index in the procedure linkage table which
4329 corresponds to this symbol. This is the index of this symbol
4330 in all the symbols for which we are making plt entries. The
4331 first entry in the procedure linkage table is reserved.
4332
4333 Get the offset into the .got table of the entry that
4334 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4335 bytes. The first three are reserved for the dynamic linker.
4336
4337 For static executables, we don't reserve anything. */
4338
4339 if (plt == htab->elf.splt)
4340 {
4341 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4342 - htab->plt.has_plt0);
4343 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4344 }
4345 else
4346 {
4347 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4348 got_offset = got_offset * GOT_ENTRY_SIZE;
4349 }
4350
4351 /* Fill in the entry in the procedure linkage table. */
4352 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4353 htab->plt.plt_entry_size);
4354 if (use_plt_second)
4355 {
4356 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4357 htab->non_lazy_plt->plt_entry,
4358 htab->non_lazy_plt->plt_entry_size);
4359
4360 resolved_plt = htab->plt_second;
4361 plt_offset = eh->plt_second.offset;
4362 }
4363 else
4364 {
4365 resolved_plt = plt;
4366 plt_offset = h->plt.offset;
4367 }
4368
4369 /* Insert the relocation positions of the plt section. */
4370
4371 /* Put in the offset for the PC-relative instruction referring to the
4372 GOT entry, subtracting the size of that instruction. */
4373 plt_got_pcrel_offset = (gotplt->output_section->vma
4374 + gotplt->output_offset
4375 + got_offset
4376 - resolved_plt->output_section->vma
4377 - resolved_plt->output_offset
4378 - plt_offset
4379 - htab->plt.plt_got_insn_size);
4380
4381 /* Check PC-relative offset overflow in PLT entry. */
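/* Editorial note (added comment): adding 0x80000000 maps the signed
   32-bit range [-0x80000000, 0x7fffffff] onto [0, 0xffffffff], so the
   test below rejects any displacement that does not fit in the
   sign-extended 32-bit field of the PLT entry.  */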
4382 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4383 /* xgettext:c-format */
4384 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4385 output_bfd, h->root.root.string);
4386
4387 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4388 (resolved_plt->contents + plt_offset
4389 + htab->plt.plt_got_offset));
4390
4391 /* Fill in the entry in the global offset table, initially this
4392 points to the second part of the PLT entry. Leave the entry
4393 as zero for undefined weak symbol in PIE. No PLT relocation
4394 against undefined weak symbol in PIE. */
4395 if (!local_undefweak)
4396 {
4397 if (htab->plt.has_plt0)
4398 bfd_put_64 (output_bfd, (plt->output_section->vma
4399 + plt->output_offset
4400 + h->plt.offset
4401 + htab->lazy_plt->plt_lazy_offset),
4402 gotplt->contents + got_offset);
4403
4404 /* Fill in the entry in the .rela.plt section. */
4405 rela.r_offset = (gotplt->output_section->vma
4406 + gotplt->output_offset
4407 + got_offset);
4408 if (PLT_LOCAL_IFUNC_P (info, h))
4409 {
4410 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4411 h->root.root.string,
4412 h->root.u.def.section->owner);
4413
4414 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4415 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4416 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4417 rela.r_addend = (h->root.u.def.value
4418 + h->root.u.def.section->output_section->vma
4419 + h->root.u.def.section->output_offset);
4420
4421 if (htab->params->report_relative_reloc)
4422 _bfd_x86_elf_link_report_relative_reloc
4423 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4424
4425 /* R_X86_64_IRELATIVE comes last. */
4426 plt_index = htab->next_irelative_index--;
4427 }
4428 else
4429 {
4430 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4431 rela.r_addend = 0;
4432 plt_index = htab->next_jump_slot_index++;
4433 }
4434
4435 /* Don't fill in the second and third slots of the PLT entry
4436 for static executables or when there is no PLT0. */
4437 if (plt == htab->elf.splt && htab->plt.has_plt0)
4438 {
4439 bfd_vma plt0_offset
4440 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4441
4442 /* Put relocation index. */
4443 bfd_put_32 (output_bfd, plt_index,
4444 (plt->contents + h->plt.offset
4445 + htab->lazy_plt->plt_reloc_offset));
4446
4447 /* Put offset for jmp .PLT0 and check for overflow. We don't
4448 check relocation index for overflow since branch displacement
4449 will overflow first. */
4450 if (plt0_offset > 0x80000000)
4451 /* xgettext:c-format */
4452 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4453 output_bfd, h->root.root.string);
4454 bfd_put_32 (output_bfd, - plt0_offset,
4455 (plt->contents + h->plt.offset
4456 + htab->lazy_plt->plt_plt_offset));
4457 }
4458
4459 bed = get_elf_backend_data (output_bfd);
4460 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4461 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4462 }
4463 }
4464 else if (eh->plt_got.offset != (bfd_vma) -1)
4465 {
4466 bfd_vma got_offset, plt_offset;
4467 asection *plt, *got;
4468 bool got_after_plt;
4469 int32_t got_pcrel_offset;
4470
4471 /* Set the entry in the GOT procedure linkage table. */
4472 plt = htab->plt_got;
4473 got = htab->elf.sgot;
4474 got_offset = h->got.offset;
4475
4476 if (got_offset == (bfd_vma) -1
4477 || (h->type == STT_GNU_IFUNC && h->def_regular)
4478 || plt == NULL
4479 || got == NULL)
4480 abort ();
4481
4482 /* Use the non-lazy PLT entry template for the GOT PLT since they
4483 are identical. */
4484 /* Fill in the entry in the GOT procedure linkage table. */
4485 plt_offset = eh->plt_got.offset;
4486 memcpy (plt->contents + plt_offset,
4487 htab->non_lazy_plt->plt_entry,
4488 htab->non_lazy_plt->plt_entry_size);
4489
4490 /* Put in the offset for the PC-relative instruction referring to the
4491 GOT entry, subtracting the size of that instruction. */
4492 got_pcrel_offset = (got->output_section->vma
4493 + got->output_offset
4494 + got_offset
4495 - plt->output_section->vma
4496 - plt->output_offset
4497 - plt_offset
4498 - htab->non_lazy_plt->plt_got_insn_size);
4499
4500 /* Check PC-relative offset overflow in GOT PLT entry. */
4501 got_after_plt = got->output_section->vma > plt->output_section->vma;
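/* Editorial note (added comment): got_pcrel_offset is a 32-bit signed
   value, so a displacement that does not fit shows up with the wrong
   sign for the relative placement of the GOT and PLT sections, which is
   what the two-sided test below is checking for.  */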
4502 if ((got_after_plt && got_pcrel_offset < 0)
4503 || (!got_after_plt && got_pcrel_offset > 0))
4504 /* xgettext:c-format */
4505 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4506 output_bfd, h->root.root.string);
4507
4508 bfd_put_32 (output_bfd, got_pcrel_offset,
4509 (plt->contents + plt_offset
4510 + htab->non_lazy_plt->plt_got_offset));
4511 }
4512
4513 if (!local_undefweak
4514 && !h->def_regular
4515 && (h->plt.offset != (bfd_vma) -1
4516 || eh->plt_got.offset != (bfd_vma) -1))
4517 {
4518 /* Mark the symbol as undefined, rather than as defined in
4519 the .plt section. Leave the value if there were any
4520 relocations where pointer equality matters (this is a clue
4521 for the dynamic linker, to make function pointer
4522 comparisons work between an application and shared
4523 library), otherwise set it to zero. If a function is only
4524 called from a binary, there is no need to slow down
4525 shared libraries because of that. */
4526 sym->st_shndx = SHN_UNDEF;
4527 if (!h->pointer_equality_needed)
4528 sym->st_value = 0;
4529 }
4530
4531 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4532
4533 /* Don't generate dynamic GOT relocation against undefined weak
4534 symbol in executable. */
4535 if (h->got.offset != (bfd_vma) -1
4536 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4537 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4538 && !local_undefweak)
4539 {
4540 Elf_Internal_Rela rela;
4541 asection *relgot = htab->elf.srelgot;
4542 const char *relative_reloc_name = NULL;
4543 bool generate_dynamic_reloc = true;
4544
4545 /* This symbol has an entry in the global offset table. Set it
4546 up. */
4547 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4548 abort ();
4549
4550 rela.r_offset = (htab->elf.sgot->output_section->vma
4551 + htab->elf.sgot->output_offset
4552 + (h->got.offset &~ (bfd_vma) 1));
4553
4554 /* If this is a static link, or it is a -Bsymbolic link and the
4555 symbol is defined locally or was forced to be local because
4556 of a version file, we just want to emit a RELATIVE reloc.
4557 The entry in the global offset table will already have been
4558 initialized in the relocate_section function. */
4559 if (h->def_regular
4560 && h->type == STT_GNU_IFUNC)
4561 {
4562 if (h->plt.offset == (bfd_vma) -1)
4563 {
4564 /* STT_GNU_IFUNC is referenced without PLT. */
4565 if (htab->elf.splt == NULL)
4566 {
4567 /* Use the .rel[a].iplt section to store .got relocations
4568 in a static executable. */
4569 relgot = htab->elf.irelplt;
4570 }
4571 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4572 {
4573 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4574 h->root.root.string,
4575 h->root.u.def.section->owner);
4576
4577 rela.r_info = htab->r_info (0,
4578 R_X86_64_IRELATIVE);
4579 rela.r_addend = (h->root.u.def.value
4580 + h->root.u.def.section->output_section->vma
4581 + h->root.u.def.section->output_offset);
4582 relative_reloc_name = "R_X86_64_IRELATIVE";
4583 }
4584 else
4585 goto do_glob_dat;
4586 }
4587 else if (bfd_link_pic (info))
4588 {
4589 /* Generate R_X86_64_GLOB_DAT. */
4590 goto do_glob_dat;
4591 }
4592 else
4593 {
4594 asection *plt;
4595 bfd_vma plt_offset;
4596
4597 if (!h->pointer_equality_needed)
4598 abort ();
4599
4600 /* For a non-shared object, we can't use .got.plt, which
4601 contains the real function address, if we need pointer
4602 equality. We load the GOT entry with the PLT entry. */
4603 if (htab->plt_second != NULL)
4604 {
4605 plt = htab->plt_second;
4606 plt_offset = eh->plt_second.offset;
4607 }
4608 else
4609 {
4610 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4611 plt_offset = h->plt.offset;
4612 }
4613 bfd_put_64 (output_bfd, (plt->output_section->vma
4614 + plt->output_offset
4615 + plt_offset),
4616 htab->elf.sgot->contents + h->got.offset);
4617 return true;
4618 }
4619 }
4620 else if (bfd_link_pic (info)
4621 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4622 {
4623 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4624 return false;
4625 BFD_ASSERT((h->got.offset & 1) != 0);
4626 if (info->enable_dt_relr)
4627 generate_dynamic_reloc = false;
4628 else
4629 {
4630 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4631 rela.r_addend = (h->root.u.def.value
4632 + h->root.u.def.section->output_section->vma
4633 + h->root.u.def.section->output_offset);
4634 relative_reloc_name = "R_X86_64_RELATIVE";
4635 }
4636 }
4637 else
4638 {
4639 BFD_ASSERT((h->got.offset & 1) == 0);
4640 do_glob_dat:
4641 bfd_put_64 (output_bfd, (bfd_vma) 0,
4642 htab->elf.sgot->contents + h->got.offset);
4643 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4644 rela.r_addend = 0;
4645 }
4646
4647 if (generate_dynamic_reloc)
4648 {
4649 if (relative_reloc_name != NULL
4650 && htab->params->report_relative_reloc)
4651 _bfd_x86_elf_link_report_relative_reloc
4652 (info, relgot, h, sym, relative_reloc_name, &rela);
4653
4654 elf_append_rela (output_bfd, relgot, &rela);
4655 }
4656 }
4657
4658 if (h->needs_copy)
4659 {
4660 Elf_Internal_Rela rela;
4661 asection *s;
4662
4663 /* This symbol needs a copy reloc. Set it up. */
4664 VERIFY_COPY_RELOC (h, htab)
4665
4666 rela.r_offset = (h->root.u.def.value
4667 + h->root.u.def.section->output_section->vma
4668 + h->root.u.def.section->output_offset);
4669 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4670 rela.r_addend = 0;
4671 if (h->root.u.def.section == htab->elf.sdynrelro)
4672 s = htab->elf.sreldynrelro;
4673 else
4674 s = htab->elf.srelbss;
4675 elf_append_rela (output_bfd, s, &rela);
4676 }
4677
4678 return true;
4679 }
4680
4681 /* Finish up local dynamic symbol handling. We set the contents of
4682 various dynamic sections here. */
4683
4684 static int
4685 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4686 {
4687 struct elf_link_hash_entry *h
4688 = (struct elf_link_hash_entry *) *slot;
4689 struct bfd_link_info *info
4690 = (struct bfd_link_info *) inf;
4691
4692 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4693 info, h, NULL);
4694 }
4695
4696 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4697 here since an undefined weak symbol may not be dynamic and
4698 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4699
4700 static bool
4701 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4702 void *inf)
4703 {
4704 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4705 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4706
4707 if (h->root.type != bfd_link_hash_undefweak
4708 || h->dynindx != -1)
4709 return true;
4710
4711 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4712 info, h, NULL);
4713 }
4714
4715 /* Used to decide how to sort relocs in an optimal manner for the
4716 dynamic linker, before writing them out. */
4717
4718 static enum elf_reloc_type_class
4719 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4720 const asection *rel_sec ATTRIBUTE_UNUSED,
4721 const Elf_Internal_Rela *rela)
4722 {
4723 bfd *abfd = info->output_bfd;
4724 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4725 struct elf_x86_link_hash_table *htab
4726 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4727
4728 if (htab->elf.dynsym != NULL
4729 && htab->elf.dynsym->contents != NULL)
4730 {
4731 /* Check relocation against STT_GNU_IFUNC symbol if there are
4732 dynamic symbols. */
4733 unsigned long r_symndx = htab->r_sym (rela->r_info);
4734 if (r_symndx != STN_UNDEF)
4735 {
4736 Elf_Internal_Sym sym;
4737 if (!bed->s->swap_symbol_in (abfd,
4738 (htab->elf.dynsym->contents
4739 + r_symndx * bed->s->sizeof_sym),
4740 0, &sym))
4741 abort ();
4742
4743 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4744 return reloc_class_ifunc;
4745 }
4746 }
4747
4748 switch ((int) ELF32_R_TYPE (rela->r_info))
4749 {
4750 case R_X86_64_IRELATIVE:
4751 return reloc_class_ifunc;
4752 case R_X86_64_RELATIVE:
4753 case R_X86_64_RELATIVE64:
4754 return reloc_class_relative;
4755 case R_X86_64_JUMP_SLOT:
4756 return reloc_class_plt;
4757 case R_X86_64_COPY:
4758 return reloc_class_copy;
4759 default:
4760 return reloc_class_normal;
4761 }
4762 }
4763
4764 /* Finish up the dynamic sections. */
4765
4766 static bool
4767 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4768 struct bfd_link_info *info)
4769 {
4770 struct elf_x86_link_hash_table *htab;
4771
4772 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4773 if (htab == NULL)
4774 return false;
4775
4776 if (! htab->elf.dynamic_sections_created)
4777 return true;
4778
4779 if (htab->elf.splt && htab->elf.splt->size > 0)
4780 {
4781 if (bfd_is_abs_section (htab->elf.splt->output_section))
4782 {
4783 info->callbacks->einfo
4784 (_("%F%P: discarded output section: `%pA'\n"),
4785 htab->elf.splt);
4786 return false;
4787 }
4788
4789 elf_section_data (htab->elf.splt->output_section)
4790 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4791
4792 if (htab->plt.has_plt0)
4793 {
4794 /* Fill in the special first entry in the procedure linkage
4795 table. */
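/* Editorial note (added comment): GOT+8 and GOT+16 patched below are
   the second and third reserved GOT slots; at run time the dynamic
   linker stores its link-map pointer and the address of its lazy
   resolver there, and PLT0 pushes the former and jumps through the
   latter.  */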
4796 memcpy (htab->elf.splt->contents,
4797 htab->lazy_plt->plt0_entry,
4798 htab->lazy_plt->plt0_entry_size);
4799 /* Add offset for pushq GOT+8(%rip); since the instruction
4800 uses 6 bytes, subtract this value. */
4801 bfd_put_32 (output_bfd,
4802 (htab->elf.sgotplt->output_section->vma
4803 + htab->elf.sgotplt->output_offset
4804 + 8
4805 - htab->elf.splt->output_section->vma
4806 - htab->elf.splt->output_offset
4807 - 6),
4808 (htab->elf.splt->contents
4809 + htab->lazy_plt->plt0_got1_offset));
4810 /* Add offset for the PC-relative instruction accessing
4811 GOT+16, subtracting the offset to the end of that
4812 instruction. */
4813 bfd_put_32 (output_bfd,
4814 (htab->elf.sgotplt->output_section->vma
4815 + htab->elf.sgotplt->output_offset
4816 + 16
4817 - htab->elf.splt->output_section->vma
4818 - htab->elf.splt->output_offset
4819 - htab->lazy_plt->plt0_got2_insn_end),
4820 (htab->elf.splt->contents
4821 + htab->lazy_plt->plt0_got2_offset));
4822 }
4823
4824 if (htab->elf.tlsdesc_plt)
4825 {
4826 bfd_put_64 (output_bfd, (bfd_vma) 0,
4827 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4828
4829 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4830 htab->lazy_plt->plt_tlsdesc_entry,
4831 htab->lazy_plt->plt_tlsdesc_entry_size);
4832
4833 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4834 bytes and the instruction uses 6 bytes, subtract these
4835 values. */
4836 bfd_put_32 (output_bfd,
4837 (htab->elf.sgotplt->output_section->vma
4838 + htab->elf.sgotplt->output_offset
4839 + 8
4840 - htab->elf.splt->output_section->vma
4841 - htab->elf.splt->output_offset
4842 - htab->elf.tlsdesc_plt
4843 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4844 (htab->elf.splt->contents
4845 + htab->elf.tlsdesc_plt
4846 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4847 /* Add offset for indirect branch via GOT+TDG, where TDG
4848 stands for htab->tlsdesc_got, subtracting the offset
4849 to the end of that instruction. */
4850 bfd_put_32 (output_bfd,
4851 (htab->elf.sgot->output_section->vma
4852 + htab->elf.sgot->output_offset
4853 + htab->elf.tlsdesc_got
4854 - htab->elf.splt->output_section->vma
4855 - htab->elf.splt->output_offset
4856 - htab->elf.tlsdesc_plt
4857 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4858 (htab->elf.splt->contents
4859 + htab->elf.tlsdesc_plt
4860 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4861 }
4862 }
4863
4864 /* Fill PLT entries for undefined weak symbols in PIE. */
4865 if (bfd_link_pie (info))
4866 bfd_hash_traverse (&info->hash->table,
4867 elf_x86_64_pie_finish_undefweak_symbol,
4868 info);
4869
4870 return true;
4871 }
4872
4873 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4874 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4875 It has to be done before elf_link_sort_relocs is called so that
4876 dynamic relocations are properly sorted. */
4877
4878 static bool
4879 elf_x86_64_output_arch_local_syms
4880 (bfd *output_bfd ATTRIBUTE_UNUSED,
4881 struct bfd_link_info *info,
4882 void *flaginfo ATTRIBUTE_UNUSED,
4883 int (*func) (void *, const char *,
4884 Elf_Internal_Sym *,
4885 asection *,
4886 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4887 {
4888 struct elf_x86_link_hash_table *htab
4889 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4890 if (htab == NULL)
4891 return false;
4892
4893 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4894 htab_traverse (htab->loc_hash_table,
4895 elf_x86_64_finish_local_dynamic_symbol,
4896 info);
4897
4898 return true;
4899 }
4900
4901 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4902 dynamic relocations. */
4903
4904 static long
4905 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4906 long symcount ATTRIBUTE_UNUSED,
4907 asymbol **syms ATTRIBUTE_UNUSED,
4908 long dynsymcount,
4909 asymbol **dynsyms,
4910 asymbol **ret)
4911 {
4912 long count, i, n;
4913 int j;
4914 bfd_byte *plt_contents;
4915 long relsize;
4916 const struct elf_x86_lazy_plt_layout *lazy_plt;
4917 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4918 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4919 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4920 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4921 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4922 const struct elf_x86_lazy_plt_layout *x32_lazy_ibt_plt;
4923 const struct elf_x86_non_lazy_plt_layout *x32_non_lazy_ibt_plt;
4924 asection *plt;
4925 enum elf_x86_plt_type plt_type;
4926 struct elf_x86_plt plts[] =
4927 {
4928 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4929 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4930 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4931 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4932 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4933 };
4934
4935 *ret = NULL;
4936
4937 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4938 return 0;
4939
4940 if (dynsymcount <= 0)
4941 return 0;
4942
4943 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4944 if (relsize <= 0)
4945 return -1;
4946
4947 lazy_plt = &elf_x86_64_lazy_plt;
4948 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4949 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4950 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4951 if (ABI_64_P (abfd))
4952 {
4953 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4954 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4955 x32_lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4956 x32_non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4957 }
4958 else
4959 {
4960 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4961 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4962 x32_lazy_ibt_plt = NULL;
4963 x32_non_lazy_ibt_plt = NULL;
4964 }
4965
4966 count = 0;
4967 for (j = 0; plts[j].name != NULL; j++)
4968 {
4969 plt = bfd_get_section_by_name (abfd, plts[j].name);
4970 if (plt == NULL
4971 || plt->size == 0
4972 || (plt->flags & SEC_HAS_CONTENTS) == 0)
4973 continue;
4974
4975 /* Get the PLT section contents. */
4976 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4977 break;
4978
4979 /* Check what kind of PLT it is. */
4980 plt_type = plt_unknown;
4981 if (plts[j].type == plt_unknown
4982 && (plt->size >= (lazy_plt->plt_entry_size
4983 + lazy_plt->plt_entry_size)))
4984 {
4985 /* Match lazy PLT first. Need to check the first two
4986 instructions. */
4987 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4988 lazy_plt->plt0_got1_offset) == 0)
4989 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4990 2) == 0))
4991 {
4992 if (x32_lazy_ibt_plt != NULL
4993 && (memcmp (plt_contents
4994 + x32_lazy_ibt_plt->plt_entry_size,
4995 x32_lazy_ibt_plt->plt_entry,
4996 x32_lazy_ibt_plt->plt_got_offset) == 0))
4997 {
4998 /* The first entry in the x32 lazy IBT PLT is the same
4999 as the lazy PLT. */
5000 plt_type = plt_lazy | plt_second;
5001 lazy_plt = x32_lazy_ibt_plt;
5002 }
5003 else
5004 plt_type = plt_lazy;
5005 }
5006 else if (lazy_bnd_plt != NULL
5007 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
5008 lazy_bnd_plt->plt0_got1_offset) == 0)
5009 && (memcmp (plt_contents + 6,
5010 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
5011 {
5012 plt_type = plt_lazy | plt_second;
5013 /* The first entry in the lazy IBT PLT is the same as the
5014 lazy BND PLT. */
5015 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
5016 lazy_ibt_plt->plt_entry,
5017 lazy_ibt_plt->plt_got_offset) == 0))
5018 lazy_plt = lazy_ibt_plt;
5019 else
5020 lazy_plt = lazy_bnd_plt;
5021 }
5022 }
5023
5024 if (non_lazy_plt != NULL
5025 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
5026 && plt->size >= non_lazy_plt->plt_entry_size)
5027 {
5028 /* Match non-lazy PLT. */
5029 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
5030 non_lazy_plt->plt_got_offset) == 0)
5031 plt_type = plt_non_lazy;
5032 }
5033
5034 if (plt_type == plt_unknown || plt_type == plt_second)
5035 {
5036 if (non_lazy_bnd_plt != NULL
5037 && plt->size >= non_lazy_bnd_plt->plt_entry_size
5038 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
5039 non_lazy_bnd_plt->plt_got_offset) == 0))
5040 {
5041 /* Match BND PLT. */
5042 plt_type = plt_second;
5043 non_lazy_plt = non_lazy_bnd_plt;
5044 }
5045 else if (non_lazy_ibt_plt != NULL
5046 && plt->size >= non_lazy_ibt_plt->plt_entry_size
5047 && (memcmp (plt_contents,
5048 non_lazy_ibt_plt->plt_entry,
5049 non_lazy_ibt_plt->plt_got_offset) == 0))
5050 {
5051 /* Match IBT PLT. */
5052 plt_type = plt_second;
5053 non_lazy_plt = non_lazy_ibt_plt;
5054 }
5055 else if (x32_non_lazy_ibt_plt != NULL
5056 && plt->size >= x32_non_lazy_ibt_plt->plt_entry_size
5057 && (memcmp (plt_contents,
5058 x32_non_lazy_ibt_plt->plt_entry,
5059 x32_non_lazy_ibt_plt->plt_got_offset) == 0))
5060 {
5061 /* Match x32 IBT PLT. */
5062 plt_type = plt_second;
5063 non_lazy_plt = x32_non_lazy_ibt_plt;
5064 }
5065 }
5066
5067 if (plt_type == plt_unknown)
5068 {
5069 free (plt_contents);
5070 continue;
5071 }
5072
5073 plts[j].sec = plt;
5074 plts[j].type = plt_type;
5075
5076 if ((plt_type & plt_lazy))
5077 {
5078 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
5079 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
5080 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
5081 /* Skip PLT0 in lazy PLT. */
5082 i = 1;
5083 }
5084 else
5085 {
5086 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
5087 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
5088 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
5089 i = 0;
5090 }
5091
5092 /* Skip lazy PLT when the second PLT is used. */
5093 if (plt_type == (plt_lazy | plt_second))
5094 plts[j].count = 0;
5095 else
5096 {
5097 n = plt->size / plts[j].plt_entry_size;
5098 plts[j].count = n;
5099 count += n - i;
5100 }
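/* Editorial note (added comment): count now holds the total number of
   PLT entries that can become synthetic symbols, with PLT0 excluded
   from lazy PLTs (i == 1) and a lazy PLT skipped entirely when a second
   PLT section supplies the actual branch targets.  */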
5101
5102 plts[j].contents = plt_contents;
5103 }
5104
5105 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
5106 (bfd_vma) 0, plts, dynsyms,
5107 ret);
5108 }
5109
5110 /* Handle an x86-64 specific section when reading an object file. This
5111 is called when elfcode.h finds a section with an unknown type. */
5112
5113 static bool
5114 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5115 const char *name, int shindex)
5116 {
5117 if (hdr->sh_type != SHT_X86_64_UNWIND)
5118 return false;
5119
5120 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5121 return false;
5122
5123 return true;
5124 }
5125
5126 /* Hook called by the linker routine which adds symbols from an object
5127 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5128 of .bss. */
5129
5130 static bool
5131 elf_x86_64_add_symbol_hook (bfd *abfd,
5132 struct bfd_link_info *info ATTRIBUTE_UNUSED,
5133 Elf_Internal_Sym *sym,
5134 const char **namep ATTRIBUTE_UNUSED,
5135 flagword *flagsp ATTRIBUTE_UNUSED,
5136 asection **secp,
5137 bfd_vma *valp)
5138 {
5139 asection *lcomm;
5140
5141 switch (sym->st_shndx)
5142 {
5143 case SHN_X86_64_LCOMMON:
5144 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5145 if (lcomm == NULL)
5146 {
5147 lcomm = bfd_make_section_with_flags (abfd,
5148 "LARGE_COMMON",
5149 (SEC_ALLOC
5150 | SEC_IS_COMMON
5151 | SEC_LINKER_CREATED));
5152 if (lcomm == NULL)
5153 return false;
5154 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5155 }
5156 *secp = lcomm;
5157 *valp = sym->st_size;
5158 return true;
5159 }
5160
5161 return true;
5162 }
5163
5164
5165 /* Given a BFD section, try to locate the corresponding ELF section
5166 index. */
5167
5168 static bool
5169 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5170 asection *sec, int *index_return)
5171 {
5172 if (sec == &_bfd_elf_large_com_section)
5173 {
5174 *index_return = SHN_X86_64_LCOMMON;
5175 return true;
5176 }
5177 return false;
5178 }
5179
5180 /* Process a symbol. */
5181
5182 static void
5183 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5184 asymbol *asym)
5185 {
5186 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5187
5188 switch (elfsym->internal_elf_sym.st_shndx)
5189 {
5190 case SHN_X86_64_LCOMMON:
5191 asym->section = &_bfd_elf_large_com_section;
5192 asym->value = elfsym->internal_elf_sym.st_size;
5193 /* Common symbol doesn't set BSF_GLOBAL. */
5194 asym->flags &= ~BSF_GLOBAL;
5195 break;
5196 }
5197 }
5198
5199 static bool
5200 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5201 {
5202 return (sym->st_shndx == SHN_COMMON
5203 || sym->st_shndx == SHN_X86_64_LCOMMON);
5204 }
5205
5206 static unsigned int
5207 elf_x86_64_common_section_index (asection *sec)
5208 {
5209 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5210 return SHN_COMMON;
5211 else
5212 return SHN_X86_64_LCOMMON;
5213 }
5214
5215 static asection *
5216 elf_x86_64_common_section (asection *sec)
5217 {
5218 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5219 return bfd_com_section_ptr;
5220 else
5221 return &_bfd_elf_large_com_section;
5222 }
5223
5224 static bool
5225 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5226 const Elf_Internal_Sym *sym,
5227 asection **psec,
5228 bool newdef,
5229 bool olddef,
5230 bfd *oldbfd,
5231 const asection *oldsec)
5232 {
5233 /* A normal common symbol and a large common symbol result in a
5234 normal common symbol. We turn the large common symbol into a
5235 normal one. */
5236 if (!olddef
5237 && h->root.type == bfd_link_hash_common
5238 && !newdef
5239 && bfd_is_com_section (*psec)
5240 && oldsec != *psec)
5241 {
5242 if (sym->st_shndx == SHN_COMMON
5243 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5244 {
5245 h->root.u.c.p->section
5246 = bfd_make_section_old_way (oldbfd, "COMMON");
5247 h->root.u.c.p->section->flags = SEC_ALLOC;
5248 }
5249 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5250 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5251 *psec = bfd_com_section_ptr;
5252 }
5253
5254 return true;
5255 }
5256
5257 static int
5258 elf_x86_64_additional_program_headers (bfd *abfd,
5259 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5260 {
5261 asection *s;
5262 int count = 0;
5263
5264 /* Check to see if we need a large readonly segment. */
5265 s = bfd_get_section_by_name (abfd, ".lrodata");
5266 if (s && (s->flags & SEC_LOAD))
5267 count++;
5268
5269 /* Check to see if we need a large data segment. Since the .lbss
5270 section is placed right after the .bss section, there should be no need for
5271 a large data segment just because of .lbss. */
5272 s = bfd_get_section_by_name (abfd, ".ldata");
5273 if (s && (s->flags & SEC_LOAD))
5274 count++;
5275
5276 return count;
5277 }
5278
5279 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5280
5281 static bool
5282 elf_x86_64_relocs_compatible (const bfd_target *input,
5283 const bfd_target *output)
5284 {
5285 return ((xvec_get_elf_backend_data (input)->s->elfclass
5286 == xvec_get_elf_backend_data (output)->s->elfclass)
5287 && _bfd_elf_relocs_compatible (input, output));
5288 }
5289
5290 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5291 with GNU properties if found. Otherwise, return NULL. */
5292
5293 static bfd *
5294 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5295 {
5296 struct elf_x86_init_table init_table;
5297 const struct elf_backend_data *bed;
5298 struct elf_x86_link_hash_table *htab;
5299
5300 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5301 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5302 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5303 != (int) R_X86_64_GNU_VTINHERIT)
5304 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5305 != (int) R_X86_64_GNU_VTENTRY))
5306 abort ();
5307
5308 /* This is unused for x86-64. */
5309 init_table.plt0_pad_byte = 0x90;
5310
5311 bed = get_elf_backend_data (info->output_bfd);
5312 htab = elf_x86_hash_table (info, bed->target_id);
5313 if (!htab)
5314 abort ();
5315
5316 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5317 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5318
5319 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5320 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5321
5322 if (ABI_64_P (info->output_bfd))
5323 {
5324 init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt;
5325 init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt;
5326 init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_plt;
5327 init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_plt;
5328 }
5329 else
5330 {
5331 /* SFrame is not supported for non-AMD64. */
5332 init_table.sframe_lazy_plt = NULL;
5333 init_table.sframe_non_lazy_plt = NULL;
5334 }
5335
5336 if (ABI_64_P (info->output_bfd))
5337 {
5338 init_table.r_info = elf64_r_info;
5339 init_table.r_sym = elf64_r_sym;
5340 }
5341 else
5342 {
5343 init_table.r_info = elf32_r_info;
5344 init_table.r_sym = elf32_r_sym;
5345 }
5346
5347 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5348 }
5349
5350 static const struct bfd_elf_special_section
5351 elf_x86_64_special_sections[]=
5352 {
5353 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5354 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5355 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5356 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5357 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5358 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5359 { NULL, 0, 0, 0, 0 }
5360 };
5361
5362 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5363 #define TARGET_LITTLE_NAME "elf64-x86-64"
5364 #define ELF_ARCH bfd_arch_i386
5365 #define ELF_TARGET_ID X86_64_ELF_DATA
5366 #define ELF_MACHINE_CODE EM_X86_64
5367 #define ELF_MAXPAGESIZE 0x1000
5368 #define ELF_COMMONPAGESIZE 0x1000
5369
5370 #define elf_backend_can_gc_sections 1
5371 #define elf_backend_can_refcount 1
5372 #define elf_backend_want_got_plt 1
5373 #define elf_backend_plt_readonly 1
5374 #define elf_backend_want_plt_sym 0
5375 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5376 #define elf_backend_rela_normal 1
5377 #define elf_backend_plt_alignment 4
5378 #define elf_backend_caches_rawsize 1
5379 #define elf_backend_dtrel_excludes_plt 1
5380 #define elf_backend_want_dynrelro 1
5381
5382 #define elf_info_to_howto elf_x86_64_info_to_howto
5383
5384 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5385 #define bfd_elf64_bfd_reloc_name_lookup \
5386 elf_x86_64_reloc_name_lookup
5387
5388 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5389 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5390 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5391 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5392 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5393 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5394 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5395 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5396 #ifdef CORE_HEADER
5397 #define elf_backend_write_core_note elf_x86_64_write_core_note
5398 #endif
5399 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5400 #define elf_backend_relocate_section elf_x86_64_relocate_section
5401 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5402 #define elf_backend_object_p elf64_x86_64_elf_object_p
5403 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5404
5405 #define elf_backend_section_from_shdr \
5406 elf_x86_64_section_from_shdr
5407
5408 #define elf_backend_section_from_bfd_section \
5409 elf_x86_64_elf_section_from_bfd_section
5410 #define elf_backend_add_symbol_hook \
5411 elf_x86_64_add_symbol_hook
5412 #define elf_backend_symbol_processing \
5413 elf_x86_64_symbol_processing
5414 #define elf_backend_common_section_index \
5415 elf_x86_64_common_section_index
5416 #define elf_backend_common_section \
5417 elf_x86_64_common_section
5418 #define elf_backend_common_definition \
5419 elf_x86_64_common_definition
5420 #define elf_backend_merge_symbol \
5421 elf_x86_64_merge_symbol
5422 #define elf_backend_special_sections \
5423 elf_x86_64_special_sections
5424 #define elf_backend_additional_program_headers \
5425 elf_x86_64_additional_program_headers
5426 #define elf_backend_setup_gnu_properties \
5427 elf_x86_64_link_setup_gnu_properties
5428 #define elf_backend_hide_symbol \
5429 _bfd_x86_elf_hide_symbol
5430
5431 #undef elf64_bed
5432 #define elf64_bed elf64_x86_64_bed
5433
5434 #include "elf64-target.h"
5435
5436 /* CloudABI support. */
5437
5438 #undef TARGET_LITTLE_SYM
5439 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5440 #undef TARGET_LITTLE_NAME
5441 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5442
5443 #undef ELF_OSABI
5444 #define ELF_OSABI ELFOSABI_CLOUDABI
5445
5446 #undef elf64_bed
5447 #define elf64_bed elf64_x86_64_cloudabi_bed
5448
5449 #include "elf64-target.h"
5450
5451 /* FreeBSD support. */
5452
5453 #undef TARGET_LITTLE_SYM
5454 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5455 #undef TARGET_LITTLE_NAME
5456 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5457
5458 #undef ELF_OSABI
5459 #define ELF_OSABI ELFOSABI_FREEBSD
5460
5461 #undef elf64_bed
5462 #define elf64_bed elf64_x86_64_fbsd_bed
5463
5464 #include "elf64-target.h"
5465
5466 /* Solaris 2 support. */
5467
5468 #undef TARGET_LITTLE_SYM
5469 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5470 #undef TARGET_LITTLE_NAME
5471 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5472
5473 #undef ELF_TARGET_OS
5474 #define ELF_TARGET_OS is_solaris
5475
5476 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5477 objects won't be recognized. */
5478 #undef ELF_OSABI
5479
5480 #undef elf64_bed
5481 #define elf64_bed elf64_x86_64_sol2_bed
5482
5483 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5484 boundary. */
5485 #undef elf_backend_static_tls_alignment
5486 #define elf_backend_static_tls_alignment 16
5487
5488 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5489
5490 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5491 File, p.63. */
5492 #undef elf_backend_want_plt_sym
5493 #define elf_backend_want_plt_sym 1
5494
5495 #undef elf_backend_strtab_flags
5496 #define elf_backend_strtab_flags SHF_STRINGS
5497
5498 static bool
5499 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5500 bfd *obfd ATTRIBUTE_UNUSED,
5501 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5502 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5503 {
5504 /* PR 19938: FIXME: Need to add code for setting the sh_info
5505 and sh_link fields of Solaris specific section types. */
5506 return false;
5507 }
5508
5509 #undef elf_backend_copy_special_section_fields
5510 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5511
5512 #include "elf64-target.h"
5513
5514 /* Restore defaults. */
5515 #undef ELF_OSABI
5516 #undef elf_backend_static_tls_alignment
5517 #undef elf_backend_want_plt_sym
5518 #define elf_backend_want_plt_sym 0
5519 #undef elf_backend_strtab_flags
5520 #undef elf_backend_copy_special_section_fields
5521
5522 /* 32bit x86-64 support. */
5523
5524 #undef TARGET_LITTLE_SYM
5525 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5526 #undef TARGET_LITTLE_NAME
5527 #define TARGET_LITTLE_NAME "elf32-x86-64"
5528 #undef elf32_bed
5529 #define elf32_bed elf32_x86_64_bed
5530
5531 #undef ELF_ARCH
5532 #define ELF_ARCH bfd_arch_i386
5533
5534 #undef ELF_MACHINE_CODE
5535 #define ELF_MACHINE_CODE EM_X86_64
5536
5537 #undef ELF_TARGET_OS
5538 #undef ELF_OSABI
5539
5540 #define bfd_elf32_bfd_reloc_type_lookup \
5541 elf_x86_64_reloc_type_lookup
5542 #define bfd_elf32_bfd_reloc_name_lookup \
5543 elf_x86_64_reloc_name_lookup
5544 #define bfd_elf32_get_synthetic_symtab \
5545 elf_x86_64_get_synthetic_symtab
5546
5547 #undef elf_backend_object_p
5548 #define elf_backend_object_p \
5549 elf32_x86_64_elf_object_p
5550
5551 #undef elf_backend_bfd_from_remote_memory
5552 #define elf_backend_bfd_from_remote_memory \
5553 _bfd_elf32_bfd_from_remote_memory
5554
5555 #undef elf_backend_size_info
5556 #define elf_backend_size_info \
5557 _bfd_elf32_size_info
5558
5559 #include "elf32-target.h"