1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2022 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25
26 #include "opcode/i386.h"
27
28 #ifdef CORE_HEADER
29 #include <stdarg.h>
30 #include CORE_HEADER
31 #endif
32
33 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
34 #define MINUS_ONE (~ (bfd_vma) 0)
35
36 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
37 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
38 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
39 since they are the same. */
40
41 /* The relocation "howto" table. Order of fields:
42 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
43 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
44 static reloc_howto_type x86_64_elf_howto_table[] =
45 {
46 HOWTO(R_X86_64_NONE, 0, 3, 0, false, 0, complain_overflow_dont,
47 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
48 false),
49 HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_dont,
50 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
51 false),
52 HOWTO(R_X86_64_PC32, 0, 2, 32, true, 0, complain_overflow_signed,
53 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
54 true),
55 HOWTO(R_X86_64_GOT32, 0, 2, 32, false, 0, complain_overflow_signed,
56 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
57 false),
58 HOWTO(R_X86_64_PLT32, 0, 2, 32, true, 0, complain_overflow_signed,
59 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
60 true),
61 HOWTO(R_X86_64_COPY, 0, 2, 32, false, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
63 false),
64 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_dont,
65 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
66 false),
67 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_dont,
68 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
69 false),
70 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
71 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
72 false),
73 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, true, 0, complain_overflow_signed,
74 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
75 true),
76 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_unsigned,
77 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
78 false),
79 HOWTO(R_X86_64_32S, 0, 2, 32, false, 0, complain_overflow_signed,
80 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
81 false),
82 HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
84 HOWTO(R_X86_64_PC16, 0, 1, 16, true, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
86 HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
88 HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
89 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
90 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, false, 0, complain_overflow_dont,
91 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
92 false),
93 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
94 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
95 false),
96 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
97 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
98 false),
99 HOWTO(R_X86_64_TLSGD, 0, 2, 32, true, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
101 true),
102 HOWTO(R_X86_64_TLSLD, 0, 2, 32, true, 0, complain_overflow_signed,
103 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
104 true),
105 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
107 false),
108 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, true, 0, complain_overflow_signed,
109 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
110 true),
111 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
113 false),
114 HOWTO(R_X86_64_PC64, 0, 4, 64, true, 0, complain_overflow_dont,
115 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
116 true),
117 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
118 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
119 false),
120 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, true, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
122 true),
123 HOWTO(R_X86_64_GOT64, 0, 4, 64, false, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
125 false),
126 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, true, 0, complain_overflow_signed,
127 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
128 true),
129 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, true, 0, complain_overflow_signed,
130 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
131 true),
132 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, false, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
134 false),
135 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, false, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
137 false),
138 HOWTO(R_X86_64_SIZE32, 0, 2, 32, false, 0, complain_overflow_unsigned,
139 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
140 false),
141 HOWTO(R_X86_64_SIZE64, 0, 4, 64, false, 0, complain_overflow_dont,
142 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
143 false),
144 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, true, 0,
145 complain_overflow_bitfield, bfd_elf_generic_reloc,
146 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
147 HOWTO(R_X86_64_TLSDESC_CALL, 0, 3, 0, false, 0,
148 complain_overflow_dont, bfd_elf_generic_reloc,
149 "R_X86_64_TLSDESC_CALL",
150 false, 0, 0, false),
151 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, false, 0,
152 complain_overflow_dont, bfd_elf_generic_reloc,
153 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
154 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
155 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
156 false),
157 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, false, 0, complain_overflow_dont,
158 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
159 false),
160 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
161 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
162 true),
163 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
164 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
165 true),
166 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
167 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
168 true),
169 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
170 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
171 true),
172
173 /* We have a gap in the reloc numbers here.
174 R_X86_64_standard counts the number up to this point, and
175 R_X86_64_vt_offset is the value to subtract from a reloc type of
176 R_X86_64_GNU_VT* to form an index into this table. */
177 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
178 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
179
180 /* GNU extension to record C++ vtable hierarchy. */
181 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
182 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
183
184 /* GNU extension to record C++ vtable member usage. */
185 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
186 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
187 false),
188
189 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
190 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_bitfield,
191 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
192 false)
193 };
194
195 /* Map BFD relocs to the x86_64 elf relocs. */
196 struct elf_reloc_map
197 {
198 bfd_reloc_code_real_type bfd_reloc_val;
199 unsigned char elf_reloc_val;
200 };
201
202 static const struct elf_reloc_map x86_64_reloc_map[] =
203 {
204 { BFD_RELOC_NONE, R_X86_64_NONE, },
205 { BFD_RELOC_64, R_X86_64_64, },
206 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
207 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
208 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
209 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
210 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
211 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
212 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
213 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
214 { BFD_RELOC_32, R_X86_64_32, },
215 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
216 { BFD_RELOC_16, R_X86_64_16, },
217 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
218 { BFD_RELOC_8, R_X86_64_8, },
219 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
220 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
221 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
222 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
223 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
224 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
225 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
226 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
227 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
228 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
229 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
230 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
231 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
232 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
233 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
234 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
235 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
236 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
237 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
238 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
239 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
240 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
241 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
242 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
243 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
244 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
245 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
246 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
247 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
248 };
249
250 static reloc_howto_type *
251 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
252 {
253 unsigned i;
254
255 if (r_type == (unsigned int) R_X86_64_32)
256 {
257 if (ABI_64_P (abfd))
258 i = r_type;
259 else
260 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
261 }
262 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
263 || r_type >= (unsigned int) R_X86_64_max)
264 {
265 if (r_type >= (unsigned int) R_X86_64_standard)
266 {
267 /* xgettext:c-format */
268 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
269 abfd, r_type);
270 bfd_set_error (bfd_error_bad_value);
271 return NULL;
272 }
273 i = r_type;
274 }
275 else
276 i = r_type - (unsigned int) R_X86_64_vt_offset;
277 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
278 return &x86_64_elf_howto_table[i];
279 }
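/* A worked example of the index computation above (the reloc numbers
   quoted here come from include/elf/x86-64.h and are given only for
   illustration): with R_X86_64_REX_GOTPCRELX == 42, R_X86_64_standard
   is 43 and, with R_X86_64_GNU_VTINHERIT == 250, R_X86_64_vt_offset is
   250 - 43 = 207.  R_X86_64_GNU_VTINHERIT thus maps to index
   250 - 207 = 43 and R_X86_64_GNU_VTENTRY to index 44, i.e. the two
   vtable howtos that follow the 43 standard entries in
   x86_64_elf_howto_table, while the x32 variant of R_X86_64_32 sits at
   the very end of the table.  */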
280
281 /* Given a BFD reloc type, return a HOWTO structure. */
282 static reloc_howto_type *
283 elf_x86_64_reloc_type_lookup (bfd *abfd,
284 bfd_reloc_code_real_type code)
285 {
286 unsigned int i;
287
288 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
289 i++)
290 {
291 if (x86_64_reloc_map[i].bfd_reloc_val == code)
292 return elf_x86_64_rtype_to_howto (abfd,
293 x86_64_reloc_map[i].elf_reloc_val);
294 }
295 return NULL;
296 }
297
298 static reloc_howto_type *
299 elf_x86_64_reloc_name_lookup (bfd *abfd,
300 const char *r_name)
301 {
302 unsigned int i;
303
304 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
305 {
306 /* Get x32 R_X86_64_32. */
307 reloc_howto_type *reloc
308 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
309 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
310 return reloc;
311 }
312
313 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
314 if (x86_64_elf_howto_table[i].name != NULL
315 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
316 return &x86_64_elf_howto_table[i];
317
318 return NULL;
319 }
320
321 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
322
323 static bool
324 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
325 Elf_Internal_Rela *dst)
326 {
327 unsigned r_type;
328
329 r_type = ELF32_R_TYPE (dst->r_info);
330 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
331 if (cache_ptr->howto == NULL)
332 return false;
333 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
334 return true;
335 }
336 \f
337 /* Support for core dump NOTE sections. */
338 static bool
339 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
340 {
341 int offset;
342 size_t size;
343
344 switch (note->descsz)
345 {
346 default:
347 return false;
348
349 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
350 /* pr_cursig */
351 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
352
353 /* pr_pid */
354 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
355
356 /* pr_reg */
357 offset = 72;
358 size = 216;
359
360 break;
361
362 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
363 /* pr_cursig */
364 elf_tdata (abfd)->core->signal
365 = bfd_get_16 (abfd, note->descdata + 12);
366
367 /* pr_pid */
368 elf_tdata (abfd)->core->lwpid
369 = bfd_get_32 (abfd, note->descdata + 32);
370
371 /* pr_reg */
372 offset = 112;
373 size = 216;
374
375 break;
376 }
377
378 /* Make a ".reg/999" section. */
379 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
380 size, note->descpos + offset);
381 }
382
383 static bool
384 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
385 {
386 switch (note->descsz)
387 {
388 default:
389 return false;
390
391 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
392 elf_tdata (abfd)->core->pid
393 = bfd_get_32 (abfd, note->descdata + 12);
394 elf_tdata (abfd)->core->program
395 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
396 elf_tdata (abfd)->core->command
397 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
398 break;
399
400 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
401 elf_tdata (abfd)->core->pid
402 = bfd_get_32 (abfd, note->descdata + 24);
403 elf_tdata (abfd)->core->program
404 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
405 elf_tdata (abfd)->core->command
406 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
407 }
408
409 /* Note that for some reason, a spurious space is tacked
410 onto the end of the args in some (at least one anyway)
411 implementations, so strip it off if it exists. */
412
413 {
414 char *command = elf_tdata (abfd)->core->command;
415 int n = strlen (command);
416
417 if (0 < n && command[n - 1] == ' ')
418 command[n - 1] = '\0';
419 }
420
421 return true;
422 }
423
424 #ifdef CORE_HEADER
425 # if GCC_VERSION >= 8000
426 # pragma GCC diagnostic push
427 # pragma GCC diagnostic ignored "-Wstringop-truncation"
428 # endif
429 static char *
430 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
431 int note_type, ...)
432 {
433 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
434 va_list ap;
435 const char *fname, *psargs;
436 long pid;
437 int cursig;
438 const void *gregs;
439
440 switch (note_type)
441 {
442 default:
443 return NULL;
444
445 case NT_PRPSINFO:
446 va_start (ap, note_type);
447 fname = va_arg (ap, const char *);
448 psargs = va_arg (ap, const char *);
449 va_end (ap);
450
451 if (bed->s->elfclass == ELFCLASS32)
452 {
453 prpsinfo32_t data;
454 memset (&data, 0, sizeof (data));
455 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
456 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
457 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
458 &data, sizeof (data));
459 }
460 else
461 {
462 prpsinfo64_t data;
463 memset (&data, 0, sizeof (data));
464 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
465 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
466 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
467 &data, sizeof (data));
468 }
469 /* NOTREACHED */
470
471 case NT_PRSTATUS:
472 va_start (ap, note_type);
473 pid = va_arg (ap, long);
474 cursig = va_arg (ap, int);
475 gregs = va_arg (ap, const void *);
476 va_end (ap);
477
478 if (bed->s->elfclass == ELFCLASS32)
479 {
480 if (bed->elf_machine_code == EM_X86_64)
481 {
482 prstatusx32_t prstat;
483 memset (&prstat, 0, sizeof (prstat));
484 prstat.pr_pid = pid;
485 prstat.pr_cursig = cursig;
486 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
487 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
488 &prstat, sizeof (prstat));
489 }
490 else
491 {
492 prstatus32_t prstat;
493 memset (&prstat, 0, sizeof (prstat));
494 prstat.pr_pid = pid;
495 prstat.pr_cursig = cursig;
496 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
497 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
498 &prstat, sizeof (prstat));
499 }
500 }
501 else
502 {
503 prstatus64_t prstat;
504 memset (&prstat, 0, sizeof (prstat));
505 prstat.pr_pid = pid;
506 prstat.pr_cursig = cursig;
507 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
508 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
509 &prstat, sizeof (prstat));
510 }
511 }
512 /* NOTREACHED */
513 }
514 # if GCC_VERSION >= 8000
515 # pragma GCC diagnostic pop
516 # endif
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The size in bytes of an entry in the global offset table. */
522
523 #define GOT_ENTRY_SIZE 8
524
525 /* The size in bytes of an entry in the lazy procedure linkage table. */
526
527 #define LAZY_PLT_ENTRY_SIZE 16
528
529 /* The size in bytes of an entry in the non-lazy procedure linkage
530 table. */
531
532 #define NON_LAZY_PLT_ENTRY_SIZE 8
533
534 /* The first entry in a lazy procedure linkage table looks like this.
535 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
536 works. */
537
538 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
539 {
540 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
541 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
542 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
543 };
544
545 /* Subsequent entries in a lazy procedure linkage table look like this. */
546
547 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
548 {
549 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
550 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
551 0x68, /* pushq immediate */
552 0, 0, 0, 0, /* replaced with index into relocation table. */
553 0xe9, /* jmp relative */
554 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
555 };
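/* Taken together, the two templates above implement the usual x86-64
   lazy binding scheme (see the x86-64 psABI for the authoritative
   description): PLT0 pushes the link map stored at GOT+8 and jumps
   through GOT+16, where the dynamic linker installs its resolver.
   The GOT slot used by the Nth PLT entry (slot N + 3, the first three
   slots being reserved) initially points back at the pushq in that
   same entry, so the first call falls through to push the relocation
   index and branch to PLT0; once resolved, the slot holds the final
   target address and later calls jump there directly.  */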
556
557 /* The first entry in a lazy procedure linkage table with BND prefix
558 looks like this. */
559
560 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
561 {
562 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
563 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
564 0x0f, 0x1f, 0 /* nopl (%rax) */
565 };
566
567 /* Subsequent entries for branches with BND prefix in a lazy procedure
568 linkage table look like this. */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0x68, 0, 0, 0, 0, /* pushq immediate */
573 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
574 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
575 };
576
577 /* The first entry in the IBT-enabled lazy procedure linkage table is
578 the same as the lazy PLT with BND prefix so that bound registers are
579 preserved when control is passed to the dynamic linker. Subsequent
580 entries for an IBT-enabled lazy procedure linkage table look like
581 this. */
582
583 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
584 {
585 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
586 0x68, 0, 0, 0, 0, /* pushq immediate */
587 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
588 0x90 /* nop */
589 };
590
591 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
592 is the same as the normal lazy PLT. Subsequent entries for an
593 x32 IBT-enabled lazy procedure linkage table look like this. */
594
595 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
596 {
597 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
598 0x68, 0, 0, 0, 0, /* pushq immediate */
599 0xe9, 0, 0, 0, 0, /* jmpq relative */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries in the non-lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the non-lazy procedure
613 linkage table look like this. */
614
615 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
621
622 /* Entries for branches with IBT enabled in the non-lazy procedure
623 linkage table look like this. They have the same size as the lazy
624 PLT entry. */
625
626 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
627 {
628 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
629 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
630 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
631 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
632 };
633
634 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
635 linkage table look like this. They have the same size as the lazy
636 PLT entry. */
637
638 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
639 {
640 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
641 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
642 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
643 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
644 };
645
646 /* The TLSDESC entry in a lazy procedure linkage table. */
647 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
648 {
649 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
650 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
651 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
652 };
653
654 /* .eh_frame covering the lazy .plt section. */
655
656 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
657 {
658 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
659 0, 0, 0, 0, /* CIE ID */
660 1, /* CIE version */
661 'z', 'R', 0, /* Augmentation string */
662 1, /* Code alignment factor */
663 0x78, /* Data alignment factor */
664 16, /* Return address column */
665 1, /* Augmentation size */
666 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
667 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
668 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
669 DW_CFA_nop, DW_CFA_nop,
670
671 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
672 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
673 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
674 0, 0, 0, 0, /* .plt size goes here */
675 0, /* Augmentation size */
676 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
677 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
678 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
679 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
680 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
681 11, /* Block length */
682 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
683 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
684 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
685 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
686 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
687 };
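/* The DW_CFA_def_cfa_expression above encodes, roughly,
   CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0): once %rip's offset
   within the 16-byte PLT entry is past the pushq (bytes 6..10 of
   elf_x86_64_lazy_plt_entry), the CFA is 8 bytes further away because
   of the pushed relocation index.  The BND and IBT variants below use
   the same expression with a different literal in the comparison,
   matching where the pushq sits in their entry layouts.  */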
688
689 /* .eh_frame covering the lazy BND .plt section. */
690
691 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
692 {
693 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
694 0, 0, 0, 0, /* CIE ID */
695 1, /* CIE version */
696 'z', 'R', 0, /* Augmentation string */
697 1, /* Code alignment factor */
698 0x78, /* Data alignment factor */
699 16, /* Return address column */
700 1, /* Augmentation size */
701 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
702 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
703 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
704 DW_CFA_nop, DW_CFA_nop,
705
706 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
707 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
708 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
709 0, 0, 0, 0, /* .plt size goes here */
710 0, /* Augmentation size */
711 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
712 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
713 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
714 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
715 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
716 11, /* Block length */
717 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
718 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
719 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
720 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
721 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
722 };
723
724 /* .eh_frame covering the lazy .plt section with IBT enabled. */
725
726 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
727 {
728 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
729 0, 0, 0, 0, /* CIE ID */
730 1, /* CIE version */
731 'z', 'R', 0, /* Augmentation string */
732 1, /* Code alignment factor */
733 0x78, /* Data alignment factor */
734 16, /* Return address column */
735 1, /* Augmentation size */
736 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
737 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
738 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
739 DW_CFA_nop, DW_CFA_nop,
740
741 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
742 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
743 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
744 0, 0, 0, 0, /* .plt size goes here */
745 0, /* Augmentation size */
746 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
747 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
748 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
749 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
750 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
751 11, /* Block length */
752 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
753 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
754 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
755 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
756 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
757 };
758
759 /* .eh_frame covering the x32 lazy .plt section with IBT enabled. */
760
761 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
762 {
763 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
764 0, 0, 0, 0, /* CIE ID */
765 1, /* CIE version */
766 'z', 'R', 0, /* Augmentation string */
767 1, /* Code alignment factor */
768 0x78, /* Data alignment factor */
769 16, /* Return address column */
770 1, /* Augmentation size */
771 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
772 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
773 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
774 DW_CFA_nop, DW_CFA_nop,
775
776 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
777 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
778 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
779 0, 0, 0, 0, /* .plt size goes here */
780 0, /* Augmentation size */
781 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
782 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
783 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
784 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
785 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
786 11, /* Block length */
787 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
788 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
789 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
790 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
791 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
792 };
793
794 /* .eh_frame covering the non-lazy .plt section. */
795
796 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
797 {
798 #define PLT_GOT_FDE_LENGTH 20
799 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
800 0, 0, 0, 0, /* CIE ID */
801 1, /* CIE version */
802 'z', 'R', 0, /* Augmentation string */
803 1, /* Code alignment factor */
804 0x78, /* Data alignment factor */
805 16, /* Return address column */
806 1, /* Augmentation size */
807 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
808 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
809 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
810 DW_CFA_nop, DW_CFA_nop,
811
812 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
813 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
814 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
815 0, 0, 0, 0, /* non-lazy .plt size goes here */
816 0, /* Augmentation size */
817 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
818 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
819 };
820
821 /* These are the standard parameters. */
822 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
823 {
824 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
825 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
826 elf_x86_64_lazy_plt_entry, /* plt_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
828 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
830 6, /* plt_tlsdesc_got1_offset */
831 12, /* plt_tlsdesc_got2_offset */
832 10, /* plt_tlsdesc_got1_insn_end */
833 16, /* plt_tlsdesc_got2_insn_end */
834 2, /* plt0_got1_offset */
835 8, /* plt0_got2_offset */
836 12, /* plt0_got2_insn_end */
837 2, /* plt_got_offset */
838 7, /* plt_reloc_offset */
839 12, /* plt_plt_offset */
840 6, /* plt_got_insn_size */
841 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
842 6, /* plt_lazy_offset */
843 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
844 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
845 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
846 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
847 };
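/* The offsets in this layout index into the byte templates above: in
   elf_x86_64_lazy_plt0_entry the GOT+8 displacement starts at byte 2
   (plt0_got1_offset) and the GOT+16 displacement at byte 8
   (plt0_got2_offset), its jmpq ending at byte 12; in
   elf_x86_64_lazy_plt_entry the per-symbol GOT displacement starts at
   byte 2 (plt_got_offset), the relocation-index immediate at byte 7
   (plt_reloc_offset) and the jmp-to-PLT0 displacement at byte 12
   (plt_plt_offset).  The layouts below follow the same pattern for
   their respective templates.  */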
848
849 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
850 {
851 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
852 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
853 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
854 2, /* plt_got_offset */
855 6, /* plt_got_insn_size */
856 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
857 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
858 };
859
860 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
861 {
862 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
863 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
864 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
865 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
866 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
867 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
868 6, /* plt_tlsdesc_got1_offset */
869 12, /* plt_tlsdesc_got2_offset */
870 10, /* plt_tlsdesc_got1_insn_end */
871 16, /* plt_tlsdesc_got2_insn_end */
872 2, /* plt0_got1_offset */
873 1+8, /* plt0_got2_offset */
874 1+12, /* plt0_got2_insn_end */
875 1+2, /* plt_got_offset */
876 1, /* plt_reloc_offset */
877 7, /* plt_plt_offset */
878 1+6, /* plt_got_insn_size */
879 11, /* plt_plt_insn_end */
880 0, /* plt_lazy_offset */
881 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
882 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
883 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
884 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
885 };
886
887 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
888 {
889 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
890 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
891 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
892 1+2, /* plt_got_offset */
893 1+6, /* plt_got_insn_size */
894 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
895 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
896 };
897
898 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
899 {
900 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
901 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
902 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
903 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
904 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
905 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
906 6, /* plt_tlsdesc_got1_offset */
907 12, /* plt_tlsdesc_got2_offset */
908 10, /* plt_tlsdesc_got1_insn_end */
909 16, /* plt_tlsdesc_got2_insn_end */
910 2, /* plt0_got1_offset */
911 1+8, /* plt0_got2_offset */
912 1+12, /* plt0_got2_insn_end */
913 4+1+2, /* plt_got_offset */
914 4+1, /* plt_reloc_offset */
915 4+1+6, /* plt_plt_offset */
916 4+1+6, /* plt_got_insn_size */
917 4+1+5+5, /* plt_plt_insn_end */
918 0, /* plt_lazy_offset */
919 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
920 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
921 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
922 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
923 };
924
925 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
926 {
927 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
928 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
929 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
930 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
931 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
932 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
933 6, /* plt_tlsdesc_got1_offset */
934 12, /* plt_tlsdesc_got2_offset */
935 10, /* plt_tlsdesc_got1_insn_end */
936 16, /* plt_tlsdesc_got2_insn_end */
937 2, /* plt0_got1_offset */
938 8, /* plt0_got2_offset */
939 12, /* plt0_got2_insn_end */
940 4+2, /* plt_got_offset */
941 4+1, /* plt_reloc_offset */
942 4+6, /* plt_plt_offset */
943 4+6, /* plt_got_insn_size */
944 4+5+5, /* plt_plt_insn_end */
945 0, /* plt_lazy_offset */
946 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
947 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
948 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
949 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
953 {
954 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
955 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
956 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
957 4+1+2, /* plt_got_offset */
958 4+1+6, /* plt_got_insn_size */
959 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
960 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
961 };
962
963 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
964 {
965 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
966 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
967 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
968 4+2, /* plt_got_offset */
969 4+6, /* plt_got_insn_size */
970 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
971 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
972 };
973
974
975 static bool
976 elf64_x86_64_elf_object_p (bfd *abfd)
977 {
978 /* Set the right machine number for an x86-64 elf64 file. */
979 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
980 return true;
981 }
982
983 static bool
984 elf32_x86_64_elf_object_p (bfd *abfd)
985 {
986 /* Set the right machine number for an x86-64 elf32 file. */
987 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
988 return true;
989 }
990
991 /* Return TRUE if the TLS access code sequence supports a transition
992 from R_TYPE. */
993
994 static bool
995 elf_x86_64_check_tls_transition (bfd *abfd,
996 struct bfd_link_info *info,
997 asection *sec,
998 bfd_byte *contents,
999 Elf_Internal_Shdr *symtab_hdr,
1000 struct elf_link_hash_entry **sym_hashes,
1001 unsigned int r_type,
1002 const Elf_Internal_Rela *rel,
1003 const Elf_Internal_Rela *relend)
1004 {
1005 unsigned int val;
1006 unsigned long r_symndx;
1007 bool largepic = false;
1008 struct elf_link_hash_entry *h;
1009 bfd_vma offset;
1010 struct elf_x86_link_hash_table *htab;
1011 bfd_byte *call;
1012 bool indirect_call;
1013
1014 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1015 offset = rel->r_offset;
1016 switch (r_type)
1017 {
1018 case R_X86_64_TLSGD:
1019 case R_X86_64_TLSLD:
1020 if ((rel + 1) >= relend)
1021 return false;
1022
1023 if (r_type == R_X86_64_TLSGD)
1024 {
1025 /* Check transition from GD access model. For 64bit, only
1026 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1027 .word 0x6666; rex64; call __tls_get_addr@PLT
1028 or
1029 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1030 .byte 0x66; rex64
1031 call *__tls_get_addr@GOTPCREL(%rip)
1032 which may be converted to
1033 addr32 call __tls_get_addr
1034 can transition to a different access model. For 32bit, only
1035 leaq foo@tlsgd(%rip), %rdi
1036 .word 0x6666; rex64; call __tls_get_addr@PLT
1037 or
1038 leaq foo@tlsgd(%rip), %rdi
1039 .byte 0x66; rex64
1040 call *__tls_get_addr@GOTPCREL(%rip)
1041 which may be converted to
1042 addr32 call __tls_get_addr
1043 can transition to a different access model. For largepic,
1044 we also support:
1045 leaq foo@tlsgd(%rip), %rdi
1046 movabsq $__tls_get_addr@pltoff, %rax
1047 addq %r15, %rax
1048 call *%rax
1049 or
1050 leaq foo@tlsgd(%rip), %rdi
1051 movabsq $__tls_get_addr@pltoff, %rax
1052 addq %rbx, %rax
1053 call *%rax */
1054
1055 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1056
1057 if ((offset + 12) > sec->size)
1058 return false;
1059
1060 call = contents + offset + 4;
1061 if (call[0] != 0x66
1062 || !((call[1] == 0x48
1063 && call[2] == 0xff
1064 && call[3] == 0x15)
1065 || (call[1] == 0x48
1066 && call[2] == 0x67
1067 && call[3] == 0xe8)
1068 || (call[1] == 0x66
1069 && call[2] == 0x48
1070 && call[3] == 0xe8)))
1071 {
1072 if (!ABI_64_P (abfd)
1073 || (offset + 19) > sec->size
1074 || offset < 3
1075 || memcmp (call - 7, leaq + 1, 3) != 0
1076 || memcmp (call, "\x48\xb8", 2) != 0
1077 || call[11] != 0x01
1078 || call[13] != 0xff
1079 || call[14] != 0xd0
1080 || !((call[10] == 0x48 && call[12] == 0xd8)
1081 || (call[10] == 0x4c && call[12] == 0xf8)))
1082 return false;
1083 largepic = true;
1084 }
1085 else if (ABI_64_P (abfd))
1086 {
1087 if (offset < 4
1088 || memcmp (contents + offset - 4, leaq, 4) != 0)
1089 return false;
1090 }
1091 else
1092 {
1093 if (offset < 3
1094 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1095 return false;
1096 }
1097 indirect_call = call[2] == 0xff;
1098 }
1099 else
1100 {
1101 /* Check transition from LD access model. Only
1102 leaq foo@tlsld(%rip), %rdi;
1103 call __tls_get_addr@PLT
1104 or
1105 leaq foo@tlsld(%rip), %rdi;
1106 call *__tls_get_addr@GOTPCREL(%rip)
1107 which may be converted to
1108 addr32 call __tls_get_addr
1109 can transition to a different access model. For largepic
1110 we also support:
1111 leaq foo@tlsld(%rip), %rdi
1112 movabsq $__tls_get_addr@pltoff, %rax
1113 addq %r15, %rax
1114 call *%rax
1115 or
1116 leaq foo@tlsld(%rip), %rdi
1117 movabsq $__tls_get_addr@pltoff, %rax
1118 addq %rbx, %rax
1119 call *%rax */
1120
1121 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1122
1123 if (offset < 3 || (offset + 9) > sec->size)
1124 return false;
1125
1126 if (memcmp (contents + offset - 3, lea, 3) != 0)
1127 return false;
1128
1129 call = contents + offset + 4;
1130 if (!(call[0] == 0xe8
1131 || (call[0] == 0xff && call[1] == 0x15)
1132 || (call[0] == 0x67 && call[1] == 0xe8)))
1133 {
1134 if (!ABI_64_P (abfd)
1135 || (offset + 19) > sec->size
1136 || memcmp (call, "\x48\xb8", 2) != 0
1137 || call[11] != 0x01
1138 || call[13] != 0xff
1139 || call[14] != 0xd0
1140 || !((call[10] == 0x48 && call[12] == 0xd8)
1141 || (call[10] == 0x4c && call[12] == 0xf8)))
1142 return false;
1143 largepic = true;
1144 }
1145 indirect_call = call[0] == 0xff;
1146 }
1147
1148 r_symndx = htab->r_sym (rel[1].r_info);
1149 if (r_symndx < symtab_hdr->sh_info)
1150 return false;
1151
1152 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1153 if (h == NULL
1154 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1155 return false;
1156 else
1157 {
1158 r_type = (ELF32_R_TYPE (rel[1].r_info)
1159 & ~R_X86_64_converted_reloc_bit);
1160 if (largepic)
1161 return r_type == R_X86_64_PLTOFF64;
1162 else if (indirect_call)
1163 return r_type == R_X86_64_GOTPCRELX;
1164 else
1165 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1166 }
1167
1168 case R_X86_64_GOTTPOFF:
1169 /* Check transition from IE access model:
1170 mov foo@gottpoff(%rip), %reg
1171 add foo@gottpoff(%rip), %reg
1172 */
1173
1174 /* Check REX prefix first. */
1175 if (offset >= 3 && (offset + 4) <= sec->size)
1176 {
1177 val = bfd_get_8 (abfd, contents + offset - 3);
1178 if (val != 0x48 && val != 0x4c)
1179 {
1180 /* X32 may have 0x44 REX prefix or no REX prefix. */
1181 if (ABI_64_P (abfd))
1182 return false;
1183 }
1184 }
1185 else
1186 {
1187 /* X32 may not have any REX prefix. */
1188 if (ABI_64_P (abfd))
1189 return false;
1190 if (offset < 2 || (offset + 3) > sec->size)
1191 return false;
1192 }
1193
1194 val = bfd_get_8 (abfd, contents + offset - 2);
1195 if (val != 0x8b && val != 0x03)
1196 return false;
1197
1198 val = bfd_get_8 (abfd, contents + offset - 1);
1199 return (val & 0xc7) == 5;
1200
1201 case R_X86_64_GOTPC32_TLSDESC:
1202 /* Check transition from GDesc access model:
1203 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1204 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1205
1206 Make sure it's a leaq adding rip to a 32-bit offset
1207 into any register, although it's probably almost always
1208 going to be rax. */
1209
1210 if (offset < 3 || (offset + 4) > sec->size)
1211 return false;
1212
1213 val = bfd_get_8 (abfd, contents + offset - 3);
1214 val &= 0xfb;
1215 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1216 return false;
1217
1218 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1219 return false;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 0x05;
1223
1224 case R_X86_64_TLSDESC_CALL:
1225 /* Check transition from GDesc access model:
1226 call *x@tlsdesc(%rax) <--- LP64 mode.
1227 call *x@tlsdesc(%eax) <--- X32 mode.
1228 */
1229 if (offset + 2 <= sec->size)
1230 {
1231 unsigned int prefix;
1232 call = contents + offset;
1233 prefix = 0;
1234 if (!ABI_64_P (abfd))
1235 {
1236 /* Check for call *x@tlsdesc(%eax). */
1237 if (call[0] == 0x67)
1238 {
1239 prefix = 1;
1240 if (offset + 3 > sec->size)
1241 return false;
1242 }
1243 }
1244 /* Make sure that it's a call *x@tlsdesc(%rax). */
1245 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1246 }
1247
1248 return false;
1249
1250 default:
1251 abort ();
1252 }
1253 }
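/* For reference, the 64-bit GD sequence accepted above is, in bytes
   (the R_X86_64_TLSGD relocation covers the leaq displacement, i.e.
   bytes 4..7 of the sequence; foo is a hypothetical TLS symbol):

       66 48 8d 3d 00 00 00 00   .byte 0x66; leaq foo@tlsgd(%rip), %rdi
       66 66 48 e8 00 00 00 00   .word 0x6666; rex64; call __tls_get_addr@PLT

   which is why the code compares the four bytes before the relocation
   against { 0x66, 0x48, 0x8d, 0x3d } and inspects the bytes at
   offset + 4 for one of the accepted call encodings.  */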
1254
1255 /* Return TRUE if the TLS access transition is OK or no transition
1256 will be performed. Update R_TYPE if there is a transition. */
1257
1258 static bool
1259 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1260 asection *sec, bfd_byte *contents,
1261 Elf_Internal_Shdr *symtab_hdr,
1262 struct elf_link_hash_entry **sym_hashes,
1263 unsigned int *r_type, int tls_type,
1264 const Elf_Internal_Rela *rel,
1265 const Elf_Internal_Rela *relend,
1266 struct elf_link_hash_entry *h,
1267 unsigned long r_symndx,
1268 bool from_relocate_section)
1269 {
1270 unsigned int from_type = *r_type;
1271 unsigned int to_type = from_type;
1272 bool check = true;
1273
1274 /* Skip TLS transition for functions. */
1275 if (h != NULL
1276 && (h->type == STT_FUNC
1277 || h->type == STT_GNU_IFUNC))
1278 return true;
1279
1280 switch (from_type)
1281 {
1282 case R_X86_64_TLSGD:
1283 case R_X86_64_GOTPC32_TLSDESC:
1284 case R_X86_64_TLSDESC_CALL:
1285 case R_X86_64_GOTTPOFF:
1286 if (bfd_link_executable (info))
1287 {
1288 if (h == NULL)
1289 to_type = R_X86_64_TPOFF32;
1290 else
1291 to_type = R_X86_64_GOTTPOFF;
1292 }
1293
1294 /* When we are called from elf_x86_64_relocate_section, there may
1295 be additional transitions based on TLS_TYPE. */
1296 if (from_relocate_section)
1297 {
1298 unsigned int new_to_type = to_type;
1299
1300 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1301 new_to_type = R_X86_64_TPOFF32;
1302
1303 if (to_type == R_X86_64_TLSGD
1304 || to_type == R_X86_64_GOTPC32_TLSDESC
1305 || to_type == R_X86_64_TLSDESC_CALL)
1306 {
1307 if (tls_type == GOT_TLS_IE)
1308 new_to_type = R_X86_64_GOTTPOFF;
1309 }
1310
1311 /* We checked the transition before when we were called from
1312 elf_x86_64_scan_relocs. We only want to check the new
1313 transition which hasn't been checked before. */
1314 check = new_to_type != to_type && from_type == to_type;
1315 to_type = new_to_type;
1316 }
1317
1318 break;
1319
1320 case R_X86_64_TLSLD:
1321 if (bfd_link_executable (info))
1322 to_type = R_X86_64_TPOFF32;
1323 break;
1324
1325 default:
1326 return true;
1327 }
1328
1329 /* Return TRUE if there is no transition. */
1330 if (from_type == to_type)
1331 return true;
1332
1333 /* Check if the transition can be performed. */
1334 if (check
1335 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1336 symtab_hdr, sym_hashes,
1337 from_type, rel, relend))
1338 {
1339 reloc_howto_type *from, *to;
1340 const char *name;
1341
1342 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1343 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1344
1345 if (from == NULL || to == NULL)
1346 return false;
1347
1348 if (h)
1349 name = h->root.root.string;
1350 else
1351 {
1352 struct elf_x86_link_hash_table *htab;
1353
1354 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1355 if (htab == NULL)
1356 name = "*unknown*";
1357 else
1358 {
1359 Elf_Internal_Sym *isym;
1360
1361 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1362 abfd, r_symndx);
1363 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1364 }
1365 }
1366
1367 _bfd_error_handler
1368 /* xgettext:c-format */
1369 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1370 " in section `%pA' failed"),
1371 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1372 bfd_set_error (bfd_error_bad_value);
1373 return false;
1374 }
1375
1376 *r_type = to_type;
1377 return true;
1378 }
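/* In other words: when linking an executable, the GD, LD and TLSDESC
   sequences are relaxed to the IE model (R_X86_64_GOTTPOFF) or, for
   symbols that resolve locally, all the way to the LE model
   (R_X86_64_TPOFF32); IE itself is relaxed to LE under the same
   local-resolution condition.  When building a shared object no
   transition is made and the original access model is kept.  */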
1379
1380 static bool
1381 elf_x86_64_need_pic (struct bfd_link_info *info,
1382 bfd *input_bfd, asection *sec,
1383 struct elf_link_hash_entry *h,
1384 Elf_Internal_Shdr *symtab_hdr,
1385 Elf_Internal_Sym *isym,
1386 reloc_howto_type *howto)
1387 {
1388 const char *v = "";
1389 const char *und = "";
1390 const char *pic = "";
1391 const char *object;
1392
1393 const char *name;
1394 if (h)
1395 {
1396 name = h->root.root.string;
1397 switch (ELF_ST_VISIBILITY (h->other))
1398 {
1399 case STV_HIDDEN:
1400 v = _("hidden symbol ");
1401 break;
1402 case STV_INTERNAL:
1403 v = _("internal symbol ");
1404 break;
1405 case STV_PROTECTED:
1406 v = _("protected symbol ");
1407 break;
1408 default:
1409 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1410 v = _("protected symbol ");
1411 else
1412 v = _("symbol ");
1413 pic = NULL;
1414 break;
1415 }
1416
1417 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1418 und = _("undefined ");
1419 }
1420 else
1421 {
1422 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1423 pic = NULL;
1424 }
1425
1426 if (bfd_link_dll (info))
1427 {
1428 object = _("a shared object");
1429 if (!pic)
1430 pic = _("; recompile with -fPIC");
1431 }
1432 else
1433 {
1434 if (bfd_link_pie (info))
1435 object = _("a PIE object");
1436 else
1437 object = _("a PDE object");
1438 if (!pic)
1439 pic = _("; recompile with -fPIE");
1440 }
1441
1442 /* xgettext:c-format */
1443 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1444 "not be used when making %s%s"),
1445 input_bfd, howto->name, und, v, name,
1446 object, pic);
1447 bfd_set_error (bfd_error_bad_value);
1448 sec->check_relocs_failed = 1;
1449 return false;
1450 }
1451
1452 /* With the local symbol, foo, we convert
1453 mov foo@GOTPCREL(%rip), %reg
1454 to
1455 lea foo(%rip), %reg
1456 and convert
1457 call/jmp *foo@GOTPCREL(%rip)
1458 to
1459 nop call foo/jmp foo nop
1460 When PIC is false, convert
1461 test %reg, foo@GOTPCREL(%rip)
1462 to
1463 test $foo, %reg
1464 and convert
1465 binop foo@GOTPCREL(%rip), %reg
1466 to
1467 binop $foo, %reg
1468 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1469 instructions. */
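/* Concretely, assuming a hypothetical symbol foo and a 0x90 padding
   byte (the padding is configurable with ld's -z call-nop option),
   the rewrites described above look like this; the 32-bit
   displacement field is reused in place:

       48 8b 05 <disp32>   mov foo@GOTPCREL(%rip), %rax
    -> 48 8d 05 <disp32>   lea foo(%rip), %rax

       ff 15 <disp32>      call *foo@GOTPCREL(%rip)
    -> 90 e8 <rel32>       nop; call foo

       ff 25 <disp32>      jmp *foo@GOTPCREL(%rip)
    -> e9 <rel32> 90       jmp foo; nop

   Only the opcode/ModRM bytes change; the relocation becomes
   R_X86_64_PC32, or R_X86_64_32/R_X86_64_32S for the non-branch,
   non-PIC cases handled below.  */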
1470
1471 static bool
1472 elf_x86_64_convert_load_reloc (bfd *abfd,
1473 bfd_byte *contents,
1474 unsigned int *r_type_p,
1475 Elf_Internal_Rela *irel,
1476 struct elf_link_hash_entry *h,
1477 bool *converted,
1478 struct bfd_link_info *link_info)
1479 {
1480 struct elf_x86_link_hash_table *htab;
1481 bool is_pic;
1482 bool no_overflow;
1483 bool relocx;
1484 bool to_reloc_pc32;
1485 bool abs_symbol;
1486 bool local_ref;
1487 asection *tsec;
1488 bfd_signed_vma raddend;
1489 unsigned int opcode;
1490 unsigned int modrm;
1491 unsigned int r_type = *r_type_p;
1492 unsigned int r_symndx;
1493 bfd_vma roff = irel->r_offset;
1494 bfd_vma abs_relocation;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return true;
1498
1499 raddend = irel->r_addend;
1500 /* Addend for 32-bit PC-relative relocation must be -4. */
1501 if (raddend != -4)
1502 return true;
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517 /* Convert mov to lea even for plain R_X86_64_GOTPCREL, since that conversion has been supported for a while. */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return true;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 abs_symbol = false;
1539 abs_relocation = 0;
1540
1541 /* Get the symbol referred to by the reloc. */
1542 if (h == NULL)
1543 {
1544 Elf_Internal_Sym *isym
1545 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1546
1547 /* Skip relocation against undefined symbols. */
1548 if (isym->st_shndx == SHN_UNDEF)
1549 return true;
1550
1551 local_ref = true;
1552 if (isym->st_shndx == SHN_ABS)
1553 {
1554 tsec = bfd_abs_section_ptr;
1555 abs_symbol = true;
1556 abs_relocation = isym->st_value;
1557 }
1558 else if (isym->st_shndx == SHN_COMMON)
1559 tsec = bfd_com_section_ptr;
1560 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1561 tsec = &_bfd_elf_large_com_section;
1562 else
1563 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1564 }
1565 else
1566 {
1567 /* An undefined weak symbol is only bound locally in an executable
1568 and its reference is resolved as 0 without relocation
1569 overflow. We can only perform this optimization for
1570 GOTPCRELX relocations since we need to modify the REX byte.
1571 It is OK to convert mov with R_X86_64_GOTPCREL to
1572 R_X86_64_PC32. */
1573 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1574
1575 abs_symbol = ABS_SYMBOL_P (h);
1576 abs_relocation = h->root.u.def.value;
1577
1578 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1579 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1580 if ((relocx || opcode == 0x8b)
1581 && (h->root.type == bfd_link_hash_undefweak
1582 && !eh->linker_def
1583 && local_ref))
1584 {
1585 if (opcode == 0xff)
1586 {
1587 /* Skip for branch instructions since R_X86_64_PC32
1588 may overflow. */
1589 if (no_overflow)
1590 return true;
1591 }
1592 else if (relocx)
1593 {
1594 /* For non-branch instructions, we can convert to
1595 R_X86_64_32/R_X86_64_32S since we know if there
1596 is a REX byte. */
1597 to_reloc_pc32 = false;
1598 }
1599
1600 /* Since we don't know the current PC when PIC is true,
1601 we can't convert to R_X86_64_PC32. */
1602 if (to_reloc_pc32 && is_pic)
1603 return true;
1604
1605 goto convert;
1606 }
1607 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1608 ld.so may use its link-time address. */
1609 else if (h->start_stop
1610 || eh->linker_def
1611 || ((h->def_regular
1612 || h->root.type == bfd_link_hash_defined
1613 || h->root.type == bfd_link_hash_defweak)
1614 && h != htab->elf.hdynamic
1615 && local_ref))
1616 {
1617 /* bfd_link_hash_new or bfd_link_hash_undefined is
1618 set by an assignment in a linker script in
1619 bfd_elf_record_link_assignment. start_stop is set
1620 on __start_SECNAME/__stop_SECNAME which mark section
1621 SECNAME. */
1622 if (h->start_stop
1623 || eh->linker_def
1624 || (h->def_regular
1625 && (h->root.type == bfd_link_hash_new
1626 || h->root.type == bfd_link_hash_undefined
1627 || ((h->root.type == bfd_link_hash_defined
1628 || h->root.type == bfd_link_hash_defweak)
1629 && h->root.u.def.section == bfd_und_section_ptr))))
1630 {
1631 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1632 if (no_overflow)
1633 return true;
1634 goto convert;
1635 }
1636 tsec = h->root.u.def.section;
1637 }
1638 else
1639 return true;
1640 }
1641
1642 /* Don't convert GOTPCREL relocation against large section. */
1643 if (elf_section_data (tsec) != NULL
1644 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1645 return true;
1646
1647 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1648 if (no_overflow)
1649 return true;
1650
1651 convert:
1652 if (opcode == 0xff)
1653 {
1654 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1655 unsigned int nop;
1656 unsigned int disp;
1657 bfd_vma nop_offset;
1658
1659 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1660 R_X86_64_PC32. */
1661 modrm = bfd_get_8 (abfd, contents + roff - 1);
1662 if (modrm == 0x25)
1663 {
1664 /* Convert to "jmp foo nop". */
1665 modrm = 0xe9;
1666 nop = NOP_OPCODE;
1667 nop_offset = irel->r_offset + 3;
1668 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1669 irel->r_offset -= 1;
1670 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1671 }
1672 else
1673 {
1674 struct elf_x86_link_hash_entry *eh
1675 = (struct elf_x86_link_hash_entry *) h;
1676
1677 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1678 is a nop prefix. */
1679 modrm = 0xe8;
1680 /* To support TLS optimization, always use addr32 prefix for
1681 "call *__tls_get_addr@GOTPCREL(%rip)". */
1682 if (eh && eh->tls_get_addr)
1683 {
1684 nop = 0x67;
1685 nop_offset = irel->r_offset - 2;
1686 }
1687 else
1688 {
1689 nop = htab->params->call_nop_byte;
1690 if (htab->params->call_nop_as_suffix)
1691 {
1692 nop_offset = irel->r_offset + 3;
1693 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1694 irel->r_offset -= 1;
1695 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1696 }
1697 else
1698 nop_offset = irel->r_offset - 2;
1699 }
1700 }
1701 bfd_put_8 (abfd, nop, contents + nop_offset);
1702 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1703 r_type = R_X86_64_PC32;
1704 }
1705 else
1706 {
1707 unsigned int rex;
1708 unsigned int rex_mask = REX_R;
1709
1710 if (r_type == R_X86_64_REX_GOTPCRELX)
1711 rex = bfd_get_8 (abfd, contents + roff - 3);
1712 else
1713 rex = 0;
1714
1715 if (opcode == 0x8b)
1716 {
1717 if (abs_symbol && local_ref && relocx)
1718 to_reloc_pc32 = false;
1719
1720 if (to_reloc_pc32)
1721 {
1722 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1723 "lea foo(%rip), %reg". */
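		  /* Both forms are REX (if any) + opcode + ModRM (mod=00,
		     r/m=101) + disp32, so only the opcode byte changes:
		     8b (mov r64, r/m64) becomes 8d (lea).  The ModRM and
		     REX bytes are reused as-is.  */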
1724 opcode = 0x8d;
1725 r_type = R_X86_64_PC32;
1726 }
1727 else
1728 {
1729 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1730 "mov $foo, %reg". */
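		  /* The immediate form is c7 /0: the register that was in
		     the ModRM reg field of the load becomes the r/m field
		     of the new instruction, hence the (modrm & 0x38) >> 3
		     shuffle below with mod set to 11 (register direct).  */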
1731 opcode = 0xc7;
1732 modrm = bfd_get_8 (abfd, contents + roff - 1);
1733 modrm = 0xc0 | (modrm & 0x38) >> 3;
1734 if ((rex & REX_W) != 0
1735 && ABI_64_P (link_info->output_bfd))
1736 {
1737 /* Keep the REX_W bit in REX byte for LP64. */
1738 r_type = R_X86_64_32S;
1739 goto rewrite_modrm_rex;
1740 }
1741 else
1742 {
1743 /* If the REX_W bit in REX byte isn't needed,
1744 use R_X86_64_32 and clear the W bit to avoid
1745			 sign-extending imm32 to imm64.  */
1746 r_type = R_X86_64_32;
1747 /* Clear the W bit in REX byte. */
1748 rex_mask |= REX_W;
1749 goto rewrite_modrm_rex;
1750 }
1751 }
1752 }
1753 else
1754 {
1755 /* R_X86_64_PC32 isn't supported. */
1756 if (to_reloc_pc32)
1757 return true;
1758
1759 modrm = bfd_get_8 (abfd, contents + roff - 1);
1760 if (opcode == 0x85)
1761 {
1762 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1763 "test $foo, %reg". */
1764 modrm = 0xc0 | (modrm & 0x38) >> 3;
1765 opcode = 0xf7;
1766 }
1767 else
1768 {
1769 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1770 "binop $foo, %reg". */
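		  /* Group-1 immediate form 81 /digit: bits 5:3 of the
		     original two-operand opcode (add/or/adc/sbb/and/sub/
		     xor/cmp) match the /digit selector, so (opcode & 0x3c)
		     drops straight into the reg field of the new ModRM
		     byte.  */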
1771 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1772 opcode = 0x81;
1773 }
1774
1775 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1776 overflow when sign-extending imm32 to imm64. */
1777 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1778
1779 rewrite_modrm_rex:
1780 if (abs_relocation)
1781 {
1782 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
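		  /* For R_X86_64_32S the value must be representable as a
		     sign-extended 32-bit immediate, i.e. lie within
		     [-0x80000000, 0x7fffffff]; for R_X86_64_32 it must fit
		     in an unsigned 32-bit field.  */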
1783 if (r_type == R_X86_64_32S)
1784 {
1785 if ((abs_relocation + 0x80000000) > 0xffffffff)
1786 return true;
1787 }
1788 else
1789 {
1790 if (abs_relocation > 0xffffffff)
1791 return true;
1792 }
1793 }
1794
1795 bfd_put_8 (abfd, modrm, contents + roff - 1);
1796
1797 if (rex)
1798 {
1799 /* Move the R bit to the B bit in REX byte. */
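		  /* The register that was encoded in ModRM.reg (extended
		     by REX.R) is now encoded in ModRM.r/m, which is
		     extended by REX.B instead.  */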
1800 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1801 bfd_put_8 (abfd, rex, contents + roff - 3);
1802 }
1803
1804 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1805 irel->r_addend = 0;
1806 }
1807
1808 bfd_put_8 (abfd, opcode, contents + roff - 2);
1809 }
1810
1811 *r_type_p = r_type;
1812 irel->r_info = htab->r_info (r_symndx,
1813 r_type | R_X86_64_converted_reloc_bit);
1814
1815 *converted = true;
1816
1817 return true;
1818 }
1819
1820 /* Look through the relocs for a section during the first phase, and
1821 calculate needed space in the global offset table, and procedure
1822 linkage table. */
1823
1824 static bool
1825 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info,
1826 asection *sec,
1827 const Elf_Internal_Rela *relocs)
1828 {
1829 struct elf_x86_link_hash_table *htab;
1830 Elf_Internal_Shdr *symtab_hdr;
1831 struct elf_link_hash_entry **sym_hashes;
1832 const Elf_Internal_Rela *rel;
1833 const Elf_Internal_Rela *rel_end;
1834 bfd_byte *contents;
1835 bool converted;
1836
1837 if (bfd_link_relocatable (info))
1838 return true;
1839
1840 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1841 if (htab == NULL)
1842 {
1843 sec->check_relocs_failed = 1;
1844 return false;
1845 }
1846
1847 BFD_ASSERT (is_x86_elf (abfd, htab));
1848
1849 /* Get the section contents. */
1850 if (elf_section_data (sec)->this_hdr.contents != NULL)
1851 contents = elf_section_data (sec)->this_hdr.contents;
1852 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1853 {
1854 sec->check_relocs_failed = 1;
1855 return false;
1856 }
1857
1858 symtab_hdr = &elf_symtab_hdr (abfd);
1859 sym_hashes = elf_sym_hashes (abfd);
1860
1861 converted = false;
1862
1863 rel_end = relocs + sec->reloc_count;
1864 for (rel = relocs; rel < rel_end; rel++)
1865 {
1866 unsigned int r_type;
1867 unsigned int r_symndx;
1868 struct elf_link_hash_entry *h;
1869 struct elf_x86_link_hash_entry *eh;
1870 Elf_Internal_Sym *isym;
1871 const char *name;
1872 bool size_reloc;
1873 bool converted_reloc;
1874 bool no_dynreloc;
1875
1876 r_symndx = htab->r_sym (rel->r_info);
1877 r_type = ELF32_R_TYPE (rel->r_info);
1878
1879 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1880 {
1881 /* xgettext:c-format */
1882 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1883 abfd, r_symndx);
1884 goto error_return;
1885 }
1886
1887 if (r_symndx < symtab_hdr->sh_info)
1888 {
1889 /* A local symbol. */
1890 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1891 abfd, r_symndx);
1892 if (isym == NULL)
1893 goto error_return;
1894
1895 /* Check relocation against local STT_GNU_IFUNC symbol. */
1896 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1897 {
1898 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1899 true);
1900 if (h == NULL)
1901 goto error_return;
1902
1903 /* Fake a STT_GNU_IFUNC symbol. */
1904 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1905 isym, NULL);
1906 h->type = STT_GNU_IFUNC;
1907 h->def_regular = 1;
1908 h->ref_regular = 1;
1909 h->forced_local = 1;
1910 h->root.type = bfd_link_hash_defined;
1911 }
1912 else
1913 h = NULL;
1914 }
1915 else
1916 {
1917 isym = NULL;
1918 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1919 while (h->root.type == bfd_link_hash_indirect
1920 || h->root.type == bfd_link_hash_warning)
1921 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1922 }
1923
1924 /* Check invalid x32 relocations. */
1925 if (!ABI_64_P (abfd))
1926 switch (r_type)
1927 {
1928 default:
1929 break;
1930
1931 case R_X86_64_DTPOFF64:
1932 case R_X86_64_TPOFF64:
1933 case R_X86_64_PC64:
1934 case R_X86_64_GOTOFF64:
1935 case R_X86_64_GOT64:
1936 case R_X86_64_GOTPCREL64:
1937 case R_X86_64_GOTPC64:
1938 case R_X86_64_GOTPLT64:
1939 case R_X86_64_PLTOFF64:
1940 {
1941 if (h)
1942 name = h->root.root.string;
1943 else
1944 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1945 NULL);
1946 _bfd_error_handler
1947 /* xgettext:c-format */
1948 (_("%pB: relocation %s against symbol `%s' isn't "
1949 "supported in x32 mode"), abfd,
1950 x86_64_elf_howto_table[r_type].name, name);
1951 bfd_set_error (bfd_error_bad_value);
1952 goto error_return;
1953 }
1954 break;
1955 }
1956
1957 eh = (struct elf_x86_link_hash_entry *) h;
1958
1959 if (h != NULL)
1960 {
1961 /* It is referenced by a non-shared object. */
1962 h->ref_regular = 1;
1963 }
1964
1965 converted_reloc = false;
1966 if ((r_type == R_X86_64_GOTPCREL
1967 || r_type == R_X86_64_GOTPCRELX
1968 || r_type == R_X86_64_REX_GOTPCRELX)
1969 && (h == NULL || h->type != STT_GNU_IFUNC))
1970 {
1971 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1972 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1973 irel, h, &converted_reloc,
1974 info))
1975 goto error_return;
1976
1977 if (converted_reloc)
1978 converted = true;
1979 }
1980
1981 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
1982 symtab_hdr, &no_dynreloc))
1983 return false;
1984
1985 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1986 symtab_hdr, sym_hashes,
1987 &r_type, GOT_UNKNOWN,
1988 rel, rel_end, h, r_symndx, false))
1989 goto error_return;
1990
1991 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1992 if (h == htab->elf.hgot)
1993 htab->got_referenced = true;
1994
1995 switch (r_type)
1996 {
1997 case R_X86_64_TLSLD:
1998 htab->tls_ld_or_ldm_got.refcount = 1;
1999 goto create_got;
2000
2001 case R_X86_64_TPOFF32:
2002 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2003 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2004 &x86_64_elf_howto_table[r_type]);
2005 if (eh != NULL)
2006 eh->zero_undefweak &= 0x2;
2007 break;
2008
2009 case R_X86_64_GOTTPOFF:
2010 if (!bfd_link_executable (info))
2011 info->flags |= DF_STATIC_TLS;
2012 /* Fall through */
2013
2014 case R_X86_64_GOT32:
2015 case R_X86_64_GOTPCREL:
2016 case R_X86_64_GOTPCRELX:
2017 case R_X86_64_REX_GOTPCRELX:
2018 case R_X86_64_TLSGD:
2019 case R_X86_64_GOT64:
2020 case R_X86_64_GOTPCREL64:
2021 case R_X86_64_GOTPLT64:
2022 case R_X86_64_GOTPC32_TLSDESC:
2023 case R_X86_64_TLSDESC_CALL:
2024 /* This symbol requires a global offset table entry. */
2025 {
2026 int tls_type, old_tls_type;
2027
2028 switch (r_type)
2029 {
2030 default:
2031 tls_type = GOT_NORMAL;
2032 if (h)
2033 {
2034 if (ABS_SYMBOL_P (h))
2035 tls_type = GOT_ABS;
2036 }
2037 else if (isym->st_shndx == SHN_ABS)
2038 tls_type = GOT_ABS;
2039 break;
2040 case R_X86_64_TLSGD:
2041 tls_type = GOT_TLS_GD;
2042 break;
2043 case R_X86_64_GOTTPOFF:
2044 tls_type = GOT_TLS_IE;
2045 break;
2046 case R_X86_64_GOTPC32_TLSDESC:
2047 case R_X86_64_TLSDESC_CALL:
2048 tls_type = GOT_TLS_GDESC;
2049 break;
2050 }
2051
2052 if (h != NULL)
2053 {
2054 h->got.refcount = 1;
2055 old_tls_type = eh->tls_type;
2056 }
2057 else
2058 {
2059 bfd_signed_vma *local_got_refcounts;
2060
2061 if (!elf_x86_allocate_local_got_info (abfd,
2062 symtab_hdr->sh_info))
2063 goto error_return;
2064
2065 /* This is a global offset table entry for a local symbol. */
2066 local_got_refcounts = elf_local_got_refcounts (abfd);
2067 local_got_refcounts[r_symndx] = 1;
2068 old_tls_type
2069 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2070 }
2071
2072 /* If a TLS symbol is accessed using IE at least once,
2073	 there is no point in using the dynamic model for it.  */
2074 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2075 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2076 || tls_type != GOT_TLS_IE))
2077 {
2078 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2079 tls_type = old_tls_type;
2080 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2081 && GOT_TLS_GD_ANY_P (tls_type))
2082 tls_type |= old_tls_type;
2083 else
2084 {
2085 if (h)
2086 name = h->root.root.string;
2087 else
2088 name = bfd_elf_sym_name (abfd, symtab_hdr,
2089 isym, NULL);
2090 _bfd_error_handler
2091 /* xgettext:c-format */
2092 (_("%pB: '%s' accessed both as normal and"
2093 " thread local symbol"),
2094 abfd, name);
2095 bfd_set_error (bfd_error_bad_value);
2096 goto error_return;
2097 }
2098 }
2099
2100 if (old_tls_type != tls_type)
2101 {
2102 if (eh != NULL)
2103 eh->tls_type = tls_type;
2104 else
2105 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2106 }
2107 }
2108 /* Fall through */
2109
2110 case R_X86_64_GOTOFF64:
2111 case R_X86_64_GOTPC32:
2112 case R_X86_64_GOTPC64:
2113 create_got:
2114 if (eh != NULL)
2115 eh->zero_undefweak &= 0x2;
2116 break;
2117
2118 case R_X86_64_PLT32:
2119 case R_X86_64_PLT32_BND:
2120 /* This symbol requires a procedure linkage table entry. We
2121 actually build the entry in adjust_dynamic_symbol,
2122 because this might be a case of linking PIC code which is
2123 never referenced by a dynamic object, in which case we
2124 don't need to generate a procedure linkage table entry
2125 after all. */
2126
2127 /* If this is a local symbol, we resolve it directly without
2128 creating a procedure linkage table entry. */
2129 if (h == NULL)
2130 continue;
2131
2132 eh->zero_undefweak &= 0x2;
2133 h->needs_plt = 1;
2134 h->plt.refcount = 1;
2135 break;
2136
2137 case R_X86_64_PLTOFF64:
2138 /* This tries to form the 'address' of a function relative
2139 to GOT. For global symbols we need a PLT entry. */
2140 if (h != NULL)
2141 {
2142 h->needs_plt = 1;
2143 h->plt.refcount = 1;
2144 }
2145 goto create_got;
2146
2147 case R_X86_64_SIZE32:
2148 case R_X86_64_SIZE64:
2149 size_reloc = true;
2150 goto do_size;
2151
2152 case R_X86_64_32:
2153 if (!ABI_64_P (abfd))
2154 goto pointer;
2155 /* Fall through. */
2156 case R_X86_64_8:
2157 case R_X86_64_16:
2158 case R_X86_64_32S:
2159 /* Check relocation overflow as these relocs may lead to
2160 run-time relocation overflow. Don't error out for
2161 sections we don't care about, such as debug sections or
2162 when relocation overflow check is disabled. */
2163 if (!htab->params->no_reloc_overflow_check
2164 && !converted_reloc
2165 && (bfd_link_pic (info)
2166 || (bfd_link_executable (info)
2167 && h != NULL
2168 && !h->def_regular
2169 && h->def_dynamic
2170 && (sec->flags & SEC_READONLY) == 0)))
2171 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2172 &x86_64_elf_howto_table[r_type]);
2173 /* Fall through. */
2174
2175 case R_X86_64_PC8:
2176 case R_X86_64_PC16:
2177 case R_X86_64_PC32:
2178 case R_X86_64_PC32_BND:
2179 case R_X86_64_PC64:
2180 case R_X86_64_64:
2181 pointer:
2182 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2183 eh->zero_undefweak |= 0x2;
2184 /* We are called after all symbols have been resolved. Only
2185 relocation against STT_GNU_IFUNC symbol must go through
2186 PLT. */
2187 if (h != NULL
2188 && (bfd_link_executable (info)
2189 || h->type == STT_GNU_IFUNC))
2190 {
2191 bool func_pointer_ref = false;
2192
2193 if (r_type == R_X86_64_PC32)
2194 {
2195 /* Since something like ".long foo - ." may be used
2196		     as a pointer, make sure that the PLT is used if foo is
2197 a function defined in a shared library. */
2198 if ((sec->flags & SEC_CODE) == 0)
2199 {
2200 h->pointer_equality_needed = 1;
2201 if (bfd_link_pie (info)
2202 && h->type == STT_FUNC
2203 && !h->def_regular
2204 && h->def_dynamic)
2205 {
2206 h->needs_plt = 1;
2207 h->plt.refcount = 1;
2208 }
2209 }
2210 }
2211 else if (r_type != R_X86_64_PC32_BND
2212 && r_type != R_X86_64_PC64)
2213 {
2214 h->pointer_equality_needed = 1;
2215 /* At run-time, R_X86_64_64 can be resolved for both
2216 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2217 can only be resolved for x32. */
2218 if ((sec->flags & SEC_READONLY) == 0
2219 && (r_type == R_X86_64_64
2220 || (!ABI_64_P (abfd)
2221 && (r_type == R_X86_64_32
2222 || r_type == R_X86_64_32S))))
2223 func_pointer_ref = true;
2224 }
2225
2226 if (!func_pointer_ref)
2227 {
2228 /* If this reloc is in a read-only section, we might
2229 need a copy reloc. We can't check reliably at this
2230 stage whether the section is read-only, as input
2231 sections have not yet been mapped to output sections.
2232 Tentatively set the flag for now, and correct in
2233 adjust_dynamic_symbol. */
2234 h->non_got_ref = 1;
2235
2236 if (!elf_has_indirect_extern_access (sec->owner))
2237 eh->non_got_ref_without_indirect_extern_access = 1;
2238
2239 /* We may need a .plt entry if the symbol is a function
2240 defined in a shared lib or is a function referenced
2241 from the code or read-only section. */
2242 if (!h->def_regular
2243 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2244 h->plt.refcount = 1;
2245 }
2246 }
2247
2248 size_reloc = false;
2249 do_size:
2250 if (!no_dynreloc
2251 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec,
2252 r_type,
2253 htab->pointer_r_type))
2254 {
2255 struct elf_dyn_relocs *p;
2256 struct elf_dyn_relocs **head;
2257
2258 /* If this is a global symbol, we count the number of
2259 relocations we need for this symbol. */
2260 if (h != NULL)
2261 head = &h->dyn_relocs;
2262 else
2263 {
2264 /* Track dynamic relocs needed for local syms too.
2265 We really need local syms available to do this
2266 easily. Oh well. */
2267 asection *s;
2268 void **vpp;
2269
2270 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2271 abfd, r_symndx);
2272 if (isym == NULL)
2273 goto error_return;
2274
2275 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2276 if (s == NULL)
2277 s = sec;
2278
2279 /* Beware of type punned pointers vs strict aliasing
2280 rules. */
2281 vpp = &(elf_section_data (s)->local_dynrel);
2282 head = (struct elf_dyn_relocs **)vpp;
2283 }
2284
2285 p = *head;
2286 if (p == NULL || p->sec != sec)
2287 {
2288 size_t amt = sizeof *p;
2289
2290 p = ((struct elf_dyn_relocs *)
2291 bfd_alloc (htab->elf.dynobj, amt));
2292 if (p == NULL)
2293 goto error_return;
2294 p->next = *head;
2295 *head = p;
2296 p->sec = sec;
2297 p->count = 0;
2298 p->pc_count = 0;
2299 }
2300
2301 p->count += 1;
2302 /* Count size relocation as PC-relative relocation. */
2303 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc)
2304 p->pc_count += 1;
2305 }
2306 break;
2307
2308 /* This relocation describes the C++ object vtable hierarchy.
2309 Reconstruct it for later use during GC. */
2310 case R_X86_64_GNU_VTINHERIT:
2311 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2312 goto error_return;
2313 break;
2314
2315 /* This relocation describes which C++ vtable entries are actually
2316 used. Record for later use during GC. */
2317 case R_X86_64_GNU_VTENTRY:
2318 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2319 goto error_return;
2320 break;
2321
2322 default:
2323 break;
2324 }
2325 }
2326
2327 if (elf_section_data (sec)->this_hdr.contents != contents)
2328 {
2329 if (!converted && !_bfd_link_keep_memory (info))
2330 free (contents);
2331 else
2332 {
2333 /* Cache the section contents for elf_link_input_bfd if any
2334 load is converted or --no-keep-memory isn't used. */
2335 elf_section_data (sec)->this_hdr.contents = contents;
2336 info->cache_size += sec->size;
2337 }
2338 }
2339
2340 /* Cache relocations if any load is converted. */
2341 if (elf_section_data (sec)->relocs != relocs && converted)
2342 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2343
2344 return true;
2345
2346 error_return:
2347 if (elf_section_data (sec)->this_hdr.contents != contents)
2348 free (contents);
2349 sec->check_relocs_failed = 1;
2350 return false;
2351 }
2352
2353 static bool
2354 elf_x86_64_always_size_sections (bfd *output_bfd,
2355 struct bfd_link_info *info)
2356 {
2357 bfd *abfd;
2358
2359 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */
2360 for (abfd = info->input_bfds;
2361 abfd != (bfd *) NULL;
2362 abfd = abfd->link.next)
2363 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
2364 && !_bfd_elf_link_iterate_on_relocs (abfd, info,
2365 elf_x86_64_scan_relocs))
2366 return false;
2367
2368 return _bfd_x86_elf_always_size_sections (output_bfd, info);
2369 }
2370
2371 /* Return the relocation value for @tpoff relocation
2372 if STT_TLS virtual address is ADDRESS. */
2373
2374 static bfd_vma
2375 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2376 {
2377 struct elf_link_hash_table *htab = elf_hash_table (info);
2378 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2379 bfd_vma static_tls_size;
2380
2381   /* If tls_sec is NULL, we should have signalled an error already.  */
2382 if (htab->tls_sec == NULL)
2383 return 0;
2384
2385 /* Consider special static TLS alignment requirements. */
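  /* NB: with TLS variant II, as used on x86-64, the thread pointer points
     to the end of the static TLS block, so the @tpoff value returned here
     is a negative offset from %fs:0; hence the subtraction of the aligned
     static TLS size.  */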
2386 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2387 return address - static_tls_size - htab->tls_sec->vma;
2388 }
2389
2390 /* Relocate an x86_64 ELF section. */
2391
2392 static int
2393 elf_x86_64_relocate_section (bfd *output_bfd,
2394 struct bfd_link_info *info,
2395 bfd *input_bfd,
2396 asection *input_section,
2397 bfd_byte *contents,
2398 Elf_Internal_Rela *relocs,
2399 Elf_Internal_Sym *local_syms,
2400 asection **local_sections)
2401 {
2402 struct elf_x86_link_hash_table *htab;
2403 Elf_Internal_Shdr *symtab_hdr;
2404 struct elf_link_hash_entry **sym_hashes;
2405 bfd_vma *local_got_offsets;
2406 bfd_vma *local_tlsdesc_gotents;
2407 Elf_Internal_Rela *rel;
2408 Elf_Internal_Rela *wrel;
2409 Elf_Internal_Rela *relend;
2410 unsigned int plt_entry_size;
2411 bool status;
2412
2413 /* Skip if check_relocs or scan_relocs failed. */
2414 if (input_section->check_relocs_failed)
2415 return false;
2416
2417 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2418 if (htab == NULL)
2419 return false;
2420
2421 if (!is_x86_elf (input_bfd, htab))
2422 {
2423 bfd_set_error (bfd_error_wrong_format);
2424 return false;
2425 }
2426
2427 plt_entry_size = htab->plt.plt_entry_size;
2428 symtab_hdr = &elf_symtab_hdr (input_bfd);
2429 sym_hashes = elf_sym_hashes (input_bfd);
2430 local_got_offsets = elf_local_got_offsets (input_bfd);
2431 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2432
2433 _bfd_x86_elf_set_tls_module_base (info);
2434
2435 status = true;
2436 rel = wrel = relocs;
2437 relend = relocs + input_section->reloc_count;
2438 for (; rel < relend; wrel++, rel++)
2439 {
2440 unsigned int r_type, r_type_tls;
2441 reloc_howto_type *howto;
2442 unsigned long r_symndx;
2443 struct elf_link_hash_entry *h;
2444 struct elf_x86_link_hash_entry *eh;
2445 Elf_Internal_Sym *sym;
2446 asection *sec;
2447 bfd_vma off, offplt, plt_offset;
2448 bfd_vma relocation;
2449 bool unresolved_reloc;
2450 bfd_reloc_status_type r;
2451 int tls_type;
2452 asection *base_got, *resolved_plt;
2453 bfd_vma st_size;
2454 bool resolved_to_zero;
2455 bool relative_reloc;
2456 bool converted_reloc;
2457 bool need_copy_reloc_in_pie;
2458 bool no_copyreloc_p;
2459
2460 r_type = ELF32_R_TYPE (rel->r_info);
2461 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2462 || r_type == (int) R_X86_64_GNU_VTENTRY)
2463 {
2464 if (wrel != rel)
2465 *wrel = *rel;
2466 continue;
2467 }
2468
2469 r_symndx = htab->r_sym (rel->r_info);
2470 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2471 if (converted_reloc)
2472 {
2473 r_type &= ~R_X86_64_converted_reloc_bit;
2474 rel->r_info = htab->r_info (r_symndx, r_type);
2475 }
2476
2477 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2478 if (howto == NULL)
2479 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2480
2481 h = NULL;
2482 sym = NULL;
2483 sec = NULL;
2484 unresolved_reloc = false;
2485 if (r_symndx < symtab_hdr->sh_info)
2486 {
2487 sym = local_syms + r_symndx;
2488 sec = local_sections[r_symndx];
2489
2490 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2491 &sec, rel);
2492 st_size = sym->st_size;
2493
2494 /* Relocate against local STT_GNU_IFUNC symbol. */
2495 if (!bfd_link_relocatable (info)
2496 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2497 {
2498 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2499 rel, false);
2500 if (h == NULL)
2501 abort ();
2502
2503 /* Set STT_GNU_IFUNC symbol value. */
2504 h->root.u.def.value = sym->st_value;
2505 h->root.u.def.section = sec;
2506 }
2507 }
2508 else
2509 {
2510 bool warned ATTRIBUTE_UNUSED;
2511 bool ignored ATTRIBUTE_UNUSED;
2512
2513 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2514 r_symndx, symtab_hdr, sym_hashes,
2515 h, sec, relocation,
2516 unresolved_reloc, warned, ignored);
2517 st_size = h->size;
2518 }
2519
2520 if (sec != NULL && discarded_section (sec))
2521 {
2522 _bfd_clear_contents (howto, input_bfd, input_section,
2523 contents, rel->r_offset);
2524 wrel->r_offset = rel->r_offset;
2525 wrel->r_info = 0;
2526 wrel->r_addend = 0;
2527
2528 /* For ld -r, remove relocations in debug sections against
2529	     sections defined in discarded sections.  Not done for
2530	     .eh_frame, whose editing code expects them to be present.  */
2531 if (bfd_link_relocatable (info)
2532 && (input_section->flags & SEC_DEBUGGING))
2533 wrel--;
2534
2535 continue;
2536 }
2537
2538 if (bfd_link_relocatable (info))
2539 {
2540 if (wrel != rel)
2541 *wrel = *rel;
2542 continue;
2543 }
2544
2545 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2546 {
2547 if (r_type == R_X86_64_64)
2548 {
2549 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2550 zero-extend it to 64bit if addend is zero. */
2551 r_type = R_X86_64_32;
2552 memset (contents + rel->r_offset + 4, 0, 4);
2553 }
2554 else if (r_type == R_X86_64_SIZE64)
2555 {
2556 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2557 zero-extend it to 64bit if addend is zero. */
2558 r_type = R_X86_64_SIZE32;
2559 memset (contents + rel->r_offset + 4, 0, 4);
2560 }
2561 }
2562
2563 eh = (struct elf_x86_link_hash_entry *) h;
2564
2565 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2566 it here if it is defined in a non-shared object. */
2567 if (h != NULL
2568 && h->type == STT_GNU_IFUNC
2569 && h->def_regular)
2570 {
2571 bfd_vma plt_index;
2572 const char *name;
2573
2574 if ((input_section->flags & SEC_ALLOC) == 0)
2575 {
2576 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2577 STT_GNU_IFUNC symbol as STT_FUNC. */
2578 if (elf_section_type (input_section) == SHT_NOTE)
2579 goto skip_ifunc;
2580 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2581 sections because such sections are not SEC_ALLOC and
2582 thus ld.so will not process them. */
2583 if ((input_section->flags & SEC_DEBUGGING) != 0)
2584 continue;
2585 abort ();
2586 }
2587
2588 switch (r_type)
2589 {
2590 default:
2591 break;
2592
2593 case R_X86_64_GOTPCREL:
2594 case R_X86_64_GOTPCRELX:
2595 case R_X86_64_REX_GOTPCRELX:
2596 case R_X86_64_GOTPCREL64:
2597 base_got = htab->elf.sgot;
2598 off = h->got.offset;
2599
2600 if (base_got == NULL)
2601 abort ();
2602
2603 if (off == (bfd_vma) -1)
2604 {
2605 /* We can't use h->got.offset here to save state, or
2606 even just remember the offset, as finish_dynamic_symbol
2607 would use that as offset into .got. */
2608
2609 if (h->plt.offset == (bfd_vma) -1)
2610 abort ();
2611
2612 if (htab->elf.splt != NULL)
2613 {
2614 plt_index = (h->plt.offset / plt_entry_size
2615 - htab->plt.has_plt0);
2616 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2617 base_got = htab->elf.sgotplt;
2618 }
2619 else
2620 {
2621 plt_index = h->plt.offset / plt_entry_size;
2622 off = plt_index * GOT_ENTRY_SIZE;
2623 base_got = htab->elf.igotplt;
2624 }
2625
2626 if (h->dynindx == -1
2627 || h->forced_local
2628 || info->symbolic)
2629 {
2630		      /* This references the local definition.  We must
2631 initialize this entry in the global offset table.
2632 Since the offset must always be a multiple of 8,
2633 we use the least significant bit to record
2634 whether we have initialized it already.
2635
2636 When doing a dynamic link, we create a .rela.got
2637 relocation entry to initialize the value. This
2638 is done in the finish_dynamic_symbol routine. */
2639 if ((off & 1) != 0)
2640 off &= ~1;
2641 else
2642 {
2643 bfd_put_64 (output_bfd, relocation,
2644 base_got->contents + off);
2645 /* Note that this is harmless for the GOTPLT64
2646 case, as -1 | 1 still is -1. */
2647 h->got.offset |= 1;
2648 }
2649 }
2650 }
2651
2652 relocation = (base_got->output_section->vma
2653 + base_got->output_offset + off);
2654
2655 goto do_relocation;
2656 }
2657
2658 if (h->plt.offset == (bfd_vma) -1)
2659 {
2660 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2661 if (r_type == htab->pointer_r_type
2662 && (input_section->flags & SEC_CODE) == 0)
2663 goto do_ifunc_pointer;
2664 goto bad_ifunc_reloc;
2665 }
2666
2667 /* STT_GNU_IFUNC symbol must go through PLT. */
2668 if (htab->elf.splt != NULL)
2669 {
2670 if (htab->plt_second != NULL)
2671 {
2672 resolved_plt = htab->plt_second;
2673 plt_offset = eh->plt_second.offset;
2674 }
2675 else
2676 {
2677 resolved_plt = htab->elf.splt;
2678 plt_offset = h->plt.offset;
2679 }
2680 }
2681 else
2682 {
2683 resolved_plt = htab->elf.iplt;
2684 plt_offset = h->plt.offset;
2685 }
2686
2687 relocation = (resolved_plt->output_section->vma
2688 + resolved_plt->output_offset + plt_offset);
2689
2690 switch (r_type)
2691 {
2692 default:
2693 bad_ifunc_reloc:
2694 if (h->root.root.string)
2695 name = h->root.root.string;
2696 else
2697 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2698 NULL);
2699 _bfd_error_handler
2700 /* xgettext:c-format */
2701 (_("%pB: relocation %s against STT_GNU_IFUNC "
2702 "symbol `%s' isn't supported"), input_bfd,
2703 howto->name, name);
2704 bfd_set_error (bfd_error_bad_value);
2705 return false;
2706
2707 case R_X86_64_32S:
2708 if (bfd_link_pic (info))
2709 abort ();
2710 goto do_relocation;
2711
2712 case R_X86_64_32:
2713 if (ABI_64_P (output_bfd))
2714 goto do_relocation;
2715 /* FALLTHROUGH */
2716 case R_X86_64_64:
2717 do_ifunc_pointer:
2718 if (rel->r_addend != 0)
2719 {
2720 if (h->root.root.string)
2721 name = h->root.root.string;
2722 else
2723 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2724 sym, NULL);
2725 _bfd_error_handler
2726 /* xgettext:c-format */
2727 (_("%pB: relocation %s against STT_GNU_IFUNC "
2728 "symbol `%s' has non-zero addend: %" PRId64),
2729 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2730 bfd_set_error (bfd_error_bad_value);
2731 return false;
2732 }
2733
2734		  /* Generate a dynamic relocation only when there is a
2735 non-GOT reference in a shared object or there is no
2736 PLT. */
2737 if ((bfd_link_pic (info) && h->non_got_ref)
2738 || h->plt.offset == (bfd_vma) -1)
2739 {
2740 Elf_Internal_Rela outrel;
2741 asection *sreloc;
2742
2743 /* Need a dynamic relocation to get the real function
2744 address. */
2745 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2746 info,
2747 input_section,
2748 rel->r_offset);
2749 if (outrel.r_offset == (bfd_vma) -1
2750 || outrel.r_offset == (bfd_vma) -2)
2751 abort ();
2752
2753 outrel.r_offset += (input_section->output_section->vma
2754 + input_section->output_offset);
2755
2756 if (POINTER_LOCAL_IFUNC_P (info, h))
2757 {
2758 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2759 h->root.root.string,
2760 h->root.u.def.section->owner);
2761
2762 /* This symbol is resolved locally. */
2763 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2764 outrel.r_addend = (h->root.u.def.value
2765 + h->root.u.def.section->output_section->vma
2766 + h->root.u.def.section->output_offset);
2767
2768 if (htab->params->report_relative_reloc)
2769 _bfd_x86_elf_link_report_relative_reloc
2770 (info, input_section, h, sym,
2771 "R_X86_64_IRELATIVE", &outrel);
2772 }
2773 else
2774 {
2775 outrel.r_info = htab->r_info (h->dynindx, r_type);
2776 outrel.r_addend = 0;
2777 }
2778
2779 /* Dynamic relocations are stored in
2780 1. .rela.ifunc section in PIC object.
2781 2. .rela.got section in dynamic executable.
2782 3. .rela.iplt section in static executable. */
2783 if (bfd_link_pic (info))
2784 sreloc = htab->elf.irelifunc;
2785 else if (htab->elf.splt != NULL)
2786 sreloc = htab->elf.srelgot;
2787 else
2788 sreloc = htab->elf.irelplt;
2789 elf_append_rela (output_bfd, sreloc, &outrel);
2790
2791 /* If this reloc is against an external symbol, we
2792 do not want to fiddle with the addend. Otherwise,
2793 we need to include the symbol value so that it
2794 becomes an addend for the dynamic reloc. For an
2795 internal symbol, we have updated addend. */
2796 continue;
2797 }
2798 /* FALLTHROUGH */
2799 case R_X86_64_PC32:
2800 case R_X86_64_PC32_BND:
2801 case R_X86_64_PC64:
2802 case R_X86_64_PLT32:
2803 case R_X86_64_PLT32_BND:
2804 goto do_relocation;
2805 }
2806 }
2807
2808 skip_ifunc:
2809 resolved_to_zero = (eh != NULL
2810 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2811
2812 /* When generating a shared object, the relocations handled here are
2813 copied into the output file to be resolved at run time. */
2814 switch (r_type)
2815 {
2816 case R_X86_64_GOT32:
2817 case R_X86_64_GOT64:
2818 /* Relocation is to the entry for this symbol in the global
2819 offset table. */
2820 case R_X86_64_GOTPCREL:
2821 case R_X86_64_GOTPCRELX:
2822 case R_X86_64_REX_GOTPCRELX:
2823 case R_X86_64_GOTPCREL64:
2824 /* Use global offset table entry as symbol value. */
2825 case R_X86_64_GOTPLT64:
2826 /* This is obsolete and treated the same as GOT64. */
2827 base_got = htab->elf.sgot;
2828
2829 if (htab->elf.sgot == NULL)
2830 abort ();
2831
2832 relative_reloc = false;
2833 if (h != NULL)
2834 {
2835 off = h->got.offset;
2836 if (h->needs_plt
2837 && h->plt.offset != (bfd_vma)-1
2838 && off == (bfd_vma)-1)
2839 {
2840 /* We can't use h->got.offset here to save
2841 state, or even just remember the offset, as
2842 finish_dynamic_symbol would use that as offset into
2843 .got. */
2844 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2845 - htab->plt.has_plt0);
2846 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2847 base_got = htab->elf.sgotplt;
2848 }
2849
2850 if (RESOLVED_LOCALLY_P (info, h, htab))
2851 {
2852 /* We must initialize this entry in the global offset
2853 table. Since the offset must always be a multiple
2854 of 8, we use the least significant bit to record
2855 whether we have initialized it already.
2856
2857 When doing a dynamic link, we create a .rela.got
2858 relocation entry to initialize the value. This is
2859 done in the finish_dynamic_symbol routine. */
2860 if ((off & 1) != 0)
2861 off &= ~1;
2862 else
2863 {
2864 bfd_put_64 (output_bfd, relocation,
2865 base_got->contents + off);
2866 /* Note that this is harmless for the GOTPLT64 case,
2867 as -1 | 1 still is -1. */
2868 h->got.offset |= 1;
2869
2870 /* NB: Don't generate relative relocation here if
2871 it has been generated by DT_RELR. */
2872 if (!info->enable_dt_relr
2873 && GENERATE_RELATIVE_RELOC_P (info, h))
2874 {
2875 /* If this symbol isn't dynamic in PIC,
2876 generate R_X86_64_RELATIVE here. */
2877 eh->no_finish_dynamic_symbol = 1;
2878 relative_reloc = true;
2879 }
2880 }
2881 }
2882 else
2883 unresolved_reloc = false;
2884 }
2885 else
2886 {
2887 if (local_got_offsets == NULL)
2888 abort ();
2889
2890 off = local_got_offsets[r_symndx];
2891
2892 /* The offset must always be a multiple of 8. We use
2893 the least significant bit to record whether we have
2894 already generated the necessary reloc. */
2895 if ((off & 1) != 0)
2896 off &= ~1;
2897 else
2898 {
2899 bfd_put_64 (output_bfd, relocation,
2900 base_got->contents + off);
2901 local_got_offsets[r_symndx] |= 1;
2902
2903 /* NB: GOTPCREL relocations against local absolute
2904 symbol store relocation value in the GOT slot
2905 without relative relocation. Don't generate
2906 relative relocation here if it has been generated
2907 by DT_RELR. */
2908 if (!info->enable_dt_relr
2909 && bfd_link_pic (info)
2910 && !(sym->st_shndx == SHN_ABS
2911 && (r_type == R_X86_64_GOTPCREL
2912 || r_type == R_X86_64_GOTPCRELX
2913 || r_type == R_X86_64_REX_GOTPCRELX)))
2914 relative_reloc = true;
2915 }
2916 }
2917
2918 if (relative_reloc)
2919 {
2920 asection *s;
2921 Elf_Internal_Rela outrel;
2922
2923 /* We need to generate a R_X86_64_RELATIVE reloc
2924 for the dynamic linker. */
2925 s = htab->elf.srelgot;
2926 if (s == NULL)
2927 abort ();
2928
2929 outrel.r_offset = (base_got->output_section->vma
2930 + base_got->output_offset
2931 + off);
2932 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2933 outrel.r_addend = relocation;
2934
2935 if (htab->params->report_relative_reloc)
2936 _bfd_x86_elf_link_report_relative_reloc
2937 (info, input_section, h, sym, "R_X86_64_RELATIVE",
2938 &outrel);
2939
2940 elf_append_rela (output_bfd, s, &outrel);
2941 }
2942
2943 if (off >= (bfd_vma) -2)
2944 abort ();
2945
2946 relocation = base_got->output_section->vma
2947 + base_got->output_offset + off;
2948 if (r_type != R_X86_64_GOTPCREL
2949 && r_type != R_X86_64_GOTPCRELX
2950 && r_type != R_X86_64_REX_GOTPCRELX
2951 && r_type != R_X86_64_GOTPCREL64)
2952 relocation -= htab->elf.sgotplt->output_section->vma
2953 - htab->elf.sgotplt->output_offset;
2954
2955 break;
2956
2957 case R_X86_64_GOTOFF64:
2958 /* Relocation is relative to the start of the global offset
2959 table. */
2960
2961 /* Check to make sure it isn't a protected function or data
2962	 symbol for a shared library since it may not be local when
2963	 used as a function address or with a copy relocation.  We also
2964 need to make sure that a symbol is referenced locally. */
2965 if (bfd_link_pic (info) && h)
2966 {
2967 if (!h->def_regular)
2968 {
2969 const char *v;
2970
2971 switch (ELF_ST_VISIBILITY (h->other))
2972 {
2973 case STV_HIDDEN:
2974 v = _("hidden symbol");
2975 break;
2976 case STV_INTERNAL:
2977 v = _("internal symbol");
2978 break;
2979 case STV_PROTECTED:
2980 v = _("protected symbol");
2981 break;
2982 default:
2983 v = _("symbol");
2984 break;
2985 }
2986
2987 _bfd_error_handler
2988 /* xgettext:c-format */
2989 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2990 " `%s' can not be used when making a shared object"),
2991 input_bfd, v, h->root.root.string);
2992 bfd_set_error (bfd_error_bad_value);
2993 return false;
2994 }
2995 else if (!bfd_link_executable (info)
2996 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2997 && (h->type == STT_FUNC
2998 || h->type == STT_OBJECT)
2999 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3000 {
3001 _bfd_error_handler
3002 /* xgettext:c-format */
3003 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3004 " `%s' can not be used when making a shared object"),
3005 input_bfd,
3006 h->type == STT_FUNC ? "function" : "data",
3007 h->root.root.string);
3008 bfd_set_error (bfd_error_bad_value);
3009 return false;
3010 }
3011 }
3012
3013 /* Note that sgot is not involved in this
3014 calculation. We always want the start of .got.plt. If we
3015 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3016 permitted by the ABI, we might have to change this
3017 calculation. */
3018 relocation -= htab->elf.sgotplt->output_section->vma
3019 + htab->elf.sgotplt->output_offset;
3020 break;
3021
3022 case R_X86_64_GOTPC32:
3023 case R_X86_64_GOTPC64:
3024 /* Use global offset table as symbol value. */
3025 relocation = htab->elf.sgotplt->output_section->vma
3026 + htab->elf.sgotplt->output_offset;
3027 unresolved_reloc = false;
3028 break;
3029
3030 case R_X86_64_PLTOFF64:
3031 /* Relocation is PLT entry relative to GOT. For local
3032 symbols it's the symbol itself relative to GOT. */
3033 if (h != NULL
3034 /* See PLT32 handling. */
3035 && (h->plt.offset != (bfd_vma) -1
3036 || eh->plt_got.offset != (bfd_vma) -1)
3037 && htab->elf.splt != NULL)
3038 {
3039 if (eh->plt_got.offset != (bfd_vma) -1)
3040 {
3041 /* Use the GOT PLT. */
3042 resolved_plt = htab->plt_got;
3043 plt_offset = eh->plt_got.offset;
3044 }
3045 else if (htab->plt_second != NULL)
3046 {
3047 resolved_plt = htab->plt_second;
3048 plt_offset = eh->plt_second.offset;
3049 }
3050 else
3051 {
3052 resolved_plt = htab->elf.splt;
3053 plt_offset = h->plt.offset;
3054 }
3055
3056 relocation = (resolved_plt->output_section->vma
3057 + resolved_plt->output_offset
3058 + plt_offset);
3059 unresolved_reloc = false;
3060 }
3061
3062 relocation -= htab->elf.sgotplt->output_section->vma
3063 + htab->elf.sgotplt->output_offset;
3064 break;
3065
3066 case R_X86_64_PLT32:
3067 case R_X86_64_PLT32_BND:
3068 /* Relocation is to the entry for this symbol in the
3069 procedure linkage table. */
3070
3071 /* Resolve a PLT32 reloc against a local symbol directly,
3072 without using the procedure linkage table. */
3073 if (h == NULL)
3074 break;
3075
3076 if ((h->plt.offset == (bfd_vma) -1
3077 && eh->plt_got.offset == (bfd_vma) -1)
3078 || htab->elf.splt == NULL)
3079 {
3080 /* We didn't make a PLT entry for this symbol. This
3081 happens when statically linking PIC code, or when
3082 using -Bsymbolic. */
3083 break;
3084 }
3085
3086 use_plt:
3087 if (h->plt.offset != (bfd_vma) -1)
3088 {
3089 if (htab->plt_second != NULL)
3090 {
3091 resolved_plt = htab->plt_second;
3092 plt_offset = eh->plt_second.offset;
3093 }
3094 else
3095 {
3096 resolved_plt = htab->elf.splt;
3097 plt_offset = h->plt.offset;
3098 }
3099 }
3100 else
3101 {
3102 /* Use the GOT PLT. */
3103 resolved_plt = htab->plt_got;
3104 plt_offset = eh->plt_got.offset;
3105 }
3106
3107 relocation = (resolved_plt->output_section->vma
3108 + resolved_plt->output_offset
3109 + plt_offset);
3110 unresolved_reloc = false;
3111 break;
3112
3113 case R_X86_64_SIZE32:
3114 case R_X86_64_SIZE64:
3115 /* Set to symbol size. */
3116 relocation = st_size;
3117 goto direct;
3118
3119 case R_X86_64_PC8:
3120 case R_X86_64_PC16:
3121 case R_X86_64_PC32:
3122 case R_X86_64_PC32_BND:
3123	  /* Don't complain about -fPIC if the symbol is undefined when
3124	     building an executable, unless it is an unresolved weak symbol,
3125	     references a dynamic definition in a PIE, or -z nocopyreloc
3126	     is used.  */
3127 no_copyreloc_p
3128 = (info->nocopyreloc
3129 || (h != NULL
3130 && !h->root.linker_def
3131 && !h->root.ldscript_def
3132 && eh->def_protected
3133 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3134
3135 if ((input_section->flags & SEC_ALLOC) != 0
3136 && (input_section->flags & SEC_READONLY) != 0
3137 && h != NULL
3138 && ((bfd_link_executable (info)
3139 && ((h->root.type == bfd_link_hash_undefweak
3140 && (eh == NULL
3141 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3142 eh)))
3143 || (bfd_link_pie (info)
3144 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3145 && h->def_dynamic)
3146 || (no_copyreloc_p
3147 && h->def_dynamic
3148 && !(h->root.u.def.section->flags & SEC_CODE))))
3149 || (bfd_link_pie (info)
3150 && h->root.type == bfd_link_hash_undefweak)
3151 || bfd_link_dll (info)))
3152 {
3153 bool fail = false;
3154 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3155 {
3156 /* Symbol is referenced locally. Make sure it is
3157 defined locally. */
3158 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3159 }
3160 else if (bfd_link_pie (info))
3161 {
3162 /* We can only use PC-relative relocations in PIE
3163 from non-code sections. */
3164 if (h->root.type == bfd_link_hash_undefweak
3165 || (h->type == STT_FUNC
3166 && (sec->flags & SEC_CODE) != 0))
3167 fail = true;
3168 }
3169 else if (no_copyreloc_p || bfd_link_dll (info))
3170 {
3171 /* Symbol doesn't need copy reloc and isn't
3172 referenced locally. Don't allow PC-relative
3173 relocations against default and protected
3174 symbols since address of protected function
3175 and location of protected data may not be in
3176 the shared object. */
3177 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3178 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3179 }
3180
3181 if (fail)
3182 return elf_x86_64_need_pic (info, input_bfd, input_section,
3183 h, NULL, NULL, howto);
3184 }
3185 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3186 as function address. */
3187 else if (h != NULL
3188 && (input_section->flags & SEC_CODE) == 0
3189 && bfd_link_pie (info)
3190 && h->type == STT_FUNC
3191 && !h->def_regular
3192 && h->def_dynamic)
3193 goto use_plt;
3194 /* Fall through. */
3195
3196 case R_X86_64_8:
3197 case R_X86_64_16:
3198 case R_X86_64_32:
3199 case R_X86_64_PC64:
3200 case R_X86_64_64:
3201 /* FIXME: The ABI says the linker should make sure the value is
3202	     the same when it's zero-extended to 64 bits.  */
3203
3204 direct:
3205 if ((input_section->flags & SEC_ALLOC) == 0)
3206 break;
3207
3208 need_copy_reloc_in_pie = (bfd_link_pie (info)
3209 && h != NULL
3210 && (h->needs_copy
3211 || eh->needs_copy
3212 || (h->root.type
3213 == bfd_link_hash_undefined))
3214 && (X86_PCREL_TYPE_P (true, r_type)
3215 || X86_SIZE_TYPE_P (true,
3216 r_type)));
3217
3218 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec,
3219 need_copy_reloc_in_pie,
3220 resolved_to_zero, false))
3221 {
3222 Elf_Internal_Rela outrel;
3223 bool skip, relocate;
3224 bool generate_dynamic_reloc = true;
3225 asection *sreloc;
3226 const char *relative_reloc_name = NULL;
3227
3228 /* When generating a shared object, these relocations
3229 are copied into the output file to be resolved at run
3230 time. */
3231 skip = false;
3232 relocate = false;
3233
3234 outrel.r_offset =
3235 _bfd_elf_section_offset (output_bfd, info, input_section,
3236 rel->r_offset);
3237 if (outrel.r_offset == (bfd_vma) -1)
3238 skip = true;
3239 else if (outrel.r_offset == (bfd_vma) -2)
3240 skip = true, relocate = true;
3241
3242 outrel.r_offset += (input_section->output_section->vma
3243 + input_section->output_offset);
3244
3245 if (skip)
3246 memset (&outrel, 0, sizeof outrel);
3247
3248 else if (COPY_INPUT_RELOC_P (true, info, h, r_type))
3249 {
3250 outrel.r_info = htab->r_info (h->dynindx, r_type);
3251 outrel.r_addend = rel->r_addend;
3252 }
3253 else
3254 {
3255 /* This symbol is local, or marked to become local.
3256 When relocation overflow check is disabled, we
3257 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3258 if (r_type == htab->pointer_r_type
3259 || (r_type == R_X86_64_32
3260 && htab->params->no_reloc_overflow_check))
3261 {
3262 relocate = true;
3263 /* NB: Don't generate relative relocation here if
3264 it has been generated by DT_RELR. */
3265 if (info->enable_dt_relr)
3266 generate_dynamic_reloc = false;
3267 else
3268 {
3269 outrel.r_info =
3270 htab->r_info (0, R_X86_64_RELATIVE);
3271 outrel.r_addend = relocation + rel->r_addend;
3272 relative_reloc_name = "R_X86_64_RELATIVE";
3273 }
3274 }
3275 else if (r_type == R_X86_64_64
3276 && !ABI_64_P (output_bfd))
3277 {
3278 relocate = true;
3279 outrel.r_info = htab->r_info (0,
3280 R_X86_64_RELATIVE64);
3281 outrel.r_addend = relocation + rel->r_addend;
3282 relative_reloc_name = "R_X86_64_RELATIVE64";
3283 /* Check addend overflow. */
3284 if ((outrel.r_addend & 0x80000000)
3285 != (rel->r_addend & 0x80000000))
3286 {
3287 const char *name;
3288 int addend = rel->r_addend;
3289 if (h && h->root.root.string)
3290 name = h->root.root.string;
3291 else
3292 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3293 sym, NULL);
3294 _bfd_error_handler
3295 /* xgettext:c-format */
3296 (_("%pB: addend %s%#x in relocation %s against "
3297 "symbol `%s' at %#" PRIx64
3298 " in section `%pA' is out of range"),
3299 input_bfd, addend < 0 ? "-" : "", addend,
3300 howto->name, name, (uint64_t) rel->r_offset,
3301 input_section);
3302 bfd_set_error (bfd_error_bad_value);
3303 return false;
3304 }
3305 }
3306 else
3307 {
3308 long sindx;
3309
3310 if (bfd_is_abs_section (sec))
3311 sindx = 0;
3312 else if (sec == NULL || sec->owner == NULL)
3313 {
3314 bfd_set_error (bfd_error_bad_value);
3315 return false;
3316 }
3317 else
3318 {
3319 asection *osec;
3320
3321 /* We are turning this relocation into one
3322 against a section symbol. It would be
3323 proper to subtract the symbol's value,
3324 osec->vma, from the emitted reloc addend,
3325 but ld.so expects buggy relocs. */
3326 osec = sec->output_section;
3327 sindx = elf_section_data (osec)->dynindx;
3328 if (sindx == 0)
3329 {
3330 asection *oi = htab->elf.text_index_section;
3331 sindx = elf_section_data (oi)->dynindx;
3332 }
3333 BFD_ASSERT (sindx != 0);
3334 }
3335
3336 outrel.r_info = htab->r_info (sindx, r_type);
3337 outrel.r_addend = relocation + rel->r_addend;
3338 }
3339 }
3340
3341 if (generate_dynamic_reloc)
3342 {
3343 sreloc = elf_section_data (input_section)->sreloc;
3344
3345 if (sreloc == NULL || sreloc->contents == NULL)
3346 {
3347 r = bfd_reloc_notsupported;
3348 goto check_relocation_error;
3349 }
3350
3351 if (relative_reloc_name
3352 && htab->params->report_relative_reloc)
3353 _bfd_x86_elf_link_report_relative_reloc
3354 (info, input_section, h, sym,
3355 relative_reloc_name, &outrel);
3356
3357 elf_append_rela (output_bfd, sreloc, &outrel);
3358 }
3359
3360 /* If this reloc is against an external symbol, we do
3361 not want to fiddle with the addend. Otherwise, we
3362 need to include the symbol value so that it becomes
3363 an addend for the dynamic reloc. */
3364 if (! relocate)
3365 continue;
3366 }
3367
3368 break;
3369
3370 case R_X86_64_TLSGD:
3371 case R_X86_64_GOTPC32_TLSDESC:
3372 case R_X86_64_TLSDESC_CALL:
3373 case R_X86_64_GOTTPOFF:
3374 tls_type = GOT_UNKNOWN;
3375 if (h == NULL && local_got_offsets)
3376 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3377 else if (h != NULL)
3378 tls_type = elf_x86_hash_entry (h)->tls_type;
3379
3380 r_type_tls = r_type;
3381 if (! elf_x86_64_tls_transition (info, input_bfd,
3382 input_section, contents,
3383 symtab_hdr, sym_hashes,
3384 &r_type_tls, tls_type, rel,
3385 relend, h, r_symndx, true))
3386 return false;
3387
3388 if (r_type_tls == R_X86_64_TPOFF32)
3389 {
3390 bfd_vma roff = rel->r_offset;
3391
3392 BFD_ASSERT (! unresolved_reloc);
3393
3394 if (r_type == R_X86_64_TLSGD)
3395 {
3396 /* GD->LE transition. For 64bit, change
3397 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3398 .word 0x6666; rex64; call __tls_get_addr@PLT
3399 or
3400 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3401 .byte 0x66; rex64
3402 call *__tls_get_addr@GOTPCREL(%rip)
3403 which may be converted to
3404 addr32 call __tls_get_addr
3405 into:
3406 movq %fs:0, %rax
3407 leaq foo@tpoff(%rax), %rax
3408 For 32bit, change
3409 leaq foo@tlsgd(%rip), %rdi
3410 .word 0x6666; rex64; call __tls_get_addr@PLT
3411 or
3412 leaq foo@tlsgd(%rip), %rdi
3413 .byte 0x66; rex64
3414 call *__tls_get_addr@GOTPCREL(%rip)
3415 which may be converted to
3416 addr32 call __tls_get_addr
3417 into:
3418 movl %fs:0, %eax
3419 leaq foo@tpoff(%rax), %rax
3420 For largepic, change:
3421 leaq foo@tlsgd(%rip), %rdi
3422 movabsq $__tls_get_addr@pltoff, %rax
3423 addq %r15, %rax
3424 call *%rax
3425 into:
3426 movq %fs:0, %rax
3427 leaq foo@tpoff(%rax), %rax
3428 nopw 0x0(%rax,%rax,1) */
3429 int largepic = 0;
3430 if (ABI_64_P (output_bfd))
3431 {
3432 if (contents[roff + 5] == 0xb8)
3433 {
3434 if (roff < 3
3435 || (roff - 3 + 22) > input_section->size)
3436 {
3437 corrupt_input:
3438 info->callbacks->einfo
3439 (_("%F%P: corrupt input: %pB\n"),
3440 input_bfd);
3441 return false;
3442 }
3443 memcpy (contents + roff - 3,
3444 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3445 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3446 largepic = 1;
3447 }
3448 else
3449 {
3450 if (roff < 4
3451 || (roff - 4 + 16) > input_section->size)
3452 goto corrupt_input;
3453 memcpy (contents + roff - 4,
3454 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3455 16);
3456 }
3457 }
3458 else
3459 {
3460 if (roff < 3
3461 || (roff - 3 + 15) > input_section->size)
3462 goto corrupt_input;
3463 memcpy (contents + roff - 3,
3464 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3465 15);
3466 }
3467 bfd_put_32 (output_bfd,
3468 elf_x86_64_tpoff (info, relocation),
3469 contents + roff + 8 + largepic);
3470 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3471 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3472 rel++;
3473 wrel++;
3474 continue;
3475 }
3476 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3477 {
3478 /* GDesc -> LE transition.
3479 It's originally something like:
3480 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3481 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3482
3483 Change it to:
3484 movq $x@tpoff, %rax <--- LP64 mode.
3485 rex movl $x@tpoff, %eax <--- X32 mode.
3486 */
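		      /* The REX fix-up below keeps REX.W and moves REX.R
			 to REX.B, because the destination register moves
			 from the ModRM reg field of the lea to the r/m
			 field of the c7 /0 mov-immediate form.  */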
3487
3488 unsigned int val, type;
3489
3490 if (roff < 3)
3491 goto corrupt_input;
3492 type = bfd_get_8 (input_bfd, contents + roff - 3);
3493 val = bfd_get_8 (input_bfd, contents + roff - 1);
3494 bfd_put_8 (output_bfd,
3495 (type & 0x48) | ((type >> 2) & 1),
3496 contents + roff - 3);
3497 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3498 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3499 contents + roff - 1);
3500 bfd_put_32 (output_bfd,
3501 elf_x86_64_tpoff (info, relocation),
3502 contents + roff);
3503 continue;
3504 }
3505 else if (r_type == R_X86_64_TLSDESC_CALL)
3506 {
3507 /* GDesc -> LE transition.
3508 It's originally:
3509 call *(%rax) <--- LP64 mode.
3510 call *(%eax) <--- X32 mode.
3511 Turn it into:
3512 xchg %ax,%ax <-- LP64 mode.
3513 nopl (%rax) <-- X32 mode.
3514 */
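		      /* Size is preserved: the 2-byte "call *(%rax)"
			 (ff 10) becomes the 2-byte "66 90" nop, and the
			 3-byte X32 form with a 0x67 prefix becomes the
			 3-byte "0f 1f 00" nop.  */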
3515 unsigned int prefix = 0;
3516 if (!ABI_64_P (input_bfd))
3517 {
3518 /* Check for call *x@tlsdesc(%eax). */
3519 if (contents[roff] == 0x67)
3520 prefix = 1;
3521 }
3522 if (prefix)
3523 {
3524 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3525 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3526 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3527 }
3528 else
3529 {
3530 bfd_put_8 (output_bfd, 0x66, contents + roff);
3531 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3532 }
3533 continue;
3534 }
3535 else if (r_type == R_X86_64_GOTTPOFF)
3536 {
3537 /* IE->LE transition:
3538 For 64bit, originally it can be one of:
3539 movq foo@gottpoff(%rip), %reg
3540 addq foo@gottpoff(%rip), %reg
3541 We change it into:
3542 movq $foo, %reg
3543 leaq foo(%reg), %reg
3544 addq $foo, %reg.
3545 For 32bit, originally it can be one of:
3546 movq foo@gottpoff(%rip), %reg
3547 addl foo@gottpoff(%rip), %reg
3548 We change it into:
3549 movq $foo, %reg
3550 leal foo(%reg), %reg
3551 addl $foo, %reg. */
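		      /* REX adjustments below: 0x4c (REX.WR) becomes 0x49
			 (REX.WB) when the target register moves into the
			 ModRM r/m field, and 0x4d (REX.WRB) for the lea
			 form, where the same register is used as both base
			 and destination; 0x44 maps to 0x41/0x45 in the same
			 way for the X32 forms without REX.W.  */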
3552
3553 unsigned int val, type, reg;
3554
3555 if (roff >= 3)
3556 val = bfd_get_8 (input_bfd, contents + roff - 3);
3557 else
3558 {
3559 if (roff < 2)
3560 goto corrupt_input;
3561 val = 0;
3562 }
3563 type = bfd_get_8 (input_bfd, contents + roff - 2);
3564 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3565 reg >>= 3;
3566 if (type == 0x8b)
3567 {
3568 /* movq */
3569 if (val == 0x4c)
3570 {
3571 if (roff < 3)
3572 goto corrupt_input;
3573 bfd_put_8 (output_bfd, 0x49,
3574 contents + roff - 3);
3575 }
3576 else if (!ABI_64_P (output_bfd) && val == 0x44)
3577 {
3578 if (roff < 3)
3579 goto corrupt_input;
3580 bfd_put_8 (output_bfd, 0x41,
3581 contents + roff - 3);
3582 }
3583 bfd_put_8 (output_bfd, 0xc7,
3584 contents + roff - 2);
3585 bfd_put_8 (output_bfd, 0xc0 | reg,
3586 contents + roff - 1);
3587 }
3588 else if (reg == 4)
3589 {
3590 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3591 is special */
3592 if (val == 0x4c)
3593 {
3594 if (roff < 3)
3595 goto corrupt_input;
3596 bfd_put_8 (output_bfd, 0x49,
3597 contents + roff - 3);
3598 }
3599 else if (!ABI_64_P (output_bfd) && val == 0x44)
3600 {
3601 if (roff < 3)
3602 goto corrupt_input;
3603 bfd_put_8 (output_bfd, 0x41,
3604 contents + roff - 3);
3605 }
3606 bfd_put_8 (output_bfd, 0x81,
3607 contents + roff - 2);
3608 bfd_put_8 (output_bfd, 0xc0 | reg,
3609 contents + roff - 1);
3610 }
3611 else
3612 {
3613 /* addq/addl -> leaq/leal */
3614 if (val == 0x4c)
3615 {
3616 if (roff < 3)
3617 goto corrupt_input;
3618 bfd_put_8 (output_bfd, 0x4d,
3619 contents + roff - 3);
3620 }
3621 else if (!ABI_64_P (output_bfd) && val == 0x44)
3622 {
3623 if (roff < 3)
3624 goto corrupt_input;
3625 bfd_put_8 (output_bfd, 0x45,
3626 contents + roff - 3);
3627 }
3628 bfd_put_8 (output_bfd, 0x8d,
3629 contents + roff - 2);
3630 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3631 contents + roff - 1);
3632 }
3633 bfd_put_32 (output_bfd,
3634 elf_x86_64_tpoff (info, relocation),
3635 contents + roff);
3636 continue;
3637 }
3638 else
3639 BFD_ASSERT (false);
3640 }
3641
3642 if (htab->elf.sgot == NULL)
3643 abort ();
3644
3645 if (h != NULL)
3646 {
3647 off = h->got.offset;
3648 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3649 }
3650 else
3651 {
3652 if (local_got_offsets == NULL)
3653 abort ();
3654
3655 off = local_got_offsets[r_symndx];
3656 offplt = local_tlsdesc_gotents[r_symndx];
3657 }
3658
3659 if ((off & 1) != 0)
3660 off &= ~1;
3661 else
3662 {
3663 Elf_Internal_Rela outrel;
3664 int dr_type, indx;
3665 asection *sreloc;
3666
3667 if (htab->elf.srelgot == NULL)
3668 abort ();
3669
3670 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3671
3672 if (GOT_TLS_GDESC_P (tls_type))
3673 {
3674 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3675 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3676 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3677 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3678 + htab->elf.sgotplt->output_offset
3679 + offplt
3680 + htab->sgotplt_jump_table_size);
3681 sreloc = htab->elf.srelplt;
3682 if (indx == 0)
3683 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3684 else
3685 outrel.r_addend = 0;
3686 elf_append_rela (output_bfd, sreloc, &outrel);
3687 }
3688
3689 sreloc = htab->elf.srelgot;
3690
3691 outrel.r_offset = (htab->elf.sgot->output_section->vma
3692 + htab->elf.sgot->output_offset + off);
3693
3694 if (GOT_TLS_GD_P (tls_type))
3695 dr_type = R_X86_64_DTPMOD64;
3696 else if (GOT_TLS_GDESC_P (tls_type))
3697 goto dr_done;
3698 else
3699 dr_type = R_X86_64_TPOFF64;
3700
3701 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3702 outrel.r_addend = 0;
3703 if ((dr_type == R_X86_64_TPOFF64
3704 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3705 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3706 outrel.r_info = htab->r_info (indx, dr_type);
3707
3708 elf_append_rela (output_bfd, sreloc, &outrel);
3709
3710 if (GOT_TLS_GD_P (tls_type))
3711 {
3712 if (indx == 0)
3713 {
3714 BFD_ASSERT (! unresolved_reloc);
3715 bfd_put_64 (output_bfd,
3716 relocation - _bfd_x86_elf_dtpoff_base (info),
3717 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3718 }
3719 else
3720 {
3721 bfd_put_64 (output_bfd, 0,
3722 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3723 outrel.r_info = htab->r_info (indx,
3724 R_X86_64_DTPOFF64);
3725 outrel.r_offset += GOT_ENTRY_SIZE;
3726 elf_append_rela (output_bfd, sreloc,
3727 &outrel);
3728 }
3729 }
3730
3731 dr_done:
3732 if (h != NULL)
3733 h->got.offset |= 1;
3734 else
3735 local_got_offsets[r_symndx] |= 1;
3736 }
3737
3738 if (off >= (bfd_vma) -2
3739 && ! GOT_TLS_GDESC_P (tls_type))
3740 abort ();
3741 if (r_type_tls == r_type)
3742 {
3743 if (r_type == R_X86_64_GOTPC32_TLSDESC
3744 || r_type == R_X86_64_TLSDESC_CALL)
3745 relocation = htab->elf.sgotplt->output_section->vma
3746 + htab->elf.sgotplt->output_offset
3747 + offplt + htab->sgotplt_jump_table_size;
3748 else
3749 relocation = htab->elf.sgot->output_section->vma
3750 + htab->elf.sgot->output_offset + off;
3751 unresolved_reloc = false;
3752 }
3753 else
3754 {
3755 bfd_vma roff = rel->r_offset;
3756
3757 if (r_type == R_X86_64_TLSGD)
3758 {
3759 /* GD->IE transition. For 64bit, change
3760 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3761 .word 0x6666; rex64; call __tls_get_addr@PLT
3762 or
3763 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3764 .byte 0x66; rex64
3765 call *__tls_get_addr@GOTPCREL(%rip)
3766 which may be converted to
3767 addr32 call __tls_get_addr
3768 into:
3769 movq %fs:0, %rax
3770 addq foo@gottpoff(%rip), %rax
3771 For 32bit, change
3772 leaq foo@tlsgd(%rip), %rdi
3773 .word 0x6666; rex64; call __tls_get_addr@PLT
3774 or
3775 leaq foo@tlsgd(%rip), %rdi
3776 .byte 0x66; rex64;
3777 call *__tls_get_addr@GOTPCREL(%rip)
3778 which may be converted to
3779 addr32 call __tls_get_addr
3780 into:
3781 movl %fs:0, %eax
3782 addq foo@gottpoff(%rip), %rax
3783 For largepic, change:
3784 leaq foo@tlsgd(%rip), %rdi
3785 movabsq $__tls_get_addr@pltoff, %rax
3786 addq %r15, %rax
3787 call *%rax
3788 into:
3789 movq %fs:0, %rax
3790 addq foo@gottpoff(%rax), %rax
3791 nopw 0x0(%rax,%rax,1) */
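/* For reference, the replacement byte strings below decode as:
   LP64 (16 bytes): 64 48 8b 04 25 imm32 = movq %fs:0x0, %rax, then
   48 03 05 imm32 = addq 0x0(%rip), %rax; non-LP64 (15 bytes): the
   same without REX.W on the mov, i.e. movl %fs:0x0, %eax followed
   by the same addq.  The addq displacement is filled in with the
   @gottpoff GOT slot by the bfd_put_32 below. */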
3792 int largepic = 0;
3793 if (ABI_64_P (output_bfd))
3794 {
3795 if (contents[roff + 5] == 0xb8)
3796 {
3797 if (roff < 3
3798 || (roff - 3 + 22) > input_section->size)
3799 goto corrupt_input;
3800 memcpy (contents + roff - 3,
3801 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3802 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3803 largepic = 1;
3804 }
3805 else
3806 {
3807 if (roff < 4
3808 || (roff - 4 + 16) > input_section->size)
3809 goto corrupt_input;
3810 memcpy (contents + roff - 4,
3811 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3812 16);
3813 }
3814 }
3815 else
3816 {
3817 if (roff < 3
3818 || (roff - 3 + 15) > input_section->size)
3819 goto corrupt_input;
3820 memcpy (contents + roff - 3,
3821 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3822 15);
3823 }
3824
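/* The 4-byte @gottpoff field is written at roff + 8 + largepic, and
   a RIP-relative displacement is measured from the end of the
   instruction, i.e. from roff + 12 + largepic within this section;
   hence the "- 12" and "- largepic" below. */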
3825 relocation = (htab->elf.sgot->output_section->vma
3826 + htab->elf.sgot->output_offset + off
3827 - roff
3828 - largepic
3829 - input_section->output_section->vma
3830 - input_section->output_offset
3831 - 12);
3832 bfd_put_32 (output_bfd, relocation,
3833 contents + roff + 8 + largepic);
3834 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3835 rel++;
3836 wrel++;
3837 continue;
3838 }
3839 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3840 {
3841 /* GDesc -> IE transition.
3842 It's originally something like:
3843 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3844 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3845
3846 Change it to:
3847 # before xchg %ax,%ax in LP64 mode.
3848 movq x@gottpoff(%rip), %rax
3849 # before nopl (%rax) in X32 mode.
3850 rex movl x@gottpoff(%rip), %eax
3851 */
3852
3853 /* Now modify the instruction as appropriate. To
3854 turn a lea into a mov in the form we use it, it
3855 suffices to change the second byte from 0x8d to
3856 0x8b. */
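/* The displacement written below is RIP-relative, measured from the
   end of the rewritten mov, which is 4 bytes past roff; hence the
   "- 4". */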
3857 if (roff < 2)
3858 goto corrupt_input;
3859 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3860
3861 bfd_put_32 (output_bfd,
3862 htab->elf.sgot->output_section->vma
3863 + htab->elf.sgot->output_offset + off
3864 - rel->r_offset
3865 - input_section->output_section->vma
3866 - input_section->output_offset
3867 - 4,
3868 contents + roff);
3869 continue;
3870 }
3871 else if (r_type == R_X86_64_TLSDESC_CALL)
3872 {
3873 /* GDesc -> IE transition.
3874 It's originally:
3875 call *(%rax) <--- LP64 mode.
3876 call *(%eax) <--- X32 mode.
3877
3878 Change it to:
3879 xchg %ax, %ax <-- LP64 mode.
3880 nopl (%rax) <-- X32 mode.
3881 */
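/* Both replacements preserve the instruction length: the 2-byte
   call *(%rax) (ff 10) becomes the 2-byte xchg %ax,%ax (66 90),
   while the 3-byte X32 form with the 0x67 addr32 prefix becomes the
   3-byte nopl (%rax) (0f 1f 00). */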
3882
3883 unsigned int prefix = 0;
3884 if (!ABI_64_P (input_bfd))
3885 {
3886 /* Check for call *x@tlsdesc(%eax). */
3887 if (contents[roff] == 0x67)
3888 prefix = 1;
3889 }
3890 if (prefix)
3891 {
3892 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3893 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3894 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3895 }
3896 else
3897 {
3898 bfd_put_8 (output_bfd, 0x66, contents + roff);
3899 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3900 }
3901 continue;
3902 }
3903 else
3904 BFD_ASSERT (false);
3905 }
3906 break;
3907
3908 case R_X86_64_TLSLD:
3909 if (! elf_x86_64_tls_transition (info, input_bfd,
3910 input_section, contents,
3911 symtab_hdr, sym_hashes,
3912 &r_type, GOT_UNKNOWN, rel,
3913 relend, h, r_symndx, true))
3914 return false;
3915
3916 if (r_type != R_X86_64_TLSLD)
3917 {
3918 /* LD->LE transition:
3919 leaq foo@tlsld(%rip), %rdi
3920 call __tls_get_addr@PLT
3921 For 64bit, we change it into:
3922 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3923 For 32bit, we change it into:
3924 nopl 0x0(%rax); movl %fs:0, %eax
3925 Or
3926 leaq foo@tlsld(%rip), %rdi;
3927 call *__tls_get_addr@GOTPCREL(%rip)
3928 which may be converted to
3929 addr32 call __tls_get_addr
3930 For 64bit, we change it into:
3931 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3932 For 32bit, we change it into:
3933 nopw 0x0(%rax); movl %fs:0, %eax
3934 For largepic, change:
3935 leaq foo@tlsld(%rip), %rdi
3936 movabsq $__tls_get_addr@pltoff, %rax
3937 addq %rbx, %rax
3938 call *%rax
3939 into
3940 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3941 movq %fs:0, %rax */
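/* The code below tells the three 64-bit forms apart by looking at
   the bytes after the relocated field: 0xb8 at r_offset + 5 is the
   movabsq of the largepic form (22-byte replacement), 0xff or 0x67
   at r_offset + 4 is the indirect call via GOTPCREL (13-byte
   replacement), and anything else is the direct
   call __tls_get_addr@PLT form (12-byte replacement). */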
3942
3943 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3944 if (ABI_64_P (output_bfd))
3945 {
3946 if ((rel->r_offset + 5) >= input_section->size)
3947 goto corrupt_input;
3948 if (contents[rel->r_offset + 5] == 0xb8)
3949 {
3950 if (rel->r_offset < 3
3951 || (rel->r_offset - 3 + 22) > input_section->size)
3952 goto corrupt_input;
3953 memcpy (contents + rel->r_offset - 3,
3954 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3955 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3956 }
3957 else if (contents[rel->r_offset + 4] == 0xff
3958 || contents[rel->r_offset + 4] == 0x67)
3959 {
3960 if (rel->r_offset < 3
3961 || (rel->r_offset - 3 + 13) > input_section->size)
3962 goto corrupt_input;
3963 memcpy (contents + rel->r_offset - 3,
3964 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3965 13);
3966
3967 }
3968 else
3969 {
3970 if (rel->r_offset < 3
3971 || (rel->r_offset - 3 + 12) > input_section->size)
3972 goto corrupt_input;
3973 memcpy (contents + rel->r_offset - 3,
3974 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3975 }
3976 }
3977 else
3978 {
3979 if ((rel->r_offset + 4) >= input_section->size)
3980 goto corrupt_input;
3981 if (contents[rel->r_offset + 4] == 0xff)
3982 {
3983 if (rel->r_offset < 3
3984 || (rel->r_offset - 3 + 13) > input_section->size)
3985 goto corrupt_input;
3986 memcpy (contents + rel->r_offset - 3,
3987 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3988 13);
3989 }
3990 else
3991 {
3992 if (rel->r_offset < 3
3993 || (rel->r_offset - 3 + 12) > input_section->size)
3994 goto corrupt_input;
3995 memcpy (contents + rel->r_offset - 3,
3996 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3997 }
3998 }
3999 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4000 and R_X86_64_PLTOFF64. */
4001 rel++;
4002 wrel++;
4003 continue;
4004 }
4005
4006 if (htab->elf.sgot == NULL)
4007 abort ();
4008
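/* All R_X86_64_TLSLD relocations share a single two-slot GOT entry;
   the low bit of tls_ld_or_ldm_got.offset records whether it has
   been initialized.  On first use both slots are cleared and one
   R_X86_64_DTPMOD64 relocation is emitted so that ld.so fills in
   the module ID; the offset slot stays zero because the per-symbol
   offsets are applied through DTPOFF relocations. */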
4009 off = htab->tls_ld_or_ldm_got.offset;
4010 if (off & 1)
4011 off &= ~1;
4012 else
4013 {
4014 Elf_Internal_Rela outrel;
4015
4016 if (htab->elf.srelgot == NULL)
4017 abort ();
4018
4019 outrel.r_offset = (htab->elf.sgot->output_section->vma
4020 + htab->elf.sgot->output_offset + off);
4021
4022 bfd_put_64 (output_bfd, 0,
4023 htab->elf.sgot->contents + off);
4024 bfd_put_64 (output_bfd, 0,
4025 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4026 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4027 outrel.r_addend = 0;
4028 elf_append_rela (output_bfd, htab->elf.srelgot,
4029 &outrel);
4030 htab->tls_ld_or_ldm_got.offset |= 1;
4031 }
4032 relocation = htab->elf.sgot->output_section->vma
4033 + htab->elf.sgot->output_offset + off;
4034 unresolved_reloc = false;
4035 break;
4036
4037 case R_X86_64_DTPOFF32:
4038 if (!bfd_link_executable (info)
4039 || (input_section->flags & SEC_CODE) == 0)
4040 relocation -= _bfd_x86_elf_dtpoff_base (info);
4041 else
4042 relocation = elf_x86_64_tpoff (info, relocation);
4043 break;
4044
4045 case R_X86_64_TPOFF32:
4046 case R_X86_64_TPOFF64:
4047 BFD_ASSERT (bfd_link_executable (info));
4048 relocation = elf_x86_64_tpoff (info, relocation);
4049 break;
4050
4051 case R_X86_64_DTPOFF64:
4052 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4053 relocation -= _bfd_x86_elf_dtpoff_base (info);
4054 break;
4055
4056 default:
4057 break;
4058 }
4059
4060 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4061 because such sections are not SEC_ALLOC and thus ld.so will
4062 not process them. */
4063 if (unresolved_reloc
4064 && !((input_section->flags & SEC_DEBUGGING) != 0
4065 && h->def_dynamic)
4066 && _bfd_elf_section_offset (output_bfd, info, input_section,
4067 rel->r_offset) != (bfd_vma) -1)
4068 {
4069 switch (r_type)
4070 {
4071 case R_X86_64_32S:
4072 sec = h->root.u.def.section;
4073 if ((info->nocopyreloc
4074 || (eh->def_protected
4075 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4076 && !(h->root.u.def.section->flags & SEC_CODE))
4077 return elf_x86_64_need_pic (info, input_bfd, input_section,
4078 h, NULL, NULL, howto);
4079 /* Fall through. */
4080
4081 default:
4082 _bfd_error_handler
4083 /* xgettext:c-format */
4084 (_("%pB(%pA+%#" PRIx64 "): "
4085 "unresolvable %s relocation against symbol `%s'"),
4086 input_bfd,
4087 input_section,
4088 (uint64_t) rel->r_offset,
4089 howto->name,
4090 h->root.root.string);
4091 return false;
4092 }
4093 }
4094
4095 do_relocation:
4096 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4097 contents, rel->r_offset,
4098 relocation, rel->r_addend);
4099
4100 check_relocation_error:
4101 if (r != bfd_reloc_ok)
4102 {
4103 const char *name;
4104
4105 if (h != NULL)
4106 name = h->root.root.string;
4107 else
4108 {
4109 name = bfd_elf_string_from_elf_section (input_bfd,
4110 symtab_hdr->sh_link,
4111 sym->st_name);
4112 if (name == NULL)
4113 return false;
4114 if (*name == '\0')
4115 name = bfd_section_name (sec);
4116 }
4117
4118 if (r == bfd_reloc_overflow)
4119 {
4120 if (converted_reloc)
4121 {
4122 info->callbacks->einfo
4123 ("%X%H:", input_bfd, input_section, rel->r_offset);
4124 info->callbacks->einfo
4125 (_(" failed to convert GOTPCREL relocation against "
4126 "'%s'; relink with --no-relax\n"),
4127 name);
4128 status = false;
4129 continue;
4130 }
4131 (*info->callbacks->reloc_overflow)
4132 (info, (h ? &h->root : NULL), name, howto->name,
4133 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4134 }
4135 else
4136 {
4137 _bfd_error_handler
4138 /* xgettext:c-format */
4139 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4140 input_bfd, input_section,
4141 (uint64_t) rel->r_offset, name, (int) r);
4142 return false;
4143 }
4144 }
4145
4146 if (wrel != rel)
4147 *wrel = *rel;
4148 }
4149
4150 if (wrel != rel)
4151 {
4152 Elf_Internal_Shdr *rel_hdr;
4153 size_t deleted = rel - wrel;
4154
4155 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4156 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4157 if (rel_hdr->sh_size == 0)
4158 {
4159 /* It is too late to remove an empty reloc section. Leave
4160 one NONE reloc.
4161 ??? What is wrong with an empty section??? */
4162 rel_hdr->sh_size = rel_hdr->sh_entsize;
4163 deleted -= 1;
4164 }
4165 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4166 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4167 input_section->reloc_count -= deleted;
4168 }
4169
4170 return status;
4171 }
4172
4173 /* Finish up dynamic symbol handling. We set the contents of various
4174 dynamic sections here. */
4175
4176 static bool
4177 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4178 struct bfd_link_info *info,
4179 struct elf_link_hash_entry *h,
4180 Elf_Internal_Sym *sym)
4181 {
4182 struct elf_x86_link_hash_table *htab;
4183 bool use_plt_second;
4184 struct elf_x86_link_hash_entry *eh;
4185 bool local_undefweak;
4186
4187 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4188 if (htab == NULL)
4189 return false;
4190
4191 /* Use the second PLT section only if there is a .plt section. */
4192 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4193
4194 eh = (struct elf_x86_link_hash_entry *) h;
4195 if (eh->no_finish_dynamic_symbol)
4196 abort ();
4197
4198 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4199 resolved undefined weak symbols in an executable so that their
4200 references have value 0 at run-time. */
4201 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4202
4203 if (h->plt.offset != (bfd_vma) -1)
4204 {
4205 bfd_vma plt_index;
4206 bfd_vma got_offset, plt_offset;
4207 Elf_Internal_Rela rela;
4208 bfd_byte *loc;
4209 asection *plt, *gotplt, *relplt, *resolved_plt;
4210 const struct elf_backend_data *bed;
4211 bfd_vma plt_got_pcrel_offset;
4212
4213 /* When building a static executable, use .iplt, .igot.plt and
4214 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4215 if (htab->elf.splt != NULL)
4216 {
4217 plt = htab->elf.splt;
4218 gotplt = htab->elf.sgotplt;
4219 relplt = htab->elf.srelplt;
4220 }
4221 else
4222 {
4223 plt = htab->elf.iplt;
4224 gotplt = htab->elf.igotplt;
4225 relplt = htab->elf.irelplt;
4226 }
4227
4228 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4229
4230 /* Get the index in the procedure linkage table which
4231 corresponds to this symbol. This is the index of this symbol
4232 in all the symbols for which we are making plt entries. The
4233 first entry in the procedure linkage table is reserved.
4234
4235 Get the offset into the .got table of the entry that
4236 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4237 bytes. The first three are reserved for the dynamic linker.
4238
4239 For static executables, we don't reserve anything. */
4240
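/* Worked example with illustrative numbers: with 16-byte lazy PLT
   entries and PLT0 present, h->plt.offset == 0x20 lands two entries
   past PLT0, giving an index of 1 and hence
   got_offset == 4 * GOT_ENTRY_SIZE, i.e. the second slot after the
   three reserved .got.plt entries. */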
4241 if (plt == htab->elf.splt)
4242 {
4243 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4244 - htab->plt.has_plt0);
4245 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4246 }
4247 else
4248 {
4249 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4250 got_offset = got_offset * GOT_ENTRY_SIZE;
4251 }
4252
4253 /* Fill in the entry in the procedure linkage table. */
4254 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4255 htab->plt.plt_entry_size);
4256 if (use_plt_second)
4257 {
4258 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4259 htab->non_lazy_plt->plt_entry,
4260 htab->non_lazy_plt->plt_entry_size);
4261
4262 resolved_plt = htab->plt_second;
4263 plt_offset = eh->plt_second.offset;
4264 }
4265 else
4266 {
4267 resolved_plt = plt;
4268 plt_offset = h->plt.offset;
4269 }
4270
4271 /* Insert the relocation positions of the plt section. */
4272
4273 /* Put in the offset of the GOT entry relative to the end of the
4274 PC-relative instruction that refers to it. */
4275 plt_got_pcrel_offset = (gotplt->output_section->vma
4276 + gotplt->output_offset
4277 + got_offset
4278 - resolved_plt->output_section->vma
4279 - resolved_plt->output_offset
4280 - plt_offset
4281 - htab->plt.plt_got_insn_size);
4282
4283 /* Check PC-relative offset overflow in PLT entry. */
4284 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4285 /* xgettext:c-format */
4286 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4287 output_bfd, h->root.root.string);
4288
4289 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4290 (resolved_plt->contents + plt_offset
4291 + htab->plt.plt_got_offset));
4292
4293 /* Fill in the entry in the global offset table; initially this
4294 points to the second part of the PLT entry. Leave the entry
4295 as zero for an undefined weak symbol in PIE; no PLT relocation
4296 is emitted against an undefined weak symbol in PIE. */
4297 if (!local_undefweak)
4298 {
4299 if (htab->plt.has_plt0)
4300 bfd_put_64 (output_bfd, (plt->output_section->vma
4301 + plt->output_offset
4302 + h->plt.offset
4303 + htab->lazy_plt->plt_lazy_offset),
4304 gotplt->contents + got_offset);
4305
4306 /* Fill in the entry in the .rela.plt section. */
4307 rela.r_offset = (gotplt->output_section->vma
4308 + gotplt->output_offset
4309 + got_offset);
4310 if (PLT_LOCAL_IFUNC_P (info, h))
4311 {
4312 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4313 h->root.root.string,
4314 h->root.u.def.section->owner);
4315
4316 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4317 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4318 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4319 rela.r_addend = (h->root.u.def.value
4320 + h->root.u.def.section->output_section->vma
4321 + h->root.u.def.section->output_offset);
4322
4323 if (htab->params->report_relative_reloc)
4324 _bfd_x86_elf_link_report_relative_reloc
4325 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4326
4327 /* R_X86_64_IRELATIVE comes last. */
4328 plt_index = htab->next_irelative_index--;
4329 }
4330 else
4331 {
4332 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4333 rela.r_addend = 0;
4334 plt_index = htab->next_jump_slot_index++;
4335 }
4336
4337 /* Don't fill in the second and third slots of the PLT entry
4338 for static executables or when there is no PLT0. */
4339 if (plt == htab->elf.splt && htab->plt.has_plt0)
4340 {
4341 bfd_vma plt0_offset
4342 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4343
4344 /* Put relocation index. */
4345 bfd_put_32 (output_bfd, plt_index,
4346 (plt->contents + h->plt.offset
4347 + htab->lazy_plt->plt_reloc_offset));
4348
4349 /* Put in the offset for jmp .PLT0 and check for overflow. We
4350 don't check the relocation index for overflow since the branch
4351 displacement will overflow first. */
4352 if (plt0_offset > 0x80000000)
4353 /* xgettext:c-format */
4354 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4355 output_bfd, h->root.root.string);
4356 bfd_put_32 (output_bfd, - plt0_offset,
4357 (plt->contents + h->plt.offset
4358 + htab->lazy_plt->plt_plt_offset));
4359 }
4360
4361 bed = get_elf_backend_data (output_bfd);
4362 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4363 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4364 }
4365 }
4366 else if (eh->plt_got.offset != (bfd_vma) -1)
4367 {
4368 bfd_vma got_offset, plt_offset;
4369 asection *plt, *got;
4370 bool got_after_plt;
4371 int32_t got_pcrel_offset;
4372
4373 /* Set the entry in the GOT procedure linkage table. */
4374 plt = htab->plt_got;
4375 got = htab->elf.sgot;
4376 got_offset = h->got.offset;
4377
4378 if (got_offset == (bfd_vma) -1
4379 || (h->type == STT_GNU_IFUNC && h->def_regular)
4380 || plt == NULL
4381 || got == NULL)
4382 abort ();
4383
4384 /* Use the non-lazy PLT entry template for the GOT PLT since they
4385 are identical. */
4386 /* Fill in the entry in the GOT procedure linkage table. */
4387 plt_offset = eh->plt_got.offset;
4388 memcpy (plt->contents + plt_offset,
4389 htab->non_lazy_plt->plt_entry,
4390 htab->non_lazy_plt->plt_entry_size);
4391
4392 /* Put in the offset of the GOT entry relative to the end of the
4393 PC-relative instruction that refers to it. */
4394 got_pcrel_offset = (got->output_section->vma
4395 + got->output_offset
4396 + got_offset
4397 - plt->output_section->vma
4398 - plt->output_offset
4399 - plt_offset
4400 - htab->non_lazy_plt->plt_got_insn_size);
4401
4402 /* Check PC-relative offset overflow in GOT PLT entry. */
4403 got_after_plt = got->output_section->vma > plt->output_section->vma;
4404 if ((got_after_plt && got_pcrel_offset < 0)
4405 || (!got_after_plt && got_pcrel_offset > 0))
4406 /* xgettext:c-format */
4407 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4408 output_bfd, h->root.root.string);
4409
4410 bfd_put_32 (output_bfd, got_pcrel_offset,
4411 (plt->contents + plt_offset
4412 + htab->non_lazy_plt->plt_got_offset));
4413 }
4414
4415 if (!local_undefweak
4416 && !h->def_regular
4417 && (h->plt.offset != (bfd_vma) -1
4418 || eh->plt_got.offset != (bfd_vma) -1))
4419 {
4420 /* Mark the symbol as undefined, rather than as defined in
4421 the .plt section. Leave the value if there were any
4422 relocations where pointer equality matters (this is a clue
4423 for the dynamic linker, to make function pointer
4424 comparisons work between an application and shared
4425 library), otherwise set it to zero. If a function is only
4426 called from a binary, there is no need to slow down
4427 shared libraries because of that. */
4428 sym->st_shndx = SHN_UNDEF;
4429 if (!h->pointer_equality_needed)
4430 sym->st_value = 0;
4431 }
4432
4433 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4434
4435 /* Don't generate a dynamic GOT relocation against an undefined
4436 weak symbol in an executable. */
4437 if (h->got.offset != (bfd_vma) -1
4438 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4439 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4440 && !local_undefweak)
4441 {
4442 Elf_Internal_Rela rela;
4443 asection *relgot = htab->elf.srelgot;
4444 const char *relative_reloc_name = NULL;
4445 bool generate_dynamic_reloc = true;
4446
4447 /* This symbol has an entry in the global offset table. Set it
4448 up. */
4449 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4450 abort ();
4451
4452 rela.r_offset = (htab->elf.sgot->output_section->vma
4453 + htab->elf.sgot->output_offset
4454 + (h->got.offset &~ (bfd_vma) 1));
4455
4456 /* If this is a static link, or it is a -Bsymbolic link and the
4457 symbol is defined locally or was forced to be local because
4458 of a version file, we just want to emit a RELATIVE reloc.
4459 The entry in the global offset table will already have been
4460 initialized in the relocate_section function. */
4461 if (h->def_regular
4462 && h->type == STT_GNU_IFUNC)
4463 {
4464 if (h->plt.offset == (bfd_vma) -1)
4465 {
4466 /* STT_GNU_IFUNC is referenced without PLT. */
4467 if (htab->elf.splt == NULL)
4468 {
4469 /* Use the .rel[a].iplt section to store .got relocations
4470 in a static executable. */
4471 relgot = htab->elf.irelplt;
4472 }
4473 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4474 {
4475 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4476 h->root.root.string,
4477 h->root.u.def.section->owner);
4478
4479 rela.r_info = htab->r_info (0,
4480 R_X86_64_IRELATIVE);
4481 rela.r_addend = (h->root.u.def.value
4482 + h->root.u.def.section->output_section->vma
4483 + h->root.u.def.section->output_offset);
4484 relative_reloc_name = "R_X86_64_IRELATIVE";
4485 }
4486 else
4487 goto do_glob_dat;
4488 }
4489 else if (bfd_link_pic (info))
4490 {
4491 /* Generate R_X86_64_GLOB_DAT. */
4492 goto do_glob_dat;
4493 }
4494 else
4495 {
4496 asection *plt;
4497 bfd_vma plt_offset;
4498
4499 if (!h->pointer_equality_needed)
4500 abort ();
4501
4502 /* For a non-shared object, we can't use .got.plt, which
4503 contains the real function address, if we need pointer
4504 equality. We load the GOT entry with the PLT entry address. */
4505 if (htab->plt_second != NULL)
4506 {
4507 plt = htab->plt_second;
4508 plt_offset = eh->plt_second.offset;
4509 }
4510 else
4511 {
4512 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4513 plt_offset = h->plt.offset;
4514 }
4515 bfd_put_64 (output_bfd, (plt->output_section->vma
4516 + plt->output_offset
4517 + plt_offset),
4518 htab->elf.sgot->contents + h->got.offset);
4519 return true;
4520 }
4521 }
4522 else if (bfd_link_pic (info)
4523 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4524 {
4525 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4526 return false;
4527 BFD_ASSERT((h->got.offset & 1) != 0);
4528 if (info->enable_dt_relr)
4529 generate_dynamic_reloc = false;
4530 else
4531 {
4532 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4533 rela.r_addend = (h->root.u.def.value
4534 + h->root.u.def.section->output_section->vma
4535 + h->root.u.def.section->output_offset);
4536 relative_reloc_name = "R_X86_64_RELATIVE";
4537 }
4538 }
4539 else
4540 {
4541 BFD_ASSERT((h->got.offset & 1) == 0);
4542 do_glob_dat:
4543 bfd_put_64 (output_bfd, (bfd_vma) 0,
4544 htab->elf.sgot->contents + h->got.offset);
4545 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4546 rela.r_addend = 0;
4547 }
4548
4549 if (generate_dynamic_reloc)
4550 {
4551 if (relative_reloc_name != NULL
4552 && htab->params->report_relative_reloc)
4553 _bfd_x86_elf_link_report_relative_reloc
4554 (info, relgot, h, sym, relative_reloc_name, &rela);
4555
4556 elf_append_rela (output_bfd, relgot, &rela);
4557 }
4558 }
4559
4560 if (h->needs_copy)
4561 {
4562 Elf_Internal_Rela rela;
4563 asection *s;
4564
4565 /* This symbol needs a copy reloc. Set it up. */
4566 VERIFY_COPY_RELOC (h, htab)
4567
4568 rela.r_offset = (h->root.u.def.value
4569 + h->root.u.def.section->output_section->vma
4570 + h->root.u.def.section->output_offset);
4571 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4572 rela.r_addend = 0;
4573 if (h->root.u.def.section == htab->elf.sdynrelro)
4574 s = htab->elf.sreldynrelro;
4575 else
4576 s = htab->elf.srelbss;
4577 elf_append_rela (output_bfd, s, &rela);
4578 }
4579
4580 return true;
4581 }
4582
4583 /* Finish up local dynamic symbol handling. We set the contents of
4584 various dynamic sections here. */
4585
4586 static int
4587 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4588 {
4589 struct elf_link_hash_entry *h
4590 = (struct elf_link_hash_entry *) *slot;
4591 struct bfd_link_info *info
4592 = (struct bfd_link_info *) inf;
4593
4594 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4595 info, h, NULL);
4596 }
4597
4598 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT
4599 entry here since an undefined weak symbol may not be dynamic and
4600 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4601
4602 static bool
4603 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4604 void *inf)
4605 {
4606 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4607 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4608
4609 if (h->root.type != bfd_link_hash_undefweak
4610 || h->dynindx != -1)
4611 return true;
4612
4613 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4614 info, h, NULL);
4615 }
4616
4617 /* Used to decide how to sort relocs in an optimal manner for the
4618 dynamic linker, before writing them out. */
4619
4620 static enum elf_reloc_type_class
4621 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4622 const asection *rel_sec ATTRIBUTE_UNUSED,
4623 const Elf_Internal_Rela *rela)
4624 {
4625 bfd *abfd = info->output_bfd;
4626 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4627 struct elf_x86_link_hash_table *htab
4628 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4629
4630 if (htab->elf.dynsym != NULL
4631 && htab->elf.dynsym->contents != NULL)
4632 {
4633 /* Check relocation against STT_GNU_IFUNC symbol if there are
4634 dynamic symbols. */
4635 unsigned long r_symndx = htab->r_sym (rela->r_info);
4636 if (r_symndx != STN_UNDEF)
4637 {
4638 Elf_Internal_Sym sym;
4639 if (!bed->s->swap_symbol_in (abfd,
4640 (htab->elf.dynsym->contents
4641 + r_symndx * bed->s->sizeof_sym),
4642 0, &sym))
4643 abort ();
4644
4645 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4646 return reloc_class_ifunc;
4647 }
4648 }
4649
4650 switch ((int) ELF32_R_TYPE (rela->r_info))
4651 {
4652 case R_X86_64_IRELATIVE:
4653 return reloc_class_ifunc;
4654 case R_X86_64_RELATIVE:
4655 case R_X86_64_RELATIVE64:
4656 return reloc_class_relative;
4657 case R_X86_64_JUMP_SLOT:
4658 return reloc_class_plt;
4659 case R_X86_64_COPY:
4660 return reloc_class_copy;
4661 default:
4662 return reloc_class_normal;
4663 }
4664 }
4665
4666 /* Finish up the dynamic sections. */
4667
4668 static bool
4669 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4670 struct bfd_link_info *info)
4671 {
4672 struct elf_x86_link_hash_table *htab;
4673
4674 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4675 if (htab == NULL)
4676 return false;
4677
4678 if (! htab->elf.dynamic_sections_created)
4679 return true;
4680
4681 if (htab->elf.splt && htab->elf.splt->size > 0)
4682 {
4683 if (bfd_is_abs_section (htab->elf.splt->output_section))
4684 {
4685 info->callbacks->einfo
4686 (_("%F%P: discarded output section: `%pA'\n"),
4687 htab->elf.splt);
4688 return false;
4689 }
4690
4691 elf_section_data (htab->elf.splt->output_section)
4692 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4693
4694 if (htab->plt.has_plt0)
4695 {
4696 /* Fill in the special first entry in the procedure linkage
4697 table. */
4698 memcpy (htab->elf.splt->contents,
4699 htab->lazy_plt->plt0_entry,
4700 htab->lazy_plt->plt0_entry_size);
4701 /* Add the offset for pushq GOT+8(%rip); since the instruction
4702 uses 6 bytes, subtract this value. */
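/* Illustrative arithmetic only (made-up addresses): with .got.plt
   at 0x404000 and PLT0 at 0x401020, the field gets
   0x404008 - 0x401026 = 0x2fe2. */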
4703 bfd_put_32 (output_bfd,
4704 (htab->elf.sgotplt->output_section->vma
4705 + htab->elf.sgotplt->output_offset
4706 + 8
4707 - htab->elf.splt->output_section->vma
4708 - htab->elf.splt->output_offset
4709 - 6),
4710 (htab->elf.splt->contents
4711 + htab->lazy_plt->plt0_got1_offset));
4712 /* Add offset for the PC-relative instruction accessing
4713 GOT+16, subtracting the offset to the end of that
4714 instruction. */
4715 bfd_put_32 (output_bfd,
4716 (htab->elf.sgotplt->output_section->vma
4717 + htab->elf.sgotplt->output_offset
4718 + 16
4719 - htab->elf.splt->output_section->vma
4720 - htab->elf.splt->output_offset
4721 - htab->lazy_plt->plt0_got2_insn_end),
4722 (htab->elf.splt->contents
4723 + htab->lazy_plt->plt0_got2_offset));
4724 }
4725
4726 if (htab->elf.tlsdesc_plt)
4727 {
4728 bfd_put_64 (output_bfd, (bfd_vma) 0,
4729 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4730
4731 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4732 htab->lazy_plt->plt_tlsdesc_entry,
4733 htab->lazy_plt->plt_tlsdesc_entry_size);
4734
4735 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4736 bytes and the instruction uses 6 bytes, subtract these
4737 values. */
4738 bfd_put_32 (output_bfd,
4739 (htab->elf.sgotplt->output_section->vma
4740 + htab->elf.sgotplt->output_offset
4741 + 8
4742 - htab->elf.splt->output_section->vma
4743 - htab->elf.splt->output_offset
4744 - htab->elf.tlsdesc_plt
4745 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4746 (htab->elf.splt->contents
4747 + htab->elf.tlsdesc_plt
4748 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4749 /* Add offset for indirect branch via GOT+TDG, where TDG
4750 stands for htab->tlsdesc_got, subtracting the offset
4751 to the end of that instruction. */
4752 bfd_put_32 (output_bfd,
4753 (htab->elf.sgot->output_section->vma
4754 + htab->elf.sgot->output_offset
4755 + htab->elf.tlsdesc_got
4756 - htab->elf.splt->output_section->vma
4757 - htab->elf.splt->output_offset
4758 - htab->elf.tlsdesc_plt
4759 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4760 (htab->elf.splt->contents
4761 + htab->elf.tlsdesc_plt
4762 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4763 }
4764 }
4765
4766 /* Fill PLT entries for undefined weak symbols in PIE. */
4767 if (bfd_link_pie (info))
4768 bfd_hash_traverse (&info->hash->table,
4769 elf_x86_64_pie_finish_undefweak_symbol,
4770 info);
4771
4772 return true;
4773 }
4774
4775 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4776 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4777 It has to be done before elf_link_sort_relocs is called so that
4778 dynamic relocations are properly sorted. */
4779
4780 static bool
4781 elf_x86_64_output_arch_local_syms
4782 (bfd *output_bfd ATTRIBUTE_UNUSED,
4783 struct bfd_link_info *info,
4784 void *flaginfo ATTRIBUTE_UNUSED,
4785 int (*func) (void *, const char *,
4786 Elf_Internal_Sym *,
4787 asection *,
4788 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4789 {
4790 struct elf_x86_link_hash_table *htab
4791 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4792 if (htab == NULL)
4793 return false;
4794
4795 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4796 htab_traverse (htab->loc_hash_table,
4797 elf_x86_64_finish_local_dynamic_symbol,
4798 info);
4799
4800 return true;
4801 }
4802
4803 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4804 dynamic relocations. */
4805
4806 static long
4807 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4808 long symcount ATTRIBUTE_UNUSED,
4809 asymbol **syms ATTRIBUTE_UNUSED,
4810 long dynsymcount,
4811 asymbol **dynsyms,
4812 asymbol **ret)
4813 {
4814 long count, i, n;
4815 int j;
4816 bfd_byte *plt_contents;
4817 long relsize;
4818 const struct elf_x86_lazy_plt_layout *lazy_plt;
4819 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4820 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4821 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4822 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4823 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4824 asection *plt;
4825 enum elf_x86_plt_type plt_type;
4826 struct elf_x86_plt plts[] =
4827 {
4828 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4829 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4830 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4831 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4832 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4833 };
4834
4835 *ret = NULL;
4836
4837 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4838 return 0;
4839
4840 if (dynsymcount <= 0)
4841 return 0;
4842
4843 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4844 if (relsize <= 0)
4845 return -1;
4846
4847 lazy_plt = &elf_x86_64_lazy_plt;
4848 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4849 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4850 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4851 if (ABI_64_P (abfd))
4852 {
4853 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4854 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4855 }
4856 else
4857 {
4858 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4859 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4860 }
4861
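/* The loop below classifies each PLT-flavoured section by matching
   its leading bytes against the known PLT templates and records its
   contents and entry geometry; _bfd_x86_elf_get_synthetic_symtab
   then pairs the entries with dynamic relocations to synthesize the
   <symbol>@plt synthetic symbols. */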
4862 count = 0;
4863 for (j = 0; plts[j].name != NULL; j++)
4864 {
4865 plt = bfd_get_section_by_name (abfd, plts[j].name);
4866 if (plt == NULL || plt->size == 0)
4867 continue;
4868
4869 /* Get the PLT section contents. */
4870 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4871 break;
4872
4873 /* Check what kind of PLT it is. */
4874 plt_type = plt_unknown;
4875 if (plts[j].type == plt_unknown
4876 && (plt->size >= (lazy_plt->plt_entry_size
4877 + lazy_plt->plt_entry_size)))
4878 {
4879 /* Match lazy PLT first. Need to check the first two
4880 instructions. */
4881 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4882 lazy_plt->plt0_got1_offset) == 0)
4883 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4884 2) == 0))
4885 plt_type = plt_lazy;
4886 else if (lazy_bnd_plt != NULL
4887 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4888 lazy_bnd_plt->plt0_got1_offset) == 0)
4889 && (memcmp (plt_contents + 6,
4890 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4891 {
4892 plt_type = plt_lazy | plt_second;
4893 /* The first entry in the lazy IBT PLT is the same as in the
4894 lazy BND PLT. */
4895 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4896 lazy_ibt_plt->plt_entry,
4897 lazy_ibt_plt->plt_got_offset) == 0))
4898 lazy_plt = lazy_ibt_plt;
4899 else
4900 lazy_plt = lazy_bnd_plt;
4901 }
4902 }
4903
4904 if (non_lazy_plt != NULL
4905 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4906 && plt->size >= non_lazy_plt->plt_entry_size)
4907 {
4908 /* Match non-lazy PLT. */
4909 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4910 non_lazy_plt->plt_got_offset) == 0)
4911 plt_type = plt_non_lazy;
4912 }
4913
4914 if (plt_type == plt_unknown || plt_type == plt_second)
4915 {
4916 if (non_lazy_bnd_plt != NULL
4917 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4918 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4919 non_lazy_bnd_plt->plt_got_offset) == 0))
4920 {
4921 /* Match BND PLT. */
4922 plt_type = plt_second;
4923 non_lazy_plt = non_lazy_bnd_plt;
4924 }
4925 else if (non_lazy_ibt_plt != NULL
4926 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4927 && (memcmp (plt_contents,
4928 non_lazy_ibt_plt->plt_entry,
4929 non_lazy_ibt_plt->plt_got_offset) == 0))
4930 {
4931 /* Match IBT PLT. */
4932 plt_type = plt_second;
4933 non_lazy_plt = non_lazy_ibt_plt;
4934 }
4935 }
4936
4937 if (plt_type == plt_unknown)
4938 {
4939 free (plt_contents);
4940 continue;
4941 }
4942
4943 plts[j].sec = plt;
4944 plts[j].type = plt_type;
4945
4946 if ((plt_type & plt_lazy))
4947 {
4948 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4949 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4950 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4951 /* Skip PLT0 in lazy PLT. */
4952 i = 1;
4953 }
4954 else
4955 {
4956 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4957 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4958 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4959 i = 0;
4960 }
4961
4962 /* Skip lazy PLT when the second PLT is used. */
4963 if (plt_type == (plt_lazy | plt_second))
4964 plts[j].count = 0;
4965 else
4966 {
4967 n = plt->size / plts[j].plt_entry_size;
4968 plts[j].count = n;
4969 count += n - i;
4970 }
4971
4972 plts[j].contents = plt_contents;
4973 }
4974
4975 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4976 (bfd_vma) 0, plts, dynsyms,
4977 ret);
4978 }
4979
4980 /* Handle an x86-64 specific section when reading an object file. This
4981 is called when elfcode.h finds a section with an unknown type. */
4982
4983 static bool
4984 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4985 const char *name, int shindex)
4986 {
4987 if (hdr->sh_type != SHT_X86_64_UNWIND)
4988 return false;
4989
4990 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4991 return false;
4992
4993 return true;
4994 }
4995
4996 /* Hook called by the linker routine which adds symbols from an object
4997 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4998 of .bss. */
4999
5000 static bool
5001 elf_x86_64_add_symbol_hook (bfd *abfd,
5002 struct bfd_link_info *info ATTRIBUTE_UNUSED,
5003 Elf_Internal_Sym *sym,
5004 const char **namep ATTRIBUTE_UNUSED,
5005 flagword *flagsp ATTRIBUTE_UNUSED,
5006 asection **secp,
5007 bfd_vma *valp)
5008 {
5009 asection *lcomm;
5010
5011 switch (sym->st_shndx)
5012 {
5013 case SHN_X86_64_LCOMMON:
5014 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5015 if (lcomm == NULL)
5016 {
5017 lcomm = bfd_make_section_with_flags (abfd,
5018 "LARGE_COMMON",
5019 (SEC_ALLOC
5020 | SEC_IS_COMMON
5021 | SEC_LINKER_CREATED));
5022 if (lcomm == NULL)
5023 return false;
5024 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5025 }
5026 *secp = lcomm;
5027 *valp = sym->st_size;
5028 return true;
5029 }
5030
5031 return true;
5032 }
5033
5034
5035 /* Given a BFD section, try to locate the corresponding ELF section
5036 index. */
5037
5038 static bool
5039 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5040 asection *sec, int *index_return)
5041 {
5042 if (sec == &_bfd_elf_large_com_section)
5043 {
5044 *index_return = SHN_X86_64_LCOMMON;
5045 return true;
5046 }
5047 return false;
5048 }
5049
5050 /* Process a symbol. */
5051
5052 static void
5053 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5054 asymbol *asym)
5055 {
5056 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5057
5058 switch (elfsym->internal_elf_sym.st_shndx)
5059 {
5060 case SHN_X86_64_LCOMMON:
5061 asym->section = &_bfd_elf_large_com_section;
5062 asym->value = elfsym->internal_elf_sym.st_size;
5063 /* Common symbol doesn't set BSF_GLOBAL. */
5064 asym->flags &= ~BSF_GLOBAL;
5065 break;
5066 }
5067 }
5068
5069 static bool
5070 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5071 {
5072 return (sym->st_shndx == SHN_COMMON
5073 || sym->st_shndx == SHN_X86_64_LCOMMON);
5074 }
5075
5076 static unsigned int
5077 elf_x86_64_common_section_index (asection *sec)
5078 {
5079 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5080 return SHN_COMMON;
5081 else
5082 return SHN_X86_64_LCOMMON;
5083 }
5084
5085 static asection *
5086 elf_x86_64_common_section (asection *sec)
5087 {
5088 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5089 return bfd_com_section_ptr;
5090 else
5091 return &_bfd_elf_large_com_section;
5092 }
5093
5094 static bool
5095 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5096 const Elf_Internal_Sym *sym,
5097 asection **psec,
5098 bool newdef,
5099 bool olddef,
5100 bfd *oldbfd,
5101 const asection *oldsec)
5102 {
5103 /* A normal common symbol and a large common symbol result in a
5104 normal common symbol. We turn the large common symbol into a
5105 normal one. */
5106 if (!olddef
5107 && h->root.type == bfd_link_hash_common
5108 && !newdef
5109 && bfd_is_com_section (*psec)
5110 && oldsec != *psec)
5111 {
5112 if (sym->st_shndx == SHN_COMMON
5113 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5114 {
5115 h->root.u.c.p->section
5116 = bfd_make_section_old_way (oldbfd, "COMMON");
5117 h->root.u.c.p->section->flags = SEC_ALLOC;
5118 }
5119 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5120 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5121 *psec = bfd_com_section_ptr;
5122 }
5123
5124 return true;
5125 }
5126
5127 static int
5128 elf_x86_64_additional_program_headers (bfd *abfd,
5129 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5130 {
5131 asection *s;
5132 int count = 0;
5133
5134 /* Check to see if we need a large readonly segment. */
5135 s = bfd_get_section_by_name (abfd, ".lrodata");
5136 if (s && (s->flags & SEC_LOAD))
5137 count++;
5138
5139 /* Check to see if we need a large data segment. Since the .lbss
5140 section is placed right after the .bss section, there should be no
5141 need for a large data segment just because of .lbss. */
5142 s = bfd_get_section_by_name (abfd, ".ldata");
5143 if (s && (s->flags & SEC_LOAD))
5144 count++;
5145
5146 return count;
5147 }
5148
5149 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5150
5151 static bool
5152 elf_x86_64_relocs_compatible (const bfd_target *input,
5153 const bfd_target *output)
5154 {
5155 return ((xvec_get_elf_backend_data (input)->s->elfclass
5156 == xvec_get_elf_backend_data (output)->s->elfclass)
5157 && _bfd_elf_relocs_compatible (input, output));
5158 }
5159
5160 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5161 with GNU properties if found. Otherwise, return NULL. */
5162
5163 static bfd *
5164 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5165 {
5166 struct elf_x86_init_table init_table;
5167 const struct elf_backend_data *bed;
5168 struct elf_x86_link_hash_table *htab;
5169
5170 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5171 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5172 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5173 != (int) R_X86_64_GNU_VTINHERIT)
5174 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5175 != (int) R_X86_64_GNU_VTENTRY))
5176 abort ();
5177
5178 /* This is unused for x86-64. */
5179 init_table.plt0_pad_byte = 0x90;
5180
5181 bed = get_elf_backend_data (info->output_bfd);
5182 htab = elf_x86_hash_table (info, bed->target_id);
5183 if (!htab)
5184 abort ();
5185 if (htab->params->bndplt)
5186 {
5187 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5188 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5189 }
5190 else
5191 {
5192 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5193 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5194 }
5195
5196 if (ABI_64_P (info->output_bfd))
5197 {
5198 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5199 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5200 }
5201 else
5202 {
5203 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5204 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5205 }
5206
5207 if (ABI_64_P (info->output_bfd))
5208 {
5209 init_table.r_info = elf64_r_info;
5210 init_table.r_sym = elf64_r_sym;
5211 }
5212 else
5213 {
5214 init_table.r_info = elf32_r_info;
5215 init_table.r_sym = elf32_r_sym;
5216 }
5217
5218 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5219 }
5220
5221 static const struct bfd_elf_special_section
5222 elf_x86_64_special_sections[]=
5223 {
5224 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5225 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5226 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5227 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5228 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5229 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5230 { NULL, 0, 0, 0, 0 }
5231 };
5232
5233 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5234 #define TARGET_LITTLE_NAME "elf64-x86-64"
5235 #define ELF_ARCH bfd_arch_i386
5236 #define ELF_TARGET_ID X86_64_ELF_DATA
5237 #define ELF_MACHINE_CODE EM_X86_64
5238 #if DEFAULT_LD_Z_SEPARATE_CODE
5239 # define ELF_MAXPAGESIZE 0x1000
5240 #else
5241 # define ELF_MAXPAGESIZE 0x200000
5242 #endif
5243 #define ELF_COMMONPAGESIZE 0x1000
5244
5245 #define elf_backend_can_gc_sections 1
5246 #define elf_backend_can_refcount 1
5247 #define elf_backend_want_got_plt 1
5248 #define elf_backend_plt_readonly 1
5249 #define elf_backend_want_plt_sym 0
5250 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5251 #define elf_backend_rela_normal 1
5252 #define elf_backend_plt_alignment 4
5253 #define elf_backend_extern_protected_data 1
5254 #define elf_backend_caches_rawsize 1
5255 #define elf_backend_dtrel_excludes_plt 1
5256 #define elf_backend_want_dynrelro 1
5257
5258 #define elf_info_to_howto elf_x86_64_info_to_howto
5259
5260 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5261 #define bfd_elf64_bfd_reloc_name_lookup \
5262 elf_x86_64_reloc_name_lookup
5263
5264 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5265 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5266 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5267 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5268 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5269 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5270 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5271 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5272 #ifdef CORE_HEADER
5273 #define elf_backend_write_core_note elf_x86_64_write_core_note
5274 #endif
5275 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5276 #define elf_backend_relocate_section elf_x86_64_relocate_section
5277 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5278 #define elf_backend_object_p elf64_x86_64_elf_object_p
5279 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5280
5281 #define elf_backend_section_from_shdr \
5282 elf_x86_64_section_from_shdr
5283
5284 #define elf_backend_section_from_bfd_section \
5285 elf_x86_64_elf_section_from_bfd_section
5286 #define elf_backend_add_symbol_hook \
5287 elf_x86_64_add_symbol_hook
5288 #define elf_backend_symbol_processing \
5289 elf_x86_64_symbol_processing
5290 #define elf_backend_common_section_index \
5291 elf_x86_64_common_section_index
5292 #define elf_backend_common_section \
5293 elf_x86_64_common_section
5294 #define elf_backend_common_definition \
5295 elf_x86_64_common_definition
5296 #define elf_backend_merge_symbol \
5297 elf_x86_64_merge_symbol
5298 #define elf_backend_special_sections \
5299 elf_x86_64_special_sections
5300 #define elf_backend_additional_program_headers \
5301 elf_x86_64_additional_program_headers
5302 #define elf_backend_setup_gnu_properties \
5303 elf_x86_64_link_setup_gnu_properties
5304 #define elf_backend_hide_symbol \
5305 _bfd_x86_elf_hide_symbol
5306
5307 #undef elf64_bed
5308 #define elf64_bed elf64_x86_64_bed
5309
5310 #include "elf64-target.h"
5311
5312 /* CloudABI support. */
5313
5314 #undef TARGET_LITTLE_SYM
5315 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5316 #undef TARGET_LITTLE_NAME
5317 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5318
5319 #undef ELF_OSABI
5320 #define ELF_OSABI ELFOSABI_CLOUDABI
5321
5322 #undef elf64_bed
5323 #define elf64_bed elf64_x86_64_cloudabi_bed
5324
5325 #include "elf64-target.h"
5326
5327 /* FreeBSD support. */
5328
5329 #undef TARGET_LITTLE_SYM
5330 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5331 #undef TARGET_LITTLE_NAME
5332 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5333
5334 #undef ELF_OSABI
5335 #define ELF_OSABI ELFOSABI_FREEBSD
5336
5337 #undef elf64_bed
5338 #define elf64_bed elf64_x86_64_fbsd_bed
5339
5340 #include "elf64-target.h"
5341
5342 /* Solaris 2 support. */
5343
5344 #undef TARGET_LITTLE_SYM
5345 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5346 #undef TARGET_LITTLE_NAME
5347 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5348
5349 #undef ELF_TARGET_OS
5350 #define ELF_TARGET_OS is_solaris
5351
5352 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5353 objects won't be recognized. */
5354 #undef ELF_OSABI
5355
5356 #undef elf64_bed
5357 #define elf64_bed elf64_x86_64_sol2_bed
5358
5359 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5360 boundary. */
5361 #undef elf_backend_static_tls_alignment
5362 #define elf_backend_static_tls_alignment 16
5363
5364 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5365
5366 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5367 File, p.63. */
5368 #undef elf_backend_want_plt_sym
5369 #define elf_backend_want_plt_sym 1
5370
5371 #undef elf_backend_strtab_flags
5372 #define elf_backend_strtab_flags SHF_STRINGS
5373
5374 static bool
5375 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5376 bfd *obfd ATTRIBUTE_UNUSED,
5377 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5378 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5379 {
5380 /* PR 19938: FIXME: Need to add code for setting the sh_info
5381 and sh_link fields of Solaris specific section types. */
5382 return false;
5383 }
5384
5385 #undef elf_backend_copy_special_section_fields
5386 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5387
5388 #include "elf64-target.h"
5389
5390 /* Restore defaults. */
5391 #undef ELF_OSABI
5392 #undef elf_backend_static_tls_alignment
5393 #undef elf_backend_want_plt_sym
5394 #define elf_backend_want_plt_sym 0
5395 #undef elf_backend_strtab_flags
5396 #undef elf_backend_copy_special_section_fields
5397
5398 /* Intel L1OM support. */
5399
5400 static bool
5401 elf64_l1om_elf_object_p (bfd *abfd)
5402 {
5403 /* Set the right machine number for an L1OM elf64 file. */
5404 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5405 return true;
5406 }
5407
5408 #undef TARGET_LITTLE_SYM
5409 #define TARGET_LITTLE_SYM l1om_elf64_vec
5410 #undef TARGET_LITTLE_NAME
5411 #define TARGET_LITTLE_NAME "elf64-l1om"
5412 #undef ELF_ARCH
5413 #define ELF_ARCH bfd_arch_l1om
5414
5415 #undef ELF_MACHINE_CODE
5416 #define ELF_MACHINE_CODE EM_L1OM
5417
5418 #undef ELF_OSABI
5419
5420 #undef elf64_bed
5421 #define elf64_bed elf64_l1om_bed
5422
5423 #undef elf_backend_object_p
5424 #define elf_backend_object_p elf64_l1om_elf_object_p
5425
5426 /* Restore defaults. */
5427 #undef ELF_TARGET_OS
5428
5429 #include "elf64-target.h"
5430
5431 /* FreeBSD L1OM support. */
5432
5433 #undef TARGET_LITTLE_SYM
5434 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5435 #undef TARGET_LITTLE_NAME
5436 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5437
5438 #undef ELF_OSABI
5439 #define ELF_OSABI ELFOSABI_FREEBSD
5440
5441 #undef elf64_bed
5442 #define elf64_bed elf64_l1om_fbsd_bed
5443
5444 #include "elf64-target.h"
5445
5446 /* Intel K1OM support. */
5447
5448 static bool
5449 elf64_k1om_elf_object_p (bfd *abfd)
5450 {
5451 /* Set the right machine number for a K1OM elf64 file. */
5452 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5453 return true;
5454 }
5455
5456 #undef TARGET_LITTLE_SYM
5457 #define TARGET_LITTLE_SYM k1om_elf64_vec
5458 #undef TARGET_LITTLE_NAME
5459 #define TARGET_LITTLE_NAME "elf64-k1om"
5460 #undef ELF_ARCH
5461 #define ELF_ARCH bfd_arch_k1om
5462
5463 #undef ELF_MACHINE_CODE
5464 #define ELF_MACHINE_CODE EM_K1OM
5465
5466 #undef ELF_OSABI
5467
5468 #undef elf64_bed
5469 #define elf64_bed elf64_k1om_bed
5470
5471 #undef elf_backend_object_p
5472 #define elf_backend_object_p elf64_k1om_elf_object_p
5473
5474 #include "elf64-target.h"
5475
5476 /* FreeBSD K1OM support. */
5477
5478 #undef TARGET_LITTLE_SYM
5479 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5480 #undef TARGET_LITTLE_NAME
5481 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5482
5483 #undef ELF_OSABI
5484 #define ELF_OSABI ELFOSABI_FREEBSD
5485
5486 #undef elf64_bed
5487 #define elf64_bed elf64_k1om_fbsd_bed
5488
5489 #include "elf64-target.h"
5490
5491 /* 32bit x86-64 support. */
5492
5493 #undef TARGET_LITTLE_SYM
5494 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5495 #undef TARGET_LITTLE_NAME
5496 #define TARGET_LITTLE_NAME "elf32-x86-64"
5497 #undef elf32_bed
5498 #define elf32_bed elf32_x86_64_bed
5499
5500 #undef ELF_ARCH
5501 #define ELF_ARCH bfd_arch_i386
5502
5503 #undef ELF_MACHINE_CODE
5504 #define ELF_MACHINE_CODE EM_X86_64
5505
5506 #undef ELF_OSABI
5507
5508 #define bfd_elf32_bfd_reloc_type_lookup \
5509 elf_x86_64_reloc_type_lookup
5510 #define bfd_elf32_bfd_reloc_name_lookup \
5511 elf_x86_64_reloc_name_lookup
5512 #define bfd_elf32_get_synthetic_symtab \
5513 elf_x86_64_get_synthetic_symtab
5514
5515 #undef elf_backend_object_p
5516 #define elf_backend_object_p \
5517 elf32_x86_64_elf_object_p
5518
5519 #undef elf_backend_bfd_from_remote_memory
5520 #define elf_backend_bfd_from_remote_memory \
5521 _bfd_elf32_bfd_from_remote_memory
5522
5523 #undef elf_backend_size_info
5524 #define elf_backend_size_info \
5525 _bfd_elf32_size_info
5526
5527 #include "elf32-target.h"