bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2020 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
39 an identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to
40 get the relocation type. We also use ELF_ST_TYPE instead of
41 ELF64_ST_TYPE since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
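/* For example, assuming the reloc numbers from elf/x86-64.h
   (R_X86_64_REX_GOTPCRELX == 42, R_X86_64_GNU_VTINHERIT == 250):
   R_X86_64_standard is 43, R_X86_64_vt_offset is 250 - 43 == 207, and a
   reloc of type R_X86_64_GNU_VTINHERIT indexes entry 250 - 207 == 43,
   the VTINHERIT howto below.  */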
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 #define X86_PCREL_TYPE_P(TYPE) \
200 ( ((TYPE) == R_X86_64_PC8) \
201 || ((TYPE) == R_X86_64_PC16) \
202 || ((TYPE) == R_X86_64_PC32) \
203 || ((TYPE) == R_X86_64_PC32_BND) \
204 || ((TYPE) == R_X86_64_PC64))
205
206 #define X86_SIZE_TYPE_P(TYPE) \
207 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
258 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
259 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
260 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
261 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
262 };
263
264 static reloc_howto_type *
265 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
266 {
267 unsigned i;
268
269 if (r_type == (unsigned int) R_X86_64_32)
270 {
271 if (ABI_64_P (abfd))
272 i = r_type;
273 else
274 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
275 }
276 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
277 || r_type >= (unsigned int) R_X86_64_max)
278 {
279 if (r_type >= (unsigned int) R_X86_64_standard)
280 {
281 /* xgettext:c-format */
282 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
283 abfd, r_type);
284 bfd_set_error (bfd_error_bad_value);
285 return NULL;
286 }
287 i = r_type;
288 }
289 else
290 i = r_type - (unsigned int) R_X86_64_vt_offset;
291 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
292 return &x86_64_elf_howto_table[i];
293 }
294
295 /* Given a BFD reloc type, return a HOWTO structure. */
296 static reloc_howto_type *
297 elf_x86_64_reloc_type_lookup (bfd *abfd,
298 bfd_reloc_code_real_type code)
299 {
300 unsigned int i;
301
302 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
303 i++)
304 {
305 if (x86_64_reloc_map[i].bfd_reloc_val == code)
306 return elf_x86_64_rtype_to_howto (abfd,
307 x86_64_reloc_map[i].elf_reloc_val);
308 }
309 return NULL;
310 }
311
312 static reloc_howto_type *
313 elf_x86_64_reloc_name_lookup (bfd *abfd,
314 const char *r_name)
315 {
316 unsigned int i;
317
318 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
319 {
320 /* Get x32 R_X86_64_32. */
321 reloc_howto_type *reloc
322 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
323 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
324 return reloc;
325 }
326
327 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
328 if (x86_64_elf_howto_table[i].name != NULL
329 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
330 return &x86_64_elf_howto_table[i];
331
332 return NULL;
333 }
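/* A minimal usage sketch (not part of this file): the two lookup
   routines above are normally reached through the generic BFD entry
   points, e.g., assuming ABFD is an x86-64 ELF bfd:

     reloc_howto_type *howto;
     howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
     // howto->name is "R_X86_64_PC32" for this target.
     howto = bfd_reloc_name_lookup (abfd, "R_X86_64_GOTPCRELX");
     // Name lookup is case-insensitive (strcasecmp above).  */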
334
335 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
336
337 static bfd_boolean
338 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
339 Elf_Internal_Rela *dst)
340 {
341 unsigned r_type;
342
343 r_type = ELF32_R_TYPE (dst->r_info);
344 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
345 if (cache_ptr->howto == NULL)
346 return FALSE;
347 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
348 return TRUE;
349 }
350 \f
351 /* Support for core dump NOTE sections. */
352 static bfd_boolean
353 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
354 {
355 int offset;
356 size_t size;
357
358 switch (note->descsz)
359 {
360 default:
361 return FALSE;
362
363 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
364 /* pr_cursig */
365 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
366
367 /* pr_pid */
368 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
369
370 /* pr_reg */
371 offset = 72;
372 size = 216;
373
374 break;
375
376 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
377 /* pr_cursig */
378 elf_tdata (abfd)->core->signal
379 = bfd_get_16 (abfd, note->descdata + 12);
380
381 /* pr_pid */
382 elf_tdata (abfd)->core->lwpid
383 = bfd_get_32 (abfd, note->descdata + 32);
384
385 /* pr_reg */
386 offset = 112;
387 size = 216;
388
389 break;
390 }
391
392 /* Make a ".reg/999" section. */
393 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
394 size, note->descpos + offset);
395 }
396
397 static bfd_boolean
398 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
399 {
400 switch (note->descsz)
401 {
402 default:
403 return FALSE;
404
405 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
406 elf_tdata (abfd)->core->pid
407 = bfd_get_32 (abfd, note->descdata + 12);
408 elf_tdata (abfd)->core->program
409 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
410 elf_tdata (abfd)->core->command
411 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
412 break;
413
414 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
415 elf_tdata (abfd)->core->pid
416 = bfd_get_32 (abfd, note->descdata + 24);
417 elf_tdata (abfd)->core->program
418 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
419 elf_tdata (abfd)->core->command
420 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
421 }
422
423 /* Note that for some reason, a spurious space is tacked
424 onto the end of the args in some (at least one anyway)
425 implementations, so strip it off if it exists. */
426
427 {
428 char *command = elf_tdata (abfd)->core->command;
429 int n = strlen (command);
430
431 if (0 < n && command[n - 1] == ' ')
432 command[n - 1] = '\0';
433 }
434
435 return TRUE;
436 }
437
438 #ifdef CORE_HEADER
439 # if GCC_VERSION >= 8000
440 # pragma GCC diagnostic push
441 # pragma GCC diagnostic ignored "-Wstringop-truncation"
442 # endif
443 static char *
444 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
445 int note_type, ...)
446 {
447 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
448 va_list ap;
449 const char *fname, *psargs;
450 long pid;
451 int cursig;
452 const void *gregs;
453
454 switch (note_type)
455 {
456 default:
457 return NULL;
458
459 case NT_PRPSINFO:
460 va_start (ap, note_type);
461 fname = va_arg (ap, const char *);
462 psargs = va_arg (ap, const char *);
463 va_end (ap);
464
465 if (bed->s->elfclass == ELFCLASS32)
466 {
467 prpsinfo32_t data;
468 memset (&data, 0, sizeof (data));
469 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
470 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
471 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
472 &data, sizeof (data));
473 }
474 else
475 {
476 prpsinfo64_t data;
477 memset (&data, 0, sizeof (data));
478 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
479 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
480 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
481 &data, sizeof (data));
482 }
483 /* NOTREACHED */
484
485 case NT_PRSTATUS:
486 va_start (ap, note_type);
487 pid = va_arg (ap, long);
488 cursig = va_arg (ap, int);
489 gregs = va_arg (ap, const void *);
490 va_end (ap);
491
492 if (bed->s->elfclass == ELFCLASS32)
493 {
494 if (bed->elf_machine_code == EM_X86_64)
495 {
496 prstatusx32_t prstat;
497 memset (&prstat, 0, sizeof (prstat));
498 prstat.pr_pid = pid;
499 prstat.pr_cursig = cursig;
500 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
501 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
502 &prstat, sizeof (prstat));
503 }
504 else
505 {
506 prstatus32_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 else
516 {
517 prstatus64_t prstat;
518 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_pid = pid;
520 prstat.pr_cursig = cursig;
521 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
522 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
523 &prstat, sizeof (prstat));
524 }
525 }
526 /* NOTREACHED */
527 }
528 # if GCC_VERSION >= 8000
529 # pragma GCC diagnostic pop
530 # endif
531 #endif
532 \f
533 /* Functions for the x86-64 ELF linker. */
534
535 /* The size in bytes of an entry in the global offset table. */
536
537 #define GOT_ENTRY_SIZE 8
538
539 /* The size in bytes of an entry in the lazy procedure linkage table. */
540
541 #define LAZY_PLT_ENTRY_SIZE 16
542
543 /* The size in bytes of an entry in the non-lazy procedure linkage
544 table. */
545
546 #define NON_LAZY_PLT_ENTRY_SIZE 8
547
548 /* The first entry in a lazy procedure linkage table looks like this.
549 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
550 works. */
551
552 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
553 {
554 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
555 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
556 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
557 };
558
559 /* Subsequent entries in a lazy procedure linkage table look like this. */
560
561 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
562 {
563 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
564 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
565 0x68, /* pushq immediate */
566 0, 0, 0, 0, /* replaced with index into relocation table. */
567 0xe9, /* jmp relative */
568 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
569 };
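/* A rough sketch of how the two entries above implement lazy binding
   (under the usual x86-64 ABI/glibc convention that the dynamic linker
   stores the link map at GOT+8 and its resolver address at GOT+16):

     call foo@plt
       jmpq *foo@GOT(%rip)    initially points back at the next insn
       pushq $index           index of foo's JUMP_SLOT relocation
       jmp plt0
     plt0:
       pushq GOT+8(%rip)      push the link map
       jmpq *GOT+16(%rip)     enter the resolver, which binds foo and
                              patches foo's GOT slot for later calls  */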
570
571 /* The first entry in a lazy procedure linkage table with BND prefix
572 looks like this. */
573
574 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
575 {
576 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
577 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
578 0x0f, 0x1f, 0 /* nopl (%rax) */
579 };
580
581 /* Subsequent entries for branches with BND prefix in a lazy procedure
582 linkage table look like this. */
583
584 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
585 {
586 0x68, 0, 0, 0, 0, /* pushq immediate */
587 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
588 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
589 };
590
591 /* The first entry in the IBT-enabled lazy procedure linkage table is
592 the same as the lazy PLT with BND prefix so that bound registers are
593 preserved when control is passed to the dynamic linker. Subsequent
594 entries for an IBT-enabled lazy procedure linkage table look like
595 this. */
596
597 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
598 {
599 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
600 0x68, 0, 0, 0, 0, /* pushq immediate */
601 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
602 0x90 /* nop */
603 };
604
605 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
606 is the same as the normal lazy PLT. Subsequent entries for an
607 x32 IBT-enabled lazy procedure linkage table look like this. */
608
609 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
610 {
611 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
612 0x68, 0, 0, 0, 0, /* pushq immediate */
613 0xe9, 0, 0, 0, 0, /* jmpq relative */
614 0x66, 0x90 /* xchg %ax,%ax */
615 };
616
617 /* Entries in the non-lazy procedure linkage table look like this. */
618
619 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
620 {
621 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
622 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
623 0x66, 0x90 /* xchg %ax,%ax */
624 };
625
626 /* Entries for branches with BND prefix in the non-lazy procedure
627 linkage table look like this. */
628
629 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
630 {
631 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
632 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
633 0x90 /* nop */
634 };
635
636 /* Entries for branches with IBT enabled in the non-lazy procedure
637 linkage table look like this. They have the same size as the lazy
638 PLT entry. */
639
640 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
641 {
642 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
643 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
644 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
645 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
646 };
647
648 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
649 linkage table look like this. They have the same size as the lazy
650 PLT entry. */
651
652 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
653 {
654 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
655 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
656 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
657 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
658 };
659
660 /* The TLSDESC entry in a lazy procedure linkage table. */
661 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
662 {
663 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
664 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
665 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
666 };
667
668 /* .eh_frame covering the lazy .plt section. */
669
670 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
671 {
672 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
673 0, 0, 0, 0, /* CIE ID */
674 1, /* CIE version */
675 'z', 'R', 0, /* Augmentation string */
676 1, /* Code alignment factor */
677 0x78, /* Data alignment factor */
678 16, /* Return address column */
679 1, /* Augmentation size */
680 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
681 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
682 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
683 DW_CFA_nop, DW_CFA_nop,
684
685 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
686 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
687 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
688 0, 0, 0, 0, /* .plt size goes here */
689 0, /* Augmentation size */
690 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
691 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
692 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
693 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
694 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
695 11, /* Block length */
696 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
697 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
698 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
699 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
700 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
701 };
702
703 /* .eh_frame covering the lazy BND .plt section. */
704
705 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
706 {
707 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
708 0, 0, 0, 0, /* CIE ID */
709 1, /* CIE version */
710 'z', 'R', 0, /* Augmentation string */
711 1, /* Code alignment factor */
712 0x78, /* Data alignment factor */
713 16, /* Return address column */
714 1, /* Augmentation size */
715 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
716 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
717 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
718 DW_CFA_nop, DW_CFA_nop,
719
720 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
721 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
722 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
723 0, 0, 0, 0, /* .plt size goes here */
724 0, /* Augmentation size */
725 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
726 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
727 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
728 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
729 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
730 11, /* Block length */
731 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
732 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
733 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
734 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
735 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
736 };
737
738 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
739
740 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
741 {
742 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
743 0, 0, 0, 0, /* CIE ID */
744 1, /* CIE version */
745 'z', 'R', 0, /* Augmentation string */
746 1, /* Code alignment factor */
747 0x78, /* Data alignment factor */
748 16, /* Return address column */
749 1, /* Augmentation size */
750 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
751 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
752 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
753 DW_CFA_nop, DW_CFA_nop,
754
755 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
756 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
757 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
758 0, 0, 0, 0, /* .plt size goes here */
759 0, /* Augmentation size */
760 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
761 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
762 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
763 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
764 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
765 11, /* Block length */
766 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
767 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
768 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
769 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
770 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
771 };
772
773 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
774
775 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
776 {
777 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
778 0, 0, 0, 0, /* CIE ID */
779 1, /* CIE version */
780 'z', 'R', 0, /* Augmentation string */
781 1, /* Code alignment factor */
782 0x78, /* Data alignment factor */
783 16, /* Return address column */
784 1, /* Augmentation size */
785 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
786 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
787 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
788 DW_CFA_nop, DW_CFA_nop,
789
790 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
791 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
792 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
793 0, 0, 0, 0, /* .plt size goes here */
794 0, /* Augmentation size */
795 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
796 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
797 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
798 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
799 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
800 11, /* Block length */
801 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
802 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
803 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
804 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
805 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
806 };
807
808 /* .eh_frame covering the non-lazy .plt section. */
809
810 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
811 {
812 #define PLT_GOT_FDE_LENGTH 20
813 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
814 0, 0, 0, 0, /* CIE ID */
815 1, /* CIE version */
816 'z', 'R', 0, /* Augmentation string */
817 1, /* Code alignment factor */
818 0x78, /* Data alignment factor */
819 16, /* Return address column */
820 1, /* Augmentation size */
821 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
822 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
823 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
824 DW_CFA_nop, DW_CFA_nop,
825
826 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
827 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
828 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
829 0, 0, 0, 0, /* non-lazy .plt size goes here */
830 0, /* Augmentation size */
831 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
832 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
833 };
834
835 /* These are the standard parameters. */
836 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
837 {
838 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
839 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
840 elf_x86_64_lazy_plt_entry, /* plt_entry */
841 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
842 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
843 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
844 6, /* plt_tlsdesc_got1_offset */
845 12, /* plt_tlsdesc_got2_offset */
846 10, /* plt_tlsdesc_got1_insn_end */
847 16, /* plt_tlsdesc_got2_insn_end */
848 2, /* plt0_got1_offset */
849 8, /* plt0_got2_offset */
850 12, /* plt0_got2_insn_end */
851 2, /* plt_got_offset */
852 7, /* plt_reloc_offset */
853 12, /* plt_plt_offset */
854 6, /* plt_got_insn_size */
855 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
856 6, /* plt_lazy_offset */
857 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
858 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
859 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
860 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
861 };
862
863 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
864 {
865 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
866 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
867 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
868 2, /* plt_got_offset */
869 6, /* plt_got_insn_size */
870 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
871 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
872 };
873
874 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
875 {
876 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
877 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
878 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
879 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
880 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
881 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
882 6, /* plt_tlsdesc_got1_offset */
883 12, /* plt_tlsdesc_got2_offset */
884 10, /* plt_tlsdesc_got1_insn_end */
885 16, /* plt_tlsdesc_got2_insn_end */
886 2, /* plt0_got1_offset */
887 1+8, /* plt0_got2_offset */
888 1+12, /* plt0_got2_insn_end */
889 1+2, /* plt_got_offset */
890 1, /* plt_reloc_offset */
891 7, /* plt_plt_offset */
892 1+6, /* plt_got_insn_size */
893 11, /* plt_plt_insn_end */
894 0, /* plt_lazy_offset */
895 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
896 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
897 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
898 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
899 };
900
901 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
902 {
903 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
904 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
905 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
906 1+2, /* plt_got_offset */
907 1+6, /* plt_got_insn_size */
908 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
909 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
910 };
911
912 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
913 {
914 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
915 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
916 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
917 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
918 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
919 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
920 6, /* plt_tlsdesc_got1_offset */
921 12, /* plt_tlsdesc_got2_offset */
922 10, /* plt_tlsdesc_got1_insn_end */
923 16, /* plt_tlsdesc_got2_insn_end */
924 2, /* plt0_got1_offset */
925 1+8, /* plt0_got2_offset */
926 1+12, /* plt0_got2_insn_end */
927 4+1+2, /* plt_got_offset */
928 4+1, /* plt_reloc_offset */
929 4+1+6, /* plt_plt_offset */
930 4+1+6, /* plt_got_insn_size */
931 4+1+5+5, /* plt_plt_insn_end */
932 0, /* plt_lazy_offset */
933 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
934 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
935 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
936 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
937 };
938
939 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
940 {
941 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
942 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
943 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
944 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
945 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
946 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
947 6, /* plt_tlsdesc_got1_offset */
948 12, /* plt_tlsdesc_got2_offset */
949 10, /* plt_tlsdesc_got1_insn_end */
950 16, /* plt_tlsdesc_got2_insn_end */
951 2, /* plt0_got1_offset */
952 8, /* plt0_got2_offset */
953 12, /* plt0_got2_insn_end */
954 4+2, /* plt_got_offset */
955 4+1, /* plt_reloc_offset */
956 4+6, /* plt_plt_offset */
957 4+6, /* plt_got_insn_size */
958 4+5+5, /* plt_plt_insn_end */
959 0, /* plt_lazy_offset */
960 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
961 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
962 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
963 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
964 };
965
966 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
967 {
968 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
969 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
970 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
971 4+1+2, /* plt_got_offset */
972 4+1+6, /* plt_got_insn_size */
973 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
974 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
975 };
976
977 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
978 {
979 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
980 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
981 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
982 4+2, /* plt_got_offset */
983 4+6, /* plt_got_insn_size */
984 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
985 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
986 };
987
988 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
989 {
990 is_normal /* os */
991 };
992
993 #define elf_backend_arch_data &elf_x86_64_arch_bed
994
995 static bfd_boolean
996 elf64_x86_64_elf_object_p (bfd *abfd)
997 {
998 /* Set the right machine number for an x86-64 elf64 file. */
999 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1000 return TRUE;
1001 }
1002
1003 static bfd_boolean
1004 elf32_x86_64_elf_object_p (bfd *abfd)
1005 {
1006 /* Set the right machine number for an x86-64 elf32 file. */
1007 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1008 return TRUE;
1009 }
1010
1011 /* Return TRUE if the TLS access code sequence supports the transition
1012 from R_TYPE. */
1013
1014 static bfd_boolean
1015 elf_x86_64_check_tls_transition (bfd *abfd,
1016 struct bfd_link_info *info,
1017 asection *sec,
1018 bfd_byte *contents,
1019 Elf_Internal_Shdr *symtab_hdr,
1020 struct elf_link_hash_entry **sym_hashes,
1021 unsigned int r_type,
1022 const Elf_Internal_Rela *rel,
1023 const Elf_Internal_Rela *relend)
1024 {
1025 unsigned int val;
1026 unsigned long r_symndx;
1027 bfd_boolean largepic = FALSE;
1028 struct elf_link_hash_entry *h;
1029 bfd_vma offset;
1030 struct elf_x86_link_hash_table *htab;
1031 bfd_byte *call;
1032 bfd_boolean indirect_call;
1033
1034 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1035 offset = rel->r_offset;
1036 switch (r_type)
1037 {
1038 case R_X86_64_TLSGD:
1039 case R_X86_64_TLSLD:
1040 if ((rel + 1) >= relend)
1041 return FALSE;
1042
1043 if (r_type == R_X86_64_TLSGD)
1044 {
1045 /* Check transition from GD access model. For 64bit, only
1046 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1047 .word 0x6666; rex64; call __tls_get_addr@PLT
1048 or
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .byte 0x66; rex64
1051 call *__tls_get_addr@GOTPCREL(%rip)
1052 which may be converted to
1053 addr32 call __tls_get_addr
1054 can transition to a different access model. For 32bit, only
1055 leaq foo@tlsgd(%rip), %rdi
1056 .word 0x6666; rex64; call __tls_get_addr@PLT
1057 or
1058 leaq foo@tlsgd(%rip), %rdi
1059 .byte 0x66; rex64
1060 call *__tls_get_addr@GOTPCREL(%rip)
1061 which may be converted to
1062 addr32 call __tls_get_addr
1063 can transition to a different access model. For largepic,
1064 we also support:
1065 leaq foo@tlsgd(%rip), %rdi
1066 movabsq $__tls_get_addr@pltoff, %rax
1067 addq %r15, %rax
1068 call *%rax
1069 or
1070 leaq foo@tlsgd(%rip), %rdi
1071 movabsq $__tls_get_addr@pltoff, %rax
1072 addq %rbx, %rax
1073 call *%rax */
1074
1075 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1076
1077 if ((offset + 12) > sec->size)
1078 return FALSE;
1079
1080 call = contents + offset + 4;
1081 if (call[0] != 0x66
1082 || !((call[1] == 0x48
1083 && call[2] == 0xff
1084 && call[3] == 0x15)
1085 || (call[1] == 0x48
1086 && call[2] == 0x67
1087 && call[3] == 0xe8)
1088 || (call[1] == 0x66
1089 && call[2] == 0x48
1090 && call[3] == 0xe8)))
1091 {
1092 if (!ABI_64_P (abfd)
1093 || (offset + 19) > sec->size
1094 || offset < 3
1095 || memcmp (call - 7, leaq + 1, 3) != 0
1096 || memcmp (call, "\x48\xb8", 2) != 0
1097 || call[11] != 0x01
1098 || call[13] != 0xff
1099 || call[14] != 0xd0
1100 || !((call[10] == 0x48 && call[12] == 0xd8)
1101 || (call[10] == 0x4c && call[12] == 0xf8)))
1102 return FALSE;
1103 largepic = TRUE;
1104 }
1105 else if (ABI_64_P (abfd))
1106 {
1107 if (offset < 4
1108 || memcmp (contents + offset - 4, leaq, 4) != 0)
1109 return FALSE;
1110 }
1111 else
1112 {
1113 if (offset < 3
1114 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1115 return FALSE;
1116 }
1117 indirect_call = call[2] == 0xff;
1118 }
1119 else
1120 {
1121 /* Check transition from LD access model. Only
1122 leaq foo@tlsld(%rip), %rdi;
1123 call __tls_get_addr@PLT
1124 or
1125 leaq foo@tlsld(%rip), %rdi;
1126 call *__tls_get_addr@GOTPCREL(%rip)
1127 which may be converted to
1128 addr32 call __tls_get_addr
1129 can transition to a different access model. For largepic
1130 we also support:
1131 leaq foo@tlsld(%rip), %rdi
1132 movabsq $__tls_get_addr@pltoff, %rax
1133 addq %r15, %rax
1134 call *%rax
1135 or
1136 leaq foo@tlsld(%rip), %rdi
1137 movabsq $__tls_get_addr@pltoff, %rax
1138 addq %rbx, %rax
1139 call *%rax */
1140
1141 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1142
1143 if (offset < 3 || (offset + 9) > sec->size)
1144 return FALSE;
1145
1146 if (memcmp (contents + offset - 3, lea, 3) != 0)
1147 return FALSE;
1148
1149 call = contents + offset + 4;
1150 if (!(call[0] == 0xe8
1151 || (call[0] == 0xff && call[1] == 0x15)
1152 || (call[0] == 0x67 && call[1] == 0xe8)))
1153 {
1154 if (!ABI_64_P (abfd)
1155 || (offset + 19) > sec->size
1156 || memcmp (call, "\x48\xb8", 2) != 0
1157 || call[11] != 0x01
1158 || call[13] != 0xff
1159 || call[14] != 0xd0
1160 || !((call[10] == 0x48 && call[12] == 0xd8)
1161 || (call[10] == 0x4c && call[12] == 0xf8)))
1162 return FALSE;
1163 largepic = TRUE;
1164 }
1165 indirect_call = call[0] == 0xff;
1166 }
1167
1168 r_symndx = htab->r_sym (rel[1].r_info);
1169 if (r_symndx < symtab_hdr->sh_info)
1170 return FALSE;
1171
1172 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1173 if (h == NULL
1174 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1175 return FALSE;
1176 else
1177 {
1178 r_type = (ELF32_R_TYPE (rel[1].r_info)
1179 & ~R_X86_64_converted_reloc_bit);
1180 if (largepic)
1181 return r_type == R_X86_64_PLTOFF64;
1182 else if (indirect_call)
1183 return r_type == R_X86_64_GOTPCRELX;
1184 else
1185 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1186 }
1187
1188 case R_X86_64_GOTTPOFF:
1189 /* Check transition from IE access model:
1190 mov foo@gottpoff(%rip), %reg
1191 add foo@gottpoff(%rip), %reg
1192 */
1193
1194 /* Check REX prefix first. */
1195 if (offset >= 3 && (offset + 4) <= sec->size)
1196 {
1197 val = bfd_get_8 (abfd, contents + offset - 3);
1198 if (val != 0x48 && val != 0x4c)
1199 {
1200 /* X32 may have 0x44 REX prefix or no REX prefix. */
1201 if (ABI_64_P (abfd))
1202 return FALSE;
1203 }
1204 }
1205 else
1206 {
1207 /* X32 may not have any REX prefix. */
1208 if (ABI_64_P (abfd))
1209 return FALSE;
1210 if (offset < 2 || (offset + 3) > sec->size)
1211 return FALSE;
1212 }
1213
1214 val = bfd_get_8 (abfd, contents + offset - 2);
1215 if (val != 0x8b && val != 0x03)
1216 return FALSE;
1217
1218 val = bfd_get_8 (abfd, contents + offset - 1);
1219 return (val & 0xc7) == 5;
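/* A byte-level sketch of what is checked above, assuming %rax as the
   destination register:
     movq foo@gottpoff(%rip), %rax  ->  48 8b 05 <disp32>
     addq foo@gottpoff(%rip), %rax  ->  48 03 05 <disp32>
   OFFSET points at the 32-bit displacement, so offset - 2 is the
   mov/add opcode and offset - 1 is the ModRM byte, where mod == 00 and
   r/m == 101 ((val & 0xc7) == 5) select RIP-relative addressing.  */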
1220
1221 case R_X86_64_GOTPC32_TLSDESC:
1222 /* Check transition from GDesc access model:
1223 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1224 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1225
1226 Make sure it's a leaq adding rip to a 32-bit offset
1227 into any register, although it's probably almost always
1228 going to be rax. */
1229
1230 if (offset < 3 || (offset + 4) > sec->size)
1231 return FALSE;
1232
1233 val = bfd_get_8 (abfd, contents + offset - 3);
1234 val &= 0xfb;
1235 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1236 return FALSE;
1237
1238 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1239 return FALSE;
1240
1241 val = bfd_get_8 (abfd, contents + offset - 1);
1242 return (val & 0xc7) == 0x05;
1243
1244 case R_X86_64_TLSDESC_CALL:
1245 /* Check transition from GDesc access model:
1246 call *x@tlsdesc(%rax) <--- LP64 mode.
1247 call *x@tlsdesc(%eax) <--- X32 mode.
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 unsigned int prefix;
1252 call = contents + offset;
1253 prefix = 0;
1254 if (!ABI_64_P (abfd))
1255 {
1256 /* Check for call *x@tlsdesc(%eax). */
1257 if (call[0] == 0x67)
1258 {
1259 prefix = 1;
1260 if (offset + 3 > sec->size)
1261 return FALSE;
1262 }
1263 }
1264 /* Make sure that it's a call *x@tlsdesc(%rax). */
1265 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1266 }
1267
1268 return FALSE;
1269
1270 default:
1271 abort ();
1272 }
1273 }
1274
1275 /* Return TRUE if the TLS access transition is OK or no transition
1276 will be performed. Update R_TYPE if there is a transition. */
1277
1278 static bfd_boolean
1279 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1280 asection *sec, bfd_byte *contents,
1281 Elf_Internal_Shdr *symtab_hdr,
1282 struct elf_link_hash_entry **sym_hashes,
1283 unsigned int *r_type, int tls_type,
1284 const Elf_Internal_Rela *rel,
1285 const Elf_Internal_Rela *relend,
1286 struct elf_link_hash_entry *h,
1287 unsigned long r_symndx,
1288 bfd_boolean from_relocate_section)
1289 {
1290 unsigned int from_type = *r_type;
1291 unsigned int to_type = from_type;
1292 bfd_boolean check = TRUE;
1293
1294 /* Skip TLS transition for functions. */
1295 if (h != NULL
1296 && (h->type == STT_FUNC
1297 || h->type == STT_GNU_IFUNC))
1298 return TRUE;
1299
1300 switch (from_type)
1301 {
1302 case R_X86_64_TLSGD:
1303 case R_X86_64_GOTPC32_TLSDESC:
1304 case R_X86_64_TLSDESC_CALL:
1305 case R_X86_64_GOTTPOFF:
1306 if (bfd_link_executable (info))
1307 {
1308 if (h == NULL)
1309 to_type = R_X86_64_TPOFF32;
1310 else
1311 to_type = R_X86_64_GOTTPOFF;
1312 }
1313
1314 /* When we are called from elf_x86_64_relocate_section, there may
1315 be additional transitions based on TLS_TYPE. */
1316 if (from_relocate_section)
1317 {
1318 unsigned int new_to_type = to_type;
1319
1320 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1321 new_to_type = R_X86_64_TPOFF32;
1322
1323 if (to_type == R_X86_64_TLSGD
1324 || to_type == R_X86_64_GOTPC32_TLSDESC
1325 || to_type == R_X86_64_TLSDESC_CALL)
1326 {
1327 if (tls_type == GOT_TLS_IE)
1328 new_to_type = R_X86_64_GOTTPOFF;
1329 }
1330
1331 /* We checked the transition before when we were called from
1332 elf_x86_64_check_relocs. We only want to check the new
1333 transition which hasn't been checked before. */
1334 check = new_to_type != to_type && from_type == to_type;
1335 to_type = new_to_type;
1336 }
1337
1338 break;
1339
1340 case R_X86_64_TLSLD:
1341 if (bfd_link_executable (info))
1342 to_type = R_X86_64_TPOFF32;
1343 break;
1344
1345 default:
1346 return TRUE;
1347 }
1348
1349 /* Return TRUE if there is no transition. */
1350 if (from_type == to_type)
1351 return TRUE;
1352
1353 /* Check if the transition can be performed. */
1354 if (check
1355 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1356 symtab_hdr, sym_hashes,
1357 from_type, rel, relend))
1358 {
1359 reloc_howto_type *from, *to;
1360 const char *name;
1361
1362 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1363 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1364
1365 if (from == NULL || to == NULL)
1366 return FALSE;
1367
1368 if (h)
1369 name = h->root.root.string;
1370 else
1371 {
1372 struct elf_x86_link_hash_table *htab;
1373
1374 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1375 if (htab == NULL)
1376 name = "*unknown*";
1377 else
1378 {
1379 Elf_Internal_Sym *isym;
1380
1381 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1382 abfd, r_symndx);
1383 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1384 }
1385 }
1386
1387 _bfd_error_handler
1388 /* xgettext:c-format */
1389 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1390 " in section `%pA' failed"),
1391 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1392 bfd_set_error (bfd_error_bad_value);
1393 return FALSE;
1394 }
1395
1396 *r_type = to_type;
1397 return TRUE;
1398 }
1399
1400 /* Rename some of the generic section flags to better document how they
1401 are used here. */
1402 #define check_relocs_failed sec_flg0
1403
1404 static bfd_boolean
1405 elf_x86_64_need_pic (struct bfd_link_info *info,
1406 bfd *input_bfd, asection *sec,
1407 struct elf_link_hash_entry *h,
1408 Elf_Internal_Shdr *symtab_hdr,
1409 Elf_Internal_Sym *isym,
1410 reloc_howto_type *howto)
1411 {
1412 const char *v = "";
1413 const char *und = "";
1414 const char *pic = "";
1415 const char *object;
1416
1417 const char *name;
1418 if (h)
1419 {
1420 name = h->root.root.string;
1421 switch (ELF_ST_VISIBILITY (h->other))
1422 {
1423 case STV_HIDDEN:
1424 v = _("hidden symbol ");
1425 break;
1426 case STV_INTERNAL:
1427 v = _("internal symbol ");
1428 break;
1429 case STV_PROTECTED:
1430 v = _("protected symbol ");
1431 break;
1432 default:
1433 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1434 v = _("protected symbol ");
1435 else
1436 v = _("symbol ");
1437 pic = NULL;
1438 break;
1439 }
1440
1441 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1442 und = _("undefined ");
1443 }
1444 else
1445 {
1446 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1447 pic = NULL;
1448 }
1449
1450 if (bfd_link_dll (info))
1451 {
1452 object = _("a shared object");
1453 if (!pic)
1454 pic = _("; recompile with -fPIC");
1455 }
1456 else
1457 {
1458 if (bfd_link_pie (info))
1459 object = _("a PIE object");
1460 else
1461 object = _("a PDE object");
1462 if (!pic)
1463 pic = _("; recompile with -fPIE");
1464 }
1465
1466 /* xgettext:c-format */
1467 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1468 "not be used when making %s%s"),
1469 input_bfd, howto->name, und, v, name,
1470 object, pic);
1471 bfd_set_error (bfd_error_bad_value);
1472 sec->check_relocs_failed = 1;
1473 return FALSE;
1474 }
1475
1476 /* With the local symbol, foo, we convert
1477 mov foo@GOTPCREL(%rip), %reg
1478 to
1479 lea foo(%rip), %reg
1480 and convert
1481 call/jmp *foo@GOTPCREL(%rip)
1482 to
1483 nop call foo/jmp foo nop
1484 When PIC is false, convert
1485 test %reg, foo@GOTPCREL(%rip)
1486 to
1487 test $foo, %reg
1488 and convert
1489 binop foo@GOTPCREL(%rip), %reg
1490 to
1491 binop $foo, %reg
1492 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1493 instructions. */
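/* A byte-level sketch of the first conversion, assuming %rax as the
   destination register:
     mov foo@GOTPCREL(%rip), %rax   48 8b 05 <disp32>   (R_X86_64_REX_GOTPCRELX)
     lea foo(%rip), %rax            48 8d 05 <disp32>   (R_X86_64_PC32)
   Only the opcode byte (0x8b -> 0x8d) and the relocation type change;
   the ModRM byte and the displacement stay where they are.  */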
1494
1495 static bfd_boolean
1496 elf_x86_64_convert_load_reloc (bfd *abfd,
1497 bfd_byte *contents,
1498 unsigned int *r_type_p,
1499 Elf_Internal_Rela *irel,
1500 struct elf_link_hash_entry *h,
1501 bfd_boolean *converted,
1502 struct bfd_link_info *link_info)
1503 {
1504 struct elf_x86_link_hash_table *htab;
1505 bfd_boolean is_pic;
1506 bfd_boolean no_overflow;
1507 bfd_boolean relocx;
1508 bfd_boolean to_reloc_pc32;
1509 bfd_boolean abs_symbol;
1510 bfd_boolean local_ref;
1511 asection *tsec;
1512 bfd_signed_vma raddend;
1513 unsigned int opcode;
1514 unsigned int modrm;
1515 unsigned int r_type = *r_type_p;
1516 unsigned int r_symndx;
1517 bfd_vma roff = irel->r_offset;
1518 bfd_vma abs_relocation;
1519
1520 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1521 return TRUE;
1522
1523 raddend = irel->r_addend;
1524 /* Addend for 32-bit PC-relative relocation must be -4. */
1525 if (raddend != -4)
1526 return TRUE;
1527
1528 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1529 is_pic = bfd_link_pic (link_info);
1530
1531 relocx = (r_type == R_X86_64_GOTPCRELX
1532 || r_type == R_X86_64_REX_GOTPCRELX);
1533
1534 /* TRUE if --no-relax is used. */
1535 no_overflow = link_info->disable_target_specific_optimizations > 1;
1536
1537 r_symndx = htab->r_sym (irel->r_info);
1538
1539 opcode = bfd_get_8 (abfd, contents + roff - 2);
1540
1541 /* Convert mov to lea even for plain R_X86_64_GOTPCREL; this conversion has been supported for a while. */
1542 if (opcode != 0x8b)
1543 {
1544 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1545 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1546 test, xor instructions. */
1547 if (!relocx)
1548 return TRUE;
1549 }
1550
1551 /* We convert only to R_X86_64_PC32:
1552 1. Branch.
1553 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1554 3. no_overflow is true.
1555 4. PIC.
1556 */
1557 to_reloc_pc32 = (opcode == 0xff
1558 || !relocx
1559 || no_overflow
1560 || is_pic);
1561
1562 abs_symbol = FALSE;
1563 abs_relocation = 0;
1564
1565 /* Get the symbol referred to by the reloc. */
1566 if (h == NULL)
1567 {
1568 Elf_Internal_Sym *isym
1569 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1570
1571 /* Skip relocation against undefined symbols. */
1572 if (isym->st_shndx == SHN_UNDEF)
1573 return TRUE;
1574
1575 local_ref = TRUE;
1576 if (isym->st_shndx == SHN_ABS)
1577 {
1578 tsec = bfd_abs_section_ptr;
1579 abs_symbol = TRUE;
1580 abs_relocation = isym->st_value;
1581 }
1582 else if (isym->st_shndx == SHN_COMMON)
1583 tsec = bfd_com_section_ptr;
1584 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1585 tsec = &_bfd_elf_large_com_section;
1586 else
1587 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1588 }
1589 else
1590 {
1591 /* An undefined weak symbol is only bound locally in an executable
1592 and its reference is resolved as 0 without relocation
1593 overflow. We can only perform this optimization for
1594 GOTPCRELX relocations since we need to modify the REX byte.
1595 It is OK to convert mov with R_X86_64_GOTPCREL to
1596 R_X86_64_PC32. */
1597 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1598
1599 abs_symbol = ABS_SYMBOL_P (h);
1600 abs_relocation = h->root.u.def.value;
1601
1602 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1603 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1604 if ((relocx || opcode == 0x8b)
1605 && (h->root.type == bfd_link_hash_undefweak
1606 && !eh->linker_def
1607 && local_ref))
1608 {
1609 if (opcode == 0xff)
1610 {
1611 /* Skip for branch instructions since R_X86_64_PC32
1612 may overflow. */
1613 if (no_overflow)
1614 return TRUE;
1615 }
1616 else if (relocx)
1617 {
1618 /* For non-branch instructions, we can convert to
1619 R_X86_64_32/R_X86_64_32S since we know if there
1620 is a REX byte. */
1621 to_reloc_pc32 = FALSE;
1622 }
1623
1624 /* Since we don't know the current PC when PIC is true,
1625 we can't convert to R_X86_64_PC32. */
1626 if (to_reloc_pc32 && is_pic)
1627 return TRUE;
1628
1629 goto convert;
1630 }
1631 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1632 ld.so may use its link-time address. */
1633 else if (h->start_stop
1634 || eh->linker_def
1635 || ((h->def_regular
1636 || h->root.type == bfd_link_hash_defined
1637 || h->root.type == bfd_link_hash_defweak)
1638 && h != htab->elf.hdynamic
1639 && local_ref))
1640 {
1641 /* bfd_link_hash_new or bfd_link_hash_undefined is
1642 set by an assignment in a linker script in
1643 bfd_elf_record_link_assignment. start_stop is set
1644 on __start_SECNAME/__stop_SECNAME which mark section
1645 SECNAME. */
1646 if (h->start_stop
1647 || eh->linker_def
1648 || (h->def_regular
1649 && (h->root.type == bfd_link_hash_new
1650 || h->root.type == bfd_link_hash_undefined
1651 || ((h->root.type == bfd_link_hash_defined
1652 || h->root.type == bfd_link_hash_defweak)
1653 && h->root.u.def.section == bfd_und_section_ptr))))
1654 {
1655 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1656 if (no_overflow)
1657 return TRUE;
1658 goto convert;
1659 }
1660 tsec = h->root.u.def.section;
1661 }
1662 else
1663 return TRUE;
1664 }
1665
1666 /* Don't convert GOTPCREL relocation against large section. */
1667 if (elf_section_data (tsec) != NULL
1668 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1669 return TRUE;
1670
1671 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1672 if (no_overflow)
1673 return TRUE;
1674
1675 convert:
1676 if (opcode == 0xff)
1677 {
1678 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1679 unsigned int nop;
1680 unsigned int disp;
1681 bfd_vma nop_offset;
1682
1683 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1684 R_X86_64_PC32. */
1685 modrm = bfd_get_8 (abfd, contents + roff - 1);
1686 if (modrm == 0x25)
1687 {
1688 /* Convert to "jmp foo nop". */
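	      /* A sketch of the byte-level rewrite done below (added for
		 illustration): the 6-byte sequence
		   ff 25 <disp32>    jmp *foo@GOTPCREL(%rip)
		 becomes
		   e9 <rel32> <nop>  jmp foo; nop
		 i.e. the 32-bit field moves back one byte, 0xe9 lands
		 where the ModRM byte was, and the freed trailing byte
		 is filled with NOP_OPCODE.  */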
1689 modrm = 0xe9;
1690 nop = NOP_OPCODE;
1691 nop_offset = irel->r_offset + 3;
1692 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1693 irel->r_offset -= 1;
1694 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1695 }
1696 else
1697 {
1698 struct elf_x86_link_hash_entry *eh
1699 = (struct elf_x86_link_hash_entry *) h;
1700
1701 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1702 is a nop prefix. */
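		     /* A sketch of the rewrite done below (added for
			illustration): the 6-byte sequence
			  ff 15 <disp32>   call *foo@GOTPCREL(%rip)
			becomes either
			  <nop> e8 <rel32>   (prefix form; the prefix is
					      0x67 addr32 for calls to
					      __tls_get_addr)
			or
			  e8 <rel32> <nop>   (suffix form)
			depending on call_nop_as_suffix.  */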
1703 modrm = 0xe8;
1704 /* To support TLS optimization, always use addr32 prefix for
1705 "call *__tls_get_addr@GOTPCREL(%rip)". */
1706 if (eh && eh->tls_get_addr)
1707 {
1708 nop = 0x67;
1709 nop_offset = irel->r_offset - 2;
1710 }
1711 else
1712 {
1713 nop = htab->params->call_nop_byte;
1714 if (htab->params->call_nop_as_suffix)
1715 {
1716 nop_offset = irel->r_offset + 3;
1717 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1718 irel->r_offset -= 1;
1719 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1720 }
1721 else
1722 nop_offset = irel->r_offset - 2;
1723 }
1724 }
1725 bfd_put_8 (abfd, nop, contents + nop_offset);
1726 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1727 r_type = R_X86_64_PC32;
1728 }
1729 else
1730 {
1731 unsigned int rex;
1732 unsigned int rex_mask = REX_R;
1733
1734 if (r_type == R_X86_64_REX_GOTPCRELX)
1735 rex = bfd_get_8 (abfd, contents + roff - 3);
1736 else
1737 rex = 0;
1738
1739 if (opcode == 0x8b)
1740 {
1741 if (abs_symbol && local_ref)
1742 to_reloc_pc32 = FALSE;
1743
1744 if (to_reloc_pc32)
1745 {
1746 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1747 "lea foo(%rip), %reg". */
1748 opcode = 0x8d;
1749 r_type = R_X86_64_PC32;
1750 }
1751 else
1752 {
1753 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1754 "mov $foo, %reg". */
1755 opcode = 0xc7;
1756 modrm = bfd_get_8 (abfd, contents + roff - 1);
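		  /* The original ModRM has mod=00, r/m=101 (RIP-relative)
		     with the destination register in the reg field; the
		     next line builds a register-direct ModRM (mod=11) by
		     moving that register into the r/m field, leaving
		     reg=0 as the /0 opcode extension required by the
		     0xc7 mov-immediate opcode.  */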
1757 modrm = 0xc0 | (modrm & 0x38) >> 3;
1758 if ((rex & REX_W) != 0
1759 && ABI_64_P (link_info->output_bfd))
1760 {
1761 /* Keep the REX_W bit in REX byte for LP64. */
1762 r_type = R_X86_64_32S;
1763 goto rewrite_modrm_rex;
1764 }
1765 else
1766 {
1767 /* If the REX_W bit in REX byte isn't needed,
1768 use R_X86_64_32 and clear the W bit to avoid
1769 sign-extending imm32 to imm64. */
1770 r_type = R_X86_64_32;
1771 /* Clear the W bit in REX byte. */
1772 rex_mask |= REX_W;
1773 goto rewrite_modrm_rex;
1774 }
1775 }
1776 }
1777 else
1778 {
1779 /* R_X86_64_PC32 isn't supported. */
1780 if (to_reloc_pc32)
1781 return TRUE;
1782
1783 modrm = bfd_get_8 (abfd, contents + roff - 1);
1784 if (opcode == 0x85)
1785 {
1786 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1787 "test $foo, %reg". */
1788 modrm = 0xc0 | (modrm & 0x38) >> 3;
1789 opcode = 0xf7;
1790 }
1791 else
1792 {
1793 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1794 "binop $foo, %reg". */
1795 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1796 opcode = 0x81;
1797 }
1798
1799 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1800 overflow when sign-extending imm32 to imm64. */
1801 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1802
1803 rewrite_modrm_rex:
1804 if (abs_relocation)
1805 {
1806 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
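	      /* For illustration: an R_X86_64_32S value must fit in a
		 sign-extended 32-bit field, [-0x80000000, 0x7fffffff],
		 which the biased compare below tests; an R_X86_64_32
		 value must fit in an unsigned 32-bit field, i.e. be at
		 most 0xffffffff.  */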
1807 if (r_type == R_X86_64_32S)
1808 {
1809 if ((abs_relocation + 0x80000000) > 0xffffffff)
1810 return TRUE;
1811 }
1812 else
1813 {
1814 if (abs_relocation > 0xffffffff)
1815 return TRUE;
1816 }
1817 }
1818
1819 bfd_put_8 (abfd, modrm, contents + roff - 1);
1820
1821 if (rex)
1822 {
1823 /* Move the R bit to the B bit in REX byte. */
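	      /* The RIP-relative memory operand encoded in ModRM.r/m is
		 being replaced by the destination register, so the
		 register extension bit moves from REX.R (which extends
		 ModRM.reg) to REX.B (which extends ModRM.r/m); rex_mask
		 may also clear REX.W when switching to R_X86_64_32.  */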
1824 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1825 bfd_put_8 (abfd, rex, contents + roff - 3);
1826 }
1827
1828 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1829 irel->r_addend = 0;
1830 }
1831
1832 bfd_put_8 (abfd, opcode, contents + roff - 2);
1833 }
1834
1835 *r_type_p = r_type;
1836 irel->r_info = htab->r_info (r_symndx,
1837 r_type | R_X86_64_converted_reloc_bit);
1838
1839 *converted = TRUE;
1840
1841 return TRUE;
1842 }
1843
1844 /* Look through the relocs for a section during the first phase, and
1845 calculate needed space in the global offset table, procedure
1846 linkage table, and dynamic reloc sections. */
1847
1848 static bfd_boolean
1849 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1850 asection *sec,
1851 const Elf_Internal_Rela *relocs)
1852 {
1853 struct elf_x86_link_hash_table *htab;
1854 Elf_Internal_Shdr *symtab_hdr;
1855 struct elf_link_hash_entry **sym_hashes;
1856 const Elf_Internal_Rela *rel;
1857 const Elf_Internal_Rela *rel_end;
1858 asection *sreloc;
1859 bfd_byte *contents;
1860 bfd_boolean converted;
1861
1862 if (bfd_link_relocatable (info))
1863 return TRUE;
1864
1865 /* Don't do anything special with non-loaded, non-alloced sections.
1866 In particular, any relocs in such sections should not affect GOT
1867 and PLT reference counting (ie. we don't allow them to create GOT
1868 or PLT entries), there's no possibility or desire to optimize TLS
1869 relocs, and there's not much point in propagating relocs to shared
1870 libs that the dynamic linker won't relocate. */
1871 if ((sec->flags & SEC_ALLOC) == 0)
1872 return TRUE;
1873
1874 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1875 if (htab == NULL)
1876 {
1877 sec->check_relocs_failed = 1;
1878 return FALSE;
1879 }
1880
1881 BFD_ASSERT (is_x86_elf (abfd, htab));
1882
1883 /* Get the section contents. */
1884 if (elf_section_data (sec)->this_hdr.contents != NULL)
1885 contents = elf_section_data (sec)->this_hdr.contents;
1886 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1887 {
1888 sec->check_relocs_failed = 1;
1889 return FALSE;
1890 }
1891
1892 symtab_hdr = &elf_symtab_hdr (abfd);
1893 sym_hashes = elf_sym_hashes (abfd);
1894
1895 converted = FALSE;
1896
1897 sreloc = NULL;
1898
1899 rel_end = relocs + sec->reloc_count;
1900 for (rel = relocs; rel < rel_end; rel++)
1901 {
1902 unsigned int r_type;
1903 unsigned int r_symndx;
1904 struct elf_link_hash_entry *h;
1905 struct elf_x86_link_hash_entry *eh;
1906 Elf_Internal_Sym *isym;
1907 const char *name;
1908 bfd_boolean size_reloc;
1909 bfd_boolean converted_reloc;
1910 bfd_boolean no_dynreloc;
1911
1912 r_symndx = htab->r_sym (rel->r_info);
1913 r_type = ELF32_R_TYPE (rel->r_info);
1914
1915 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1916 {
1917 /* xgettext:c-format */
1918 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1919 abfd, r_symndx);
1920 goto error_return;
1921 }
1922
1923 if (r_symndx < symtab_hdr->sh_info)
1924 {
1925 /* A local symbol. */
1926 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1927 abfd, r_symndx);
1928 if (isym == NULL)
1929 goto error_return;
1930
1931 /* Check relocation against local STT_GNU_IFUNC symbol. */
1932 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1933 {
1934 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1935 TRUE);
1936 if (h == NULL)
1937 goto error_return;
1938
1939 /* Fake a STT_GNU_IFUNC symbol. */
1940 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1941 isym, NULL);
1942 h->type = STT_GNU_IFUNC;
1943 h->def_regular = 1;
1944 h->ref_regular = 1;
1945 h->forced_local = 1;
1946 h->root.type = bfd_link_hash_defined;
1947 }
1948 else
1949 h = NULL;
1950 }
1951 else
1952 {
1953 isym = NULL;
1954 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1955 while (h->root.type == bfd_link_hash_indirect
1956 || h->root.type == bfd_link_hash_warning)
1957 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1958 }
1959
1960 /* Check invalid x32 relocations. */
1961 if (!ABI_64_P (abfd))
1962 switch (r_type)
1963 {
1964 default:
1965 break;
1966
1967 case R_X86_64_DTPOFF64:
1968 case R_X86_64_TPOFF64:
1969 case R_X86_64_PC64:
1970 case R_X86_64_GOTOFF64:
1971 case R_X86_64_GOT64:
1972 case R_X86_64_GOTPCREL64:
1973 case R_X86_64_GOTPC64:
1974 case R_X86_64_GOTPLT64:
1975 case R_X86_64_PLTOFF64:
1976 {
1977 if (h)
1978 name = h->root.root.string;
1979 else
1980 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1981 NULL);
1982 _bfd_error_handler
1983 /* xgettext:c-format */
1984 (_("%pB: relocation %s against symbol `%s' isn't "
1985 "supported in x32 mode"), abfd,
1986 x86_64_elf_howto_table[r_type].name, name);
1987 bfd_set_error (bfd_error_bad_value);
1988 goto error_return;
1989 }
1990 break;
1991 }
1992
1993 if (h != NULL)
1994 {
1995 /* It is referenced by a non-shared object. */
1996 h->ref_regular = 1;
1997 }
1998
1999 converted_reloc = FALSE;
2000 if ((r_type == R_X86_64_GOTPCREL
2001 || r_type == R_X86_64_GOTPCRELX
2002 || r_type == R_X86_64_REX_GOTPCRELX)
2003 && (h == NULL || h->type != STT_GNU_IFUNC))
2004 {
2005 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
2006 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
2007 irel, h, &converted_reloc,
2008 info))
2009 goto error_return;
2010
2011 if (converted_reloc)
2012 converted = TRUE;
2013 }
2014
2015 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2016 symtab_hdr, &no_dynreloc))
2017 return FALSE;
2018
2019 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2020 symtab_hdr, sym_hashes,
2021 &r_type, GOT_UNKNOWN,
2022 rel, rel_end, h, r_symndx, FALSE))
2023 goto error_return;
2024
2025 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2026 if (h == htab->elf.hgot)
2027 htab->got_referenced = TRUE;
2028
2029 eh = (struct elf_x86_link_hash_entry *) h;
2030 switch (r_type)
2031 {
2032 case R_X86_64_TLSLD:
2033 htab->tls_ld_or_ldm_got.refcount = 1;
2034 goto create_got;
2035
2036 case R_X86_64_TPOFF32:
2037 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2038 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2039 &x86_64_elf_howto_table[r_type]);
2040 if (eh != NULL)
2041 eh->zero_undefweak &= 0x2;
2042 break;
2043
2044 case R_X86_64_GOTTPOFF:
2045 if (!bfd_link_executable (info))
2046 info->flags |= DF_STATIC_TLS;
2047 /* Fall through */
2048
2049 case R_X86_64_GOT32:
2050 case R_X86_64_GOTPCREL:
2051 case R_X86_64_GOTPCRELX:
2052 case R_X86_64_REX_GOTPCRELX:
2053 case R_X86_64_TLSGD:
2054 case R_X86_64_GOT64:
2055 case R_X86_64_GOTPCREL64:
2056 case R_X86_64_GOTPLT64:
2057 case R_X86_64_GOTPC32_TLSDESC:
2058 case R_X86_64_TLSDESC_CALL:
2059 /* This symbol requires a global offset table entry. */
2060 {
2061 int tls_type, old_tls_type;
2062
2063 switch (r_type)
2064 {
2065 default:
2066 tls_type = GOT_NORMAL;
2067 if (h)
2068 {
2069 if (ABS_SYMBOL_P (h))
2070 tls_type = GOT_ABS;
2071 }
2072 else if (isym->st_shndx == SHN_ABS)
2073 tls_type = GOT_ABS;
2074 break;
2075 case R_X86_64_TLSGD:
2076 tls_type = GOT_TLS_GD;
2077 break;
2078 case R_X86_64_GOTTPOFF:
2079 tls_type = GOT_TLS_IE;
2080 break;
2081 case R_X86_64_GOTPC32_TLSDESC:
2082 case R_X86_64_TLSDESC_CALL:
2083 tls_type = GOT_TLS_GDESC;
2084 break;
2085 }
2086
2087 if (h != NULL)
2088 {
2089 h->got.refcount = 1;
2090 old_tls_type = eh->tls_type;
2091 }
2092 else
2093 {
2094 bfd_signed_vma *local_got_refcounts;
2095
2096 /* This is a global offset table entry for a local symbol. */
2097 local_got_refcounts = elf_local_got_refcounts (abfd);
2098 if (local_got_refcounts == NULL)
2099 {
2100 bfd_size_type size;
2101
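		  /* One allocation holds three parallel per-local-symbol
		     arrays: GOT reference counts (bfd_signed_vma),
		     TLSDESC GOT offsets (bfd_vma) and GOT TLS types
		     (char); the assignments below carve it up.  */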
2102 size = symtab_hdr->sh_info;
2103 size *= sizeof (bfd_signed_vma)
2104 + sizeof (bfd_vma) + sizeof (char);
2105 local_got_refcounts = ((bfd_signed_vma *)
2106 bfd_zalloc (abfd, size));
2107 if (local_got_refcounts == NULL)
2108 goto error_return;
2109 elf_local_got_refcounts (abfd) = local_got_refcounts;
2110 elf_x86_local_tlsdesc_gotent (abfd)
2111 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2112 elf_x86_local_got_tls_type (abfd)
2113 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2114 }
2115 local_got_refcounts[r_symndx] = 1;
2116 old_tls_type
2117 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2118 }
2119
2120 /* If a TLS symbol is accessed using IE at least once,
2121 there is no point in using a dynamic model for it. */
2122 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2123 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2124 || tls_type != GOT_TLS_IE))
2125 {
2126 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2127 tls_type = old_tls_type;
2128 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2129 && GOT_TLS_GD_ANY_P (tls_type))
2130 tls_type |= old_tls_type;
2131 else
2132 {
2133 if (h)
2134 name = h->root.root.string;
2135 else
2136 name = bfd_elf_sym_name (abfd, symtab_hdr,
2137 isym, NULL);
2138 _bfd_error_handler
2139 /* xgettext:c-format */
2140 (_("%pB: '%s' accessed both as normal and"
2141 " thread local symbol"),
2142 abfd, name);
2143 bfd_set_error (bfd_error_bad_value);
2144 goto error_return;
2145 }
2146 }
2147
2148 if (old_tls_type != tls_type)
2149 {
2150 if (eh != NULL)
2151 eh->tls_type = tls_type;
2152 else
2153 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2154 }
2155 }
2156 /* Fall through */
2157
2158 case R_X86_64_GOTOFF64:
2159 case R_X86_64_GOTPC32:
2160 case R_X86_64_GOTPC64:
2161 create_got:
2162 if (eh != NULL)
2163 eh->zero_undefweak &= 0x2;
2164 break;
2165
2166 case R_X86_64_PLT32:
2167 case R_X86_64_PLT32_BND:
2168 /* This symbol requires a procedure linkage table entry. We
2169 actually build the entry in adjust_dynamic_symbol,
2170 because this might be a case of linking PIC code which is
2171 never referenced by a dynamic object, in which case we
2172 don't need to generate a procedure linkage table entry
2173 after all. */
2174
2175 /* If this is a local symbol, we resolve it directly without
2176 creating a procedure linkage table entry. */
2177 if (h == NULL)
2178 continue;
2179
2180 eh->zero_undefweak &= 0x2;
2181 h->needs_plt = 1;
2182 h->plt.refcount = 1;
2183 break;
2184
2185 case R_X86_64_PLTOFF64:
2186 /* This tries to form the 'address' of a function relative
2187 to GOT. For global symbols we need a PLT entry. */
2188 if (h != NULL)
2189 {
2190 h->needs_plt = 1;
2191 h->plt.refcount = 1;
2192 }
2193 goto create_got;
2194
2195 case R_X86_64_SIZE32:
2196 case R_X86_64_SIZE64:
2197 size_reloc = TRUE;
2198 goto do_size;
2199
2200 case R_X86_64_32:
2201 if (!ABI_64_P (abfd))
2202 goto pointer;
2203 /* Fall through. */
2204 case R_X86_64_8:
2205 case R_X86_64_16:
2206 case R_X86_64_32S:
2207 /* Check relocation overflow as these relocs may lead to
2208 run-time relocation overflow. Don't error out for
2209 sections we don't care about, such as debug sections or
2210 when relocation overflow check is disabled. */
2211 if (!htab->params->no_reloc_overflow_check
2212 && !converted_reloc
2213 && (bfd_link_pic (info)
2214 || (bfd_link_executable (info)
2215 && h != NULL
2216 && !h->def_regular
2217 && h->def_dynamic
2218 && (sec->flags & SEC_READONLY) == 0)))
2219 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2220 &x86_64_elf_howto_table[r_type]);
2221 /* Fall through. */
2222
2223 case R_X86_64_PC8:
2224 case R_X86_64_PC16:
2225 case R_X86_64_PC32:
2226 case R_X86_64_PC32_BND:
2227 case R_X86_64_PC64:
2228 case R_X86_64_64:
2229 pointer:
2230 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2231 eh->zero_undefweak |= 0x2;
2232 /* We are called after all symbols have been resolved. Only
2233 relocations against STT_GNU_IFUNC symbols must go through
2234 the PLT. */
2235 if (h != NULL
2236 && (bfd_link_executable (info)
2237 || h->type == STT_GNU_IFUNC))
2238 {
2239 bfd_boolean func_pointer_ref = FALSE;
2240
2241 if (r_type == R_X86_64_PC32)
2242 {
2243 /* Since something like ".long foo - ." may be used
2244 as a pointer, make sure that the PLT is used if foo is
2245 a function defined in a shared library. */
2246 if ((sec->flags & SEC_CODE) == 0)
2247 {
2248 h->pointer_equality_needed = 1;
2249 if (bfd_link_pie (info)
2250 && h->type == STT_FUNC
2251 && !h->def_regular
2252 && h->def_dynamic)
2253 {
2254 h->needs_plt = 1;
2255 h->plt.refcount = 1;
2256 }
2257 }
2258 }
2259 else if (r_type != R_X86_64_PC32_BND
2260 && r_type != R_X86_64_PC64)
2261 {
2262 h->pointer_equality_needed = 1;
2263 /* At run-time, R_X86_64_64 can be resolved for both
2264 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2265 can only be resolved for x32. */
2266 if ((sec->flags & SEC_READONLY) == 0
2267 && (r_type == R_X86_64_64
2268 || (!ABI_64_P (abfd)
2269 && (r_type == R_X86_64_32
2270 || r_type == R_X86_64_32S))))
2271 func_pointer_ref = TRUE;
2272 }
2273
2274 if (!func_pointer_ref)
2275 {
2276 /* If this reloc is in a read-only section, we might
2277 need a copy reloc. We can't check reliably at this
2278 stage whether the section is read-only, as input
2279 sections have not yet been mapped to output sections.
2280 Tentatively set the flag for now, and correct in
2281 adjust_dynamic_symbol. */
2282 h->non_got_ref = 1;
2283
2284 /* We may need a .plt entry if the symbol is a function
2285 defined in a shared lib or is a function referenced
2286 from the code or read-only section. */
2287 if (!h->def_regular
2288 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2289 h->plt.refcount = 1;
2290 }
2291 }
2292
2293 size_reloc = FALSE;
2294 do_size:
2295 if (!no_dynreloc
2296 && NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2297 htab->pointer_r_type))
2298 {
2299 struct elf_dyn_relocs *p;
2300 struct elf_dyn_relocs **head;
2301
2302 /* We must copy these reloc types into the output file.
2303 Create a reloc section in dynobj and make room for
2304 this reloc. */
2305 if (sreloc == NULL)
2306 {
2307 sreloc = _bfd_elf_make_dynamic_reloc_section
2308 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2309 abfd, /*rela?*/ TRUE);
2310
2311 if (sreloc == NULL)
2312 goto error_return;
2313 }
2314
2315 /* If this is a global symbol, we count the number of
2316 relocations we need for this symbol. */
2317 if (h != NULL)
2318 head = &h->dyn_relocs;
2319 else
2320 {
2321 /* Track dynamic relocs needed for local syms too.
2322 We really need local syms available to do this
2323 easily. Oh well. */
2324 asection *s;
2325 void **vpp;
2326
2327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2328 abfd, r_symndx);
2329 if (isym == NULL)
2330 goto error_return;
2331
2332 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2333 if (s == NULL)
2334 s = sec;
2335
2336 /* Beware of type punned pointers vs strict aliasing
2337 rules. */
2338 vpp = &(elf_section_data (s)->local_dynrel);
2339 head = (struct elf_dyn_relocs **)vpp;
2340 }
2341
2342 p = *head;
2343 if (p == NULL || p->sec != sec)
2344 {
2345 size_t amt = sizeof *p;
2346
2347 p = ((struct elf_dyn_relocs *)
2348 bfd_alloc (htab->elf.dynobj, amt));
2349 if (p == NULL)
2350 goto error_return;
2351 p->next = *head;
2352 *head = p;
2353 p->sec = sec;
2354 p->count = 0;
2355 p->pc_count = 0;
2356 }
2357
2358 p->count += 1;
2359 /* Count size relocation as PC-relative relocation. */
2360 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2361 p->pc_count += 1;
2362 }
2363 break;
2364
2365 /* This relocation describes the C++ object vtable hierarchy.
2366 Reconstruct it for later use during GC. */
2367 case R_X86_64_GNU_VTINHERIT:
2368 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2369 goto error_return;
2370 break;
2371
2372 /* This relocation describes which C++ vtable entries are actually
2373 used. Record for later use during GC. */
2374 case R_X86_64_GNU_VTENTRY:
2375 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2376 goto error_return;
2377 break;
2378
2379 default:
2380 break;
2381 }
2382 }
2383
2384 if (elf_section_data (sec)->this_hdr.contents != contents)
2385 {
2386 if (!converted && !info->keep_memory)
2387 free (contents);
2388 else
2389 {
2390 /* Cache the section contents for elf_link_input_bfd if any
2391 load is converted or --no-keep-memory isn't used. */
2392 elf_section_data (sec)->this_hdr.contents = contents;
2393 }
2394 }
2395
2396 /* Cache relocations if any load is converted. */
2397 if (elf_section_data (sec)->relocs != relocs && converted)
2398 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2399
2400 return TRUE;
2401
2402 error_return:
2403 if (elf_section_data (sec)->this_hdr.contents != contents)
2404 free (contents);
2405 sec->check_relocs_failed = 1;
2406 return FALSE;
2407 }
2408
2409 /* Return the relocation value for @tpoff relocation
2410 if STT_TLS virtual address is ADDRESS. */
2411
2412 static bfd_vma
2413 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2414 {
2415 struct elf_link_hash_table *htab = elf_hash_table (info);
2416 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2417 bfd_vma static_tls_size;
2418
2419 /* If tls_sec is NULL, we should have signalled an error already. */
2420 if (htab->tls_sec == NULL)
2421 return 0;
2422
2423 /* Consider special static TLS alignment requirements. */
2424 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
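  /* x86-64 uses TLS Variant II: the static TLS block ends at the
     thread pointer, so the @tpoff value returned below is the
     (negative) offset of ADDRESS from the end of that block.  */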
2425 return address - static_tls_size - htab->tls_sec->vma;
2426 }
2427
2428 /* Relocate an x86_64 ELF section. */
2429
2430 static bfd_boolean
2431 elf_x86_64_relocate_section (bfd *output_bfd,
2432 struct bfd_link_info *info,
2433 bfd *input_bfd,
2434 asection *input_section,
2435 bfd_byte *contents,
2436 Elf_Internal_Rela *relocs,
2437 Elf_Internal_Sym *local_syms,
2438 asection **local_sections)
2439 {
2440 struct elf_x86_link_hash_table *htab;
2441 Elf_Internal_Shdr *symtab_hdr;
2442 struct elf_link_hash_entry **sym_hashes;
2443 bfd_vma *local_got_offsets;
2444 bfd_vma *local_tlsdesc_gotents;
2445 Elf_Internal_Rela *rel;
2446 Elf_Internal_Rela *wrel;
2447 Elf_Internal_Rela *relend;
2448 unsigned int plt_entry_size;
2449
2450 /* Skip if check_relocs failed. */
2451 if (input_section->check_relocs_failed)
2452 return FALSE;
2453
2454 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2455 if (htab == NULL)
2456 return FALSE;
2457
2458 if (!is_x86_elf (input_bfd, htab))
2459 {
2460 bfd_set_error (bfd_error_wrong_format);
2461 return FALSE;
2462 }
2463
2464 plt_entry_size = htab->plt.plt_entry_size;
2465 symtab_hdr = &elf_symtab_hdr (input_bfd);
2466 sym_hashes = elf_sym_hashes (input_bfd);
2467 local_got_offsets = elf_local_got_offsets (input_bfd);
2468 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2469
2470 _bfd_x86_elf_set_tls_module_base (info);
2471
2472 rel = wrel = relocs;
2473 relend = relocs + input_section->reloc_count;
2474 for (; rel < relend; wrel++, rel++)
2475 {
2476 unsigned int r_type, r_type_tls;
2477 reloc_howto_type *howto;
2478 unsigned long r_symndx;
2479 struct elf_link_hash_entry *h;
2480 struct elf_x86_link_hash_entry *eh;
2481 Elf_Internal_Sym *sym;
2482 asection *sec;
2483 bfd_vma off, offplt, plt_offset;
2484 bfd_vma relocation;
2485 bfd_boolean unresolved_reloc;
2486 bfd_reloc_status_type r;
2487 int tls_type;
2488 asection *base_got, *resolved_plt;
2489 bfd_vma st_size;
2490 bfd_boolean resolved_to_zero;
2491 bfd_boolean relative_reloc;
2492 bfd_boolean converted_reloc;
2493 bfd_boolean need_copy_reloc_in_pie;
2494 bfd_boolean no_copyreloc_p;
2495
2496 r_type = ELF32_R_TYPE (rel->r_info);
2497 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2498 || r_type == (int) R_X86_64_GNU_VTENTRY)
2499 {
2500 if (wrel != rel)
2501 *wrel = *rel;
2502 continue;
2503 }
2504
2505 r_symndx = htab->r_sym (rel->r_info);
2506 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2507 if (converted_reloc)
2508 {
2509 r_type &= ~R_X86_64_converted_reloc_bit;
2510 rel->r_info = htab->r_info (r_symndx, r_type);
2511 }
2512
2513 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2514 if (howto == NULL)
2515 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2516
2517 h = NULL;
2518 sym = NULL;
2519 sec = NULL;
2520 unresolved_reloc = FALSE;
2521 if (r_symndx < symtab_hdr->sh_info)
2522 {
2523 sym = local_syms + r_symndx;
2524 sec = local_sections[r_symndx];
2525
2526 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2527 &sec, rel);
2528 st_size = sym->st_size;
2529
2530 /* Relocate against local STT_GNU_IFUNC symbol. */
2531 if (!bfd_link_relocatable (info)
2532 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2533 {
2534 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2535 rel, FALSE);
2536 if (h == NULL)
2537 abort ();
2538
2539 /* Set STT_GNU_IFUNC symbol value. */
2540 h->root.u.def.value = sym->st_value;
2541 h->root.u.def.section = sec;
2542 }
2543 }
2544 else
2545 {
2546 bfd_boolean warned ATTRIBUTE_UNUSED;
2547 bfd_boolean ignored ATTRIBUTE_UNUSED;
2548
2549 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2550 r_symndx, symtab_hdr, sym_hashes,
2551 h, sec, relocation,
2552 unresolved_reloc, warned, ignored);
2553 st_size = h->size;
2554 }
2555
2556 if (sec != NULL && discarded_section (sec))
2557 {
2558 _bfd_clear_contents (howto, input_bfd, input_section,
2559 contents, rel->r_offset);
2560 wrel->r_offset = rel->r_offset;
2561 wrel->r_info = 0;
2562 wrel->r_addend = 0;
2563
2564 /* For ld -r, remove relocations in debug sections against
2565 sections defined in discarded sections. This is not done
2566 for eh_frame, whose editing code expects them to be present. */
2567 if (bfd_link_relocatable (info)
2568 && (input_section->flags & SEC_DEBUGGING))
2569 wrel--;
2570
2571 continue;
2572 }
2573
2574 if (bfd_link_relocatable (info))
2575 {
2576 if (wrel != rel)
2577 *wrel = *rel;
2578 continue;
2579 }
2580
2581 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2582 {
2583 if (r_type == R_X86_64_64)
2584 {
2585 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2586 zero-extend it to 64bit if addend is zero. */
2587 r_type = R_X86_64_32;
2588 memset (contents + rel->r_offset + 4, 0, 4);
2589 }
2590 else if (r_type == R_X86_64_SIZE64)
2591 {
2592 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2593 zero-extend it to 64bit if addend is zero. */
2594 r_type = R_X86_64_SIZE32;
2595 memset (contents + rel->r_offset + 4, 0, 4);
2596 }
2597 }
2598
2599 eh = (struct elf_x86_link_hash_entry *) h;
2600
2601 /* Since STT_GNU_IFUNC symbols must go through the PLT, we handle
2602 them here if they are defined in a non-shared object. */
2603 if (h != NULL
2604 && h->type == STT_GNU_IFUNC
2605 && h->def_regular)
2606 {
2607 bfd_vma plt_index;
2608 const char *name;
2609
2610 if ((input_section->flags & SEC_ALLOC) == 0)
2611 {
2612 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2613 STT_GNU_IFUNC symbol as STT_FUNC. */
2614 if (elf_section_type (input_section) == SHT_NOTE)
2615 goto skip_ifunc;
2616 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2617 sections because such sections are not SEC_ALLOC and
2618 thus ld.so will not process them. */
2619 if ((input_section->flags & SEC_DEBUGGING) != 0)
2620 continue;
2621 abort ();
2622 }
2623
2624 switch (r_type)
2625 {
2626 default:
2627 break;
2628
2629 case R_X86_64_GOTPCREL:
2630 case R_X86_64_GOTPCRELX:
2631 case R_X86_64_REX_GOTPCRELX:
2632 case R_X86_64_GOTPCREL64:
2633 base_got = htab->elf.sgot;
2634 off = h->got.offset;
2635
2636 if (base_got == NULL)
2637 abort ();
2638
2639 if (off == (bfd_vma) -1)
2640 {
2641 /* We can't use h->got.offset here to save state, or
2642 even just remember the offset, as finish_dynamic_symbol
2643 would use that as offset into .got. */
2644
2645 if (h->plt.offset == (bfd_vma) -1)
2646 abort ();
2647
2648 if (htab->elf.splt != NULL)
2649 {
2650 plt_index = (h->plt.offset / plt_entry_size
2651 - htab->plt.has_plt0);
2652 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2653 base_got = htab->elf.sgotplt;
2654 }
2655 else
2656 {
2657 plt_index = h->plt.offset / plt_entry_size;
2658 off = plt_index * GOT_ENTRY_SIZE;
2659 base_got = htab->elf.igotplt;
2660 }
2661
2662 if (h->dynindx == -1
2663 || h->forced_local
2664 || info->symbolic)
2665 {
2666 /* This references the local definition. We must
2667 initialize this entry in the global offset table.
2668 Since the offset must always be a multiple of 8,
2669 we use the least significant bit to record
2670 whether we have initialized it already.
2671
2672 When doing a dynamic link, we create a .rela.got
2673 relocation entry to initialize the value. This
2674 is done in the finish_dynamic_symbol routine. */
2675 if ((off & 1) != 0)
2676 off &= ~1;
2677 else
2678 {
2679 bfd_put_64 (output_bfd, relocation,
2680 base_got->contents + off);
2681 /* Note that this is harmless for the GOTPLT64
2682 case, as -1 | 1 still is -1. */
2683 h->got.offset |= 1;
2684 }
2685 }
2686 }
2687
2688 relocation = (base_got->output_section->vma
2689 + base_got->output_offset + off);
2690
2691 goto do_relocation;
2692 }
2693
2694 if (h->plt.offset == (bfd_vma) -1)
2695 {
2696 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2697 if (r_type == htab->pointer_r_type
2698 && (input_section->flags & SEC_CODE) == 0)
2699 goto do_ifunc_pointer;
2700 goto bad_ifunc_reloc;
2701 }
2702
2703 /* STT_GNU_IFUNC symbol must go through PLT. */
2704 if (htab->elf.splt != NULL)
2705 {
2706 if (htab->plt_second != NULL)
2707 {
2708 resolved_plt = htab->plt_second;
2709 plt_offset = eh->plt_second.offset;
2710 }
2711 else
2712 {
2713 resolved_plt = htab->elf.splt;
2714 plt_offset = h->plt.offset;
2715 }
2716 }
2717 else
2718 {
2719 resolved_plt = htab->elf.iplt;
2720 plt_offset = h->plt.offset;
2721 }
2722
2723 relocation = (resolved_plt->output_section->vma
2724 + resolved_plt->output_offset + plt_offset);
2725
2726 switch (r_type)
2727 {
2728 default:
2729 bad_ifunc_reloc:
2730 if (h->root.root.string)
2731 name = h->root.root.string;
2732 else
2733 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2734 NULL);
2735 _bfd_error_handler
2736 /* xgettext:c-format */
2737 (_("%pB: relocation %s against STT_GNU_IFUNC "
2738 "symbol `%s' isn't supported"), input_bfd,
2739 howto->name, name);
2740 bfd_set_error (bfd_error_bad_value);
2741 return FALSE;
2742
2743 case R_X86_64_32S:
2744 if (bfd_link_pic (info))
2745 abort ();
2746 goto do_relocation;
2747
2748 case R_X86_64_32:
2749 if (ABI_64_P (output_bfd))
2750 goto do_relocation;
2751 /* FALLTHROUGH */
2752 case R_X86_64_64:
2753 do_ifunc_pointer:
2754 if (rel->r_addend != 0)
2755 {
2756 if (h->root.root.string)
2757 name = h->root.root.string;
2758 else
2759 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2760 sym, NULL);
2761 _bfd_error_handler
2762 /* xgettext:c-format */
2763 (_("%pB: relocation %s against STT_GNU_IFUNC "
2764 "symbol `%s' has non-zero addend: %" PRId64),
2765 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2766 bfd_set_error (bfd_error_bad_value);
2767 return FALSE;
2768 }
2769
2770 /* Generate a dynamic relocation only when there is a
2771 non-GOT reference in a shared object or there is no
2772 PLT. */
2773 if ((bfd_link_pic (info) && h->non_got_ref)
2774 || h->plt.offset == (bfd_vma) -1)
2775 {
2776 Elf_Internal_Rela outrel;
2777 asection *sreloc;
2778
2779 /* Need a dynamic relocation to get the real function
2780 address. */
2781 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2782 info,
2783 input_section,
2784 rel->r_offset);
2785 if (outrel.r_offset == (bfd_vma) -1
2786 || outrel.r_offset == (bfd_vma) -2)
2787 abort ();
2788
2789 outrel.r_offset += (input_section->output_section->vma
2790 + input_section->output_offset);
2791
2792 if (POINTER_LOCAL_IFUNC_P (info, h))
2793 {
2794 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2795 h->root.root.string,
2796 h->root.u.def.section->owner);
2797
2798 /* This symbol is resolved locally. */
2799 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2800 outrel.r_addend = (h->root.u.def.value
2801 + h->root.u.def.section->output_section->vma
2802 + h->root.u.def.section->output_offset);
2803 }
2804 else
2805 {
2806 outrel.r_info = htab->r_info (h->dynindx, r_type);
2807 outrel.r_addend = 0;
2808 }
2809
2810 /* Dynamic relocations are stored in
2811 1. .rela.ifunc section in PIC object.
2812 2. .rela.got section in dynamic executable.
2813 3. .rela.iplt section in static executable. */
2814 if (bfd_link_pic (info))
2815 sreloc = htab->elf.irelifunc;
2816 else if (htab->elf.splt != NULL)
2817 sreloc = htab->elf.srelgot;
2818 else
2819 sreloc = htab->elf.irelplt;
2820 elf_append_rela (output_bfd, sreloc, &outrel);
2821
2822 /* If this reloc is against an external symbol, we
2823 do not want to fiddle with the addend. Otherwise,
2824 we need to include the symbol value so that it
2825 becomes an addend for the dynamic reloc. For an
2826 internal symbol, we have updated addend. */
2827 continue;
2828 }
2829 /* FALLTHROUGH */
2830 case R_X86_64_PC32:
2831 case R_X86_64_PC32_BND:
2832 case R_X86_64_PC64:
2833 case R_X86_64_PLT32:
2834 case R_X86_64_PLT32_BND:
2835 goto do_relocation;
2836 }
2837 }
2838
2839 skip_ifunc:
2840 resolved_to_zero = (eh != NULL
2841 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2842
2843 /* When generating a shared object, the relocations handled here are
2844 copied into the output file to be resolved at run time. */
2845 switch (r_type)
2846 {
2847 case R_X86_64_GOT32:
2848 case R_X86_64_GOT64:
2849 /* Relocation is to the entry for this symbol in the global
2850 offset table. */
2851 case R_X86_64_GOTPCREL:
2852 case R_X86_64_GOTPCRELX:
2853 case R_X86_64_REX_GOTPCRELX:
2854 case R_X86_64_GOTPCREL64:
2855 /* Use global offset table entry as symbol value. */
2856 case R_X86_64_GOTPLT64:
2857 /* This is obsolete and treated the same as GOT64. */
2858 base_got = htab->elf.sgot;
2859
2860 if (htab->elf.sgot == NULL)
2861 abort ();
2862
2863 relative_reloc = FALSE;
2864 if (h != NULL)
2865 {
2866 off = h->got.offset;
2867 if (h->needs_plt
2868 && h->plt.offset != (bfd_vma)-1
2869 && off == (bfd_vma)-1)
2870 {
2871 /* We can't use h->got.offset here to save
2872 state, or even just remember the offset, as
2873 finish_dynamic_symbol would use that as offset into
2874 .got. */
2875 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2876 - htab->plt.has_plt0);
2877 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2878 base_got = htab->elf.sgotplt;
2879 }
2880
2881 if (RESOLVED_LOCALLY_P (info, h, htab))
2882 {
2883 /* We must initialize this entry in the global offset
2884 table. Since the offset must always be a multiple
2885 of 8, we use the least significant bit to record
2886 whether we have initialized it already.
2887
2888 When doing a dynamic link, we create a .rela.got
2889 relocation entry to initialize the value. This is
2890 done in the finish_dynamic_symbol routine. */
2891 if ((off & 1) != 0)
2892 off &= ~1;
2893 else
2894 {
2895 bfd_put_64 (output_bfd, relocation,
2896 base_got->contents + off);
2897 /* Note that this is harmless for the GOTPLT64 case,
2898 as -1 | 1 still is -1. */
2899 h->got.offset |= 1;
2900
2901 if (GENERATE_RELATIVE_RELOC_P (info, h))
2902 {
2903 /* If this symbol isn't dynamic in PIC,
2904 generate R_X86_64_RELATIVE here. */
2905 eh->no_finish_dynamic_symbol = 1;
2906 relative_reloc = TRUE;
2907 }
2908 }
2909 }
2910 else
2911 unresolved_reloc = FALSE;
2912 }
2913 else
2914 {
2915 if (local_got_offsets == NULL)
2916 abort ();
2917
2918 off = local_got_offsets[r_symndx];
2919
2920 /* The offset must always be a multiple of 8. We use
2921 the least significant bit to record whether we have
2922 already generated the necessary reloc. */
2923 if ((off & 1) != 0)
2924 off &= ~1;
2925 else
2926 {
2927 bfd_put_64 (output_bfd, relocation,
2928 base_got->contents + off);
2929 local_got_offsets[r_symndx] |= 1;
2930
2931 /* NB: GOTPCREL relocations against a local absolute
2932 symbol store the relocation value in the GOT slot
2933 without a relative relocation. */
2934 if (bfd_link_pic (info)
2935 && !(sym->st_shndx == SHN_ABS
2936 && (r_type == R_X86_64_GOTPCREL
2937 || r_type == R_X86_64_GOTPCRELX
2938 || r_type == R_X86_64_REX_GOTPCRELX)))
2939 relative_reloc = TRUE;
2940 }
2941 }
2942
2943 if (relative_reloc)
2944 {
2945 asection *s;
2946 Elf_Internal_Rela outrel;
2947
2948 /* We need to generate a R_X86_64_RELATIVE reloc
2949 for the dynamic linker. */
2950 s = htab->elf.srelgot;
2951 if (s == NULL)
2952 abort ();
2953
2954 outrel.r_offset = (base_got->output_section->vma
2955 + base_got->output_offset
2956 + off);
2957 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2958 outrel.r_addend = relocation;
2959 elf_append_rela (output_bfd, s, &outrel);
2960 }
2961
2962 if (off >= (bfd_vma) -2)
2963 abort ();
2964
2965 relocation = base_got->output_section->vma
2966 + base_got->output_offset + off;
2967 if (r_type != R_X86_64_GOTPCREL
2968 && r_type != R_X86_64_GOTPCRELX
2969 && r_type != R_X86_64_REX_GOTPCRELX
2970 && r_type != R_X86_64_GOTPCREL64)
2971 relocation -= htab->elf.sgotplt->output_section->vma
2972 - htab->elf.sgotplt->output_offset;
2973
2974 break;
2975
2976 case R_X86_64_GOTOFF64:
2977 /* Relocation is relative to the start of the global offset
2978 table. */
2979
2980 /* Check to make sure it isn't a protected function or data
2981 symbol for a shared library since it may not be local when
2982 used as a function address or with a copy relocation. We also
2983 need to make sure that a symbol is referenced locally. */
2984 if (bfd_link_pic (info) && h)
2985 {
2986 if (!h->def_regular)
2987 {
2988 const char *v;
2989
2990 switch (ELF_ST_VISIBILITY (h->other))
2991 {
2992 case STV_HIDDEN:
2993 v = _("hidden symbol");
2994 break;
2995 case STV_INTERNAL:
2996 v = _("internal symbol");
2997 break;
2998 case STV_PROTECTED:
2999 v = _("protected symbol");
3000 break;
3001 default:
3002 v = _("symbol");
3003 break;
3004 }
3005
3006 _bfd_error_handler
3007 /* xgettext:c-format */
3008 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3009 " `%s' can not be used when making a shared object"),
3010 input_bfd, v, h->root.root.string);
3011 bfd_set_error (bfd_error_bad_value);
3012 return FALSE;
3013 }
3014 else if (!bfd_link_executable (info)
3015 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3016 && (h->type == STT_FUNC
3017 || h->type == STT_OBJECT)
3018 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3019 {
3020 _bfd_error_handler
3021 /* xgettext:c-format */
3022 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3023 " `%s' can not be used when making a shared object"),
3024 input_bfd,
3025 h->type == STT_FUNC ? "function" : "data",
3026 h->root.root.string);
3027 bfd_set_error (bfd_error_bad_value);
3028 return FALSE;
3029 }
3030 }
3031
3032 /* Note that sgot is not involved in this
3033 calculation. We always want the start of .got.plt. If we
3034 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3035 permitted by the ABI, we might have to change this
3036 calculation. */
3037 relocation -= htab->elf.sgotplt->output_section->vma
3038 + htab->elf.sgotplt->output_offset;
3039 break;
3040
3041 case R_X86_64_GOTPC32:
3042 case R_X86_64_GOTPC64:
3043 /* Use global offset table as symbol value. */
3044 relocation = htab->elf.sgotplt->output_section->vma
3045 + htab->elf.sgotplt->output_offset;
3046 unresolved_reloc = FALSE;
3047 break;
3048
3049 case R_X86_64_PLTOFF64:
3050 /* Relocation is PLT entry relative to GOT. For local
3051 symbols it's the symbol itself relative to GOT. */
3052 if (h != NULL
3053 /* See PLT32 handling. */
3054 && (h->plt.offset != (bfd_vma) -1
3055 || eh->plt_got.offset != (bfd_vma) -1)
3056 && htab->elf.splt != NULL)
3057 {
3058 if (eh->plt_got.offset != (bfd_vma) -1)
3059 {
3060 /* Use the GOT PLT. */
3061 resolved_plt = htab->plt_got;
3062 plt_offset = eh->plt_got.offset;
3063 }
3064 else if (htab->plt_second != NULL)
3065 {
3066 resolved_plt = htab->plt_second;
3067 plt_offset = eh->plt_second.offset;
3068 }
3069 else
3070 {
3071 resolved_plt = htab->elf.splt;
3072 plt_offset = h->plt.offset;
3073 }
3074
3075 relocation = (resolved_plt->output_section->vma
3076 + resolved_plt->output_offset
3077 + plt_offset);
3078 unresolved_reloc = FALSE;
3079 }
3080
3081 relocation -= htab->elf.sgotplt->output_section->vma
3082 + htab->elf.sgotplt->output_offset;
3083 break;
3084
3085 case R_X86_64_PLT32:
3086 case R_X86_64_PLT32_BND:
3087 /* Relocation is to the entry for this symbol in the
3088 procedure linkage table. */
3089
3090 /* Resolve a PLT32 reloc against a local symbol directly,
3091 without using the procedure linkage table. */
3092 if (h == NULL)
3093 break;
3094
3095 if ((h->plt.offset == (bfd_vma) -1
3096 && eh->plt_got.offset == (bfd_vma) -1)
3097 || htab->elf.splt == NULL)
3098 {
3099 /* We didn't make a PLT entry for this symbol. This
3100 happens when statically linking PIC code, or when
3101 using -Bsymbolic. */
3102 break;
3103 }
3104
3105 use_plt:
3106 if (h->plt.offset != (bfd_vma) -1)
3107 {
3108 if (htab->plt_second != NULL)
3109 {
3110 resolved_plt = htab->plt_second;
3111 plt_offset = eh->plt_second.offset;
3112 }
3113 else
3114 {
3115 resolved_plt = htab->elf.splt;
3116 plt_offset = h->plt.offset;
3117 }
3118 }
3119 else
3120 {
3121 /* Use the GOT PLT. */
3122 resolved_plt = htab->plt_got;
3123 plt_offset = eh->plt_got.offset;
3124 }
3125
3126 relocation = (resolved_plt->output_section->vma
3127 + resolved_plt->output_offset
3128 + plt_offset);
3129 unresolved_reloc = FALSE;
3130 break;
3131
3132 case R_X86_64_SIZE32:
3133 case R_X86_64_SIZE64:
3134 /* Set to symbol size. */
3135 relocation = st_size;
3136 goto direct;
3137
3138 case R_X86_64_PC8:
3139 case R_X86_64_PC16:
3140 case R_X86_64_PC32:
3141 case R_X86_64_PC32_BND:
3142 /* Don't complain about -fPIC if the symbol is undefined when
3143 building an executable unless it is an unresolved weak symbol,
3144 references a dynamic definition in a PIE, or -z nocopyreloc
3145 is used. */
3146 no_copyreloc_p
3147 = (info->nocopyreloc
3148 || (h != NULL
3149 && !h->root.linker_def
3150 && !h->root.ldscript_def
3151 && eh->def_protected
3152 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3153
3154 if ((input_section->flags & SEC_ALLOC) != 0
3155 && (input_section->flags & SEC_READONLY) != 0
3156 && h != NULL
3157 && ((bfd_link_executable (info)
3158 && ((h->root.type == bfd_link_hash_undefweak
3159 && (eh == NULL
3160 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3161 eh)))
3162 || (bfd_link_pie (info)
3163 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3164 && h->def_dynamic)
3165 || (no_copyreloc_p
3166 && h->def_dynamic
3167 && !(h->root.u.def.section->flags & SEC_CODE))))
3168 || bfd_link_dll (info)))
3169 {
3170 bfd_boolean fail = FALSE;
3171 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3172 {
3173 /* Symbol is referenced locally. Make sure it is
3174 defined locally. */
3175 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3176 }
3177 else if (bfd_link_pie (info))
3178 {
3179 /* We can only use PC-relative relocations in PIE
3180 from non-code sections. */
3181 if (h->type == STT_FUNC
3182 && (sec->flags & SEC_CODE) != 0)
3183 fail = TRUE;
3184 }
3185 else if (no_copyreloc_p || bfd_link_dll (info))
3186 {
3187 /* Symbol doesn't need copy reloc and isn't
3188 referenced locally. Don't allow PC-relative
3189 relocations against default and protected
3190 symbols since address of protected function
3191 and location of protected data may not be in
3192 the shared object. */
3193 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3194 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3195 }
3196
3197 if (fail)
3198 return elf_x86_64_need_pic (info, input_bfd, input_section,
3199 h, NULL, NULL, howto);
3200 }
3201 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3202 as function address. */
3203 else if (h != NULL
3204 && (input_section->flags & SEC_CODE) == 0
3205 && bfd_link_pie (info)
3206 && h->type == STT_FUNC
3207 && !h->def_regular
3208 && h->def_dynamic)
3209 goto use_plt;
3210 /* Fall through. */
3211
3212 case R_X86_64_8:
3213 case R_X86_64_16:
3214 case R_X86_64_32:
3215 case R_X86_64_PC64:
3216 case R_X86_64_64:
3217 /* FIXME: The ABI says the linker should make sure the value is
3218 the same when it's zero-extended to 64 bits. */
3219
3220 direct:
3221 if ((input_section->flags & SEC_ALLOC) == 0)
3222 break;
3223
3224 need_copy_reloc_in_pie = (bfd_link_pie (info)
3225 && h != NULL
3226 && (h->needs_copy
3227 || eh->needs_copy
3228 || (h->root.type
3229 == bfd_link_hash_undefined))
3230 && (X86_PCREL_TYPE_P (r_type)
3231 || X86_SIZE_TYPE_P (r_type)));
3232
3233 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3234 need_copy_reloc_in_pie,
3235 resolved_to_zero, FALSE))
3236 {
3237 Elf_Internal_Rela outrel;
3238 bfd_boolean skip, relocate;
3239 asection *sreloc;
3240
3241 /* When generating a shared object, these relocations
3242 are copied into the output file to be resolved at run
3243 time. */
3244 skip = FALSE;
3245 relocate = FALSE;
3246
3247 outrel.r_offset =
3248 _bfd_elf_section_offset (output_bfd, info, input_section,
3249 rel->r_offset);
3250 if (outrel.r_offset == (bfd_vma) -1)
3251 skip = TRUE;
3252 else if (outrel.r_offset == (bfd_vma) -2)
3253 skip = TRUE, relocate = TRUE;
3254
3255 outrel.r_offset += (input_section->output_section->vma
3256 + input_section->output_offset);
3257
3258 if (skip)
3259 memset (&outrel, 0, sizeof outrel);
3260
3261 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3262 {
3263 outrel.r_info = htab->r_info (h->dynindx, r_type);
3264 outrel.r_addend = rel->r_addend;
3265 }
3266 else
3267 {
3268 /* This symbol is local, or marked to become local.
3269 When relocation overflow check is disabled, we
3270 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3271 if (r_type == htab->pointer_r_type
3272 || (r_type == R_X86_64_32
3273 && htab->params->no_reloc_overflow_check))
3274 {
3275 relocate = TRUE;
3276 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3277 outrel.r_addend = relocation + rel->r_addend;
3278 }
3279 else if (r_type == R_X86_64_64
3280 && !ABI_64_P (output_bfd))
3281 {
3282 relocate = TRUE;
3283 outrel.r_info = htab->r_info (0,
3284 R_X86_64_RELATIVE64);
3285 outrel.r_addend = relocation + rel->r_addend;
3286 /* Check addend overflow. */
3287 if ((outrel.r_addend & 0x80000000)
3288 != (rel->r_addend & 0x80000000))
3289 {
3290 const char *name;
3291 int addend = rel->r_addend;
3292 if (h && h->root.root.string)
3293 name = h->root.root.string;
3294 else
3295 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3296 sym, NULL);
3297 _bfd_error_handler
3298 /* xgettext:c-format */
3299 (_("%pB: addend %s%#x in relocation %s against "
3300 "symbol `%s' at %#" PRIx64
3301 " in section `%pA' is out of range"),
3302 input_bfd, addend < 0 ? "-" : "", addend,
3303 howto->name, name, (uint64_t) rel->r_offset,
3304 input_section);
3305 bfd_set_error (bfd_error_bad_value);
3306 return FALSE;
3307 }
3308 }
3309 else
3310 {
3311 long sindx;
3312
3313 if (bfd_is_abs_section (sec))
3314 sindx = 0;
3315 else if (sec == NULL || sec->owner == NULL)
3316 {
3317 bfd_set_error (bfd_error_bad_value);
3318 return FALSE;
3319 }
3320 else
3321 {
3322 asection *osec;
3323
3324 /* We are turning this relocation into one
3325 against a section symbol. It would be
3326 proper to subtract the symbol's value,
3327 osec->vma, from the emitted reloc addend,
3328 but ld.so expects buggy relocs. */
3329 osec = sec->output_section;
3330 sindx = elf_section_data (osec)->dynindx;
3331 if (sindx == 0)
3332 {
3333 asection *oi = htab->elf.text_index_section;
3334 sindx = elf_section_data (oi)->dynindx;
3335 }
3336 BFD_ASSERT (sindx != 0);
3337 }
3338
3339 outrel.r_info = htab->r_info (sindx, r_type);
3340 outrel.r_addend = relocation + rel->r_addend;
3341 }
3342 }
3343
3344 sreloc = elf_section_data (input_section)->sreloc;
3345
3346 if (sreloc == NULL || sreloc->contents == NULL)
3347 {
3348 r = bfd_reloc_notsupported;
3349 goto check_relocation_error;
3350 }
3351
3352 elf_append_rela (output_bfd, sreloc, &outrel);
3353
3354 /* If this reloc is against an external symbol, we do
3355 not want to fiddle with the addend. Otherwise, we
3356 need to include the symbol value so that it becomes
3357 an addend for the dynamic reloc. */
3358 if (! relocate)
3359 continue;
3360 }
3361
3362 break;
3363
3364 case R_X86_64_TLSGD:
3365 case R_X86_64_GOTPC32_TLSDESC:
3366 case R_X86_64_TLSDESC_CALL:
3367 case R_X86_64_GOTTPOFF:
3368 tls_type = GOT_UNKNOWN;
3369 if (h == NULL && local_got_offsets)
3370 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3371 else if (h != NULL)
3372 tls_type = elf_x86_hash_entry (h)->tls_type;
3373
3374 r_type_tls = r_type;
3375 if (! elf_x86_64_tls_transition (info, input_bfd,
3376 input_section, contents,
3377 symtab_hdr, sym_hashes,
3378 &r_type_tls, tls_type, rel,
3379 relend, h, r_symndx, TRUE))
3380 return FALSE;
3381
3382 if (r_type_tls == R_X86_64_TPOFF32)
3383 {
3384 bfd_vma roff = rel->r_offset;
3385
3386 BFD_ASSERT (! unresolved_reloc);
3387
3388 if (r_type == R_X86_64_TLSGD)
3389 {
3390 /* GD->LE transition. For 64bit, change
3391 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3392 .word 0x6666; rex64; call __tls_get_addr@PLT
3393 or
3394 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3395 .byte 0x66; rex64
3396 call *__tls_get_addr@GOTPCREL(%rip)
3397 which may be converted to
3398 addr32 call __tls_get_addr
3399 into:
3400 movq %fs:0, %rax
3401 leaq foo@tpoff(%rax), %rax
3402 For 32bit, change
3403 leaq foo@tlsgd(%rip), %rdi
3404 .word 0x6666; rex64; call __tls_get_addr@PLT
3405 or
3406 leaq foo@tlsgd(%rip), %rdi
3407 .byte 0x66; rex64
3408 call *__tls_get_addr@GOTPCREL(%rip)
3409 which may be converted to
3410 addr32 call __tls_get_addr
3411 into:
3412 movl %fs:0, %eax
3413 leaq foo@tpoff(%rax), %rax
3414 For largepic, change:
3415 leaq foo@tlsgd(%rip), %rdi
3416 movabsq $__tls_get_addr@pltoff, %rax
3417 addq %r15, %rax
3418 call *%rax
3419 into:
3420 movq %fs:0, %rax
3421 leaq foo@tpoff(%rax), %rax
3422 nopw 0x0(%rax,%rax,1) */
3423 int largepic = 0;
3424 if (ABI_64_P (output_bfd))
3425 {
3426 if (contents[roff + 5] == 0xb8)
3427 {
3428 if (roff < 3
3429 || (roff - 3 + 22) > input_section->size)
3430 {
3431 corrupt_input:
3432 info->callbacks->einfo
3433 (_("%F%P: corrupt input: %pB\n"),
3434 input_bfd);
3435 return FALSE;
3436 }
3437 memcpy (contents + roff - 3,
3438 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3439 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3440 largepic = 1;
3441 }
3442 else
3443 {
3444 if (roff < 4
3445 || (roff - 4 + 16) > input_section->size)
3446 goto corrupt_input;
3447 memcpy (contents + roff - 4,
3448 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3449 16);
3450 }
3451 }
3452 else
3453 {
3454 if (roff < 3
3455 || (roff - 3 + 15) > input_section->size)
3456 goto corrupt_input;
3457 memcpy (contents + roff - 3,
3458 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3459 15);
3460 }
3461 bfd_put_32 (output_bfd,
3462 elf_x86_64_tpoff (info, relocation),
3463 contents + roff + 8 + largepic);
3464 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3465 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3466 rel++;
3467 wrel++;
3468 continue;
3469 }
3470 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3471 {
3472 /* GDesc -> LE transition.
3473 It's originally something like:
3474 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3475 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3476
3477 Change it to:
3478 movq $x@tpoff, %rax <--- LP64 mode.
3479 rex movl $x@tpoff, %eax <--- X32 mode.
3480 */
3481
3482 unsigned int val, type;
3483
3484 if (roff < 3)
3485 goto corrupt_input;
3486 type = bfd_get_8 (input_bfd, contents + roff - 3);
3487 val = bfd_get_8 (input_bfd, contents + roff - 1);
3488 bfd_put_8 (output_bfd,
3489 (type & 0x48) | ((type >> 2) & 1),
3490 contents + roff - 3);
3491 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3492 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3493 contents + roff - 1);
3494 bfd_put_32 (output_bfd,
3495 elf_x86_64_tpoff (info, relocation),
3496 contents + roff);
3497 continue;
3498 }
3499 else if (r_type == R_X86_64_TLSDESC_CALL)
3500 {
3501 /* GDesc -> LE transition.
3502 It's originally:
3503 call *(%rax) <--- LP64 mode.
3504 call *(%eax) <--- X32 mode.
3505 Turn it into:
3506 xchg %ax,%ax <-- LP64 mode.
3507 nopl (%rax) <-- X32 mode.
3508 */
3509 unsigned int prefix = 0;
3510 if (!ABI_64_P (input_bfd))
3511 {
3512 /* Check for call *x@tlsdesc(%eax). */
3513 if (contents[roff] == 0x67)
3514 prefix = 1;
3515 }
3516 if (prefix)
3517 {
3518 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3519 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3520 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3521 }
3522 else
3523 {
3524 bfd_put_8 (output_bfd, 0x66, contents + roff);
3525 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3526 }
3527 continue;
3528 }
3529 else if (r_type == R_X86_64_GOTTPOFF)
3530 {
3531 /* IE->LE transition:
3532 For 64bit, originally it can be one of:
3533 movq foo@gottpoff(%rip), %reg
3534 addq foo@gottpoff(%rip), %reg
3535 We change it into:
3536 movq $foo, %reg
3537 leaq foo(%reg), %reg
3538 addq $foo, %reg.
3539 For 32bit, originally it can be one of:
3540 movq foo@gottpoff(%rip), %reg
3541 addl foo@gottpoff(%rip), %reg
3542 We change it into:
3543 movq $foo, %reg
3544 leal foo(%reg), %reg
3545 addl $foo, %reg. */
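		      /* Roughly (added for illustration): mov becomes a
			 mov-immediate (0xc7 /0); add becomes either an
			 add-immediate (0x81 /0) when the destination is
			 %rsp/%r12, or an lea off(%reg), %reg (0x8d)
			 otherwise; the REX byte is adjusted so the
			 register extension follows the operand into its
			 new ModRM field.  */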
3546
3547 unsigned int val, type, reg;
3548
3549 if (roff >= 3)
3550 val = bfd_get_8 (input_bfd, contents + roff - 3);
3551 else
3552 {
3553 if (roff < 2)
3554 goto corrupt_input;
3555 val = 0;
3556 }
3557 type = bfd_get_8 (input_bfd, contents + roff - 2);
3558 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3559 reg >>= 3;
3560 if (type == 0x8b)
3561 {
3562 /* movq */
3563 if (val == 0x4c)
3564 {
3565 if (roff < 3)
3566 goto corrupt_input;
3567 bfd_put_8 (output_bfd, 0x49,
3568 contents + roff - 3);
3569 }
3570 else if (!ABI_64_P (output_bfd) && val == 0x44)
3571 {
3572 if (roff < 3)
3573 goto corrupt_input;
3574 bfd_put_8 (output_bfd, 0x41,
3575 contents + roff - 3);
3576 }
3577 bfd_put_8 (output_bfd, 0xc7,
3578 contents + roff - 2);
3579 bfd_put_8 (output_bfd, 0xc0 | reg,
3580 contents + roff - 1);
3581 }
3582 else if (reg == 4)
3583 {
3584 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3585 is special */
3586 if (val == 0x4c)
3587 {
3588 if (roff < 3)
3589 goto corrupt_input;
3590 bfd_put_8 (output_bfd, 0x49,
3591 contents + roff - 3);
3592 }
3593 else if (!ABI_64_P (output_bfd) && val == 0x44)
3594 {
3595 if (roff < 3)
3596 goto corrupt_input;
3597 bfd_put_8 (output_bfd, 0x41,
3598 contents + roff - 3);
3599 }
3600 bfd_put_8 (output_bfd, 0x81,
3601 contents + roff - 2);
3602 bfd_put_8 (output_bfd, 0xc0 | reg,
3603 contents + roff - 1);
3604 }
3605 else
3606 {
3607 /* addq/addl -> leaq/leal */
3608 if (val == 0x4c)
3609 {
3610 if (roff < 3)
3611 goto corrupt_input;
3612 bfd_put_8 (output_bfd, 0x4d,
3613 contents + roff - 3);
3614 }
3615 else if (!ABI_64_P (output_bfd) && val == 0x44)
3616 {
3617 if (roff < 3)
3618 goto corrupt_input;
3619 bfd_put_8 (output_bfd, 0x45,
3620 contents + roff - 3);
3621 }
3622 bfd_put_8 (output_bfd, 0x8d,
3623 contents + roff - 2);
3624 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3625 contents + roff - 1);
3626 }
3627 bfd_put_32 (output_bfd,
3628 elf_x86_64_tpoff (info, relocation),
3629 contents + roff);
3630 continue;
3631 }
3632 else
3633 BFD_ASSERT (FALSE);
3634 }
3635
3636 if (htab->elf.sgot == NULL)
3637 abort ();
3638
3639 if (h != NULL)
3640 {
3641 off = h->got.offset;
3642 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3643 }
3644 else
3645 {
3646 if (local_got_offsets == NULL)
3647 abort ();
3648
3649 off = local_got_offsets[r_symndx];
3650 offplt = local_tlsdesc_gotents[r_symndx];
3651 }
3652
3653 if ((off & 1) != 0)
3654 off &= ~1;
3655 else
3656 {
3657 Elf_Internal_Rela outrel;
3658 int dr_type, indx;
3659 asection *sreloc;
3660
3661 if (htab->elf.srelgot == NULL)
3662 abort ();
3663
3664 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3665
3666 if (GOT_TLS_GDESC_P (tls_type))
3667 {
3668 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3669 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3670 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3671 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3672 + htab->elf.sgotplt->output_offset
3673 + offplt
3674 + htab->sgotplt_jump_table_size);
3675 sreloc = htab->elf.srelplt;
3676 if (indx == 0)
3677 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3678 else
3679 outrel.r_addend = 0;
3680 elf_append_rela (output_bfd, sreloc, &outrel);
3681 }
3682
3683 sreloc = htab->elf.srelgot;
3684
3685 outrel.r_offset = (htab->elf.sgot->output_section->vma
3686 + htab->elf.sgot->output_offset + off);
3687
3688 if (GOT_TLS_GD_P (tls_type))
3689 dr_type = R_X86_64_DTPMOD64;
3690 else if (GOT_TLS_GDESC_P (tls_type))
3691 goto dr_done;
3692 else
3693 dr_type = R_X86_64_TPOFF64;
3694
3695 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3696 outrel.r_addend = 0;
3697 if ((dr_type == R_X86_64_TPOFF64
3698 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3699 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3700 outrel.r_info = htab->r_info (indx, dr_type);
3701
3702 elf_append_rela (output_bfd, sreloc, &outrel);
3703
3704 if (GOT_TLS_GD_P (tls_type))
3705 {
3706 if (indx == 0)
3707 {
3708 BFD_ASSERT (! unresolved_reloc);
3709 bfd_put_64 (output_bfd,
3710 relocation - _bfd_x86_elf_dtpoff_base (info),
3711 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3712 }
3713 else
3714 {
3715 bfd_put_64 (output_bfd, 0,
3716 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3717 outrel.r_info = htab->r_info (indx,
3718 R_X86_64_DTPOFF64);
3719 outrel.r_offset += GOT_ENTRY_SIZE;
3720 elf_append_rela (output_bfd, sreloc,
3721 &outrel);
3722 }
3723 }
3724
3725 dr_done:
3726 if (h != NULL)
3727 h->got.offset |= 1;
3728 else
3729 local_got_offsets[r_symndx] |= 1;
3730 }
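/* GOT offsets are multiples of GOT_ENTRY_SIZE, so bit 0 is always free;
   it is used above as an "already initialized" flag: set once the GOT
   entry and its dynamic relocation have been emitted, and masked off
   (off &= ~1) whenever the offset itself is needed.  */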
3731
3732 if (off >= (bfd_vma) -2
3733 && ! GOT_TLS_GDESC_P (tls_type))
3734 abort ();
3735 if (r_type_tls == r_type)
3736 {
3737 if (r_type == R_X86_64_GOTPC32_TLSDESC
3738 || r_type == R_X86_64_TLSDESC_CALL)
3739 relocation = htab->elf.sgotplt->output_section->vma
3740 + htab->elf.sgotplt->output_offset
3741 + offplt + htab->sgotplt_jump_table_size;
3742 else
3743 relocation = htab->elf.sgot->output_section->vma
3744 + htab->elf.sgot->output_offset + off;
3745 unresolved_reloc = FALSE;
3746 }
3747 else
3748 {
3749 bfd_vma roff = rel->r_offset;
3750
3751 if (r_type == R_X86_64_TLSGD)
3752 {
3753 /* GD->IE transition. For 64bit, change
3754 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3755 .word 0x6666; rex64; call __tls_get_addr@PLT
3756 or
3757 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3758 .byte 0x66; rex64
3759 call *__tls_get_addr@GOTPCREL(%rip)
3760 which may be converted to
3761 addr32 call __tls_get_addr
3762 into:
3763 movq %fs:0, %rax
3764 addq foo@gottpoff(%rip), %rax
3765 For 32bit, change
3766 leaq foo@tlsgd(%rip), %rdi
3767 .word 0x6666; rex64; call __tls_get_addr@PLT
3768 or
3769 leaq foo@tlsgd(%rip), %rdi
3770 .byte 0x66; rex64;
3771 call *__tls_get_addr@GOTPCREL(%rip)
3772 which may be converted to
3773 addr32 call __tls_get_addr
3774 into:
3775 movl %fs:0, %eax
3776 addq foo@gottpoff(%rip), %rax
3777 For largepic, change:
3778 leaq foo@tlsgd(%rip), %rdi
3779 movabsq $__tls_get_addr@pltoff, %rax
3780 addq %r15, %rax
3781 call *%rax
3782 into:
3783 movq %fs:0, %rax
3784 addq foo@gottpoff(%rip), %rax
3785 nopw 0x0(%rax,%rax,1) */
3786 int largepic = 0;
3787 if (ABI_64_P (output_bfd))
3788 {
3789 if (contents[roff + 5] == 0xb8)
3790 {
3791 if (roff < 3
3792 || (roff - 3 + 22) > input_section->size)
3793 goto corrupt_input;
3794 memcpy (contents + roff - 3,
3795 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3796 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3797 largepic = 1;
3798 }
3799 else
3800 {
3801 if (roff < 4
3802 || (roff - 4 + 16) > input_section->size)
3803 goto corrupt_input;
3804 memcpy (contents + roff - 4,
3805 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3806 16);
3807 }
3808 }
3809 else
3810 {
3811 if (roff < 3
3812 || (roff - 3 + 15) > input_section->size)
3813 goto corrupt_input;
3814 memcpy (contents + roff - 3,
3815 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3816 15);
3817 }
3818
3819 relocation = (htab->elf.sgot->output_section->vma
3820 + htab->elf.sgot->output_offset + off
3821 - roff
3822 - largepic
3823 - input_section->output_section->vma
3824 - input_section->output_offset
3825 - 12);
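/* The rewritten sequence places the 4-byte @gottpoff displacement at
   roff + 8 (+ largepic), and the %rip value used by the add is 4 bytes
   past that field, so the GOT address is biased by -(8 + 4) = -12
   (plus largepic) to form the PC-relative value stored below.  */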
3826 bfd_put_32 (output_bfd, relocation,
3827 contents + roff + 8 + largepic);
3828 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3829 rel++;
3830 wrel++;
3831 continue;
3832 }
3833 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3834 {
3835 /* GDesc -> IE transition.
3836 It's originally something like:
3837 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3838 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3839
3840 Change it to:
3841 # before xchg %ax,%ax in LP64 mode.
3842 movq x@gottpoff(%rip), %rax
3843 # before nopl (%rax) in X32 mode.
3844 rex movl x@gottpoff(%rip), %eax
3845 */
3846
3847 /* Now modify the instruction as appropriate. To
3848 turn a lea into a mov in the form we use it, it
3849 suffices to change the second byte from 0x8d to
3850 0x8b. */
3851 if (roff < 2)
3852 goto corrupt_input;
3853 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3854
3855 bfd_put_32 (output_bfd,
3856 htab->elf.sgot->output_section->vma
3857 + htab->elf.sgot->output_offset + off
3858 - rel->r_offset
3859 - input_section->output_section->vma
3860 - input_section->output_offset
3861 - 4,
3862 contents + roff);
3863 continue;
3864 }
3865 else if (r_type == R_X86_64_TLSDESC_CALL)
3866 {
3867 /* GDesc -> IE transition.
3868 It's originally:
3869 call *(%rax) <--- LP64 mode.
3870 call *(%eax) <--- X32 mode.
3871
3872 Change it to:
3873 xchg %ax, %ax <-- LP64 mode.
3874 nopl (%rax) <-- X32 mode.
3875 */
3876
3877 unsigned int prefix = 0;
3878 if (!ABI_64_P (input_bfd))
3879 {
3880 /* Check for call *x@tlsdesc(%eax). */
3881 if (contents[roff] == 0x67)
3882 prefix = 1;
3883 }
3884 if (prefix)
3885 {
3886 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3887 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3888 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3889 }
3890 else
3891 {
3892 bfd_put_8 (output_bfd, 0x66, contents + roff);
3893 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3894 }
3895 continue;
3896 }
3897 else
3898 BFD_ASSERT (FALSE);
3899 }
3900 break;
3901
3902 case R_X86_64_TLSLD:
3903 if (! elf_x86_64_tls_transition (info, input_bfd,
3904 input_section, contents,
3905 symtab_hdr, sym_hashes,
3906 &r_type, GOT_UNKNOWN, rel,
3907 relend, h, r_symndx, TRUE))
3908 return FALSE;
3909
3910 if (r_type != R_X86_64_TLSLD)
3911 {
3912 /* LD->LE transition:
3913 leaq foo@tlsld(%rip), %rdi
3914 call __tls_get_addr@PLT
3915 For 64bit, we change it into:
3916 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3917 For 32bit, we change it into:
3918 nopl 0x0(%rax); movl %fs:0, %eax
3919 Or
3920 leaq foo@tlsld(%rip), %rdi;
3921 call *__tls_get_addr@GOTPCREL(%rip)
3922 which may be converted to
3923 addr32 call __tls_get_addr
3924 For 64bit, we change it into:
3925 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3926 For 32bit, we change it into:
3927 nopw 0x0(%rax); movl %fs:0, %eax
3928 For largepic, change:
3929 leaq foo@tlsld(%rip), %rdi
3930 movabsq $__tls_get_addr@pltoff, %rax
3931 addq %rbx, %rax
3932 call *%rax
3933 into
3934 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3935 movq %fs:0, %rax */
3936
3937 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3938 if (ABI_64_P (output_bfd))
3939 {
3940 if ((rel->r_offset + 5) >= input_section->size)
3941 goto corrupt_input;
3942 if (contents[rel->r_offset + 5] == 0xb8)
3943 {
3944 if (rel->r_offset < 3
3945 || (rel->r_offset - 3 + 22) > input_section->size)
3946 goto corrupt_input;
3947 memcpy (contents + rel->r_offset - 3,
3948 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3949 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3950 }
3951 else if (contents[rel->r_offset + 4] == 0xff
3952 || contents[rel->r_offset + 4] == 0x67)
3953 {
3954 if (rel->r_offset < 3
3955 || (rel->r_offset - 3 + 13) > input_section->size)
3956 goto corrupt_input;
3957 memcpy (contents + rel->r_offset - 3,
3958 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3959 13);
3960
3961 }
3962 else
3963 {
3964 if (rel->r_offset < 3
3965 || (rel->r_offset - 3 + 12) > input_section->size)
3966 goto corrupt_input;
3967 memcpy (contents + rel->r_offset - 3,
3968 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3969 }
3970 }
3971 else
3972 {
3973 if ((rel->r_offset + 4) >= input_section->size)
3974 goto corrupt_input;
3975 if (contents[rel->r_offset + 4] == 0xff)
3976 {
3977 if (rel->r_offset < 3
3978 || (rel->r_offset - 3 + 13) > input_section->size)
3979 goto corrupt_input;
3980 memcpy (contents + rel->r_offset - 3,
3981 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3982 13);
3983 }
3984 else
3985 {
3986 if (rel->r_offset < 3
3987 || (rel->r_offset - 3 + 12) > input_section->size)
3988 goto corrupt_input;
3989 memcpy (contents + rel->r_offset - 3,
3990 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3991 }
3992 }
3993 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3994 and R_X86_64_PLTOFF64. */
3995 rel++;
3996 wrel++;
3997 continue;
3998 }
3999
4000 if (htab->elf.sgot == NULL)
4001 abort ();
4002
4003 off = htab->tls_ld_or_ldm_got.offset;
4004 if (off & 1)
4005 off &= ~1;
4006 else
4007 {
4008 Elf_Internal_Rela outrel;
4009
4010 if (htab->elf.srelgot == NULL)
4011 abort ();
4012
4013 outrel.r_offset = (htab->elf.sgot->output_section->vma
4014 + htab->elf.sgot->output_offset + off);
4015
4016 bfd_put_64 (output_bfd, 0,
4017 htab->elf.sgot->contents + off);
4018 bfd_put_64 (output_bfd, 0,
4019 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4020 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4021 outrel.r_addend = 0;
4022 elf_append_rela (output_bfd, htab->elf.srelgot,
4023 &outrel);
4024 htab->tls_ld_or_ldm_got.offset |= 1;
4025 }
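/* A single pair of GOT slots is shared by every local-dynamic (TLSLD)
   access in the output: the first word is filled in at run time with the
   module ID via R_X86_64_DTPMOD64, and the second word stays zero because
   each use site adds its own DTPOFF value separately.  */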
4026 relocation = htab->elf.sgot->output_section->vma
4027 + htab->elf.sgot->output_offset + off;
4028 unresolved_reloc = FALSE;
4029 break;
4030
4031 case R_X86_64_DTPOFF32:
4032 if (!bfd_link_executable (info)
4033 || (input_section->flags & SEC_CODE) == 0)
4034 relocation -= _bfd_x86_elf_dtpoff_base (info);
4035 else
4036 relocation = elf_x86_64_tpoff (info, relocation);
4037 break;
4038
4039 case R_X86_64_TPOFF32:
4040 case R_X86_64_TPOFF64:
4041 BFD_ASSERT (bfd_link_executable (info));
4042 relocation = elf_x86_64_tpoff (info, relocation);
4043 break;
4044
4045 case R_X86_64_DTPOFF64:
4046 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4047 relocation -= _bfd_x86_elf_dtpoff_base (info);
4048 break;
4049
4050 default:
4051 break;
4052 }
4053
4054 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4055 because such sections are not SEC_ALLOC and thus ld.so will
4056 not process them. */
4057 if (unresolved_reloc
4058 && !((input_section->flags & SEC_DEBUGGING) != 0
4059 && h->def_dynamic)
4060 && _bfd_elf_section_offset (output_bfd, info, input_section,
4061 rel->r_offset) != (bfd_vma) -1)
4062 {
4063 switch (r_type)
4064 {
4065 case R_X86_64_32S:
4066 sec = h->root.u.def.section;
4067 if ((info->nocopyreloc
4068 || (eh->def_protected
4069 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4070 && !(h->root.u.def.section->flags & SEC_CODE))
4071 return elf_x86_64_need_pic (info, input_bfd, input_section,
4072 h, NULL, NULL, howto);
4073 /* Fall through. */
4074
4075 default:
4076 _bfd_error_handler
4077 /* xgettext:c-format */
4078 (_("%pB(%pA+%#" PRIx64 "): "
4079 "unresolvable %s relocation against symbol `%s'"),
4080 input_bfd,
4081 input_section,
4082 (uint64_t) rel->r_offset,
4083 howto->name,
4084 h->root.root.string);
4085 return FALSE;
4086 }
4087 }
4088
4089 do_relocation:
4090 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4091 contents, rel->r_offset,
4092 relocation, rel->r_addend);
4093
4094 check_relocation_error:
4095 if (r != bfd_reloc_ok)
4096 {
4097 const char *name;
4098
4099 if (h != NULL)
4100 name = h->root.root.string;
4101 else
4102 {
4103 name = bfd_elf_string_from_elf_section (input_bfd,
4104 symtab_hdr->sh_link,
4105 sym->st_name);
4106 if (name == NULL)
4107 return FALSE;
4108 if (*name == '\0')
4109 name = bfd_section_name (sec);
4110 }
4111
4112 if (r == bfd_reloc_overflow)
4113 {
4114 if (converted_reloc)
4115 {
4116 info->callbacks->einfo
4117 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4118 return FALSE;
4119 }
4120 (*info->callbacks->reloc_overflow)
4121 (info, (h ? &h->root : NULL), name, howto->name,
4122 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4123 }
4124 else
4125 {
4126 _bfd_error_handler
4127 /* xgettext:c-format */
4128 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4129 input_bfd, input_section,
4130 (uint64_t) rel->r_offset, name, (int) r);
4131 return FALSE;
4132 }
4133 }
4134
4135 if (wrel != rel)
4136 *wrel = *rel;
4137 }
4138
4139 if (wrel != rel)
4140 {
4141 Elf_Internal_Shdr *rel_hdr;
4142 size_t deleted = rel - wrel;
4143
4144 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4145 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4146 if (rel_hdr->sh_size == 0)
4147 {
4148 /* It is too late to remove an empty reloc section. Leave
4149 one NONE reloc.
4150 ??? What is wrong with an empty section??? */
4151 rel_hdr->sh_size = rel_hdr->sh_entsize;
4152 deleted -= 1;
4153 }
4154 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4155 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4156 input_section->reloc_count -= deleted;
4157 }
4158
4159 return TRUE;
4160 }
4161
4162 /* Finish up dynamic symbol handling. We set the contents of various
4163 dynamic sections here. */
4164
4165 static bfd_boolean
4166 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4167 struct bfd_link_info *info,
4168 struct elf_link_hash_entry *h,
4169 Elf_Internal_Sym *sym)
4170 {
4171 struct elf_x86_link_hash_table *htab;
4172 bfd_boolean use_plt_second;
4173 struct elf_x86_link_hash_entry *eh;
4174 bfd_boolean local_undefweak;
4175
4176 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4177 if (htab == NULL)
4178 return FALSE;
4179
4180 /* Use the second PLT section only if there is a .plt section. */
4181 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4182
4183 eh = (struct elf_x86_link_hash_entry *) h;
4184 if (eh->no_finish_dynamic_symbol)
4185 abort ();
4186
4187 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4188 resolved undefined weak symbols in executables so that their
4189 references have value 0 at run-time. */
4190 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4191
4192 if (h->plt.offset != (bfd_vma) -1)
4193 {
4194 bfd_vma plt_index;
4195 bfd_vma got_offset, plt_offset;
4196 Elf_Internal_Rela rela;
4197 bfd_byte *loc;
4198 asection *plt, *gotplt, *relplt, *resolved_plt;
4199 const struct elf_backend_data *bed;
4200 bfd_vma plt_got_pcrel_offset;
4201
4202 /* When building a static executable, use .iplt, .igot.plt and
4203 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4204 if (htab->elf.splt != NULL)
4205 {
4206 plt = htab->elf.splt;
4207 gotplt = htab->elf.sgotplt;
4208 relplt = htab->elf.srelplt;
4209 }
4210 else
4211 {
4212 plt = htab->elf.iplt;
4213 gotplt = htab->elf.igotplt;
4214 relplt = htab->elf.irelplt;
4215 }
4216
4217 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4218
4219 /* Get the index in the procedure linkage table which
4220 corresponds to this symbol. This is the index of this symbol
4221 in all the symbols for which we are making plt entries. The
4222 first entry in the procedure linkage table is reserved.
4223
4224 Get the offset into the .got table of the entry that
4225 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4226 bytes. The first three are reserved for the dynamic linker.
4227
4228 For static executables, we don't reserve anything. */
4229
4230 if (plt == htab->elf.splt)
4231 {
4232 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4233 - htab->plt.has_plt0);
4234 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4235 }
4236 else
4237 {
4238 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4239 got_offset = got_offset * GOT_ENTRY_SIZE;
4240 }
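/* For example, with the usual 16-byte lazy PLT entries, PLT0 present and
   GOT_ENTRY_SIZE of 8, a symbol at h->plt.offset 0x20 is PLT index 1 and
   is assigned .got.plt offset (1 + 3) * 8 = 32, skipping the three
   reserved entries.  */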
4241
4242 /* Fill in the entry in the procedure linkage table. */
4243 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4244 htab->plt.plt_entry_size);
4245 if (use_plt_second)
4246 {
4247 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4248 htab->non_lazy_plt->plt_entry,
4249 htab->non_lazy_plt->plt_entry_size);
4250
4251 resolved_plt = htab->plt_second;
4252 plt_offset = eh->plt_second.offset;
4253 }
4254 else
4255 {
4256 resolved_plt = plt;
4257 plt_offset = h->plt.offset;
4258 }
4259
4260 /* Insert the relocation positions of the plt section. */
4261
4262 /* Put in the offset for the PC-relative instruction referring to the
4263 GOT entry, subtracting the size of that instruction. */
4264 plt_got_pcrel_offset = (gotplt->output_section->vma
4265 + gotplt->output_offset
4266 + got_offset
4267 - resolved_plt->output_section->vma
4268 - resolved_plt->output_offset
4269 - plt_offset
4270 - htab->plt.plt_got_insn_size);
4271
4272 /* Check PC-relative offset overflow in PLT entry. */
4273 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4274 /* xgettext:c-format */
4275 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4276 output_bfd, h->root.root.string);
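/* Adding 0x80000000 maps the signed 32-bit range [-0x80000000, 0x7fffffff]
   onto [0, 0xffffffff], so the test above rejects any displacement that
   does not fit the 32-bit field in the PLT entry.  */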
4277
4278 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4279 (resolved_plt->contents + plt_offset
4280 + htab->plt.plt_got_offset));
4281
4282 /* Fill in the entry in the global offset table; initially this
4283 points to the second part of the PLT entry. Leave the entry
4284 as zero for an undefined weak symbol in PIE; there is no PLT
4285 relocation against an undefined weak symbol in PIE. */
4286 if (!local_undefweak)
4287 {
4288 if (htab->plt.has_plt0)
4289 bfd_put_64 (output_bfd, (plt->output_section->vma
4290 + plt->output_offset
4291 + h->plt.offset
4292 + htab->lazy_plt->plt_lazy_offset),
4293 gotplt->contents + got_offset);
4294
4295 /* Fill in the entry in the .rela.plt section. */
4296 rela.r_offset = (gotplt->output_section->vma
4297 + gotplt->output_offset
4298 + got_offset);
4299 if (PLT_LOCAL_IFUNC_P (info, h))
4300 {
4301 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4302 h->root.root.string,
4303 h->root.u.def.section->owner);
4304
4305 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4306 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4307 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4308 rela.r_addend = (h->root.u.def.value
4309 + h->root.u.def.section->output_section->vma
4310 + h->root.u.def.section->output_offset);
4311 /* R_X86_64_IRELATIVE comes last. */
4312 plt_index = htab->next_irelative_index--;
4313 }
4314 else
4315 {
4316 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4317 rela.r_addend = 0;
4318 plt_index = htab->next_jump_slot_index++;
4319 }
4320
4321 /* Don't fill in the second and third slots of the PLT entry
4322 for static executables or when there is no PLT0. */
4323 if (plt == htab->elf.splt && htab->plt.has_plt0)
4324 {
4325 bfd_vma plt0_offset
4326 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4327
4328 /* Put relocation index. */
4329 bfd_put_32 (output_bfd, plt_index,
4330 (plt->contents + h->plt.offset
4331 + htab->lazy_plt->plt_reloc_offset));
4332
4333 /* Put in the offset for jmp .PLT0 and check for overflow. We don't
4334 check relocation index for overflow since branch displacement
4335 will overflow first. */
4336 if (plt0_offset > 0x80000000)
4337 /* xgettext:c-format */
4338 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4339 output_bfd, h->root.root.string);
4340 bfd_put_32 (output_bfd, - plt0_offset,
4341 (plt->contents + h->plt.offset
4342 + htab->lazy_plt->plt_plt_offset));
4343 }
4344
4345 bed = get_elf_backend_data (output_bfd);
4346 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4347 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4348 }
4349 }
4350 else if (eh->plt_got.offset != (bfd_vma) -1)
4351 {
4352 bfd_vma got_offset, plt_offset;
4353 asection *plt, *got;
4354 bfd_boolean got_after_plt;
4355 int32_t got_pcrel_offset;
4356
4357 /* Set the entry in the GOT procedure linkage table. */
4358 plt = htab->plt_got;
4359 got = htab->elf.sgot;
4360 got_offset = h->got.offset;
4361
4362 if (got_offset == (bfd_vma) -1
4363 || (h->type == STT_GNU_IFUNC && h->def_regular)
4364 || plt == NULL
4365 || got == NULL)
4366 abort ();
4367
4368 /* Use the non-lazy PLT entry template for the GOT PLT since they
4369 are identical. */
4370 /* Fill in the entry in the GOT procedure linkage table. */
4371 plt_offset = eh->plt_got.offset;
4372 memcpy (plt->contents + plt_offset,
4373 htab->non_lazy_plt->plt_entry,
4374 htab->non_lazy_plt->plt_entry_size);
4375
4376 /* Put in the offset for the PC-relative instruction referring to
4377 the GOT entry, subtracting the size of that instruction. */
4378 got_pcrel_offset = (got->output_section->vma
4379 + got->output_offset
4380 + got_offset
4381 - plt->output_section->vma
4382 - plt->output_offset
4383 - plt_offset
4384 - htab->non_lazy_plt->plt_got_insn_size);
4385
4386 /* Check PC-relative offset overflow in GOT PLT entry. */
4387 got_after_plt = got->output_section->vma > plt->output_section->vma;
4388 if ((got_after_plt && got_pcrel_offset < 0)
4389 || (!got_after_plt && got_pcrel_offset > 0))
4390 /* xgettext:c-format */
4391 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4392 output_bfd, h->root.root.string);
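/* got_pcrel_offset is an int32_t, so the truncation to 32 bits has already
   happened above; if the GOT lies above the PLT the true displacement must
   be positive (and vice versa), so a result with the wrong sign indicates
   overflow.  */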
4393
4394 bfd_put_32 (output_bfd, got_pcrel_offset,
4395 (plt->contents + plt_offset
4396 + htab->non_lazy_plt->plt_got_offset));
4397 }
4398
4399 if (!local_undefweak
4400 && !h->def_regular
4401 && (h->plt.offset != (bfd_vma) -1
4402 || eh->plt_got.offset != (bfd_vma) -1))
4403 {
4404 /* Mark the symbol as undefined, rather than as defined in
4405 the .plt section. Leave the value if there were any
4406 relocations where pointer equality matters (this is a clue
4407 for the dynamic linker, to make function pointer
4408 comparisons work between an application and shared
4409 library), otherwise set it to zero. If a function is only
4410 called from a binary, there is no need to slow down
4411 shared libraries because of that. */
4412 sym->st_shndx = SHN_UNDEF;
4413 if (!h->pointer_equality_needed)
4414 sym->st_value = 0;
4415 }
4416
4417 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4418
4419 /* Don't generate a dynamic GOT relocation against an undefined
4420 weak symbol in an executable. */
4421 if (h->got.offset != (bfd_vma) -1
4422 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4423 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4424 && !local_undefweak)
4425 {
4426 Elf_Internal_Rela rela;
4427 asection *relgot = htab->elf.srelgot;
4428
4429 /* This symbol has an entry in the global offset table. Set it
4430 up. */
4431 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4432 abort ();
4433
4434 rela.r_offset = (htab->elf.sgot->output_section->vma
4435 + htab->elf.sgot->output_offset
4436 + (h->got.offset &~ (bfd_vma) 1));
4437
4438 /* If this is a static link, or it is a -Bsymbolic link and the
4439 symbol is defined locally or was forced to be local because
4440 of a version file, we just want to emit a RELATIVE reloc.
4441 The entry in the global offset table will already have been
4442 initialized in the relocate_section function. */
4443 if (h->def_regular
4444 && h->type == STT_GNU_IFUNC)
4445 {
4446 if (h->plt.offset == (bfd_vma) -1)
4447 {
4448 /* STT_GNU_IFUNC is referenced without PLT. */
4449 if (htab->elf.splt == NULL)
4450 {
4451 /* Use the .rel[a].iplt section to store .got relocations
4452 in a static executable. */
4453 relgot = htab->elf.irelplt;
4454 }
4455 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4456 {
4457 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4458 h->root.root.string,
4459 h->root.u.def.section->owner);
4460
4461 rela.r_info = htab->r_info (0,
4462 R_X86_64_IRELATIVE);
4463 rela.r_addend = (h->root.u.def.value
4464 + h->root.u.def.section->output_section->vma
4465 + h->root.u.def.section->output_offset);
4466 }
4467 else
4468 goto do_glob_dat;
4469 }
4470 else if (bfd_link_pic (info))
4471 {
4472 /* Generate R_X86_64_GLOB_DAT. */
4473 goto do_glob_dat;
4474 }
4475 else
4476 {
4477 asection *plt;
4478 bfd_vma plt_offset;
4479
4480 if (!h->pointer_equality_needed)
4481 abort ();
4482
4483 /* For a non-shared object, we can't use .got.plt, which
4484 contains the real function address, if we need pointer
4485 equality. We load the GOT entry with the PLT entry instead. */
4486 if (htab->plt_second != NULL)
4487 {
4488 plt = htab->plt_second;
4489 plt_offset = eh->plt_second.offset;
4490 }
4491 else
4492 {
4493 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4494 plt_offset = h->plt.offset;
4495 }
4496 bfd_put_64 (output_bfd, (plt->output_section->vma
4497 + plt->output_offset
4498 + plt_offset),
4499 htab->elf.sgot->contents + h->got.offset);
4500 return TRUE;
4501 }
4502 }
4503 else if (bfd_link_pic (info)
4504 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4505 {
4506 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4507 return FALSE;
4508 BFD_ASSERT((h->got.offset & 1) != 0);
4509 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4510 rela.r_addend = (h->root.u.def.value
4511 + h->root.u.def.section->output_section->vma
4512 + h->root.u.def.section->output_offset);
4513 }
4514 else
4515 {
4516 BFD_ASSERT((h->got.offset & 1) == 0);
4517 do_glob_dat:
4518 bfd_put_64 (output_bfd, (bfd_vma) 0,
4519 htab->elf.sgot->contents + h->got.offset);
4520 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4521 rela.r_addend = 0;
4522 }
4523
4524 elf_append_rela (output_bfd, relgot, &rela);
4525 }
4526
4527 if (h->needs_copy)
4528 {
4529 Elf_Internal_Rela rela;
4530 asection *s;
4531
4532 /* This symbol needs a copy reloc. Set it up. */
4533 VERIFY_COPY_RELOC (h, htab)
4534
4535 rela.r_offset = (h->root.u.def.value
4536 + h->root.u.def.section->output_section->vma
4537 + h->root.u.def.section->output_offset);
4538 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4539 rela.r_addend = 0;
4540 if (h->root.u.def.section == htab->elf.sdynrelro)
4541 s = htab->elf.sreldynrelro;
4542 else
4543 s = htab->elf.srelbss;
4544 elf_append_rela (output_bfd, s, &rela);
4545 }
4546
4547 return TRUE;
4548 }
4549
4550 /* Finish up local dynamic symbol handling. We set the contents of
4551 various dynamic sections here. */
4552
4553 static bfd_boolean
4554 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4555 {
4556 struct elf_link_hash_entry *h
4557 = (struct elf_link_hash_entry *) *slot;
4558 struct bfd_link_info *info
4559 = (struct bfd_link_info *) inf;
4560
4561 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4562 info, h, NULL);
4563 }
4564
4565 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4566 here since an undefined weak symbol may not be dynamic, so
4567 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4568
4569 static bfd_boolean
4570 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4571 void *inf)
4572 {
4573 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4574 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4575
4576 if (h->root.type != bfd_link_hash_undefweak
4577 || h->dynindx != -1)
4578 return TRUE;
4579
4580 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4581 info, h, NULL);
4582 }
4583
4584 /* Used to decide how to sort relocs in an optimal manner for the
4585 dynamic linker, before writing them out. */
4586
4587 static enum elf_reloc_type_class
4588 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4589 const asection *rel_sec ATTRIBUTE_UNUSED,
4590 const Elf_Internal_Rela *rela)
4591 {
4592 bfd *abfd = info->output_bfd;
4593 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4594 struct elf_x86_link_hash_table *htab
4595 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4596
4597 if (htab->elf.dynsym != NULL
4598 && htab->elf.dynsym->contents != NULL)
4599 {
4600 /* Check relocation against STT_GNU_IFUNC symbol if there are
4601 dynamic symbols. */
4602 unsigned long r_symndx = htab->r_sym (rela->r_info);
4603 if (r_symndx != STN_UNDEF)
4604 {
4605 Elf_Internal_Sym sym;
4606 if (!bed->s->swap_symbol_in (abfd,
4607 (htab->elf.dynsym->contents
4608 + r_symndx * bed->s->sizeof_sym),
4609 0, &sym))
4610 abort ();
4611
4612 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4613 return reloc_class_ifunc;
4614 }
4615 }
4616
4617 switch ((int) ELF32_R_TYPE (rela->r_info))
4618 {
4619 case R_X86_64_IRELATIVE:
4620 return reloc_class_ifunc;
4621 case R_X86_64_RELATIVE:
4622 case R_X86_64_RELATIVE64:
4623 return reloc_class_relative;
4624 case R_X86_64_JUMP_SLOT:
4625 return reloc_class_plt;
4626 case R_X86_64_COPY:
4627 return reloc_class_copy;
4628 default:
4629 return reloc_class_normal;
4630 }
4631 }
4632
4633 /* Finish up the dynamic sections. */
4634
4635 static bfd_boolean
4636 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4637 struct bfd_link_info *info)
4638 {
4639 struct elf_x86_link_hash_table *htab;
4640
4641 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4642 if (htab == NULL)
4643 return FALSE;
4644
4645 if (! htab->elf.dynamic_sections_created)
4646 return TRUE;
4647
4648 if (htab->elf.splt && htab->elf.splt->size > 0)
4649 {
4650 elf_section_data (htab->elf.splt->output_section)
4651 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4652
4653 if (htab->plt.has_plt0)
4654 {
4655 /* Fill in the special first entry in the procedure linkage
4656 table. */
4657 memcpy (htab->elf.splt->contents,
4658 htab->lazy_plt->plt0_entry,
4659 htab->lazy_plt->plt0_entry_size);
4660 /* Add the offset for pushq GOT+8(%rip); since the instruction
4661 uses 6 bytes, subtract this value. */
4662 bfd_put_32 (output_bfd,
4663 (htab->elf.sgotplt->output_section->vma
4664 + htab->elf.sgotplt->output_offset
4665 + 8
4666 - htab->elf.splt->output_section->vma
4667 - htab->elf.splt->output_offset
4668 - 6),
4669 (htab->elf.splt->contents
4670 + htab->lazy_plt->plt0_got1_offset));
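/* With the standard lazy PLT0, pushq GOT+8(%rip) is encoded as
   ff 35 <disp32> starting at offset 0, and the displacement is relative to
   the end of that 6-byte instruction, hence the subtraction of 6 above.  */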
4671 /* Add offset for the PC-relative instruction accessing
4672 GOT+16, subtracting the offset to the end of that
4673 instruction. */
4674 bfd_put_32 (output_bfd,
4675 (htab->elf.sgotplt->output_section->vma
4676 + htab->elf.sgotplt->output_offset
4677 + 16
4678 - htab->elf.splt->output_section->vma
4679 - htab->elf.splt->output_offset
4680 - htab->lazy_plt->plt0_got2_insn_end),
4681 (htab->elf.splt->contents
4682 + htab->lazy_plt->plt0_got2_offset));
4683 }
4684
4685 if (htab->tlsdesc_plt)
4686 {
4687 bfd_put_64 (output_bfd, (bfd_vma) 0,
4688 htab->elf.sgot->contents + htab->tlsdesc_got);
4689
4690 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4691 htab->lazy_plt->plt_tlsdesc_entry,
4692 htab->lazy_plt->plt_tlsdesc_entry_size);
4693
4694 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4695 bytes and the instruction uses 6 bytes, subtract these
4696 values. */
4697 bfd_put_32 (output_bfd,
4698 (htab->elf.sgotplt->output_section->vma
4699 + htab->elf.sgotplt->output_offset
4700 + 8
4701 - htab->elf.splt->output_section->vma
4702 - htab->elf.splt->output_offset
4703 - htab->tlsdesc_plt
4704 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4705 (htab->elf.splt->contents
4706 + htab->tlsdesc_plt
4707 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4708 /* Add offset for indirect branch via GOT+TDG, where TDG
4709 stands for htab->tlsdesc_got, subtracting the offset
4710 to the end of that instruction. */
4711 bfd_put_32 (output_bfd,
4712 (htab->elf.sgot->output_section->vma
4713 + htab->elf.sgot->output_offset
4714 + htab->tlsdesc_got
4715 - htab->elf.splt->output_section->vma
4716 - htab->elf.splt->output_offset
4717 - htab->tlsdesc_plt
4718 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4719 (htab->elf.splt->contents
4720 + htab->tlsdesc_plt
4721 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4722 }
4723 }
4724
4725 /* Fill PLT entries for undefined weak symbols in PIE. */
4726 if (bfd_link_pie (info))
4727 bfd_hash_traverse (&info->hash->table,
4728 elf_x86_64_pie_finish_undefweak_symbol,
4729 info);
4730
4731 return TRUE;
4732 }
4733
4734 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4735 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4736 It has to be done before elf_link_sort_relocs is called so that
4737 dynamic relocations are properly sorted. */
4738
4739 static bfd_boolean
4740 elf_x86_64_output_arch_local_syms
4741 (bfd *output_bfd ATTRIBUTE_UNUSED,
4742 struct bfd_link_info *info,
4743 void *flaginfo ATTRIBUTE_UNUSED,
4744 int (*func) (void *, const char *,
4745 Elf_Internal_Sym *,
4746 asection *,
4747 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4748 {
4749 struct elf_x86_link_hash_table *htab
4750 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4751 if (htab == NULL)
4752 return FALSE;
4753
4754 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4755 htab_traverse (htab->loc_hash_table,
4756 elf_x86_64_finish_local_dynamic_symbol,
4757 info);
4758
4759 return TRUE;
4760 }
4761
4762 /* Forward declaration. */
4763 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4764
4765 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4766 dynamic relocations. */
4767
4768 static long
4769 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4770 long symcount ATTRIBUTE_UNUSED,
4771 asymbol **syms ATTRIBUTE_UNUSED,
4772 long dynsymcount,
4773 asymbol **dynsyms,
4774 asymbol **ret)
4775 {
4776 long count, i, n;
4777 int j;
4778 bfd_byte *plt_contents;
4779 long relsize;
4780 const struct elf_x86_lazy_plt_layout *lazy_plt;
4781 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4782 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4783 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4784 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4785 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4786 asection *plt;
4787 enum elf_x86_plt_type plt_type;
4788 struct elf_x86_plt plts[] =
4789 {
4790 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4791 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4792 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4793 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4794 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4795 };
4796
4797 *ret = NULL;
4798
4799 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4800 return 0;
4801
4802 if (dynsymcount <= 0)
4803 return 0;
4804
4805 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4806 if (relsize <= 0)
4807 return -1;
4808
4809 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4810 {
4811 lazy_plt = &elf_x86_64_lazy_plt;
4812 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4813 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4814 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4815 if (ABI_64_P (abfd))
4816 {
4817 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4818 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4819 }
4820 else
4821 {
4822 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4823 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4824 }
4825 }
4826 else
4827 {
4828 lazy_plt = &elf_x86_64_nacl_plt;
4829 non_lazy_plt = NULL;
4830 lazy_bnd_plt = NULL;
4831 non_lazy_bnd_plt = NULL;
4832 lazy_ibt_plt = NULL;
4833 non_lazy_ibt_plt = NULL;
4834 }
4835
4836 count = 0;
4837 for (j = 0; plts[j].name != NULL; j++)
4838 {
4839 plt = bfd_get_section_by_name (abfd, plts[j].name);
4840 if (plt == NULL || plt->size == 0)
4841 continue;
4842
4843 /* Get the PLT section contents. */
4844 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4845 if (plt_contents == NULL)
4846 break;
4847 if (!bfd_get_section_contents (abfd, (asection *) plt,
4848 plt_contents, 0, plt->size))
4849 {
4850 free (plt_contents);
4851 break;
4852 }
4853
4854 /* Check what kind of PLT it is. */
4855 plt_type = plt_unknown;
4856 if (plts[j].type == plt_unknown
4857 && (plt->size >= (lazy_plt->plt_entry_size
4858 + lazy_plt->plt_entry_size)))
4859 {
4860 /* Match lazy PLT first. Need to check the first two
4861 instructions. */
4862 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4863 lazy_plt->plt0_got1_offset) == 0)
4864 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4865 2) == 0))
4866 plt_type = plt_lazy;
4867 else if (lazy_bnd_plt != NULL
4868 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4869 lazy_bnd_plt->plt0_got1_offset) == 0)
4870 && (memcmp (plt_contents + 6,
4871 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4872 {
4873 plt_type = plt_lazy | plt_second;
4874 /* The first entry in the lazy IBT PLT is the same as in the
4875 lazy BND PLT. */
4876 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4877 lazy_ibt_plt->plt_entry,
4878 lazy_ibt_plt->plt_got_offset) == 0))
4879 lazy_plt = lazy_ibt_plt;
4880 else
4881 lazy_plt = lazy_bnd_plt;
4882 }
4883 }
4884
4885 if (non_lazy_plt != NULL
4886 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4887 && plt->size >= non_lazy_plt->plt_entry_size)
4888 {
4889 /* Match non-lazy PLT. */
4890 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4891 non_lazy_plt->plt_got_offset) == 0)
4892 plt_type = plt_non_lazy;
4893 }
4894
4895 if (plt_type == plt_unknown || plt_type == plt_second)
4896 {
4897 if (non_lazy_bnd_plt != NULL
4898 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4899 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4900 non_lazy_bnd_plt->plt_got_offset) == 0))
4901 {
4902 /* Match BND PLT. */
4903 plt_type = plt_second;
4904 non_lazy_plt = non_lazy_bnd_plt;
4905 }
4906 else if (non_lazy_ibt_plt != NULL
4907 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4908 && (memcmp (plt_contents,
4909 non_lazy_ibt_plt->plt_entry,
4910 non_lazy_ibt_plt->plt_got_offset) == 0))
4911 {
4912 /* Match IBT PLT. */
4913 plt_type = plt_second;
4914 non_lazy_plt = non_lazy_ibt_plt;
4915 }
4916 }
4917
4918 if (plt_type == plt_unknown)
4919 {
4920 free (plt_contents);
4921 continue;
4922 }
4923
4924 plts[j].sec = plt;
4925 plts[j].type = plt_type;
4926
4927 if ((plt_type & plt_lazy))
4928 {
4929 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4930 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4931 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4932 /* Skip PLT0 in lazy PLT. */
4933 i = 1;
4934 }
4935 else
4936 {
4937 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4938 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4939 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4940 i = 0;
4941 }
4942
4943 /* Skip lazy PLT when the second PLT is used. */
4944 if (plt_type == (plt_lazy | plt_second))
4945 plts[j].count = 0;
4946 else
4947 {
4948 n = plt->size / plts[j].plt_entry_size;
4949 plts[j].count = n;
4950 count += n - i;
4951 }
4952
4953 plts[j].contents = plt_contents;
4954 }
4955
4956 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4957 (bfd_vma) 0, plts, dynsyms,
4958 ret);
4959 }
4960
4961 /* Handle an x86-64 specific section when reading an object file. This
4962 is called when elfcode.h finds a section with an unknown type. */
4963
4964 static bfd_boolean
4965 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4966 const char *name, int shindex)
4967 {
4968 if (hdr->sh_type != SHT_X86_64_UNWIND)
4969 return FALSE;
4970
4971 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4972 return FALSE;
4973
4974 return TRUE;
4975 }
4976
4977 /* Hook called by the linker routine which adds symbols from an object
4978 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4979 of .bss. */
4980
4981 static bfd_boolean
4982 elf_x86_64_add_symbol_hook (bfd *abfd,
4983 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4984 Elf_Internal_Sym *sym,
4985 const char **namep ATTRIBUTE_UNUSED,
4986 flagword *flagsp ATTRIBUTE_UNUSED,
4987 asection **secp,
4988 bfd_vma *valp)
4989 {
4990 asection *lcomm;
4991
4992 switch (sym->st_shndx)
4993 {
4994 case SHN_X86_64_LCOMMON:
4995 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4996 if (lcomm == NULL)
4997 {
4998 lcomm = bfd_make_section_with_flags (abfd,
4999 "LARGE_COMMON",
5000 (SEC_ALLOC
5001 | SEC_IS_COMMON
5002 | SEC_LINKER_CREATED));
5003 if (lcomm == NULL)
5004 return FALSE;
5005 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5006 }
5007 *secp = lcomm;
5008 *valp = sym->st_size;
5009 return TRUE;
5010 }
5011
5012 return TRUE;
5013 }
5014
5015
5016 /* Given a BFD section, try to locate the corresponding ELF section
5017 index. */
5018
5019 static bfd_boolean
5020 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5021 asection *sec, int *index_return)
5022 {
5023 if (sec == &_bfd_elf_large_com_section)
5024 {
5025 *index_return = SHN_X86_64_LCOMMON;
5026 return TRUE;
5027 }
5028 return FALSE;
5029 }
5030
5031 /* Process a symbol. */
5032
5033 static void
5034 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5035 asymbol *asym)
5036 {
5037 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5038
5039 switch (elfsym->internal_elf_sym.st_shndx)
5040 {
5041 case SHN_X86_64_LCOMMON:
5042 asym->section = &_bfd_elf_large_com_section;
5043 asym->value = elfsym->internal_elf_sym.st_size;
5044 /* Common symbol doesn't set BSF_GLOBAL. */
5045 asym->flags &= ~BSF_GLOBAL;
5046 break;
5047 }
5048 }
5049
5050 static bfd_boolean
5051 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5052 {
5053 return (sym->st_shndx == SHN_COMMON
5054 || sym->st_shndx == SHN_X86_64_LCOMMON);
5055 }
5056
5057 static unsigned int
5058 elf_x86_64_common_section_index (asection *sec)
5059 {
5060 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5061 return SHN_COMMON;
5062 else
5063 return SHN_X86_64_LCOMMON;
5064 }
5065
5066 static asection *
5067 elf_x86_64_common_section (asection *sec)
5068 {
5069 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5070 return bfd_com_section_ptr;
5071 else
5072 return &_bfd_elf_large_com_section;
5073 }
5074
5075 static bfd_boolean
5076 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5077 const Elf_Internal_Sym *sym,
5078 asection **psec,
5079 bfd_boolean newdef,
5080 bfd_boolean olddef,
5081 bfd *oldbfd,
5082 const asection *oldsec)
5083 {
5084 /* A normal common symbol and a large common symbol result in a
5085 normal common symbol. We turn the large common symbol into a
5086 normal one. */
5087 if (!olddef
5088 && h->root.type == bfd_link_hash_common
5089 && !newdef
5090 && bfd_is_com_section (*psec)
5091 && oldsec != *psec)
5092 {
5093 if (sym->st_shndx == SHN_COMMON
5094 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5095 {
5096 h->root.u.c.p->section
5097 = bfd_make_section_old_way (oldbfd, "COMMON");
5098 h->root.u.c.p->section->flags = SEC_ALLOC;
5099 }
5100 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5101 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5102 *psec = bfd_com_section_ptr;
5103 }
5104
5105 return TRUE;
5106 }
5107
5108 static int
5109 elf_x86_64_additional_program_headers (bfd *abfd,
5110 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5111 {
5112 asection *s;
5113 int count = 0;
5114
5115 /* Check to see if we need a large readonly segment. */
5116 s = bfd_get_section_by_name (abfd, ".lrodata");
5117 if (s && (s->flags & SEC_LOAD))
5118 count++;
5119
5120 /* Check to see if we need a large data segment. Since the .lbss
5121 section is placed right after the .bss section, there should be no
5122 need for a large data segment just because of .lbss. */
5123 s = bfd_get_section_by_name (abfd, ".ldata");
5124 if (s && (s->flags & SEC_LOAD))
5125 count++;
5126
5127 return count;
5128 }
5129
5130 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5131
5132 static bfd_boolean
5133 elf_x86_64_relocs_compatible (const bfd_target *input,
5134 const bfd_target *output)
5135 {
5136 return ((xvec_get_elf_backend_data (input)->s->elfclass
5137 == xvec_get_elf_backend_data (output)->s->elfclass)
5138 && _bfd_elf_relocs_compatible (input, output));
5139 }
5140
5141 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5142 with GNU properties if found. Otherwise, return NULL. */
5143
5144 static bfd *
5145 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5146 {
5147 struct elf_x86_init_table init_table;
5148
5149 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5150 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5151 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5152 != (int) R_X86_64_GNU_VTINHERIT)
5153 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5154 != (int) R_X86_64_GNU_VTENTRY))
5155 abort ();
5156
5157 /* This is unused for x86-64. */
5158 init_table.plt0_pad_byte = 0x90;
5159
5160 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5161 {
5162 const struct elf_backend_data *bed
5163 = get_elf_backend_data (info->output_bfd);
5164 struct elf_x86_link_hash_table *htab
5165 = elf_x86_hash_table (info, bed->target_id);
5166 if (!htab)
5167 abort ();
5168 if (htab->params->bndplt)
5169 {
5170 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5171 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5172 }
5173 else
5174 {
5175 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5176 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5177 }
5178
5179 if (ABI_64_P (info->output_bfd))
5180 {
5181 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5182 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5183 }
5184 else
5185 {
5186 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5187 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5188 }
5189 }
5190 else
5191 {
5192 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5193 init_table.non_lazy_plt = NULL;
5194 init_table.lazy_ibt_plt = NULL;
5195 init_table.non_lazy_ibt_plt = NULL;
5196 }
5197
5198 if (ABI_64_P (info->output_bfd))
5199 {
5200 init_table.r_info = elf64_r_info;
5201 init_table.r_sym = elf64_r_sym;
5202 }
5203 else
5204 {
5205 init_table.r_info = elf32_r_info;
5206 init_table.r_sym = elf32_r_sym;
5207 }
5208
5209 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5210 }
5211
5212 static const struct bfd_elf_special_section
5213 elf_x86_64_special_sections[]=
5214 {
5215 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5216 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5217 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5218 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5219 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5220 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5221 { NULL, 0, 0, 0, 0 }
5222 };
5223
5224 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5225 #define TARGET_LITTLE_NAME "elf64-x86-64"
5226 #define ELF_ARCH bfd_arch_i386
5227 #define ELF_TARGET_ID X86_64_ELF_DATA
5228 #define ELF_MACHINE_CODE EM_X86_64
5229 #if DEFAULT_LD_Z_SEPARATE_CODE
5230 # define ELF_MAXPAGESIZE 0x1000
5231 #else
5232 # define ELF_MAXPAGESIZE 0x200000
5233 #endif
5234 #define ELF_MINPAGESIZE 0x1000
5235 #define ELF_COMMONPAGESIZE 0x1000
5236
5237 #define elf_backend_can_gc_sections 1
5238 #define elf_backend_can_refcount 1
5239 #define elf_backend_want_got_plt 1
5240 #define elf_backend_plt_readonly 1
5241 #define elf_backend_want_plt_sym 0
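/* The three reserved GOT entries conventionally hold the address of the
   _DYNAMIC section and the two words the dynamic linker uses for lazy PLT
   resolution (its link map and the resolver entry point).  */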
5242 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5243 #define elf_backend_rela_normal 1
5244 #define elf_backend_plt_alignment 4
5245 #define elf_backend_extern_protected_data 1
5246 #define elf_backend_caches_rawsize 1
5247 #define elf_backend_dtrel_excludes_plt 1
5248 #define elf_backend_want_dynrelro 1
5249
5250 #define elf_info_to_howto elf_x86_64_info_to_howto
5251
5252 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5253 #define bfd_elf64_bfd_reloc_name_lookup \
5254 elf_x86_64_reloc_name_lookup
5255
5256 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5257 #define elf_backend_check_relocs elf_x86_64_check_relocs
5258 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5259 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5260 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5261 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5262 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5263 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5264 #ifdef CORE_HEADER
5265 #define elf_backend_write_core_note elf_x86_64_write_core_note
5266 #endif
5267 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5268 #define elf_backend_relocate_section elf_x86_64_relocate_section
5269 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5270 #define elf_backend_object_p elf64_x86_64_elf_object_p
5271 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5272
5273 #define elf_backend_section_from_shdr \
5274 elf_x86_64_section_from_shdr
5275
5276 #define elf_backend_section_from_bfd_section \
5277 elf_x86_64_elf_section_from_bfd_section
5278 #define elf_backend_add_symbol_hook \
5279 elf_x86_64_add_symbol_hook
5280 #define elf_backend_symbol_processing \
5281 elf_x86_64_symbol_processing
5282 #define elf_backend_common_section_index \
5283 elf_x86_64_common_section_index
5284 #define elf_backend_common_section \
5285 elf_x86_64_common_section
5286 #define elf_backend_common_definition \
5287 elf_x86_64_common_definition
5288 #define elf_backend_merge_symbol \
5289 elf_x86_64_merge_symbol
5290 #define elf_backend_special_sections \
5291 elf_x86_64_special_sections
5292 #define elf_backend_additional_program_headers \
5293 elf_x86_64_additional_program_headers
5294 #define elf_backend_setup_gnu_properties \
5295 elf_x86_64_link_setup_gnu_properties
5296 #define elf_backend_hide_symbol \
5297 _bfd_x86_elf_hide_symbol
5298
5299 #undef elf64_bed
5300 #define elf64_bed elf64_x86_64_bed
5301
5302 #include "elf64-target.h"
5303
5304 /* CloudABI support. */
5305
5306 #undef TARGET_LITTLE_SYM
5307 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5308 #undef TARGET_LITTLE_NAME
5309 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5310
5311 #undef ELF_OSABI
5312 #define ELF_OSABI ELFOSABI_CLOUDABI
5313
5314 #undef elf64_bed
5315 #define elf64_bed elf64_x86_64_cloudabi_bed
5316
5317 #include "elf64-target.h"
5318
5319 /* FreeBSD support. */
5320
5321 #undef TARGET_LITTLE_SYM
5322 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5323 #undef TARGET_LITTLE_NAME
5324 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5325
5326 #undef ELF_OSABI
5327 #define ELF_OSABI ELFOSABI_FREEBSD
5328
5329 #undef elf64_bed
5330 #define elf64_bed elf64_x86_64_fbsd_bed
5331
5332 #include "elf64-target.h"
5333
5334 /* Solaris 2 support. */
5335
5336 #undef TARGET_LITTLE_SYM
5337 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5338 #undef TARGET_LITTLE_NAME
5339 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5340
5341 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5342 {
5343 is_solaris /* os */
5344 };
5345
5346 #undef elf_backend_arch_data
5347 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5348
5349 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5350 objects won't be recognized. */
5351 #undef ELF_OSABI
5352
5353 #undef elf64_bed
5354 #define elf64_bed elf64_x86_64_sol2_bed
5355
5356 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5357 boundary. */
5358 #undef elf_backend_static_tls_alignment
5359 #define elf_backend_static_tls_alignment 16
5360
5361 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5362
5363 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5364 File, p.63. */
5365 #undef elf_backend_want_plt_sym
5366 #define elf_backend_want_plt_sym 1
5367
5368 #undef elf_backend_strtab_flags
5369 #define elf_backend_strtab_flags SHF_STRINGS
5370
5371 static bfd_boolean
5372 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5373 bfd *obfd ATTRIBUTE_UNUSED,
5374 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5375 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5376 {
5377 /* PR 19938: FIXME: Need to add code for setting the sh_info
5378 and sh_link fields of Solaris specific section types. */
5379 return FALSE;
5380 }
5381
5382 #undef elf_backend_copy_special_section_fields
5383 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5384
5385 #include "elf64-target.h"
5386
5387 /* Native Client support. */
5388
5389 static bfd_boolean
5390 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5391 {
5392 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5393 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5394 return TRUE;
5395 }
5396
5397 #undef TARGET_LITTLE_SYM
5398 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5399 #undef TARGET_LITTLE_NAME
5400 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5401 #undef elf64_bed
5402 #define elf64_bed elf64_x86_64_nacl_bed
5403
5404 #undef ELF_MAXPAGESIZE
5405 #undef ELF_MINPAGESIZE
5406 #undef ELF_COMMONPAGESIZE
5407 #define ELF_MAXPAGESIZE 0x10000
5408 #define ELF_MINPAGESIZE 0x10000
5409 #define ELF_COMMONPAGESIZE 0x10000
5410
5411 /* Restore defaults. */
5412 #undef ELF_OSABI
5413 #undef elf_backend_static_tls_alignment
5414 #undef elf_backend_want_plt_sym
5415 #define elf_backend_want_plt_sym 0
5416 #undef elf_backend_strtab_flags
5417 #undef elf_backend_copy_special_section_fields
5418
5419 /* NaCl uses substantially different PLT entries for the same effects. */
5420
5421 #undef elf_backend_plt_alignment
5422 #define elf_backend_plt_alignment 5
5423 #define NACL_PLT_ENTRY_SIZE 64
5424 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5425
5426 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5427 {
5428 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5429 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5430 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5431 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5432 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5433
5434 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5435 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5436
5437 /* 32 bytes of nop to pad out to the standard size. */
5438 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5439 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5440 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5441 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5442 0x66, /* excess data16 prefix */
5443 0x90 /* nop */
5444 };
5445
5446 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5447 {
5448 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5449 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5450 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5451 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5452
5453 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5454 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5455 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5456
5457 /* Lazy GOT entries point here (32-byte aligned). */
5458 0x68, /* pushq immediate */
5459 0, 0, 0, 0, /* replaced with index into relocation table. */
5460 0xe9, /* jmp relative */
5461 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5462
5463 /* 22 bytes of nop to pad out to the standard size. */
5464 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5465 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5466 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5467 };
5468
5469 /* .eh_frame covering the .plt section. */
5470
5471 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5472 {
5473 #if (PLT_CIE_LENGTH != 20 \
5474 || PLT_FDE_LENGTH != 36 \
5475 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5476 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5477 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5478 #endif
5479 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5480 0, 0, 0, 0, /* CIE ID */
5481 1, /* CIE version */
5482 'z', 'R', 0, /* Augmentation string */
5483 1, /* Code alignment factor */
5484 0x78, /* Data alignment factor */
5485 16, /* Return address column */
5486 1, /* Augmentation size */
5487 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5488 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5489 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5490 DW_CFA_nop, DW_CFA_nop,
5491
5492 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5493 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5494 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5495 0, 0, 0, 0, /* .plt size goes here */
5496 0, /* Augmentation size */
5497 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5498 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5499 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5500 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5501 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5502 13, /* Block length */
5503 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5504 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5505 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5506 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5507 DW_CFA_nop, DW_CFA_nop
5508 };
5509
5510 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5511 {
5512 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5513 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5514 elf_x86_64_nacl_plt_entry, /* plt_entry */
5515 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5516 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5517 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5518 2, /* plt_tlsdesc_got1_offset */
5519 9, /* plt_tlsdesc_got2_offset */
5520 6, /* plt_tlsdesc_got1_insn_end */
5521 13, /* plt_tlsdesc_got2_insn_end */
5522 2, /* plt0_got1_offset */
5523 9, /* plt0_got2_offset */
5524 13, /* plt0_got2_insn_end */
5525 3, /* plt_got_offset */
5526 33, /* plt_reloc_offset */
5527 38, /* plt_plt_offset */
5528 7, /* plt_got_insn_size */
5529 42, /* plt_plt_insn_end */
5530 32, /* plt_lazy_offset */
5531 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5532 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5533 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5534 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5535 };
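/* For cross-reference with the entry bytes above: plt_got_offset 3 is
   the displacement field of the 7-byte "mov name@GOTPCREL(%rip),%r11",
   while plt_reloc_offset 33 and plt_plt_offset 38 are the immediates of
   the "pushq"/"jmp" pair in the lazy stub that begins at
   plt_lazy_offset 32.  */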
5536
5537 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5538 {
5539 is_nacl /* os */
5540 };
5541
5542 #undef elf_backend_arch_data
5543 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5544
5545 #undef elf_backend_object_p
5546 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5547 #undef elf_backend_modify_segment_map
5548 #define elf_backend_modify_segment_map nacl_modify_segment_map
5549 #undef elf_backend_modify_headers
5550 #define elf_backend_modify_headers nacl_modify_headers
5551 #undef elf_backend_final_write_processing
5552 #define elf_backend_final_write_processing nacl_final_write_processing
5553
5554 #include "elf64-target.h"
5555
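/* Each of the blocks below reuses the same BFD convention: redefine a
   handful of target macros (TARGET_LITTLE_SYM, ELF_ARCH, the
   elf_backend_* hooks, ...) and then re-include "elf64-target.h" or
   "elf32-target.h", which expands into a fresh target vector built from
   whatever macro values are in effect at that point.  */
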
5556 /* Native Client x32 support. */
5557
5558 static bfd_boolean
5559 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5560 {
5561 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5562 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5563 return TRUE;
5564 }
5565
5566 #undef TARGET_LITTLE_SYM
5567 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5568 #undef TARGET_LITTLE_NAME
5569 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5570 #undef elf32_bed
5571 #define elf32_bed elf32_x86_64_nacl_bed
5572
5573 #define bfd_elf32_bfd_reloc_type_lookup \
5574 elf_x86_64_reloc_type_lookup
5575 #define bfd_elf32_bfd_reloc_name_lookup \
5576 elf_x86_64_reloc_name_lookup
5577 #define bfd_elf32_get_synthetic_symtab \
5578 elf_x86_64_get_synthetic_symtab
5579
5580 #undef elf_backend_object_p
5581 #define elf_backend_object_p \
5582 elf32_x86_64_nacl_elf_object_p
5583
5584 #undef elf_backend_bfd_from_remote_memory
5585 #define elf_backend_bfd_from_remote_memory \
5586 _bfd_elf32_bfd_from_remote_memory
5587
5588 #undef elf_backend_size_info
5589 #define elf_backend_size_info \
5590 _bfd_elf32_size_info
5591
5592 #undef elf32_bed
5593 #define elf32_bed elf32_x86_64_bed
5594
5595 #include "elf32-target.h"
5596
5597 /* Restore defaults. */
5598 #undef elf_backend_object_p
5599 #define elf_backend_object_p elf64_x86_64_elf_object_p
5600 #undef elf_backend_bfd_from_remote_memory
5601 #undef elf_backend_size_info
5602 #undef elf_backend_modify_segment_map
5603 #undef elf_backend_modify_headers
5604 #undef elf_backend_final_write_processing
5605
5606 /* Intel L1OM support. */
5607
5608 static bfd_boolean
5609 elf64_l1om_elf_object_p (bfd *abfd)
5610 {
5611 /* Set the right machine number for an L1OM elf64 file. */
5612 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5613 return TRUE;
5614 }
5615
5616 #undef TARGET_LITTLE_SYM
5617 #define TARGET_LITTLE_SYM l1om_elf64_vec
5618 #undef TARGET_LITTLE_NAME
5619 #define TARGET_LITTLE_NAME "elf64-l1om"
5620 #undef ELF_ARCH
5621 #define ELF_ARCH bfd_arch_l1om
5622
5623 #undef ELF_MACHINE_CODE
5624 #define ELF_MACHINE_CODE EM_L1OM
5625
5626 #undef ELF_OSABI
5627
5628 #undef elf64_bed
5629 #define elf64_bed elf64_l1om_bed
5630
5631 #undef elf_backend_object_p
5632 #define elf_backend_object_p elf64_l1om_elf_object_p
5633
5634 /* Restore defaults. */
5635 #undef ELF_MAXPAGESIZE
5636 #undef ELF_MINPAGESIZE
5637 #undef ELF_COMMONPAGESIZE
5638 #if DEFAULT_LD_Z_SEPARATE_CODE
5639 # define ELF_MAXPAGESIZE 0x1000
5640 #else
5641 # define ELF_MAXPAGESIZE 0x200000
5642 #endif
5643 #define ELF_MINPAGESIZE 0x1000
5644 #define ELF_COMMONPAGESIZE 0x1000
5645 #undef elf_backend_plt_alignment
5646 #define elf_backend_plt_alignment 4
5647 #undef elf_backend_arch_data
5648 #define elf_backend_arch_data &elf_x86_64_arch_bed
5649
5650 #include "elf64-target.h"
5651
5652 /* FreeBSD L1OM support. */
5653
5654 #undef TARGET_LITTLE_SYM
5655 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5656 #undef TARGET_LITTLE_NAME
5657 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5658
5659 #undef ELF_OSABI
5660 #define ELF_OSABI ELFOSABI_FREEBSD
5661
5662 #undef elf64_bed
5663 #define elf64_bed elf64_l1om_fbsd_bed
5664
5665 #include "elf64-target.h"
5666
5667 /* Intel K1OM support. */
5668
5669 static bfd_boolean
5670 elf64_k1om_elf_object_p (bfd *abfd)
5671 {
5672   /* Set the right machine number for a K1OM elf64 file.  */
5673 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5674 return TRUE;
5675 }
5676
5677 #undef TARGET_LITTLE_SYM
5678 #define TARGET_LITTLE_SYM k1om_elf64_vec
5679 #undef TARGET_LITTLE_NAME
5680 #define TARGET_LITTLE_NAME "elf64-k1om"
5681 #undef ELF_ARCH
5682 #define ELF_ARCH bfd_arch_k1om
5683
5684 #undef ELF_MACHINE_CODE
5685 #define ELF_MACHINE_CODE EM_K1OM
5686
5687 #undef ELF_OSABI
5688
5689 #undef elf64_bed
5690 #define elf64_bed elf64_k1om_bed
5691
5692 #undef elf_backend_object_p
5693 #define elf_backend_object_p elf64_k1om_elf_object_p
5694
5695 #undef elf_backend_static_tls_alignment
5696
5697 #undef elf_backend_want_plt_sym
5698 #define elf_backend_want_plt_sym 0
5699
5700 #include "elf64-target.h"
5701
5702 /* FreeBSD K1OM support. */
5703
5704 #undef TARGET_LITTLE_SYM
5705 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5706 #undef TARGET_LITTLE_NAME
5707 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5708
5709 #undef ELF_OSABI
5710 #define ELF_OSABI ELFOSABI_FREEBSD
5711
5712 #undef elf64_bed
5713 #define elf64_bed elf64_k1om_fbsd_bed
5714
5715 #include "elf64-target.h"
5716
5717 /* 32-bit x86-64 support.  */
5718
5719 #undef TARGET_LITTLE_SYM
5720 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5721 #undef TARGET_LITTLE_NAME
5722 #define TARGET_LITTLE_NAME "elf32-x86-64"
5723 #undef elf32_bed
5724
5725 #undef ELF_ARCH
5726 #define ELF_ARCH bfd_arch_i386
5727
5728 #undef ELF_MACHINE_CODE
5729 #define ELF_MACHINE_CODE EM_X86_64
5730
5731 #undef ELF_OSABI
5732
5733 #undef elf_backend_object_p
5734 #define elf_backend_object_p \
5735 elf32_x86_64_elf_object_p
5736
5737 #undef elf_backend_bfd_from_remote_memory
5738 #define elf_backend_bfd_from_remote_memory \
5739 _bfd_elf32_bfd_from_remote_memory
5740
5741 #undef elf_backend_size_info
5742 #define elf_backend_size_info \
5743 _bfd_elf32_size_info
5744
5745 #include "elf32-target.h"