1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
46 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
47 the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
48 since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
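/* Note: the "size" field in the entries below uses BFD's howto encoding
   rather than a byte count: 0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes and
   4 = 8 bytes (see bfd_get_reloc_size).  */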
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
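/* For example, an incoming R_X86_64_GNU_VTINHERIT reloc maps to index
   R_X86_64_GNU_VTINHERIT - R_X86_64_vt_offset == R_X86_64_standard,
   i.e. the first table entry after the gap below.  */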
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 _bfd_error_handler (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
365 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
378 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
425 /* Note that for some reason, a spurious space is tacked
426 onto the end of the args in some implementations (at least
427 one, anyway), so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
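/* Lazy binding in outline: the GOT slot for "name" initially points at
   the pushq in this entry (plt_lazy_offset below), so the first call
   pushes the relocation index and jumps back to the first PLT entry,
   which in turn pushes GOT[1] and jumps through GOT[2] to the dynamic
   linker's resolver.  The resolver then rewrites the GOT slot so that
   later calls go straight to the target.  */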
571
572 /* The first entry in a procedure linkage table with BND relocations
573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
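/* The DW_CFA_def_cfa_expression above evaluates to
     CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0)
   i.e. within each 16-byte PLT entry the CFA accounts for the extra
   8-byte relocation index once the pushq at offset 6 has executed and
   control has reached the jmp at offset 11.  */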
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
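/* These offsets refer to the byte templates above: plt0_got1_offset (2)
   and plt0_got2_offset (8) locate the 32-bit displacements of the
   "pushq GOT+8(%rip)" and "jmpq *GOT+16(%rip)" instructions in
   elf_x86_64_plt0_entry, while plt_got_offset (2), plt_reloc_offset (7)
   and plt_plt_offset (12) locate the GOT displacement, the relocation
   index and the branch back to PLT0 in elf_x86_64_plt_entry.  */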
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
745 /* Is this an undefined weak symbol which is resolved to 0?  A reference
746 to an undefined weak symbol is resolved to 0 when building an
747 executable if the symbol isn't dynamic and
748 1. has non-GOT/non-PLT relocations in the text section, or
749 2. has no GOT/PLT relocation.
750 */
751 #define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
752 ((EH)->elf.root.type == bfd_link_hash_undefweak \
753 && bfd_link_executable (INFO) \
754 && (elf_x86_64_hash_table (INFO)->interp == NULL \
755 || !(GOT_RELOC) \
756 || (EH)->has_non_got_reloc \
757 || !(INFO)->dynamic_undefined_weak))
758
759 /* x86-64 ELF linker hash entry. */
760
761 struct elf_x86_64_link_hash_entry
762 {
763 struct elf_link_hash_entry elf;
764
765 /* Track dynamic relocs copied for this symbol. */
766 struct elf_dyn_relocs *dyn_relocs;
767
768 #define GOT_UNKNOWN 0
769 #define GOT_NORMAL 1
770 #define GOT_TLS_GD 2
771 #define GOT_TLS_IE 3
772 #define GOT_TLS_GDESC 4
773 #define GOT_TLS_GD_BOTH_P(type) \
774 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
775 #define GOT_TLS_GD_P(type) \
776 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
777 #define GOT_TLS_GDESC_P(type) \
778 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
779 #define GOT_TLS_GD_ANY_P(type) \
780 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
781 unsigned char tls_type;
782
783 /* TRUE if a weak symbol with a real definition needs a copy reloc.
784 When there is a weak symbol with a real definition, the processor
785 independent code will have arranged for us to see the real
786 definition first. We need to copy the needs_copy bit from the
787 real definition and check it when allowing copy reloc in PIE. */
788 unsigned int needs_copy : 1;
789
790 /* TRUE if symbol has at least one BND relocation. */
791 unsigned int has_bnd_reloc : 1;
792
793 /* TRUE if symbol has GOT or PLT relocations. */
794 unsigned int has_got_reloc : 1;
795
796 /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
797 unsigned int has_non_got_reloc : 1;
798
799 /* 0: symbol isn't __tls_get_addr.
800 1: symbol is __tls_get_addr.
801 2: symbol is unknown. */
802 unsigned int tls_get_addr : 2;
803
804 /* Reference count of C/C++ function pointer relocations in read-write
805 section which can be resolved at run-time. */
806 bfd_signed_vma func_pointer_refcount;
807
808 /* Information about the GOT PLT entry. Filled when there are both
809 GOT and PLT relocations against the same function. */
810 union gotplt_union plt_got;
811
812 /* Information about the second PLT entry. Filled when has_bnd_reloc is
813 set. */
814 union gotplt_union plt_bnd;
815
816 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
817 starting at the end of the jump table. */
818 bfd_vma tlsdesc_got;
819 };
820
821 #define elf_x86_64_hash_entry(ent) \
822 ((struct elf_x86_64_link_hash_entry *)(ent))
823
824 struct elf_x86_64_obj_tdata
825 {
826 struct elf_obj_tdata root;
827
828 /* tls_type for each local got entry. */
829 char *local_got_tls_type;
830
831 /* GOTPLT entries for TLS descriptors. */
832 bfd_vma *local_tlsdesc_gotent;
833 };
834
835 #define elf_x86_64_tdata(abfd) \
836 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
837
838 #define elf_x86_64_local_got_tls_type(abfd) \
839 (elf_x86_64_tdata (abfd)->local_got_tls_type)
840
841 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
842 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
843
844 #define is_x86_64_elf(bfd) \
845 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
846 && elf_tdata (bfd) != NULL \
847 && elf_object_id (bfd) == X86_64_ELF_DATA)
848
849 static bfd_boolean
850 elf_x86_64_mkobject (bfd *abfd)
851 {
852 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
853 X86_64_ELF_DATA);
854 }
855
856 /* x86-64 ELF linker hash table. */
857
858 struct elf_x86_64_link_hash_table
859 {
860 struct elf_link_hash_table elf;
861
862 /* Short-cuts to get to dynamic linker sections. */
863 asection *interp;
864 asection *sdynbss;
865 asection *srelbss;
866 asection *plt_eh_frame;
867 asection *plt_bnd;
868 asection *plt_got;
869
870 union
871 {
872 bfd_signed_vma refcount;
873 bfd_vma offset;
874 } tls_ld_got;
875
876 /* The amount of space used by the jump slots in the GOT. */
877 bfd_vma sgotplt_jump_table_size;
878
879 /* Small local sym cache. */
880 struct sym_cache sym_cache;
881
882 bfd_vma (*r_info) (bfd_vma, bfd_vma);
883 bfd_vma (*r_sym) (bfd_vma);
884 unsigned int pointer_r_type;
885 const char *dynamic_interpreter;
886 int dynamic_interpreter_size;
887
888 /* _TLS_MODULE_BASE_ symbol. */
889 struct bfd_link_hash_entry *tls_module_base;
890
891 /* Used by local STT_GNU_IFUNC symbols. */
892 htab_t loc_hash_table;
893 void * loc_hash_memory;
894
895 /* The offset into splt of the PLT entry for the TLS descriptor
896 resolver. Special values are 0, if not necessary (or not found
897 to be necessary yet), and -1 if needed but not determined
898 yet. */
899 bfd_vma tlsdesc_plt;
900 /* The offset into sgot of the GOT entry used by the PLT entry
901 above. */
902 bfd_vma tlsdesc_got;
903
904 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
905 bfd_vma next_jump_slot_index;
906 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
907 bfd_vma next_irelative_index;
908
909 /* TRUE if there are dynamic relocs against IFUNC symbols that apply
910 to read-only sections. */
911 bfd_boolean readonly_dynrelocs_against_ifunc;
912 };
913
914 /* Get the x86-64 ELF linker hash table from a link_info structure. */
915
916 #define elf_x86_64_hash_table(p) \
917 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
918 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
919
920 #define elf_x86_64_compute_jump_table_size(htab) \
921 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
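/* Each relocation in .rela.plt reserves one GOT_ENTRY_SIZE slot in
   .got.plt, so the jump table occupies reloc_count * 8 bytes.  */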
922
923 /* Create an entry in an x86-64 ELF linker hash table. */
924
925 static struct bfd_hash_entry *
926 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
927 struct bfd_hash_table *table,
928 const char *string)
929 {
930 /* Allocate the structure if it has not already been allocated by a
931 subclass. */
932 if (entry == NULL)
933 {
934 entry = (struct bfd_hash_entry *)
935 bfd_hash_allocate (table,
936 sizeof (struct elf_x86_64_link_hash_entry));
937 if (entry == NULL)
938 return entry;
939 }
940
941 /* Call the allocation method of the superclass. */
942 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
943 if (entry != NULL)
944 {
945 struct elf_x86_64_link_hash_entry *eh;
946
947 eh = (struct elf_x86_64_link_hash_entry *) entry;
948 eh->dyn_relocs = NULL;
949 eh->tls_type = GOT_UNKNOWN;
950 eh->needs_copy = 0;
951 eh->has_bnd_reloc = 0;
952 eh->has_got_reloc = 0;
953 eh->has_non_got_reloc = 0;
954 eh->tls_get_addr = 2;
955 eh->func_pointer_refcount = 0;
956 eh->plt_bnd.offset = (bfd_vma) -1;
957 eh->plt_got.offset = (bfd_vma) -1;
958 eh->tlsdesc_got = (bfd_vma) -1;
959 }
960
961 return entry;
962 }
963
964 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
965 for local symbols so that we can handle local STT_GNU_IFUNC symbols
966 as global symbols.  We reuse indx and dynstr_index for the local
967 symbol hash since they aren't used by global symbols in this backend.  */
968
969 static hashval_t
970 elf_x86_64_local_htab_hash (const void *ptr)
971 {
972 struct elf_link_hash_entry *h
973 = (struct elf_link_hash_entry *) ptr;
974 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
975 }
976
977 /* Compare local hash entries. */
978
979 static int
980 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
981 {
982 struct elf_link_hash_entry *h1
983 = (struct elf_link_hash_entry *) ptr1;
984 struct elf_link_hash_entry *h2
985 = (struct elf_link_hash_entry *) ptr2;
986
987 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
988 }
989
990 /* Find and/or create a hash entry for a local symbol. */
991
992 static struct elf_link_hash_entry *
993 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
994 bfd *abfd, const Elf_Internal_Rela *rel,
995 bfd_boolean create)
996 {
997 struct elf_x86_64_link_hash_entry e, *ret;
998 asection *sec = abfd->sections;
999 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1000 htab->r_sym (rel->r_info));
1001 void **slot;
1002
1003 e.elf.indx = sec->id;
1004 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1005 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1006 create ? INSERT : NO_INSERT);
1007
1008 if (!slot)
1009 return NULL;
1010
1011 if (*slot)
1012 {
1013 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1014 return &ret->elf;
1015 }
1016
1017 ret = (struct elf_x86_64_link_hash_entry *)
1018 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1019 sizeof (struct elf_x86_64_link_hash_entry));
1020 if (ret)
1021 {
1022 memset (ret, 0, sizeof (*ret));
1023 ret->elf.indx = sec->id;
1024 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1025 ret->elf.dynindx = -1;
1026 ret->func_pointer_refcount = 0;
1027 ret->plt_got.offset = (bfd_vma) -1;
1028 *slot = ret;
1029 }
1030 return &ret->elf;
1031 }
1032
1033 /* Destroy an X86-64 ELF linker hash table. */
1034
1035 static void
1036 elf_x86_64_link_hash_table_free (bfd *obfd)
1037 {
1038 struct elf_x86_64_link_hash_table *htab
1039 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1040
1041 if (htab->loc_hash_table)
1042 htab_delete (htab->loc_hash_table);
1043 if (htab->loc_hash_memory)
1044 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1045 _bfd_elf_link_hash_table_free (obfd);
1046 }
1047
1048 /* Create an X86-64 ELF linker hash table. */
1049
1050 static struct bfd_link_hash_table *
1051 elf_x86_64_link_hash_table_create (bfd *abfd)
1052 {
1053 struct elf_x86_64_link_hash_table *ret;
1054 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1055
1056 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1057 if (ret == NULL)
1058 return NULL;
1059
1060 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1061 elf_x86_64_link_hash_newfunc,
1062 sizeof (struct elf_x86_64_link_hash_entry),
1063 X86_64_ELF_DATA))
1064 {
1065 free (ret);
1066 return NULL;
1067 }
1068
1069 if (ABI_64_P (abfd))
1070 {
1071 ret->r_info = elf64_r_info;
1072 ret->r_sym = elf64_r_sym;
1073 ret->pointer_r_type = R_X86_64_64;
1074 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1075 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1076 }
1077 else
1078 {
1079 ret->r_info = elf32_r_info;
1080 ret->r_sym = elf32_r_sym;
1081 ret->pointer_r_type = R_X86_64_32;
1082 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1083 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1084 }
1085
1086 ret->loc_hash_table = htab_try_create (1024,
1087 elf_x86_64_local_htab_hash,
1088 elf_x86_64_local_htab_eq,
1089 NULL);
1090 ret->loc_hash_memory = objalloc_create ();
1091 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1092 {
1093 elf_x86_64_link_hash_table_free (abfd);
1094 return NULL;
1095 }
1096 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1097
1098 return &ret->elf.root;
1099 }
1100
1101 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1102 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1103 hash table. */
1104
1105 static bfd_boolean
1106 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1107 struct bfd_link_info *info)
1108 {
1109 struct elf_x86_64_link_hash_table *htab;
1110
1111 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1112 return FALSE;
1113
1114 htab = elf_x86_64_hash_table (info);
1115 if (htab == NULL)
1116 return FALSE;
1117
1118 /* Set the contents of the .interp section to the interpreter. */
1119 if (bfd_link_executable (info) && !info->nointerp)
1120 {
1121 asection *s = bfd_get_linker_section (dynobj, ".interp");
1122 if (s == NULL)
1123 abort ();
1124 s->size = htab->dynamic_interpreter_size;
1125 s->contents = (unsigned char *) htab->dynamic_interpreter;
1126 htab->interp = s;
1127 }
1128
1129 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1130 if (!htab->sdynbss)
1131 abort ();
1132
1133 if (bfd_link_executable (info))
1134 {
1135 /* Always allow copy relocs for building executables. */
1136 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1137 if (s == NULL)
1138 {
1139 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1140 s = bfd_make_section_anyway_with_flags (dynobj,
1141 ".rela.bss",
1142 (bed->dynamic_sec_flags
1143 | SEC_READONLY));
1144 if (s == NULL
1145 || ! bfd_set_section_alignment (dynobj, s,
1146 bed->s->log_file_align))
1147 return FALSE;
1148 }
1149 htab->srelbss = s;
1150 }
1151
1152 if (!info->no_ld_generated_unwind_info
1153 && htab->plt_eh_frame == NULL
1154 && htab->elf.splt != NULL)
1155 {
1156 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1157 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1158 | SEC_LINKER_CREATED);
1159 htab->plt_eh_frame
1160 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1161 if (htab->plt_eh_frame == NULL
1162 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1163 return FALSE;
1164 }
1165
1166 /* Align .got section to its entry size. */
1167 if (htab->elf.sgot != NULL
1168 && !bfd_set_section_alignment (dynobj, htab->elf.sgot, 3))
1169 return FALSE;
1170
1171 /* Align .got.plt section to its entry size. */
1172 if (htab->elf.sgotplt != NULL
1173 && !bfd_set_section_alignment (dynobj, htab->elf.sgotplt, 3))
1174 return FALSE;
1175
1176 return TRUE;
1177 }
1178
1179 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1180
1181 static void
1182 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1183 struct elf_link_hash_entry *dir,
1184 struct elf_link_hash_entry *ind)
1185 {
1186 struct elf_x86_64_link_hash_entry *edir, *eind;
1187
1188 edir = (struct elf_x86_64_link_hash_entry *) dir;
1189 eind = (struct elf_x86_64_link_hash_entry *) ind;
1190
1191 if (!edir->has_bnd_reloc)
1192 edir->has_bnd_reloc = eind->has_bnd_reloc;
1193
1194 if (!edir->has_got_reloc)
1195 edir->has_got_reloc = eind->has_got_reloc;
1196
1197 if (!edir->has_non_got_reloc)
1198 edir->has_non_got_reloc = eind->has_non_got_reloc;
1199
1200 if (eind->dyn_relocs != NULL)
1201 {
1202 if (edir->dyn_relocs != NULL)
1203 {
1204 struct elf_dyn_relocs **pp;
1205 struct elf_dyn_relocs *p;
1206
1207 /* Add reloc counts against the indirect sym to the direct sym
1208 list. Merge any entries against the same section. */
1209 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1210 {
1211 struct elf_dyn_relocs *q;
1212
1213 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1214 if (q->sec == p->sec)
1215 {
1216 q->pc_count += p->pc_count;
1217 q->count += p->count;
1218 *pp = p->next;
1219 break;
1220 }
1221 if (q == NULL)
1222 pp = &p->next;
1223 }
1224 *pp = edir->dyn_relocs;
1225 }
1226
1227 edir->dyn_relocs = eind->dyn_relocs;
1228 eind->dyn_relocs = NULL;
1229 }
1230
1231 if (ind->root.type == bfd_link_hash_indirect
1232 && dir->got.refcount <= 0)
1233 {
1234 edir->tls_type = eind->tls_type;
1235 eind->tls_type = GOT_UNKNOWN;
1236 }
1237
1238 if (ELIMINATE_COPY_RELOCS
1239 && ind->root.type != bfd_link_hash_indirect
1240 && dir->dynamic_adjusted)
1241 {
1242 /* If called to transfer flags for a weakdef during processing
1243 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1244 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1245 dir->ref_dynamic |= ind->ref_dynamic;
1246 dir->ref_regular |= ind->ref_regular;
1247 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1248 dir->needs_plt |= ind->needs_plt;
1249 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1250 }
1251 else
1252 {
1253 if (eind->func_pointer_refcount > 0)
1254 {
1255 edir->func_pointer_refcount += eind->func_pointer_refcount;
1256 eind->func_pointer_refcount = 0;
1257 }
1258
1259 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1260 }
1261 }
1262
1263 static bfd_boolean
1264 elf64_x86_64_elf_object_p (bfd *abfd)
1265 {
1266 /* Set the right machine number for an x86-64 elf64 file. */
1267 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1268 return TRUE;
1269 }
1270
1271 static bfd_boolean
1272 elf32_x86_64_elf_object_p (bfd *abfd)
1273 {
1274 /* Set the right machine number for an x86-64 elf32 file. */
1275 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1276 return TRUE;
1277 }
1278
1279 /* Return TRUE if the TLS access code sequence supports transition
1280 from R_TYPE. */
1281
1282 static bfd_boolean
1283 elf_x86_64_check_tls_transition (bfd *abfd,
1284 struct bfd_link_info *info,
1285 asection *sec,
1286 bfd_byte *contents,
1287 Elf_Internal_Shdr *symtab_hdr,
1288 struct elf_link_hash_entry **sym_hashes,
1289 unsigned int r_type,
1290 const Elf_Internal_Rela *rel,
1291 const Elf_Internal_Rela *relend)
1292 {
1293 unsigned int val;
1294 unsigned long r_symndx;
1295 bfd_boolean largepic = FALSE;
1296 struct elf_link_hash_entry *h;
1297 bfd_vma offset;
1298 struct elf_x86_64_link_hash_table *htab;
1299 bfd_byte *call;
1300 bfd_boolean indirect_call, tls_get_addr;
1301
1302 htab = elf_x86_64_hash_table (info);
1303 offset = rel->r_offset;
1304 switch (r_type)
1305 {
1306 case R_X86_64_TLSGD:
1307 case R_X86_64_TLSLD:
1308 if ((rel + 1) >= relend)
1309 return FALSE;
1310
1311 if (r_type == R_X86_64_TLSGD)
1312 {
1313 /* Check transition from GD access model. For 64bit, only
1314 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1315 .word 0x6666; rex64; call __tls_get_addr@PLT
1316 or
1317 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1318 .byte 0x66; rex64
1319 call *__tls_get_addr@GOTPCREL(%rip)
1320 which may be converted to
1321 addr32 call __tls_get_addr
1322 can transit to a different access model.  For 32bit, only
1323 leaq foo@tlsgd(%rip), %rdi
1324 .word 0x6666; rex64; call __tls_get_addr@PLT
1325 or
1326 leaq foo@tlsgd(%rip), %rdi
1327 .byte 0x66; rex64
1328 call *__tls_get_addr@GOTPCREL(%rip)
1329 which may be converted to
1330 addr32 call __tls_get_addr
1331 can transit to a different access model.  For largepic,
1332 we also support:
1333 leaq foo@tlsgd(%rip), %rdi
1334 movabsq $__tls_get_addr@pltoff, %rax
1335 addq $r15, %rax
1336 call *%rax
1337 or
1338 leaq foo@tlsgd(%rip), %rdi
1339 movabsq $__tls_get_addr@pltoff, %rax
1340 addq $rbx, %rax
1341 call *%rax */
1342
1343 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1344
1345 if ((offset + 12) > sec->size)
1346 return FALSE;
1347
1348 call = contents + offset + 4;
1349 if (call[0] != 0x66
1350 || !((call[1] == 0x48
1351 && call[2] == 0xff
1352 && call[3] == 0x15)
1353 || (call[1] == 0x48
1354 && call[2] == 0x67
1355 && call[3] == 0xe8)
1356 || (call[1] == 0x66
1357 && call[2] == 0x48
1358 && call[3] == 0xe8)))
1359 {
1360 if (!ABI_64_P (abfd)
1361 || (offset + 19) > sec->size
1362 || offset < 3
1363 || memcmp (call - 7, leaq + 1, 3) != 0
1364 || memcmp (call, "\x48\xb8", 2) != 0
1365 || call[11] != 0x01
1366 || call[13] != 0xff
1367 || call[14] != 0xd0
1368 || !((call[10] == 0x48 && call[12] == 0xd8)
1369 || (call[10] == 0x4c && call[12] == 0xf8)))
1370 return FALSE;
1371 largepic = TRUE;
1372 }
1373 else if (ABI_64_P (abfd))
1374 {
1375 if (offset < 4
1376 || memcmp (contents + offset - 4, leaq, 4) != 0)
1377 return FALSE;
1378 }
1379 else
1380 {
1381 if (offset < 3
1382 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1383 return FALSE;
1384 }
1385 indirect_call = call[2] == 0xff;
1386 }
1387 else
1388 {
1389 /* Check transition from LD access model. Only
1390 leaq foo@tlsld(%rip), %rdi;
1391 call __tls_get_addr@PLT
1392 or
1393 leaq foo@tlsld(%rip), %rdi;
1394 call *__tls_get_addr@GOTPCREL(%rip)
1395 which may be converted to
1396 addr32 call __tls_get_addr
1397 can transit to a different access model.  For largepic
1398 we also support:
1399 leaq foo@tlsld(%rip), %rdi
1400 movabsq $__tls_get_addr@pltoff, %rax
1401 addq $r15, %rax
1402 call *%rax
1403 or
1404 leaq foo@tlsld(%rip), %rdi
1405 movabsq $__tls_get_addr@pltoff, %rax
1406 addq $rbx, %rax
1407 call *%rax */
1408
1409 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1410
1411 if (offset < 3 || (offset + 9) > sec->size)
1412 return FALSE;
1413
1414 if (memcmp (contents + offset - 3, lea, 3) != 0)
1415 return FALSE;
1416
1417 call = contents + offset + 4;
1418 if (!(call[0] == 0xe8
1419 || (call[0] == 0xff && call[1] == 0x15)
1420 || (call[0] == 0x67 && call[1] == 0xe8)))
1421 {
1422 if (!ABI_64_P (abfd)
1423 || (offset + 19) > sec->size
1424 || memcmp (call, "\x48\xb8", 2) != 0
1425 || call[11] != 0x01
1426 || call[13] != 0xff
1427 || call[14] != 0xd0
1428 || !((call[10] == 0x48 && call[12] == 0xd8)
1429 || (call[10] == 0x4c && call[12] == 0xf8)))
1430 return FALSE;
1431 largepic = TRUE;
1432 }
1433 indirect_call = call[0] == 0xff;
1434 }
1435
1436 r_symndx = htab->r_sym (rel[1].r_info);
1437 if (r_symndx < symtab_hdr->sh_info)
1438 return FALSE;
1439
1440 tls_get_addr = FALSE;
1441 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1442 if (h != NULL && h->root.root.string != NULL)
1443 {
1444 struct elf_x86_64_link_hash_entry *eh
1445 = (struct elf_x86_64_link_hash_entry *) h;
1446 tls_get_addr = eh->tls_get_addr == 1;
1447 if (eh->tls_get_addr > 1)
1448 {
1449 /* Use strncmp to check __tls_get_addr since
1450 __tls_get_addr may be versioned. */
1451 if (strncmp (h->root.root.string, "__tls_get_addr", 14)
1452 == 0)
1453 {
1454 eh->tls_get_addr = 1;
1455 tls_get_addr = TRUE;
1456 }
1457 else
1458 eh->tls_get_addr = 0;
1459 }
1460 }
1461
1462 if (!tls_get_addr)
1463 return FALSE;
1464 else if (largepic)
1465 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
1466 else if (indirect_call)
1467 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
1468 else
1469 return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1470 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
1471
1472 case R_X86_64_GOTTPOFF:
1473 /* Check transition from IE access model:
1474 mov foo@gottpoff(%rip), %reg
1475 add foo@gottpoff(%rip), %reg
1476 */
1477
1478 /* Check REX prefix first. */
1479 if (offset >= 3 && (offset + 4) <= sec->size)
1480 {
1481 val = bfd_get_8 (abfd, contents + offset - 3);
1482 if (val != 0x48 && val != 0x4c)
1483 {
1484 /* X32 may have 0x44 REX prefix or no REX prefix. */
1485 if (ABI_64_P (abfd))
1486 return FALSE;
1487 }
1488 }
1489 else
1490 {
1491 /* X32 may not have any REX prefix. */
1492 if (ABI_64_P (abfd))
1493 return FALSE;
1494 if (offset < 2 || (offset + 3) > sec->size)
1495 return FALSE;
1496 }
1497
1498 val = bfd_get_8 (abfd, contents + offset - 2);
1499 if (val != 0x8b && val != 0x03)
1500 return FALSE;
1501
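/* A ModRM byte with mod == 00 and r/m == 101, i.e.
   (modrm & 0xc7) == 0x05, selects RIP-relative addressing.  */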
1502 val = bfd_get_8 (abfd, contents + offset - 1);
1503 return (val & 0xc7) == 5;
1504
1505 case R_X86_64_GOTPC32_TLSDESC:
1506 /* Check transition from GDesc access model:
1507 leaq x@tlsdesc(%rip), %rax
1508
1509 Make sure it's a leaq adding rip to a 32-bit offset
1510 into any register, although it's probably almost always
1511 going to be rax. */
1512
1513 if (offset < 3 || (offset + 4) > sec->size)
1514 return FALSE;
1515
1516 val = bfd_get_8 (abfd, contents + offset - 3);
1517 if ((val & 0xfb) != 0x48)
1518 return FALSE;
1519
1520 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1521 return FALSE;
1522
1523 val = bfd_get_8 (abfd, contents + offset - 1);
1524 return (val & 0xc7) == 0x05;
1525
1526 case R_X86_64_TLSDESC_CALL:
1527 /* Check transition from GDesc access model:
1528 call *x@tlsdesc(%rax)
1529 */
1530 if (offset + 2 <= sec->size)
1531 {
1532 /* Make sure that it's a call *x@tlsdesc(%rax). */
1533 call = contents + offset;
1534 return call[0] == 0xff && call[1] == 0x10;
1535 }
1536
1537 return FALSE;
1538
1539 default:
1540 abort ();
1541 }
1542 }
1543
1544 /* Return TRUE if the TLS access transition is OK or no transition
1545 will be performed. Update R_TYPE if there is a transition. */
1546
1547 static bfd_boolean
1548 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1549 asection *sec, bfd_byte *contents,
1550 Elf_Internal_Shdr *symtab_hdr,
1551 struct elf_link_hash_entry **sym_hashes,
1552 unsigned int *r_type, int tls_type,
1553 const Elf_Internal_Rela *rel,
1554 const Elf_Internal_Rela *relend,
1555 struct elf_link_hash_entry *h,
1556 unsigned long r_symndx,
1557 bfd_boolean from_relocate_section)
1558 {
1559 unsigned int from_type = *r_type;
1560 unsigned int to_type = from_type;
1561 bfd_boolean check = TRUE;
1562
1563 /* Skip TLS transition for functions. */
1564 if (h != NULL
1565 && (h->type == STT_FUNC
1566 || h->type == STT_GNU_IFUNC))
1567 return TRUE;
1568
1569 switch (from_type)
1570 {
1571 case R_X86_64_TLSGD:
1572 case R_X86_64_GOTPC32_TLSDESC:
1573 case R_X86_64_TLSDESC_CALL:
1574 case R_X86_64_GOTTPOFF:
1575 if (bfd_link_executable (info))
1576 {
1577 if (h == NULL)
1578 to_type = R_X86_64_TPOFF32;
1579 else
1580 to_type = R_X86_64_GOTTPOFF;
1581 }
1582
1583 /* When we are called from elf_x86_64_relocate_section, there may
1584 be additional transitions based on TLS_TYPE. */
1585 if (from_relocate_section)
1586 {
1587 unsigned int new_to_type = to_type;
1588
1589 if (bfd_link_executable (info)
1590 && h != NULL
1591 && h->dynindx == -1
1592 && tls_type == GOT_TLS_IE)
1593 new_to_type = R_X86_64_TPOFF32;
1594
1595 if (to_type == R_X86_64_TLSGD
1596 || to_type == R_X86_64_GOTPC32_TLSDESC
1597 || to_type == R_X86_64_TLSDESC_CALL)
1598 {
1599 if (tls_type == GOT_TLS_IE)
1600 new_to_type = R_X86_64_GOTTPOFF;
1601 }
1602
1603 /* We checked the transition before when we were called from
1604 elf_x86_64_check_relocs. We only want to check the new
1605 transition which hasn't been checked before. */
1606 check = new_to_type != to_type && from_type == to_type;
1607 to_type = new_to_type;
1608 }
1609
1610 break;
1611
1612 case R_X86_64_TLSLD:
1613 if (bfd_link_executable (info))
1614 to_type = R_X86_64_TPOFF32;
1615 break;
1616
1617 default:
1618 return TRUE;
1619 }
1620
1621 /* Return TRUE if there is no transition. */
1622 if (from_type == to_type)
1623 return TRUE;
1624
1625 /* Check if the transition can be performed. */
1626 if (check
1627 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1628 symtab_hdr, sym_hashes,
1629 from_type, rel, relend))
1630 {
1631 reloc_howto_type *from, *to;
1632 const char *name;
1633
1634 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1635 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1636
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 {
1641 struct elf_x86_64_link_hash_table *htab;
1642
1643 htab = elf_x86_64_hash_table (info);
1644 if (htab == NULL)
1645 name = "*unknown*";
1646 else
1647 {
1648 Elf_Internal_Sym *isym;
1649
1650 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1651 abfd, r_symndx);
1652 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1653 }
1654 }
1655
1656 _bfd_error_handler
1657 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1658 "in section `%A' failed"),
1659 abfd, sec, from->name, to->name, name,
1660 (unsigned long) rel->r_offset);
1661 bfd_set_error (bfd_error_bad_value);
1662 return FALSE;
1663 }
1664
1665 *r_type = to_type;
1666 return TRUE;
1667 }
1668
1669 /* Rename some of the generic section flags to better document how they
1670 are used here. */
1671 #define need_convert_load sec_flg0
1672 #define check_relocs_failed sec_flg1
1673
1674 static bfd_boolean
1675 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1676 struct elf_link_hash_entry *h,
1677 Elf_Internal_Shdr *symtab_hdr,
1678 Elf_Internal_Sym *isym,
1679 reloc_howto_type *howto)
1680 {
1681 const char *v = "";
1682 const char *und = "";
1683 const char *pic = "";
1684
1685 const char *name;
1686 if (h)
1687 {
1688 name = h->root.root.string;
1689 switch (ELF_ST_VISIBILITY (h->other))
1690 {
1691 case STV_HIDDEN:
1692 v = _("hidden symbol ");
1693 break;
1694 case STV_INTERNAL:
1695 v = _("internal symbol ");
1696 break;
1697 case STV_PROTECTED:
1698 v = _("protected symbol ");
1699 break;
1700 default:
1701 v = _("symbol ");
1702 pic = _("; recompile with -fPIC");
1703 break;
1704 }
1705
1706 if (!h->def_regular && !h->def_dynamic)
1707 und = _("undefined ");
1708 }
1709 else
1710 {
1711 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1712 pic = _("; recompile with -fPIC");
1713 }
1714
1715 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1716 "not be used when making a shared object%s"),
1717 input_bfd, howto->name, und, v, name, pic);
1718 bfd_set_error (bfd_error_bad_value);
1719 sec->check_relocs_failed = 1;
1720 return FALSE;
1721 }
1722
1723 /* With the local symbol, foo, we convert
1724 mov foo@GOTPCREL(%rip), %reg
1725 to
1726 lea foo(%rip), %reg
1727 and convert
1728 call/jmp *foo@GOTPCREL(%rip)
1729 to
1730 nop; call foo  or  jmp foo; nop
1731 When PIC is false, convert
1732 test %reg, foo@GOTPCREL(%rip)
1733 to
1734 test $foo, %reg
1735 and convert
1736 binop foo@GOTPCREL(%rip), %reg
1737 to
1738 binop $foo, %reg
1739 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1740 instructions. */
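/* As a concrete illustration (assuming a REX.W-prefixed load of %rax),
   the mov -> lea conversion only changes the opcode byte:
       48 8b 05 <disp32>    mov foo@GOTPCREL(%rip), %rax
   becomes
       48 8d 05 <disp32>    lea foo(%rip), %rax
   with the GOTPCREL-style relocation rewritten as R_X86_64_PC32
   against foo itself.  */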
1741
1742 static bfd_boolean
1743 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1744 bfd_byte *contents,
1745 Elf_Internal_Rela *irel,
1746 struct elf_link_hash_entry *h,
1747 bfd_boolean *converted,
1748 struct bfd_link_info *link_info)
1749 {
1750 struct elf_x86_64_link_hash_table *htab;
1751 bfd_boolean is_pic;
1752 bfd_boolean require_reloc_pc32;
1753 bfd_boolean relocx;
1754 bfd_boolean to_reloc_pc32;
1755 asection *tsec;
1756 char symtype;
1757 bfd_signed_vma raddend;
1758 unsigned int opcode;
1759 unsigned int modrm;
1760 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1761 unsigned int r_symndx;
1762 bfd_vma toff;
1763 bfd_vma roff = irel->r_offset;
1764
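/* There must be room for the opcode and ModRM bytes (plus the REX
   byte for R_X86_64_REX_GOTPCRELX) in front of the relocated
   displacement.  */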
1765 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1766 return TRUE;
1767
1768 raddend = irel->r_addend;
1769 /* Addend for 32-bit PC-relative relocation must be -4. */
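/* (For the instructions handled here the displacement is the last
   field of the instruction, and rip-relative addressing is relative
   to the end of the instruction, i.e. 4 bytes past the start of the
   field, hence the -4.)  */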
1770 if (raddend != -4)
1771 return TRUE;
1772
1773 htab = elf_x86_64_hash_table (link_info);
1774 is_pic = bfd_link_pic (link_info);
1775
1776 relocx = (r_type == R_X86_64_GOTPCRELX
1777 || r_type == R_X86_64_REX_GOTPCRELX);
1778
1779 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1780 --no-relax. */
1781 require_reloc_pc32
1782 = link_info->disable_target_specific_optimizations > 1;
1783
1784 r_symndx = htab->r_sym (irel->r_info);
1785
1786 opcode = bfd_get_8 (abfd, contents + roff - 2);
1787
1788 /* Convert mov to lea even for plain R_X86_64_GOTPCREL, since that has been done for a while. */
1789 if (opcode != 0x8b)
1790 {
1791 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1792 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1793 test, xor instructions. */
1794 if (!relocx)
1795 return TRUE;
1796 }
1797
1798 /* We convert only to R_X86_64_PC32:
1799 1. Branch.
1800 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1801 3. require_reloc_pc32 is true.
1802 4. PIC.
1803 */
1804 to_reloc_pc32 = (opcode == 0xff
1805 || !relocx
1806 || require_reloc_pc32
1807 || is_pic);
1808
1809 /* Get the symbol referred to by the reloc. */
1810 if (h == NULL)
1811 {
1812 Elf_Internal_Sym *isym
1813 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1814
1815 /* Skip relocation against undefined symbols. */
1816 if (isym->st_shndx == SHN_UNDEF)
1817 return TRUE;
1818
1819 symtype = ELF_ST_TYPE (isym->st_info);
1820
1821 if (isym->st_shndx == SHN_ABS)
1822 tsec = bfd_abs_section_ptr;
1823 else if (isym->st_shndx == SHN_COMMON)
1824 tsec = bfd_com_section_ptr;
1825 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1826 tsec = &_bfd_elf_large_com_section;
1827 else
1828 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1829
1830 toff = isym->st_value;
1831 }
1832 else
1833 {
1834 /* An undefined weak symbol is only bound locally in an executable
1835 and its reference is resolved as 0 without relocation
1836 overflow. We can only perform this optimization for
1837 GOTPCRELX relocations since we need to modify the REX byte.
1838 It is OK to convert mov with R_X86_64_GOTPCREL to
1839 R_X86_64_PC32. */
1840 if ((relocx || opcode == 0x8b)
1841 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1842 TRUE,
1843 elf_x86_64_hash_entry (h)))
1844 {
1845 if (opcode == 0xff)
1846 {
1847 /* Skip for branch instructions since R_X86_64_PC32
1848 may overflow. */
1849 if (require_reloc_pc32)
1850 return TRUE;
1851 }
1852 else if (relocx)
1853 {
1854 /* For non-branch instructions, we can convert to
1855 R_X86_64_32/R_X86_64_32S since we know if there
1856 is a REX byte. */
1857 to_reloc_pc32 = FALSE;
1858 }
1859
1860 /* Since we don't know the current PC when PIC is true,
1861 we can't convert to R_X86_64_PC32. */
1862 if (to_reloc_pc32 && is_pic)
1863 return TRUE;
1864
1865 goto convert;
1866 }
1867 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1868 ld.so may use its link-time address. */
1869 else if ((h->def_regular
1870 || h->root.type == bfd_link_hash_defined
1871 || h->root.type == bfd_link_hash_defweak)
1872 && h != htab->elf.hdynamic
1873 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1874 {
1875 /* bfd_link_hash_new or bfd_link_hash_undefined is
1876 set by an assignment in a linker script in
1877 bfd_elf_record_link_assignment. */
1878 if (h->def_regular
1879 && (h->root.type == bfd_link_hash_new
1880 || h->root.type == bfd_link_hash_undefined
1881 || ((h->root.type == bfd_link_hash_defined
1882 || h->root.type == bfd_link_hash_defweak)
1883 && h->root.u.def.section == bfd_und_section_ptr)))
1884 {
1885 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1886 if (require_reloc_pc32)
1887 return TRUE;
1888 goto convert;
1889 }
1890 tsec = h->root.u.def.section;
1891 toff = h->root.u.def.value;
1892 symtype = h->type;
1893 }
1894 else
1895 return TRUE;
1896 }
1897
1898 /* Don't convert GOTPCREL relocation against large section. */
1899 if (elf_section_data (tsec) != NULL
1900 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1901 return TRUE;
1902
1903 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1904 if (!to_reloc_pc32)
1905 goto convert;
1906
1907 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1908 {
1909 /* At this stage in linking, no SEC_MERGE symbol has been
1910 adjusted, so all references to such symbols need to be
1911 passed through _bfd_merged_section_offset. (Later, in
1912 relocate_section, all SEC_MERGE symbols *except* for
1913 section symbols have been adjusted.)
1914
1915 gas may reduce relocations against symbols in SEC_MERGE
1916 sections to a relocation against the section symbol when
1917 the original addend was zero. When the reloc is against
1918 a section symbol we should include the addend in the
1919 offset passed to _bfd_merged_section_offset, since the
1920 location of interest is the original symbol. On the
1921 other hand, an access to "sym+addend" where "sym" is not
1922 a section symbol should not include the addend; such an
1923 access is presumed to be an offset from "sym"; the
1924 location of interest is just "sym". */
1925 if (symtype == STT_SECTION)
1926 toff += raddend;
1927
1928 toff = _bfd_merged_section_offset (abfd, &tsec,
1929 elf_section_data (tsec)->sec_info,
1930 toff);
1931
1932 if (symtype != STT_SECTION)
1933 toff += raddend;
1934 }
1935 else
1936 toff += raddend;
1937
1938 /* Don't convert if R_X86_64_PC32 relocation overflows. */
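/* (Adding the 0x80000000 bias maps the signed 32-bit range
   [-0x80000000, 0x7fffffff] onto [0, 0xffffffff], so a single
   unsigned comparison checks whether the displacement fits.)  */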
1939 if (tsec->output_section == sec->output_section)
1940 {
1941 if ((toff - roff + 0x80000000) > 0xffffffff)
1942 return TRUE;
1943 }
1944 else
1945 {
1946 bfd_signed_vma distance;
1947
1948 /* At this point, we don't know the load addresses of either the
1949 TSEC section or the SEC section. We estimate the distance
1950 between SEC and TSEC. We store the estimated distances in the
1951 compressed_size field of the output section, which is otherwise
1952 only used when decompressing compressed input sections. */
1953 if (sec->output_section->compressed_size == 0)
1954 {
1955 asection *asect;
1956 bfd_size_type size = 0;
1957 for (asect = link_info->output_bfd->sections;
1958 asect != NULL;
1959 asect = asect->next)
1960 /* Skip debug sections since their compressed_size field is
1961 already used for compressed debug sections. */
1962 if ((asect->flags & SEC_DEBUGGING) == 0)
1963 {
1964 asection *i;
1965 for (i = asect->map_head.s;
1966 i != NULL;
1967 i = i->map_head.s)
1968 {
1969 size = align_power (size, i->alignment_power);
1970 size += i->size;
1971 }
1972 asect->compressed_size = size;
1973 }
1974 }
1975
1976 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1977 after SEC. */
1978 distance = (tsec->output_section->compressed_size
1979 - sec->output_section->compressed_size);
1980 if (distance < 0)
1981 return TRUE;
1982
1983 /* Take PT_GNU_RELRO segment into account by adding
1984 maxpagesize. */
1985 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1986 - roff + 0x80000000) > 0xffffffff)
1987 return TRUE;
1988 }
1989
1990 convert:
1991 if (opcode == 0xff)
1992 {
1993 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1994 unsigned int nop;
1995 unsigned int disp;
1996 bfd_vma nop_offset;
1997
1998 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1999 R_X86_64_PC32. */
2000 modrm = bfd_get_8 (abfd, contents + roff - 1);
2001 if (modrm == 0x25)
2002 {
2003 /* Convert to "jmp foo nop". */
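/* The 6-byte "ff 25 <rel32>" becomes "e9 <rel32> 90": the
   displacement moves down one byte and a NOP fills the last byte,
   so the instruction boundary is unchanged.  */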
2004 modrm = 0xe9;
2005 nop = NOP_OPCODE;
2006 nop_offset = irel->r_offset + 3;
2007 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2008 irel->r_offset -= 1;
2009 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2010 }
2011 else
2012 {
2013 struct elf_x86_64_link_hash_entry *eh
2014 = (struct elf_x86_64_link_hash_entry *) h;
2015
2016 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
2017 is a nop prefix. */
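/* The 6-byte "ff 15 <rel32>" becomes "67 e8 <rel32>" when the
   addr32 prefix is used, and otherwise "<nop> e8 <rel32>" or
   "e8 <rel32> <nop>" depending on call_nop_as_suffix.  */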
2018 modrm = 0xe8;
2019 /* To support TLS optimization, always use addr32 prefix for
2020 "call *__tls_get_addr@GOTPCREL(%rip)". */
2021 if (eh && eh->tls_get_addr == 1)
2022 {
2023 nop = 0x67;
2024 nop_offset = irel->r_offset - 2;
2025 }
2026 else
2027 {
2028 nop = link_info->call_nop_byte;
2029 if (link_info->call_nop_as_suffix)
2030 {
2031 nop_offset = irel->r_offset + 3;
2032 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2033 irel->r_offset -= 1;
2034 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2035 }
2036 else
2037 nop_offset = irel->r_offset - 2;
2038 }
2039 }
2040 bfd_put_8 (abfd, nop, contents + nop_offset);
2041 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2042 r_type = R_X86_64_PC32;
2043 }
2044 else
2045 {
2046 unsigned int rex;
2047 unsigned int rex_mask = REX_R;
2048
2049 if (r_type == R_X86_64_REX_GOTPCRELX)
2050 rex = bfd_get_8 (abfd, contents + roff - 3);
2051 else
2052 rex = 0;
2053
2054 if (opcode == 0x8b)
2055 {
2056 if (to_reloc_pc32)
2057 {
2058 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2059 "lea foo(%rip), %reg". */
2060 opcode = 0x8d;
2061 r_type = R_X86_64_PC32;
2062 }
2063 else
2064 {
2065 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2066 "mov $foo, %reg". */
2067 opcode = 0xc7;
2068 modrm = bfd_get_8 (abfd, contents + roff - 1);
2069 modrm = 0xc0 | (modrm & 0x38) >> 3;
2070 if ((rex & REX_W) != 0
2071 && ABI_64_P (link_info->output_bfd))
2072 {
2073 /* Keep the REX_W bit in REX byte for LP64. */
2074 r_type = R_X86_64_32S;
2075 goto rewrite_modrm_rex;
2076 }
2077 else
2078 {
2079 /* If the REX_W bit in REX byte isn't needed,
2080 use R_X86_64_32 and clear the W bit to avoid
2081 sign-extend imm32 to imm64. */
2082 r_type = R_X86_64_32;
2083 /* Clear the W bit in REX byte. */
2084 rex_mask |= REX_W;
2085 goto rewrite_modrm_rex;
2086 }
2087 }
2088 }
2089 else
2090 {
2091 /* These opcodes can only become R_X86_64_32/32S; skip if restricted to R_X86_64_PC32. */
2092 if (to_reloc_pc32)
2093 return TRUE;
2094
2095 modrm = bfd_get_8 (abfd, contents + roff - 1);
2096 if (opcode == 0x85)
2097 {
2098 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2099 "test $foo, %reg". */
2100 modrm = 0xc0 | (modrm & 0x38) >> 3;
2101 opcode = 0xf7;
2102 }
2103 else
2104 {
2105 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2106 "binop $foo, %reg". */
2107 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2108 opcode = 0x81;
2109 }
2110
2111 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
2112 overflow when sign-extending imm32 to imm64. */
2113 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2114
2115 rewrite_modrm_rex:
2116 bfd_put_8 (abfd, modrm, contents + roff - 1);
2117
2118 if (rex)
2119 {
2120 /* Move the R bit to the B bit in REX byte. */
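/* (REX.R extends the ModRM reg field and REX.B the r/m field; the
   operand register has just moved from the former to the latter,
   e.g. REX 0x4c becomes 0x49 for %r8 when REX.W is kept.)  */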
2121 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2122 bfd_put_8 (abfd, rex, contents + roff - 3);
2123 }
2124
2125 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2126 irel->r_addend = 0;
2127 }
2128
2129 bfd_put_8 (abfd, opcode, contents + roff - 2);
2130 }
2131
2132 irel->r_info = htab->r_info (r_symndx, r_type);
2133
2134 *converted = TRUE;
2135
2136 return TRUE;
2137 }
2138
2139 /* Look through the relocs for a section during the first phase, and
2140 calculate needed space in the global offset table, procedure
2141 linkage table, and dynamic reloc sections. */
2142
2143 static bfd_boolean
2144 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
2145 asection *sec,
2146 const Elf_Internal_Rela *relocs)
2147 {
2148 struct elf_x86_64_link_hash_table *htab;
2149 Elf_Internal_Shdr *symtab_hdr;
2150 struct elf_link_hash_entry **sym_hashes;
2151 const Elf_Internal_Rela *rel;
2152 const Elf_Internal_Rela *rel_end;
2153 asection *sreloc;
2154 bfd_byte *contents;
2155 bfd_boolean use_plt_got;
2156
2157 if (bfd_link_relocatable (info))
2158 return TRUE;
2159
2160 /* Don't do anything special with non-loaded, non-alloced sections.
2161 In particular, any relocs in such sections should not affect GOT
2162 and PLT reference counting (ie. we don't allow them to create GOT
2163 or PLT entries), there's no possibility or desire to optimize TLS
2164 relocs, and there's not much point in propagating relocs to shared
2165 libs that the dynamic linker won't relocate. */
2166 if ((sec->flags & SEC_ALLOC) == 0)
2167 return TRUE;
2168
2169 BFD_ASSERT (is_x86_64_elf (abfd));
2170
2171 htab = elf_x86_64_hash_table (info);
2172 if (htab == NULL)
2173 {
2174 sec->check_relocs_failed = 1;
2175 return FALSE;
2176 }
2177
2178 /* Get the section contents. */
2179 if (elf_section_data (sec)->this_hdr.contents != NULL)
2180 contents = elf_section_data (sec)->this_hdr.contents;
2181 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2182 {
2183 sec->check_relocs_failed = 1;
2184 return FALSE;
2185 }
2186
2187 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
2188
2189 symtab_hdr = &elf_symtab_hdr (abfd);
2190 sym_hashes = elf_sym_hashes (abfd);
2191
2192 sreloc = NULL;
2193
2194 rel_end = relocs + sec->reloc_count;
2195 for (rel = relocs; rel < rel_end; rel++)
2196 {
2197 unsigned int r_type;
2198 unsigned long r_symndx;
2199 struct elf_link_hash_entry *h;
2200 struct elf_x86_64_link_hash_entry *eh;
2201 Elf_Internal_Sym *isym;
2202 const char *name;
2203 bfd_boolean size_reloc;
2204
2205 r_symndx = htab->r_sym (rel->r_info);
2206 r_type = ELF32_R_TYPE (rel->r_info);
2207
2208 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2209 {
2210 _bfd_error_handler (_("%B: bad symbol index: %d"),
2211 abfd, r_symndx);
2212 goto error_return;
2213 }
2214
2215 if (r_symndx < symtab_hdr->sh_info)
2216 {
2217 /* A local symbol. */
2218 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2219 abfd, r_symndx);
2220 if (isym == NULL)
2221 goto error_return;
2222
2223 /* Check relocation against local STT_GNU_IFUNC symbol. */
2224 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2225 {
2226 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
2227 TRUE);
2228 if (h == NULL)
2229 goto error_return;
2230
2231 /* Fake a STT_GNU_IFUNC symbol. */
2232 h->type = STT_GNU_IFUNC;
2233 h->def_regular = 1;
2234 h->ref_regular = 1;
2235 h->forced_local = 1;
2236 h->root.type = bfd_link_hash_defined;
2237 }
2238 else
2239 h = NULL;
2240 }
2241 else
2242 {
2243 isym = NULL;
2244 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2245 while (h->root.type == bfd_link_hash_indirect
2246 || h->root.type == bfd_link_hash_warning)
2247 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2248 }
2249
2250 /* Check for invalid x32 relocations. */
2251 if (!ABI_64_P (abfd))
2252 switch (r_type)
2253 {
2254 default:
2255 break;
2256
2257 case R_X86_64_DTPOFF64:
2258 case R_X86_64_TPOFF64:
2259 case R_X86_64_PC64:
2260 case R_X86_64_GOTOFF64:
2261 case R_X86_64_GOT64:
2262 case R_X86_64_GOTPCREL64:
2263 case R_X86_64_GOTPC64:
2264 case R_X86_64_GOTPLT64:
2265 case R_X86_64_PLTOFF64:
2266 {
2267 if (h)
2268 name = h->root.root.string;
2269 else
2270 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2271 NULL);
2272 _bfd_error_handler
2273 (_("%B: relocation %s against symbol `%s' isn't "
2274 "supported in x32 mode"), abfd,
2275 x86_64_elf_howto_table[r_type].name, name);
2276 bfd_set_error (bfd_error_bad_value);
2277 goto error_return;
2278 }
2279 break;
2280 }
2281
2282 if (h != NULL)
2283 {
2284 switch (r_type)
2285 {
2286 default:
2287 break;
2288
2289 case R_X86_64_PC32_BND:
2290 case R_X86_64_PLT32_BND:
2291 case R_X86_64_PC32:
2292 case R_X86_64_PLT32:
2293 case R_X86_64_32:
2294 case R_X86_64_64:
2295 /* MPX PLT is supported only if elf_x86_64_arch_bed
2296 is used in 64-bit mode. */
2297 if (ABI_64_P (abfd)
2298 && info->bndplt
2299 && (get_elf_x86_64_backend_data (abfd)
2300 == &elf_x86_64_arch_bed))
2301 {
2302 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
2303
2304 /* Create the second PLT for Intel MPX support. */
2305 if (htab->plt_bnd == NULL)
2306 {
2307 unsigned int plt_bnd_align;
2308 const struct elf_backend_data *bed;
2309
2310 bed = get_elf_backend_data (info->output_bfd);
2311 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
2312 && (sizeof (elf_x86_64_bnd_plt2_entry)
2313 == sizeof (elf_x86_64_legacy_plt2_entry)));
2314 plt_bnd_align = 3;
2315
2316 if (htab->elf.dynobj == NULL)
2317 htab->elf.dynobj = abfd;
2318 htab->plt_bnd
2319 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2320 ".plt.bnd",
2321 (bed->dynamic_sec_flags
2322 | SEC_ALLOC
2323 | SEC_CODE
2324 | SEC_LOAD
2325 | SEC_READONLY));
2326 if (htab->plt_bnd == NULL
2327 || !bfd_set_section_alignment (htab->elf.dynobj,
2328 htab->plt_bnd,
2329 plt_bnd_align))
2330 goto error_return;
2331 }
2332 }
2333 /* Fall through. */
2334
2335 case R_X86_64_32S:
2336 case R_X86_64_PC64:
2337 case R_X86_64_GOTPCREL:
2338 case R_X86_64_GOTPCRELX:
2339 case R_X86_64_REX_GOTPCRELX:
2340 case R_X86_64_GOTPCREL64:
2341 if (htab->elf.dynobj == NULL)
2342 htab->elf.dynobj = abfd;
2343 /* Create the ifunc sections for static executables. */
2344 if (h->type == STT_GNU_IFUNC
2345 && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
2346 info))
2347 goto error_return;
2348 break;
2349 }
2350
2351 /* It is referenced by a non-shared object. */
2352 h->ref_regular = 1;
2353 h->root.non_ir_ref = 1;
2354
2355 if (h->type == STT_GNU_IFUNC)
2356 elf_tdata (info->output_bfd)->has_gnu_symbols
2357 |= elf_gnu_symbol_ifunc;
2358 }
2359
2360 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2361 symtab_hdr, sym_hashes,
2362 &r_type, GOT_UNKNOWN,
2363 rel, rel_end, h, r_symndx, FALSE))
2364 goto error_return;
2365
2366 eh = (struct elf_x86_64_link_hash_entry *) h;
2367 switch (r_type)
2368 {
2369 case R_X86_64_TLSLD:
2370 htab->tls_ld_got.refcount += 1;
2371 goto create_got;
2372
2373 case R_X86_64_TPOFF32:
2374 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2375 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2376 &x86_64_elf_howto_table[r_type]);
2377 if (eh != NULL)
2378 eh->has_got_reloc = 1;
2379 break;
2380
2381 case R_X86_64_GOTTPOFF:
2382 if (!bfd_link_executable (info))
2383 info->flags |= DF_STATIC_TLS;
2384 /* Fall through */
2385
2386 case R_X86_64_GOT32:
2387 case R_X86_64_GOTPCREL:
2388 case R_X86_64_GOTPCRELX:
2389 case R_X86_64_REX_GOTPCRELX:
2390 case R_X86_64_TLSGD:
2391 case R_X86_64_GOT64:
2392 case R_X86_64_GOTPCREL64:
2393 case R_X86_64_GOTPLT64:
2394 case R_X86_64_GOTPC32_TLSDESC:
2395 case R_X86_64_TLSDESC_CALL:
2396 /* This symbol requires a global offset table entry. */
2397 {
2398 int tls_type, old_tls_type;
2399
2400 switch (r_type)
2401 {
2402 default: tls_type = GOT_NORMAL; break;
2403 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2404 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2405 case R_X86_64_GOTPC32_TLSDESC:
2406 case R_X86_64_TLSDESC_CALL:
2407 tls_type = GOT_TLS_GDESC; break;
2408 }
2409
2410 if (h != NULL)
2411 {
2412 h->got.refcount += 1;
2413 old_tls_type = eh->tls_type;
2414 }
2415 else
2416 {
2417 bfd_signed_vma *local_got_refcounts;
2418
2419 /* This is a global offset table entry for a local symbol. */
2420 local_got_refcounts = elf_local_got_refcounts (abfd);
2421 if (local_got_refcounts == NULL)
2422 {
2423 bfd_size_type size;
2424
2425 size = symtab_hdr->sh_info;
2426 size *= sizeof (bfd_signed_vma)
2427 + sizeof (bfd_vma) + sizeof (char);
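/* The single allocation holds three parallel arrays of sh_info
   entries each: GOT reference counts, TLSDESC GOT offsets, and
   TLS type bytes; the assignments below carve it up.  */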
2428 local_got_refcounts = ((bfd_signed_vma *)
2429 bfd_zalloc (abfd, size));
2430 if (local_got_refcounts == NULL)
2431 goto error_return;
2432 elf_local_got_refcounts (abfd) = local_got_refcounts;
2433 elf_x86_64_local_tlsdesc_gotent (abfd)
2434 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2435 elf_x86_64_local_got_tls_type (abfd)
2436 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2437 }
2438 local_got_refcounts[r_symndx] += 1;
2439 old_tls_type
2440 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
2441 }
2442
2443 /* If a TLS symbol is accessed using IE at least once,
2444 there is no point in using a dynamic model for it. */
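/* (E.g. a symbol first seen with a GD access and later with an IE
   access ends up as GOT_TLS_IE; two GD-style accesses are merged
   by OR-ing their type bits; mixing a TLS and a non-TLS access is
   an error.)  */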
2445 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2446 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2447 || tls_type != GOT_TLS_IE))
2448 {
2449 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2450 tls_type = old_tls_type;
2451 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2452 && GOT_TLS_GD_ANY_P (tls_type))
2453 tls_type |= old_tls_type;
2454 else
2455 {
2456 if (h)
2457 name = h->root.root.string;
2458 else
2459 name = bfd_elf_sym_name (abfd, symtab_hdr,
2460 isym, NULL);
2461 _bfd_error_handler
2462 (_("%B: '%s' accessed both as normal and thread local symbol"),
2463 abfd, name);
2464 bfd_set_error (bfd_error_bad_value);
2465 goto error_return;
2466 }
2467 }
2468
2469 if (old_tls_type != tls_type)
2470 {
2471 if (eh != NULL)
2472 eh->tls_type = tls_type;
2473 else
2474 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
2475 }
2476 }
2477 /* Fall through */
2478
2479 case R_X86_64_GOTOFF64:
2480 case R_X86_64_GOTPC32:
2481 case R_X86_64_GOTPC64:
2482 create_got:
2483 if (eh != NULL)
2484 eh->has_got_reloc = 1;
2485 if (htab->elf.sgot == NULL)
2486 {
2487 if (htab->elf.dynobj == NULL)
2488 htab->elf.dynobj = abfd;
2489 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
2490 info))
2491 goto error_return;
2492 }
2493 break;
2494
2495 case R_X86_64_PLT32:
2496 case R_X86_64_PLT32_BND:
2497 /* This symbol requires a procedure linkage table entry. We
2498 actually build the entry in adjust_dynamic_symbol,
2499 because this might be a case of linking PIC code which is
2500 never referenced by a dynamic object, in which case we
2501 don't need to generate a procedure linkage table entry
2502 after all. */
2503
2504 /* If this is a local symbol, we resolve it directly without
2505 creating a procedure linkage table entry. */
2506 if (h == NULL)
2507 continue;
2508
2509 eh->has_got_reloc = 1;
2510 h->needs_plt = 1;
2511 h->plt.refcount += 1;
2512 break;
2513
2514 case R_X86_64_PLTOFF64:
2515 /* This tries to form the 'address' of a function relative
2516 to GOT. For global symbols we need a PLT entry. */
2517 if (h != NULL)
2518 {
2519 h->needs_plt = 1;
2520 h->plt.refcount += 1;
2521 }
2522 goto create_got;
2523
2524 case R_X86_64_SIZE32:
2525 case R_X86_64_SIZE64:
2526 size_reloc = TRUE;
2527 goto do_size;
2528
2529 case R_X86_64_32:
2530 if (!ABI_64_P (abfd))
2531 goto pointer;
2532 /* Fall through. */
2533 case R_X86_64_8:
2534 case R_X86_64_16:
2535 case R_X86_64_32S:
2536 /* Check relocation overflow as these relocs may lead to
2537 run-time relocation overflow. Don't error out for
2538 sections we don't care about, such as debug sections or
2539 when relocation overflow check is disabled. */
2540 if (!info->no_reloc_overflow_check
2541 && (bfd_link_pic (info)
2542 || (bfd_link_executable (info)
2543 && h != NULL
2544 && !h->def_regular
2545 && h->def_dynamic
2546 && (sec->flags & SEC_READONLY) == 0)))
2547 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2548 &x86_64_elf_howto_table[r_type]);
2549 /* Fall through. */
2550
2551 case R_X86_64_PC8:
2552 case R_X86_64_PC16:
2553 case R_X86_64_PC32:
2554 case R_X86_64_PC32_BND:
2555 case R_X86_64_PC64:
2556 case R_X86_64_64:
2557 pointer:
2558 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2559 eh->has_non_got_reloc = 1;
2560 /* We are called after all symbols have been resolved. Only
2561 relocations against STT_GNU_IFUNC symbols must go through
2562 the PLT. */
2563 if (h != NULL
2564 && (bfd_link_executable (info)
2565 || h->type == STT_GNU_IFUNC))
2566 {
2567 /* If this reloc is in a read-only section, we might
2568 need a copy reloc. We can't check reliably at this
2569 stage whether the section is read-only, as input
2570 sections have not yet been mapped to output sections.
2571 Tentatively set the flag for now, and correct in
2572 adjust_dynamic_symbol. */
2573 h->non_got_ref = 1;
2574
2575 /* We may need a .plt entry if the symbol is a function
2576 defined in a shared lib or is a STT_GNU_IFUNC function
2577 referenced from the code or read-only section. */
2578 if (!h->def_regular
2579 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2580 h->plt.refcount += 1;
2581
2582 if (r_type == R_X86_64_PC32)
2583 {
2584 /* Since something like ".long foo - ." may be used
2585 as a pointer, make sure that the PLT is used if foo is
2586 a function defined in a shared library. */
2587 if ((sec->flags & SEC_CODE) == 0)
2588 h->pointer_equality_needed = 1;
2589 }
2590 else if (r_type != R_X86_64_PC32_BND
2591 && r_type != R_X86_64_PC64)
2592 {
2593 h->pointer_equality_needed = 1;
2594 /* At run-time, R_X86_64_64 can be resolved for both
2595 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2596 can only be resolved for x32. */
2597 if ((sec->flags & SEC_READONLY) == 0
2598 && (r_type == R_X86_64_64
2599 || (!ABI_64_P (abfd)
2600 && (r_type == R_X86_64_32
2601 || r_type == R_X86_64_32S))))
2602 eh->func_pointer_refcount += 1;
2603 }
2604 }
2605
2606 size_reloc = FALSE;
2607 do_size:
2608 /* If we are creating a shared library, and this is a reloc
2609 against a global symbol, or a non PC relative reloc
2610 against a local symbol, then we need to copy the reloc
2611 into the shared library. However, if we are linking with
2612 -Bsymbolic, we do not need to copy a reloc against a
2613 global symbol which is defined in an object we are
2614 including in the link (i.e., DEF_REGULAR is set). At
2615 this point we have not seen all the input files, so it is
2616 possible that DEF_REGULAR is not set now but will be set
2617 later (it is never cleared). In case of a weak definition,
2618 DEF_REGULAR may be cleared later by a strong definition in
2619 a shared library. We account for that possibility below by
2620 storing information in the relocs_copied field of the hash
2621 table entry. A similar situation occurs when creating
2622 shared libraries and symbol visibility changes render the
2623 symbol local.
2624
2625 If on the other hand, we are creating an executable, we
2626 may need to keep relocations for symbols satisfied by a
2627 dynamic library if we manage to avoid copy relocs for the
2628 symbol.
2629
2630 Generate dynamic pointer relocation against STT_GNU_IFUNC
2631 symbol in the non-code section. */
2632 if ((bfd_link_pic (info)
2633 && (! IS_X86_64_PCREL_TYPE (r_type)
2634 || (h != NULL
2635 && (! (bfd_link_pie (info)
2636 || SYMBOLIC_BIND (info, h))
2637 || h->root.type == bfd_link_hash_defweak
2638 || !h->def_regular))))
2639 || (h != NULL
2640 && h->type == STT_GNU_IFUNC
2641 && r_type == htab->pointer_r_type
2642 && (sec->flags & SEC_CODE) == 0)
2643 || (ELIMINATE_COPY_RELOCS
2644 && !bfd_link_pic (info)
2645 && h != NULL
2646 && (h->root.type == bfd_link_hash_defweak
2647 || !h->def_regular)))
2648 {
2649 struct elf_dyn_relocs *p;
2650 struct elf_dyn_relocs **head;
2651
2652 /* We must copy these reloc types into the output file.
2653 Create a reloc section in dynobj and make room for
2654 this reloc. */
2655 if (sreloc == NULL)
2656 {
2657 if (htab->elf.dynobj == NULL)
2658 htab->elf.dynobj = abfd;
2659
2660 sreloc = _bfd_elf_make_dynamic_reloc_section
2661 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2662 abfd, /*rela?*/ TRUE);
2663
2664 if (sreloc == NULL)
2665 goto error_return;
2666 }
2667
2668 /* If this is a global symbol, we count the number of
2669 relocations we need for this symbol. */
2670 if (h != NULL)
2671 head = &eh->dyn_relocs;
2672 else
2673 {
2674 /* Track dynamic relocs needed for local syms too.
2675 We really need local syms available to do this
2676 easily. Oh well. */
2677 asection *s;
2678 void **vpp;
2679
2680 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2681 abfd, r_symndx);
2682 if (isym == NULL)
2683 goto error_return;
2684
2685 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2686 if (s == NULL)
2687 s = sec;
2688
2689 /* Beware of type punned pointers vs strict aliasing
2690 rules. */
2691 vpp = &(elf_section_data (s)->local_dynrel);
2692 head = (struct elf_dyn_relocs **)vpp;
2693 }
2694
2695 p = *head;
2696 if (p == NULL || p->sec != sec)
2697 {
2698 bfd_size_type amt = sizeof *p;
2699
2700 p = ((struct elf_dyn_relocs *)
2701 bfd_alloc (htab->elf.dynobj, amt));
2702 if (p == NULL)
2703 goto error_return;
2704 p->next = *head;
2705 *head = p;
2706 p->sec = sec;
2707 p->count = 0;
2708 p->pc_count = 0;
2709 }
2710
2711 p->count += 1;
2712 /* Count size relocation as PC-relative relocation. */
2713 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2714 p->pc_count += 1;
2715 }
2716 break;
2717
2718 /* This relocation describes the C++ object vtable hierarchy.
2719 Reconstruct it for later use during GC. */
2720 case R_X86_64_GNU_VTINHERIT:
2721 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2722 goto error_return;
2723 break;
2724
2725 /* This relocation describes which C++ vtable entries are actually
2726 used. Record for later use during GC. */
2727 case R_X86_64_GNU_VTENTRY:
2728 BFD_ASSERT (h != NULL);
2729 if (h != NULL
2730 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2731 goto error_return;
2732 break;
2733
2734 default:
2735 break;
2736 }
2737
2738 if (use_plt_got
2739 && h != NULL
2740 && h->plt.refcount > 0
2741 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2742 || h->got.refcount > 0)
2743 && htab->plt_got == NULL)
2744 {
2745 /* Create the GOT procedure linkage table. */
2746 unsigned int plt_got_align;
2747 const struct elf_backend_data *bed;
2748
2749 bed = get_elf_backend_data (info->output_bfd);
2750 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2751 && (sizeof (elf_x86_64_bnd_plt2_entry)
2752 == sizeof (elf_x86_64_legacy_plt2_entry)));
2753 plt_got_align = 3;
2754
2755 if (htab->elf.dynobj == NULL)
2756 htab->elf.dynobj = abfd;
2757 htab->plt_got
2758 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2759 ".plt.got",
2760 (bed->dynamic_sec_flags
2761 | SEC_ALLOC
2762 | SEC_CODE
2763 | SEC_LOAD
2764 | SEC_READONLY));
2765 if (htab->plt_got == NULL
2766 || !bfd_set_section_alignment (htab->elf.dynobj,
2767 htab->plt_got,
2768 plt_got_align))
2769 goto error_return;
2770 }
2771
2772 if ((r_type == R_X86_64_GOTPCREL
2773 || r_type == R_X86_64_GOTPCRELX
2774 || r_type == R_X86_64_REX_GOTPCRELX)
2775 && (h == NULL || h->type != STT_GNU_IFUNC))
2776 sec->need_convert_load = 1;
2777 }
2778
2779 if (elf_section_data (sec)->this_hdr.contents != contents)
2780 {
2781 if (!info->keep_memory)
2782 free (contents);
2783 else
2784 {
2785 /* Cache the section contents for elf_link_input_bfd. */
2786 elf_section_data (sec)->this_hdr.contents = contents;
2787 }
2788 }
2789
2790 return TRUE;
2791
2792 error_return:
2793 if (elf_section_data (sec)->this_hdr.contents != contents)
2794 free (contents);
2795 sec->check_relocs_failed = 1;
2796 return FALSE;
2797 }
2798
2799 /* Return the section that should be marked against GC for a given
2800 relocation. */
2801
2802 static asection *
2803 elf_x86_64_gc_mark_hook (asection *sec,
2804 struct bfd_link_info *info,
2805 Elf_Internal_Rela *rel,
2806 struct elf_link_hash_entry *h,
2807 Elf_Internal_Sym *sym)
2808 {
2809 if (h != NULL)
2810 switch (ELF32_R_TYPE (rel->r_info))
2811 {
2812 case R_X86_64_GNU_VTINHERIT:
2813 case R_X86_64_GNU_VTENTRY:
2814 return NULL;
2815 }
2816
2817 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2818 }
2819
2820 /* Remove undefined weak symbol from the dynamic symbol table if it
2821 is resolved to 0. */
2822
2823 static bfd_boolean
2824 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2825 struct elf_link_hash_entry *h)
2826 {
2827 if (h->dynindx != -1
2828 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2829 elf_x86_64_hash_entry (h)->has_got_reloc,
2830 elf_x86_64_hash_entry (h)))
2831 {
2832 h->dynindx = -1;
2833 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2834 h->dynstr_index);
2835 }
2836 return TRUE;
2837 }
2838
2839 /* Adjust a symbol defined by a dynamic object and referenced by a
2840 regular object. The current definition is in some section of the
2841 dynamic object, but we're not including those sections. We have to
2842 change the definition to something the rest of the link can
2843 understand. */
2844
2845 static bfd_boolean
2846 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2847 struct elf_link_hash_entry *h)
2848 {
2849 struct elf_x86_64_link_hash_table *htab;
2850 asection *s;
2851 struct elf_x86_64_link_hash_entry *eh;
2852 struct elf_dyn_relocs *p;
2853
2854 /* STT_GNU_IFUNC symbol must go through PLT. */
2855 if (h->type == STT_GNU_IFUNC)
2856 {
2857 /* All local STT_GNU_IFUNC references must be treated as local
2858 calls via the local PLT. */
2859 if (h->ref_regular
2860 && SYMBOL_CALLS_LOCAL (info, h))
2861 {
2862 bfd_size_type pc_count = 0, count = 0;
2863 struct elf_dyn_relocs **pp;
2864
2865 eh = (struct elf_x86_64_link_hash_entry *) h;
2866 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2867 {
2868 pc_count += p->pc_count;
2869 p->count -= p->pc_count;
2870 p->pc_count = 0;
2871 count += p->count;
2872 if (p->count == 0)
2873 *pp = p->next;
2874 else
2875 pp = &p->next;
2876 }
2877
2878 if (pc_count || count)
2879 {
2880 h->non_got_ref = 1;
2881 if (pc_count)
2882 {
2883 /* Increment PLT reference count only for PC-relative
2884 references. */
2885 h->needs_plt = 1;
2886 if (h->plt.refcount <= 0)
2887 h->plt.refcount = 1;
2888 else
2889 h->plt.refcount += 1;
2890 }
2891 }
2892 }
2893
2894 if (h->plt.refcount <= 0)
2895 {
2896 h->plt.offset = (bfd_vma) -1;
2897 h->needs_plt = 0;
2898 }
2899 return TRUE;
2900 }
2901
2902 /* If this is a function, put it in the procedure linkage table. We
2903 will fill in the contents of the procedure linkage table later,
2904 when we know the address of the .got section. */
2905 if (h->type == STT_FUNC
2906 || h->needs_plt)
2907 {
2908 if (h->plt.refcount <= 0
2909 || SYMBOL_CALLS_LOCAL (info, h)
2910 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2911 && h->root.type == bfd_link_hash_undefweak))
2912 {
2913 /* This case can occur if we saw a PLT32 reloc in an input
2914 file, but the symbol was never referred to by a dynamic
2915 object, or if all references were garbage collected. In
2916 such a case, we don't actually need to build a procedure
2917 linkage table, and we can just do a PC32 reloc instead. */
2918 h->plt.offset = (bfd_vma) -1;
2919 h->needs_plt = 0;
2920 }
2921
2922 return TRUE;
2923 }
2924 else
2925 /* It's possible that we incorrectly decided a .plt reloc was
2926 needed for an R_X86_64_PC32 reloc to a non-function sym in
2927 check_relocs. We can't decide accurately between function and
2928 non-function syms in check_relocs; objects loaded later in
2929 the link may change h->type. So fix it now. */
2930 h->plt.offset = (bfd_vma) -1;
2931
2932 /* If this is a weak symbol, and there is a real definition, the
2933 processor independent code will have arranged for us to see the
2934 real definition first, and we can just use the same value. */
2935 if (h->u.weakdef != NULL)
2936 {
2937 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2938 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2939 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2940 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2941 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2942 {
2943 eh = (struct elf_x86_64_link_hash_entry *) h;
2944 h->non_got_ref = h->u.weakdef->non_got_ref;
2945 eh->needs_copy = h->u.weakdef->needs_copy;
2946 }
2947 return TRUE;
2948 }
2949
2950 /* This is a reference to a symbol defined by a dynamic object which
2951 is not a function. */
2952
2953 /* If we are creating a shared library, we must presume that the
2954 only references to the symbol are via the global offset table.
2955 For such cases we need not do anything here; the relocations will
2956 be handled correctly by relocate_section. */
2957 if (!bfd_link_executable (info))
2958 return TRUE;
2959
2960 /* If there are no references to this symbol that do not use the
2961 GOT, we don't need to generate a copy reloc. */
2962 if (!h->non_got_ref)
2963 return TRUE;
2964
2965 /* If -z nocopyreloc was given, we won't generate them either. */
2966 if (info->nocopyreloc)
2967 {
2968 h->non_got_ref = 0;
2969 return TRUE;
2970 }
2971
2972 if (ELIMINATE_COPY_RELOCS)
2973 {
2974 eh = (struct elf_x86_64_link_hash_entry *) h;
2975 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2976 {
2977 s = p->sec->output_section;
2978 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2979 break;
2980 }
2981
2982 /* If we didn't find any dynamic relocs in read-only sections, then
2983 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2984 if (p == NULL)
2985 {
2986 h->non_got_ref = 0;
2987 return TRUE;
2988 }
2989 }
2990
2991 /* We must allocate the symbol in our .dynbss section, which will
2992 become part of the .bss section of the executable. There will be
2993 an entry for this symbol in the .dynsym section. The dynamic
2994 object will contain position independent code, so all references
2995 from the dynamic object to this symbol will go through the global
2996 offset table. The dynamic linker will use the .dynsym entry to
2997 determine the address it must put in the global offset table, so
2998 both the dynamic object and the regular object will refer to the
2999 same memory location for the variable. */
3000
3001 htab = elf_x86_64_hash_table (info);
3002 if (htab == NULL)
3003 return FALSE;
3004
3005 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
3006 to copy the initial value out of the dynamic object and into the
3007 runtime process image. */
3008 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3009 {
3010 const struct elf_backend_data *bed;
3011 bed = get_elf_backend_data (info->output_bfd);
3012 htab->srelbss->size += bed->s->sizeof_rela;
3013 h->needs_copy = 1;
3014 }
3015
3016 s = htab->sdynbss;
3017
3018 return _bfd_elf_adjust_dynamic_copy (info, h, s);
3019 }
3020
3021 /* Allocate space in .plt, .got and associated reloc sections for
3022 dynamic relocs. */
3023
3024 static bfd_boolean
3025 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
3026 {
3027 struct bfd_link_info *info;
3028 struct elf_x86_64_link_hash_table *htab;
3029 struct elf_x86_64_link_hash_entry *eh;
3030 struct elf_dyn_relocs *p;
3031 const struct elf_backend_data *bed;
3032 unsigned int plt_entry_size;
3033 bfd_boolean resolved_to_zero;
3034
3035 if (h->root.type == bfd_link_hash_indirect)
3036 return TRUE;
3037
3038 eh = (struct elf_x86_64_link_hash_entry *) h;
3039
3040 info = (struct bfd_link_info *) inf;
3041 htab = elf_x86_64_hash_table (info);
3042 if (htab == NULL)
3043 return FALSE;
3044 bed = get_elf_backend_data (info->output_bfd);
3045 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3046
3047 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3048 eh->has_got_reloc,
3049 eh);
3050
3051 /* We can't use the GOT PLT if pointer equality is needed since
3052 finish_dynamic_symbol won't clear symbol value and the dynamic
3053 linker won't update the GOT slot. We will get into an infinite
3054 loop at run-time. */
3055 if (htab->plt_got != NULL
3056 && h->type != STT_GNU_IFUNC
3057 && !h->pointer_equality_needed
3058 && h->plt.refcount > 0
3059 && h->got.refcount > 0)
3060 {
3061 /* Don't use the regular PLT if there are both GOT and GOTPLT
3062 relocations. */
3063 h->plt.offset = (bfd_vma) -1;
3064
3065 /* Use the GOT PLT. */
3066 eh->plt_got.refcount = 1;
3067 }
3068
3069 /* Clear the reference count of function pointer relocations if
3070 symbol isn't a normal function. */
3071 if (h->type != STT_FUNC)
3072 eh->func_pointer_refcount = 0;
3073
3074 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3075 here if it is defined and referenced in a non-shared object. */
3076 if (h->type == STT_GNU_IFUNC
3077 && h->def_regular)
3078 {
3079 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3080 &eh->dyn_relocs,
3081 &htab->readonly_dynrelocs_against_ifunc,
3082 plt_entry_size,
3083 plt_entry_size,
3084 GOT_ENTRY_SIZE, TRUE))
3085 {
3086 asection *s = htab->plt_bnd;
3087 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3088 {
3089 /* Use the .plt.bnd section if it is created. */
3090 eh->plt_bnd.offset = s->size;
3091
3092 /* Make room for this entry in the .plt.bnd section. */
3093 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3094 }
3095
3096 return TRUE;
3097 }
3098 else
3099 return FALSE;
3100 }
3101 /* Don't create the PLT entry if there are only function pointer
3102 relocations which can be resolved at run-time. */
3103 else if (htab->elf.dynamic_sections_created
3104 && (h->plt.refcount > eh->func_pointer_refcount
3105 || eh->plt_got.refcount > 0))
3106 {
3107 bfd_boolean use_plt_got;
3108
3109 /* Clear the reference count of function pointer relocations
3110 if PLT is used. */
3111 eh->func_pointer_refcount = 0;
3112
3113 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3114 {
3115 /* Don't use the regular PLT for DF_BIND_NOW. */
3116 h->plt.offset = (bfd_vma) -1;
3117
3118 /* Use the GOT PLT. */
3119 h->got.refcount = 1;
3120 eh->plt_got.refcount = 1;
3121 }
3122
3123 use_plt_got = eh->plt_got.refcount > 0;
3124
3125 /* Make sure this symbol is output as a dynamic symbol.
3126 Undefined weak syms won't yet be marked as dynamic. */
3127 if (h->dynindx == -1
3128 && !h->forced_local
3129 && !resolved_to_zero)
3130 {
3131 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3132 return FALSE;
3133 }
3134
3135 if (bfd_link_pic (info)
3136 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3137 {
3138 asection *s = htab->elf.splt;
3139 asection *bnd_s = htab->plt_bnd;
3140 asection *got_s = htab->plt_got;
3141
3142 /* If this is the first .plt entry, make room for the special
3143 first entry. The .plt section is used by prelink to undo
3144 prelinking for dynamic relocations. */
3145 if (s->size == 0)
3146 s->size = plt_entry_size;
3147
3148 if (use_plt_got)
3149 eh->plt_got.offset = got_s->size;
3150 else
3151 {
3152 h->plt.offset = s->size;
3153 if (bnd_s)
3154 eh->plt_bnd.offset = bnd_s->size;
3155 }
3156
3157 /* If this symbol is not defined in a regular file, and we are
3158 not generating a shared library, then set the symbol to this
3159 location in the .plt. This is required to make function
3160 pointers compare as equal between the normal executable and
3161 the shared library. */
3162 if (! bfd_link_pic (info)
3163 && !h->def_regular)
3164 {
3165 if (use_plt_got)
3166 {
3167 /* We need to make a call to the entry of the GOT PLT
3168 instead of regular PLT entry. */
3169 h->root.u.def.section = got_s;
3170 h->root.u.def.value = eh->plt_got.offset;
3171 }
3172 else
3173 {
3174 if (bnd_s)
3175 {
3176 /* We need to make a call to the entry of the second
3177 PLT instead of regular PLT entry. */
3178 h->root.u.def.section = bnd_s;
3179 h->root.u.def.value = eh->plt_bnd.offset;
3180 }
3181 else
3182 {
3183 h->root.u.def.section = s;
3184 h->root.u.def.value = h->plt.offset;
3185 }
3186 }
3187 }
3188
3189 /* Make room for this entry. */
3190 if (use_plt_got)
3191 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3192 else
3193 {
3194 s->size += plt_entry_size;
3195 if (bnd_s)
3196 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3197
3198 /* We also need to make an entry in the .got.plt section,
3199 which will be placed in the .got section by the linker
3200 script. */
3201 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3202
3203 /* There should be no PLT relocation against a resolved
3204 undefined weak symbol in an executable. */
3205 if (!resolved_to_zero)
3206 {
3207 /* We also need to make an entry in the .rela.plt
3208 section. */
3209 htab->elf.srelplt->size += bed->s->sizeof_rela;
3210 htab->elf.srelplt->reloc_count++;
3211 }
3212 }
3213 }
3214 else
3215 {
3216 eh->plt_got.offset = (bfd_vma) -1;
3217 h->plt.offset = (bfd_vma) -1;
3218 h->needs_plt = 0;
3219 }
3220 }
3221 else
3222 {
3223 eh->plt_got.offset = (bfd_vma) -1;
3224 h->plt.offset = (bfd_vma) -1;
3225 h->needs_plt = 0;
3226 }
3227
3228 eh->tlsdesc_got = (bfd_vma) -1;
3229
3230 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3231 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
3232 if (h->got.refcount > 0
3233 && bfd_link_executable (info)
3234 && h->dynindx == -1
3235 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3236 {
3237 h->got.offset = (bfd_vma) -1;
3238 }
3239 else if (h->got.refcount > 0)
3240 {
3241 asection *s;
3242 bfd_boolean dyn;
3243 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3244
3245 /* Make sure this symbol is output as a dynamic symbol.
3246 Undefined weak syms won't yet be marked as dynamic. */
3247 if (h->dynindx == -1
3248 && !h->forced_local
3249 && !resolved_to_zero)
3250 {
3251 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3252 return FALSE;
3253 }
3254
3255 if (GOT_TLS_GDESC_P (tls_type))
3256 {
3257 eh->tlsdesc_got = htab->elf.sgotplt->size
3258 - elf_x86_64_compute_jump_table_size (htab);
3259 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3260 h->got.offset = (bfd_vma) -2;
3261 }
3262 if (! GOT_TLS_GDESC_P (tls_type)
3263 || GOT_TLS_GD_P (tls_type))
3264 {
3265 s = htab->elf.sgot;
3266 h->got.offset = s->size;
3267 s->size += GOT_ENTRY_SIZE;
3268 if (GOT_TLS_GD_P (tls_type))
3269 s->size += GOT_ENTRY_SIZE;
3270 }
3271 dyn = htab->elf.dynamic_sections_created;
3272 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3273 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3274 relocation. No dynamic relocation against resolved undefined
3275 weak symbol in executable. */
3276 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3277 || tls_type == GOT_TLS_IE)
3278 htab->elf.srelgot->size += bed->s->sizeof_rela;
3279 else if (GOT_TLS_GD_P (tls_type))
3280 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3281 else if (! GOT_TLS_GDESC_P (tls_type)
3282 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3283 && !resolved_to_zero)
3284 || h->root.type != bfd_link_hash_undefweak)
3285 && (bfd_link_pic (info)
3286 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3287 htab->elf.srelgot->size += bed->s->sizeof_rela;
3288 if (GOT_TLS_GDESC_P (tls_type))
3289 {
3290 htab->elf.srelplt->size += bed->s->sizeof_rela;
3291 htab->tlsdesc_plt = (bfd_vma) -1;
3292 }
3293 }
3294 else
3295 h->got.offset = (bfd_vma) -1;
3296
3297 if (eh->dyn_relocs == NULL)
3298 return TRUE;
3299
3300 /* In the shared -Bsymbolic case, discard space allocated for
3301 dynamic pc-relative relocs against symbols which turn out to be
3302 defined in regular objects. For the normal shared case, discard
3303 space for pc-relative relocs that have become local due to symbol
3304 visibility changes. */
3305
3306 if (bfd_link_pic (info))
3307 {
3308 /* Relocs that use pc_count are those that appear on a call
3309 insn, or certain REL relocs that can be generated via assembly.
3310 We want calls to protected symbols to resolve directly to the
3311 function rather than going via the plt. If people want
3312 function pointer comparisons to work as expected then they
3313 should avoid writing weird assembly. */
3314 if (SYMBOL_CALLS_LOCAL (info, h))
3315 {
3316 struct elf_dyn_relocs **pp;
3317
3318 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3319 {
3320 p->count -= p->pc_count;
3321 p->pc_count = 0;
3322 if (p->count == 0)
3323 *pp = p->next;
3324 else
3325 pp = &p->next;
3326 }
3327 }
3328
3329 /* Also discard relocs on undefined weak syms with non-default
3330 visibility or in PIE. */
3331 if (eh->dyn_relocs != NULL)
3332 {
3333 if (h->root.type == bfd_link_hash_undefweak)
3334 {
3335 /* Undefined weak symbol is never bound locally in shared
3336 library. */
3337 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3338 || resolved_to_zero)
3339 eh->dyn_relocs = NULL;
3340 else if (h->dynindx == -1
3341 && ! h->forced_local
3342 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3343 return FALSE;
3344 }
3345 /* For PIE, discard space for pc-relative relocs against
3346 symbols which turn out to need copy relocs. */
3347 else if (bfd_link_executable (info)
3348 && (h->needs_copy || eh->needs_copy)
3349 && h->def_dynamic
3350 && !h->def_regular)
3351 {
3352 struct elf_dyn_relocs **pp;
3353
3354 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3355 {
3356 if (p->pc_count != 0)
3357 *pp = p->next;
3358 else
3359 pp = &p->next;
3360 }
3361 }
3362 }
3363 }
3364 else if (ELIMINATE_COPY_RELOCS)
3365 {
3366 /* For the non-shared case, discard space for relocs against
3367 symbols which turn out to need copy relocs or are not
3368 dynamic. Keep dynamic relocations for run-time function
3369 pointer initialization. */
3370
3371 if ((!h->non_got_ref
3372 || eh->func_pointer_refcount > 0
3373 || (h->root.type == bfd_link_hash_undefweak
3374 && !resolved_to_zero))
3375 && ((h->def_dynamic
3376 && !h->def_regular)
3377 || (htab->elf.dynamic_sections_created
3378 && (h->root.type == bfd_link_hash_undefweak
3379 || h->root.type == bfd_link_hash_undefined))))
3380 {
3381 /* Make sure this symbol is output as a dynamic symbol.
3382 Undefined weak syms won't yet be marked as dynamic. */
3383 if (h->dynindx == -1
3384 && ! h->forced_local
3385 && ! resolved_to_zero
3386 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3387 return FALSE;
3388
3389 /* If that succeeded, we know we'll be keeping all the
3390 relocs. */
3391 if (h->dynindx != -1)
3392 goto keep;
3393 }
3394
3395 eh->dyn_relocs = NULL;
3396 eh->func_pointer_refcount = 0;
3397
3398 keep: ;
3399 }
3400
3401 /* Finally, allocate space. */
3402 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3403 {
3404 asection * sreloc;
3405
3406 sreloc = elf_section_data (p->sec)->sreloc;
3407
3408 BFD_ASSERT (sreloc != NULL);
3409
3410 sreloc->size += p->count * bed->s->sizeof_rela;
3411 }
3412
3413 return TRUE;
3414 }
3415
3416 /* Allocate space in .plt, .got and associated reloc sections for
3417 local dynamic relocs. */
3418
3419 static bfd_boolean
3420 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3421 {
3422 struct elf_link_hash_entry *h
3423 = (struct elf_link_hash_entry *) *slot;
3424
3425 if (h->type != STT_GNU_IFUNC
3426 || !h->def_regular
3427 || !h->ref_regular
3428 || !h->forced_local
3429 || h->root.type != bfd_link_hash_defined)
3430 abort ();
3431
3432 return elf_x86_64_allocate_dynrelocs (h, inf);
3433 }
3434
3435 /* Find any dynamic relocs that apply to read-only sections. */
3436
3437 static bfd_boolean
3438 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3439 void * inf)
3440 {
3441 struct elf_x86_64_link_hash_entry *eh;
3442 struct elf_dyn_relocs *p;
3443
3444 /* Skip local IFUNC symbols. */
3445 if (h->forced_local && h->type == STT_GNU_IFUNC)
3446 return TRUE;
3447
3448 eh = (struct elf_x86_64_link_hash_entry *) h;
3449 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3450 {
3451 asection *s = p->sec->output_section;
3452
3453 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3454 {
3455 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3456
3457 info->flags |= DF_TEXTREL;
3458
3459 if ((info->warn_shared_textrel && bfd_link_pic (info))
3460 || info->error_textrel)
3461 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3462 p->sec->owner, h->root.root.string,
3463 p->sec);
3464
3465 /* Not an error, just cut short the traversal. */
3466 return FALSE;
3467 }
3468 }
3469 return TRUE;
3470 }
3471
3472 /* Convert load via the GOT slot to load immediate. */
3473
3474 static bfd_boolean
3475 elf_x86_64_convert_load (bfd *abfd, asection *sec,
3476 struct bfd_link_info *link_info)
3477 {
3478 Elf_Internal_Shdr *symtab_hdr;
3479 Elf_Internal_Rela *internal_relocs;
3480 Elf_Internal_Rela *irel, *irelend;
3481 bfd_byte *contents;
3482 struct elf_x86_64_link_hash_table *htab;
3483 bfd_boolean changed;
3484 bfd_signed_vma *local_got_refcounts;
3485
3486 /* Don't even try to convert non-ELF outputs. */
3487 if (!is_elf_hash_table (link_info->hash))
3488 return FALSE;
3489
3490 /* Nothing to do if there is no need or no output. */
3491 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3492 || sec->need_convert_load == 0
3493 || bfd_is_abs_section (sec->output_section))
3494 return TRUE;
3495
3496 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3497
3498 /* Load the relocations for this section. */
3499 internal_relocs = (_bfd_elf_link_read_relocs
3500 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3501 link_info->keep_memory));
3502 if (internal_relocs == NULL)
3503 return FALSE;
3504
3505 changed = FALSE;
3506 htab = elf_x86_64_hash_table (link_info);
3507 local_got_refcounts = elf_local_got_refcounts (abfd);
3508
3509 /* Get the section contents. */
3510 if (elf_section_data (sec)->this_hdr.contents != NULL)
3511 contents = elf_section_data (sec)->this_hdr.contents;
3512 else
3513 {
3514 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3515 goto error_return;
3516 }
3517
3518 irelend = internal_relocs + sec->reloc_count;
3519 for (irel = internal_relocs; irel < irelend; irel++)
3520 {
3521 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3522 unsigned int r_symndx;
3523 struct elf_link_hash_entry *h;
3524 bfd_boolean converted;
3525
3526 if (r_type != R_X86_64_GOTPCRELX
3527 && r_type != R_X86_64_REX_GOTPCRELX
3528 && r_type != R_X86_64_GOTPCREL)
3529 continue;
3530
3531 r_symndx = htab->r_sym (irel->r_info);
3532 if (r_symndx < symtab_hdr->sh_info)
3533 h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
3534 (const Elf_Internal_Rela *) irel,
3535 FALSE);
3536 else
3537 {
3538 h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
3539 while (h->root.type == bfd_link_hash_indirect
3540 || h->root.type == bfd_link_hash_warning)
3541 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3542 }
3543
3544 /* STT_GNU_IFUNC must keep GOTPCREL relocations. */
3545 if (h != NULL && h->type == STT_GNU_IFUNC)
3546 continue;
3547
3548 converted = FALSE;
3549 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
3550 &converted, link_info))
3551 goto error_return;
3552
3553 if (converted)
3554 {
3555 changed = converted;
3556 if (h)
3557 {
3558 if (h->got.refcount > 0)
3559 h->got.refcount -= 1;
3560 }
3561 else
3562 {
3563 if (local_got_refcounts != NULL
3564 && local_got_refcounts[r_symndx] > 0)
3565 local_got_refcounts[r_symndx] -= 1;
3566 }
3567 }
3568 }
3569
3570 if (contents != NULL
3571 && elf_section_data (sec)->this_hdr.contents != contents)
3572 {
3573 if (!changed && !link_info->keep_memory)
3574 free (contents);
3575 else
3576 {
3577 /* Cache the section contents for elf_link_input_bfd. */
3578 elf_section_data (sec)->this_hdr.contents = contents;
3579 }
3580 }
3581
3582 if (elf_section_data (sec)->relocs != internal_relocs)
3583 {
3584 if (!changed)
3585 free (internal_relocs);
3586 else
3587 elf_section_data (sec)->relocs = internal_relocs;
3588 }
3589
3590 return TRUE;
3591
3592 error_return:
3593 if (contents != NULL
3594 && elf_section_data (sec)->this_hdr.contents != contents)
3595 free (contents);
3596 if (internal_relocs != NULL
3597 && elf_section_data (sec)->relocs != internal_relocs)
3598 free (internal_relocs);
3599 return FALSE;
3600 }
3601
3602 /* Set the sizes of the dynamic sections. */
3603
3604 static bfd_boolean
3605 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3606 struct bfd_link_info *info)
3607 {
3608 struct elf_x86_64_link_hash_table *htab;
3609 bfd *dynobj;
3610 asection *s;
3611 bfd_boolean relocs;
3612 bfd *ibfd;
3613 const struct elf_backend_data *bed;
3614
3615 htab = elf_x86_64_hash_table (info);
3616 if (htab == NULL)
3617 return FALSE;
3618 bed = get_elf_backend_data (output_bfd);
3619
3620 dynobj = htab->elf.dynobj;
3621 if (dynobj == NULL)
3622 abort ();
3623
3624 /* Set up .got offsets for local syms, and space for local dynamic
3625 relocs. */
3626 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3627 {
3628 bfd_signed_vma *local_got;
3629 bfd_signed_vma *end_local_got;
3630 char *local_tls_type;
3631 bfd_vma *local_tlsdesc_gotent;
3632 bfd_size_type locsymcount;
3633 Elf_Internal_Shdr *symtab_hdr;
3634 asection *srel;
3635
3636 if (! is_x86_64_elf (ibfd))
3637 continue;
3638
3639 for (s = ibfd->sections; s != NULL; s = s->next)
3640 {
3641 struct elf_dyn_relocs *p;
3642
3643 if (!elf_x86_64_convert_load (ibfd, s, info))
3644 return FALSE;
3645
3646 for (p = (struct elf_dyn_relocs *)
3647 (elf_section_data (s)->local_dynrel);
3648 p != NULL;
3649 p = p->next)
3650 {
3651 if (!bfd_is_abs_section (p->sec)
3652 && bfd_is_abs_section (p->sec->output_section))
3653 {
3654 /* Input section has been discarded, either because
3655 it is a copy of a linkonce section or due to
3656 linker script /DISCARD/, so we'll be discarding
3657 the relocs too. */
3658 }
3659 else if (p->count != 0)
3660 {
3661 srel = elf_section_data (p->sec)->sreloc;
3662 srel->size += p->count * bed->s->sizeof_rela;
3663 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3664 && (info->flags & DF_TEXTREL) == 0)
3665 {
3666 info->flags |= DF_TEXTREL;
3667 if ((info->warn_shared_textrel && bfd_link_pic (info))
3668 || info->error_textrel)
3669 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3670 p->sec->owner, p->sec);
3671 }
3672 }
3673 }
3674 }
3675
3676 local_got = elf_local_got_refcounts (ibfd);
3677 if (!local_got)
3678 continue;
3679
3680 symtab_hdr = &elf_symtab_hdr (ibfd);
3681 locsymcount = symtab_hdr->sh_info;
3682 end_local_got = local_got + locsymcount;
3683 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3684 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3685 s = htab->elf.sgot;
3686 srel = htab->elf.srelgot;
3687 for (; local_got < end_local_got;
3688 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3689 {
3690 *local_tlsdesc_gotent = (bfd_vma) -1;
3691 if (*local_got > 0)
3692 {
3693 if (GOT_TLS_GDESC_P (*local_tls_type))
3694 {
3695 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3696 - elf_x86_64_compute_jump_table_size (htab);
3697 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3698 *local_got = (bfd_vma) -2;
3699 }
3700 if (! GOT_TLS_GDESC_P (*local_tls_type)
3701 || GOT_TLS_GD_P (*local_tls_type))
3702 {
3703 *local_got = s->size;
3704 s->size += GOT_ENTRY_SIZE;
3705 if (GOT_TLS_GD_P (*local_tls_type))
3706 s->size += GOT_ENTRY_SIZE;
3707 }
3708 if (bfd_link_pic (info)
3709 || GOT_TLS_GD_ANY_P (*local_tls_type)
3710 || *local_tls_type == GOT_TLS_IE)
3711 {
3712 if (GOT_TLS_GDESC_P (*local_tls_type))
3713 {
3714 htab->elf.srelplt->size
3715 += bed->s->sizeof_rela;
3716 htab->tlsdesc_plt = (bfd_vma) -1;
3717 }
3718 if (! GOT_TLS_GDESC_P (*local_tls_type)
3719 || GOT_TLS_GD_P (*local_tls_type))
3720 srel->size += bed->s->sizeof_rela;
3721 }
3722 }
3723 else
3724 *local_got = (bfd_vma) -1;
3725 }
3726 }
3727
3728 if (htab->tls_ld_got.refcount > 0)
3729 {
3730 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3731 relocs. */
3732 htab->tls_ld_got.offset = htab->elf.sgot->size;
3733 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3734 htab->elf.srelgot->size += bed->s->sizeof_rela;
3735 }
3736 else
3737 htab->tls_ld_got.offset = -1;
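
/* A concrete sizing example for the TLSLD case above, assuming the
   usual 64-bit values GOT_ENTRY_SIZE == 8 and sizeof_rela == 24: no
   matter how many R_X86_64_TLSLD relocations the link contains, one
   shared 2 * 8 == 16-byte pair is reserved in .got (module id plus a
   zero offset) together with one 24-byte R_X86_64_DTPMOD64 entry in
   .rela.got that fills in the module id at run time.  */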
3738
3739 /* Allocate global sym .plt and .got entries, and space for global
3740 sym dynamic relocs. */
3741 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3742 info);
3743
3744 /* Allocate .plt and .got entries, and space for local symbols. */
3745 htab_traverse (htab->loc_hash_table,
3746 elf_x86_64_allocate_local_dynrelocs,
3747 info);
3748
3749 /* For every jump slot reserved in the sgotplt, reloc_count is
3750 incremented. However, when we reserve space for TLS descriptors,
3751 it's not incremented, so in order to compute the space reserved
3752 for them, it suffices to multiply the reloc count by the jump
3753 slot size.
3754
3755 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3756 so that R_X86_64_IRELATIVE entries come last. */
3757 if (htab->elf.srelplt)
3758 {
3759 htab->sgotplt_jump_table_size
3760 = elf_x86_64_compute_jump_table_size (htab);
3761 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3762 }
3763 else if (htab->elf.irelplt)
3764 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
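
/* A worked example of that bookkeeping, assuming (as the comment
   above describes) that elf_x86_64_compute_jump_table_size simply
   multiplies the reloc count by the 8-byte jump-slot size: with 5
   jump slots counted in .rela.plt, sgotplt_jump_table_size is
   5 * 8 == 40 bytes and next_irelative_index starts at 4, the last
   slot, so R_X86_64_IRELATIVE entries are filled in from the end of
   .rela.plt.  */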
3765
3766 if (htab->tlsdesc_plt)
3767 {
3768 /* If we're not using lazy TLS relocations, don't generate the
3769 PLT and GOT entries they require. */
3770 if ((info->flags & DF_BIND_NOW))
3771 htab->tlsdesc_plt = 0;
3772 else
3773 {
3774 htab->tlsdesc_got = htab->elf.sgot->size;
3775 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3776 /* Reserve room for the initial entry.
3777 FIXME: we could probably do away with it in this case. */
3778 if (htab->elf.splt->size == 0)
3779 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3780 htab->tlsdesc_plt = htab->elf.splt->size;
3781 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3782 }
3783 }
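
/* A sizing note for the lazy TLSDESC path above, assuming the
   standard 16-byte x86-64 PLT entry returned by GET_PLT_ENTRY_SIZE:
   it reserves one extra 8-byte GOT entry (tlsdesc_got) and one extra
   PLT entry (tlsdesc_plt), plus the initial reserved PLT entry when
   .plt would otherwise have been empty.  */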
3784
3785 if (htab->elf.sgotplt)
3786 {
3787 /* Don't allocate the .got.plt section if there are no GOT or PLT
3788 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3789 if ((htab->elf.hgot == NULL
3790 || !htab->elf.hgot->ref_regular_nonweak)
3791 && (htab->elf.sgotplt->size
3792 == get_elf_backend_data (output_bfd)->got_header_size)
3793 && (htab->elf.splt == NULL
3794 || htab->elf.splt->size == 0)
3795 && (htab->elf.sgot == NULL
3796 || htab->elf.sgot->size == 0)
3797 && (htab->elf.iplt == NULL
3798 || htab->elf.iplt->size == 0)
3799 && (htab->elf.igotplt == NULL
3800 || htab->elf.igotplt->size == 0))
3801 htab->elf.sgotplt->size = 0;
3802 }
3803
3804 if (htab->plt_eh_frame != NULL
3805 && htab->elf.splt != NULL
3806 && htab->elf.splt->size != 0
3807 && !bfd_is_abs_section (htab->elf.splt->output_section)
3808 && _bfd_elf_eh_frame_present (info))
3809 {
3810 const struct elf_x86_64_backend_data *arch_data
3811 = get_elf_x86_64_arch_data (bed);
3812 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3813 }
3814
3815 /* We now have determined the sizes of the various dynamic sections.
3816 Allocate memory for them. */
3817 relocs = FALSE;
3818 for (s = dynobj->sections; s != NULL; s = s->next)
3819 {
3820 if ((s->flags & SEC_LINKER_CREATED) == 0)
3821 continue;
3822
3823 if (s == htab->elf.splt
3824 || s == htab->elf.sgot
3825 || s == htab->elf.sgotplt
3826 || s == htab->elf.iplt
3827 || s == htab->elf.igotplt
3828 || s == htab->plt_bnd
3829 || s == htab->plt_got
3830 || s == htab->plt_eh_frame
3831 || s == htab->sdynbss)
3832 {
3833 /* Strip this section if we don't need it; see the
3834 comment below. */
3835 }
3836 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3837 {
3838 if (s->size != 0 && s != htab->elf.srelplt)
3839 relocs = TRUE;
3840
3841 /* We use the reloc_count field as a counter if we need
3842 to copy relocs into the output file. */
3843 if (s != htab->elf.srelplt)
3844 s->reloc_count = 0;
3845 }
3846 else
3847 {
3848 /* It's not one of our sections, so don't allocate space. */
3849 continue;
3850 }
3851
3852 if (s->size == 0)
3853 {
3854 /* If we don't need this section, strip it from the
3855 output file. This is mostly to handle .rela.bss and
3856 .rela.plt. We must create both sections in
3857 create_dynamic_sections, because they must be created
3858 before the linker maps input sections to output
3859 sections. The linker does that before
3860 adjust_dynamic_symbol is called, and it is that
3861 function which decides whether anything needs to go
3862 into these sections. */
3863
3864 s->flags |= SEC_EXCLUDE;
3865 continue;
3866 }
3867
3868 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3869 continue;
3870
3871 /* Allocate memory for the section contents. We use bfd_zalloc
3872 here in case unused entries are not reclaimed before the
3873 section's contents are written out. This should not happen,
3874 but this way if it does, we get a R_X86_64_NONE reloc instead
3875 of garbage. */
3876 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3877 if (s->contents == NULL)
3878 return FALSE;
3879 }
3880
3881 if (htab->plt_eh_frame != NULL
3882 && htab->plt_eh_frame->contents != NULL)
3883 {
3884 const struct elf_x86_64_backend_data *arch_data
3885 = get_elf_x86_64_arch_data (bed);
3886
3887 memcpy (htab->plt_eh_frame->contents,
3888 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3889 bfd_put_32 (dynobj, htab->elf.splt->size,
3890 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3891 }
3892
3893 if (htab->elf.dynamic_sections_created)
3894 {
3895 /* Add some entries to the .dynamic section. We fill in the
3896 values later, in elf_x86_64_finish_dynamic_sections, but we
3897 must add the entries now so that we get the correct size for
3898 the .dynamic section. The DT_DEBUG entry is filled in by the
3899 dynamic linker and used by the debugger. */
3900 #define add_dynamic_entry(TAG, VAL) \
3901 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3902
3903 if (bfd_link_executable (info))
3904 {
3905 if (!add_dynamic_entry (DT_DEBUG, 0))
3906 return FALSE;
3907 }
3908
3909 if (htab->elf.splt->size != 0)
3910 {
3911 /* DT_PLTGOT is used by prelink even if there is no PLT
3912 relocation. */
3913 if (!add_dynamic_entry (DT_PLTGOT, 0))
3914 return FALSE;
3915
3916 if (htab->elf.srelplt->size != 0)
3917 {
3918 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3919 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3920 || !add_dynamic_entry (DT_JMPREL, 0))
3921 return FALSE;
3922 }
3923
3924 if (htab->tlsdesc_plt
3925 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3926 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3927 return FALSE;
3928 }
3929
3930 if (relocs)
3931 {
3932 if (!add_dynamic_entry (DT_RELA, 0)
3933 || !add_dynamic_entry (DT_RELASZ, 0)
3934 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3935 return FALSE;
3936
3937 /* If any dynamic relocs apply to a read-only section,
3938 then we need a DT_TEXTREL entry. */
3939 if ((info->flags & DF_TEXTREL) == 0)
3940 elf_link_hash_traverse (&htab->elf,
3941 elf_x86_64_readonly_dynrelocs,
3942 info);
3943
3944 if ((info->flags & DF_TEXTREL) != 0)
3945 {
3946 if (htab->readonly_dynrelocs_against_ifunc)
3947 {
3948 info->callbacks->einfo
3949 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3950 bfd_set_error (bfd_error_bad_value);
3951 return FALSE;
3952 }
3953
3954 if (!add_dynamic_entry (DT_TEXTREL, 0))
3955 return FALSE;
3956 }
3957 }
3958 }
3959 #undef add_dynamic_entry
3960
3961 return TRUE;
3962 }
3963
3964 static bfd_boolean
3965 elf_x86_64_always_size_sections (bfd *output_bfd,
3966 struct bfd_link_info *info)
3967 {
3968 asection *tls_sec = elf_hash_table (info)->tls_sec;
3969
3970 if (tls_sec)
3971 {
3972 struct elf_link_hash_entry *tlsbase;
3973
3974 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3975 "_TLS_MODULE_BASE_",
3976 FALSE, FALSE, FALSE);
3977
3978 if (tlsbase && tlsbase->type == STT_TLS)
3979 {
3980 struct elf_x86_64_link_hash_table *htab;
3981 struct bfd_link_hash_entry *bh = NULL;
3982 const struct elf_backend_data *bed
3983 = get_elf_backend_data (output_bfd);
3984
3985 htab = elf_x86_64_hash_table (info);
3986 if (htab == NULL)
3987 return FALSE;
3988
3989 if (!(_bfd_generic_link_add_one_symbol
3990 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3991 tls_sec, 0, NULL, FALSE,
3992 bed->collect, &bh)))
3993 return FALSE;
3994
3995 htab->tls_module_base = bh;
3996
3997 tlsbase = (struct elf_link_hash_entry *)bh;
3998 tlsbase->def_regular = 1;
3999 tlsbase->other = STV_HIDDEN;
4000 tlsbase->root.linker_def = 1;
4001 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
4002 }
4003 }
4004
4005 return TRUE;
4006 }
4007
4008 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
4009 executables. Rather than setting it to the beginning of the TLS
4010 section, we have to set it to the end. This function may be called
4011 multiple times; it is idempotent. */
4012
4013 static void
4014 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
4015 {
4016 struct elf_x86_64_link_hash_table *htab;
4017 struct bfd_link_hash_entry *base;
4018
4019 if (!bfd_link_executable (info))
4020 return;
4021
4022 htab = elf_x86_64_hash_table (info);
4023 if (htab == NULL)
4024 return;
4025
4026 base = htab->tls_module_base;
4027 if (base == NULL)
4028 return;
4029
4030 base->u.def.value = htab->elf.tls_size;
4031 }
4032
4033 /* Return the base VMA address which should be subtracted from real addresses
4034 when resolving @dtpoff relocation.
4035 This is PT_TLS segment p_vaddr. */
4036
4037 static bfd_vma
4038 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4039 {
4040 /* If tls_sec is NULL, we should have signalled an error already. */
4041 if (elf_hash_table (info)->tls_sec == NULL)
4042 return 0;
4043 return elf_hash_table (info)->tls_sec->vma;
4044 }
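
/* A small worked example of the convention above (addresses are
   hypothetical): if the PT_TLS segment starts at vma 0x601000 and a
   TLS variable lives at vma 0x601010, its @dtpoff value is
   0x601010 - 0x601000 == 0x10, i.e. its offset from the start of the
   module's TLS block.  */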
4045
4046 /* Return the relocation value for @tpoff relocation
4047 if STT_TLS virtual address is ADDRESS. */
4048
4049 static bfd_vma
4050 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4051 {
4052 struct elf_link_hash_table *htab = elf_hash_table (info);
4053 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4054 bfd_vma static_tls_size;
4055
4056 /* If tls_segment is NULL, we should have signalled an error already. */
4057 if (htab->tls_sec == NULL)
4058 return 0;
4059
4060 /* Consider special static TLS alignment requirements. */
4061 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
4062 return address - static_tls_size - htab->tls_sec->vma;
4063 }
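
/* A worked example of the formula above (all numbers hypothetical):
   with tls_sec->vma == 0x601000, tls_size == 0x18 and a static TLS
   alignment of 16, static_tls_size is BFD_ALIGN (0x18, 16) == 0x20,
   so a variable at vma 0x601008 gets
   0x601008 - 0x20 - 0x601000 == -0x18, a negative offset from the
   thread pointer, as the x86-64 TLS ABI (variant II) expects for a
   local-exec access such as "movq %fs:-0x18, %rax".  */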
4064
4065 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4066 branch? */
4067
4068 static bfd_boolean
4069 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4070 {
4071 /* Opcode Instruction
4072 0xe8 call
4073 0xe9 jump
4074 0x0f 0x8x conditional jump */
4075 return ((offset > 0
4076 && (contents [offset - 1] == 0xe8
4077 || contents [offset - 1] == 0xe9))
4078 || (offset > 1
4079 && contents [offset - 2] == 0x0f
4080 && (contents [offset - 1] & 0xf0) == 0x80));
4081 }
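
/* A minimal usage sketch, kept under "#if 0" so it is not built; the
   helper name below is hypothetical.  OFFSET is the r_offset of a
   PC32-style relocation, which points at the rel32 immediate, so the
   opcode bytes sit one or two bytes before it.  */
#if 0
static void
is_32bit_relative_branch_example (void)
{
  /* e8 rel32 is "call"; its relocation offset would be 1.  */
  static bfd_byte call_insn[] = { 0xe8, 0, 0, 0, 0 };
  /* 0f 84 rel32 is "je"; its relocation offset would be 2.  */
  static bfd_byte je_insn[] = { 0x0f, 0x84, 0, 0, 0, 0 };

  BFD_ASSERT (is_32bit_relative_branch (call_insn, 1));
  BFD_ASSERT (is_32bit_relative_branch (je_insn, 2));
}
#endif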
4082
4083 /* Relocate an x86_64 ELF section. */
4084
4085 static bfd_boolean
4086 elf_x86_64_relocate_section (bfd *output_bfd,
4087 struct bfd_link_info *info,
4088 bfd *input_bfd,
4089 asection *input_section,
4090 bfd_byte *contents,
4091 Elf_Internal_Rela *relocs,
4092 Elf_Internal_Sym *local_syms,
4093 asection **local_sections)
4094 {
4095 struct elf_x86_64_link_hash_table *htab;
4096 Elf_Internal_Shdr *symtab_hdr;
4097 struct elf_link_hash_entry **sym_hashes;
4098 bfd_vma *local_got_offsets;
4099 bfd_vma *local_tlsdesc_gotents;
4100 Elf_Internal_Rela *rel;
4101 Elf_Internal_Rela *wrel;
4102 Elf_Internal_Rela *relend;
4103 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4104
4105 BFD_ASSERT (is_x86_64_elf (input_bfd));
4106
4107 /* Skip if check_relocs failed. */
4108 if (input_section->check_relocs_failed)
4109 return FALSE;
4110
4111 htab = elf_x86_64_hash_table (info);
4112 if (htab == NULL)
4113 return FALSE;
4114 symtab_hdr = &elf_symtab_hdr (input_bfd);
4115 sym_hashes = elf_sym_hashes (input_bfd);
4116 local_got_offsets = elf_local_got_offsets (input_bfd);
4117 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4118
4119 elf_x86_64_set_tls_module_base (info);
4120
4121 rel = wrel = relocs;
4122 relend = relocs + input_section->reloc_count;
4123 for (; rel < relend; wrel++, rel++)
4124 {
4125 unsigned int r_type;
4126 reloc_howto_type *howto;
4127 unsigned long r_symndx;
4128 struct elf_link_hash_entry *h;
4129 struct elf_x86_64_link_hash_entry *eh;
4130 Elf_Internal_Sym *sym;
4131 asection *sec;
4132 bfd_vma off, offplt, plt_offset;
4133 bfd_vma relocation;
4134 bfd_boolean unresolved_reloc;
4135 bfd_reloc_status_type r;
4136 int tls_type;
4137 asection *base_got, *resolved_plt;
4138 bfd_vma st_size;
4139 bfd_boolean resolved_to_zero;
4140
4141 r_type = ELF32_R_TYPE (rel->r_info);
4142 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4143 || r_type == (int) R_X86_64_GNU_VTENTRY)
4144 {
4145 if (wrel != rel)
4146 *wrel = *rel;
4147 continue;
4148 }
4149
4150 if (r_type >= (int) R_X86_64_standard)
4151 {
4152 _bfd_error_handler
4153 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4154 input_bfd, input_section, r_type);
4155 bfd_set_error (bfd_error_bad_value);
4156 return FALSE;
4157 }
4158
4159 if (r_type != (int) R_X86_64_32
4160 || ABI_64_P (output_bfd))
4161 howto = x86_64_elf_howto_table + r_type;
4162 else
4163 howto = (x86_64_elf_howto_table
4164 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4165 r_symndx = htab->r_sym (rel->r_info);
4166 h = NULL;
4167 sym = NULL;
4168 sec = NULL;
4169 unresolved_reloc = FALSE;
4170 if (r_symndx < symtab_hdr->sh_info)
4171 {
4172 sym = local_syms + r_symndx;
4173 sec = local_sections[r_symndx];
4174
4175 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4176 &sec, rel);
4177 st_size = sym->st_size;
4178
4179 /* Relocate against local STT_GNU_IFUNC symbol. */
4180 if (!bfd_link_relocatable (info)
4181 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4182 {
4183 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4184 rel, FALSE);
4185 if (h == NULL)
4186 abort ();
4187
4188 /* Set STT_GNU_IFUNC symbol value. */
4189 h->root.u.def.value = sym->st_value;
4190 h->root.u.def.section = sec;
4191 }
4192 }
4193 else
4194 {
4195 bfd_boolean warned ATTRIBUTE_UNUSED;
4196 bfd_boolean ignored ATTRIBUTE_UNUSED;
4197
4198 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4199 r_symndx, symtab_hdr, sym_hashes,
4200 h, sec, relocation,
4201 unresolved_reloc, warned, ignored);
4202 st_size = h->size;
4203 }
4204
4205 if (sec != NULL && discarded_section (sec))
4206 {
4207 _bfd_clear_contents (howto, input_bfd, input_section,
4208 contents + rel->r_offset);
4209 wrel->r_offset = rel->r_offset;
4210 wrel->r_info = 0;
4211 wrel->r_addend = 0;
4212
4213 /* For ld -r, remove relocations in debug sections against
4214 sections defined in discarded sections. Not done for .eh_frame,
4215 whose relocations the eh_frame editing code expects to be present. */
4216 if (bfd_link_relocatable (info)
4217 && (input_section->flags & SEC_DEBUGGING))
4218 wrel--;
4219
4220 continue;
4221 }
4222
4223 if (bfd_link_relocatable (info))
4224 {
4225 if (wrel != rel)
4226 *wrel = *rel;
4227 continue;
4228 }
4229
4230 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4231 {
4232 if (r_type == R_X86_64_64)
4233 {
4234 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4235 zero-extend it to 64bit if addend is zero. */
4236 r_type = R_X86_64_32;
4237 memset (contents + rel->r_offset + 4, 0, 4);
4238 }
4239 else if (r_type == R_X86_64_SIZE64)
4240 {
4241 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4242 zero-extend it to 64bit if addend is zero. */
4243 r_type = R_X86_64_SIZE32;
4244 memset (contents + rel->r_offset + 4, 0, 4);
4245 }
4246 }
4247
4248 eh = (struct elf_x86_64_link_hash_entry *) h;
4249
4250 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4251 it here if it is defined in a non-shared object. */
4252 if (h != NULL
4253 && h->type == STT_GNU_IFUNC
4254 && h->def_regular)
4255 {
4256 bfd_vma plt_index;
4257 const char *name;
4258
4259 if ((input_section->flags & SEC_ALLOC) == 0)
4260 {
4261 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4262 sections because such sections are not SEC_ALLOC and
4263 thus ld.so will not process them. */
4264 if ((input_section->flags & SEC_DEBUGGING) != 0)
4265 continue;
4266 abort ();
4267 }
4268
4269 switch (r_type)
4270 {
4271 default:
4272 break;
4273
4274 case R_X86_64_GOTPCREL:
4275 case R_X86_64_GOTPCRELX:
4276 case R_X86_64_REX_GOTPCRELX:
4277 case R_X86_64_GOTPCREL64:
4278 base_got = htab->elf.sgot;
4279 off = h->got.offset;
4280
4281 if (base_got == NULL)
4282 abort ();
4283
4284 if (off == (bfd_vma) -1)
4285 {
4286 /* We can't use h->got.offset here to save state, or
4287 even just remember the offset, as finish_dynamic_symbol
4288 would use that as offset into .got. */
4289
4290 if (h->plt.offset == (bfd_vma) -1)
4291 abort ();
4292
4293 if (htab->elf.splt != NULL)
4294 {
4295 plt_index = h->plt.offset / plt_entry_size - 1;
4296 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4297 base_got = htab->elf.sgotplt;
4298 }
4299 else
4300 {
4301 plt_index = h->plt.offset / plt_entry_size;
4302 off = plt_index * GOT_ENTRY_SIZE;
4303 base_got = htab->elf.igotplt;
4304 }
4305
4306 if (h->dynindx == -1
4307 || h->forced_local
4308 || info->symbolic)
4309 {
4310 /* This references the local definition. We must
4311 initialize this entry in the global offset table.
4312 Since the offset must always be a multiple of 8,
4313 we use the least significant bit to record
4314 whether we have initialized it already.
4315
4316 When doing a dynamic link, we create a .rela.got
4317 relocation entry to initialize the value. This
4318 is done in the finish_dynamic_symbol routine. */
4319 if ((off & 1) != 0)
4320 off &= ~1;
4321 else
4322 {
4323 bfd_put_64 (output_bfd, relocation,
4324 base_got->contents + off);
4325 /* Note that this is harmless for the GOTPLT64
4326 case, as -1 | 1 still is -1. */
4327 h->got.offset |= 1;
4328 }
4329 }
4330 }
4331
4332 relocation = (base_got->output_section->vma
4333 + base_got->output_offset + off);
4334
4335 goto do_relocation;
4336 }
4337
4338 if (h->plt.offset == (bfd_vma) -1)
4339 {
4340 /* Handle static pointers of STT_GNU_IFUNC symbols. */
4341 if (r_type == htab->pointer_r_type
4342 && (input_section->flags & SEC_CODE) == 0)
4343 goto do_ifunc_pointer;
4344 goto bad_ifunc_reloc;
4345 }
4346
4347 /* STT_GNU_IFUNC symbol must go through PLT. */
4348 if (htab->elf.splt != NULL)
4349 {
4350 if (htab->plt_bnd != NULL)
4351 {
4352 resolved_plt = htab->plt_bnd;
4353 plt_offset = eh->plt_bnd.offset;
4354 }
4355 else
4356 {
4357 resolved_plt = htab->elf.splt;
4358 plt_offset = h->plt.offset;
4359 }
4360 }
4361 else
4362 {
4363 resolved_plt = htab->elf.iplt;
4364 plt_offset = h->plt.offset;
4365 }
4366
4367 relocation = (resolved_plt->output_section->vma
4368 + resolved_plt->output_offset + plt_offset);
4369
4370 switch (r_type)
4371 {
4372 default:
4373 bad_ifunc_reloc:
4374 if (h->root.root.string)
4375 name = h->root.root.string;
4376 else
4377 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4378 NULL);
4379 _bfd_error_handler
4380 (_("%B: relocation %s against STT_GNU_IFUNC "
4381 "symbol `%s' isn't supported"), input_bfd,
4382 howto->name, name);
4383 bfd_set_error (bfd_error_bad_value);
4384 return FALSE;
4385
4386 case R_X86_64_32S:
4387 if (bfd_link_pic (info))
4388 abort ();
4389 goto do_relocation;
4390
4391 case R_X86_64_32:
4392 if (ABI_64_P (output_bfd))
4393 goto do_relocation;
4394 /* FALLTHROUGH */
4395 case R_X86_64_64:
4396 do_ifunc_pointer:
4397 if (rel->r_addend != 0)
4398 {
4399 if (h->root.root.string)
4400 name = h->root.root.string;
4401 else
4402 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4403 sym, NULL);
4404 _bfd_error_handler
4405 (_("%B: relocation %s against STT_GNU_IFUNC "
4406 "symbol `%s' has non-zero addend: %d"),
4407 input_bfd, howto->name, name, rel->r_addend);
4408 bfd_set_error (bfd_error_bad_value);
4409 return FALSE;
4410 }
4411
4412 /* Generate a dynamic relocation only when there is a
4413 non-GOT reference in a shared object or there is no
4414 PLT. */
4415 if ((bfd_link_pic (info) && h->non_got_ref)
4416 || h->plt.offset == (bfd_vma) -1)
4417 {
4418 Elf_Internal_Rela outrel;
4419 asection *sreloc;
4420
4421 /* Need a dynamic relocation to get the real function
4422 address. */
4423 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4424 info,
4425 input_section,
4426 rel->r_offset);
4427 if (outrel.r_offset == (bfd_vma) -1
4428 || outrel.r_offset == (bfd_vma) -2)
4429 abort ();
4430
4431 outrel.r_offset += (input_section->output_section->vma
4432 + input_section->output_offset);
4433
4434 if (h->dynindx == -1
4435 || h->forced_local
4436 || bfd_link_executable (info))
4437 {
4438 /* This symbol is resolved locally. */
4439 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4440 outrel.r_addend = (h->root.u.def.value
4441 + h->root.u.def.section->output_section->vma
4442 + h->root.u.def.section->output_offset);
4443 }
4444 else
4445 {
4446 outrel.r_info = htab->r_info (h->dynindx, r_type);
4447 outrel.r_addend = 0;
4448 }
4449
4450 /* Dynamic relocations are stored in
4451 1. .rela.ifunc section in PIC object.
4452 2. .rela.got section in dynamic executable.
4453 3. .rela.iplt section in static executable. */
4454 if (bfd_link_pic (info))
4455 sreloc = htab->elf.irelifunc;
4456 else if (htab->elf.splt != NULL)
4457 sreloc = htab->elf.srelgot;
4458 else
4459 sreloc = htab->elf.irelplt;
4460 elf_append_rela (output_bfd, sreloc, &outrel);
4461
4462 /* If this reloc is against an external symbol, we
4463 do not want to fiddle with the addend. Otherwise,
4464 we need to include the symbol value so that it
4465 becomes an addend for the dynamic reloc. For an
4466 internal symbol, the addend has already been updated above. */
4467 continue;
4468 }
4469 /* FALLTHROUGH */
4470 case R_X86_64_PC32:
4471 case R_X86_64_PC32_BND:
4472 case R_X86_64_PC64:
4473 case R_X86_64_PLT32:
4474 case R_X86_64_PLT32_BND:
4475 goto do_relocation;
4476 }
4477 }
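
/* For background, an R_X86_64_IRELATIVE entry emitted above is
   resolved by the dynamic linker (or by the static startup code)
   roughly as the following sketch, which glosses over the load bias
   applied to r_offset and r_addend:

       *(Elf64_Addr *) r_offset = ((Elf64_Addr (*) (void)) r_addend) ();

   i.e. the addend names the IFUNC resolver and its return value
   becomes the final pointer value.  */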
4478
4479 resolved_to_zero = (eh != NULL
4480 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4481 eh->has_got_reloc,
4482 eh));
4483
4484 /* When generating a shared object, the relocations handled here are
4485 copied into the output file to be resolved at run time. */
4486 switch (r_type)
4487 {
4488 case R_X86_64_GOT32:
4489 case R_X86_64_GOT64:
4490 /* Relocation is to the entry for this symbol in the global
4491 offset table. */
4492 case R_X86_64_GOTPCREL:
4493 case R_X86_64_GOTPCRELX:
4494 case R_X86_64_REX_GOTPCRELX:
4495 case R_X86_64_GOTPCREL64:
4496 /* Use global offset table entry as symbol value. */
4497 case R_X86_64_GOTPLT64:
4498 /* This is obsolete and treated the same as GOT64. */
4499 base_got = htab->elf.sgot;
4500
4501 if (htab->elf.sgot == NULL)
4502 abort ();
4503
4504 if (h != NULL)
4505 {
4506 bfd_boolean dyn;
4507
4508 off = h->got.offset;
4509 if (h->needs_plt
4510 && h->plt.offset != (bfd_vma)-1
4511 && off == (bfd_vma)-1)
4512 {
4513 /* We can't use h->got.offset here to save
4514 state, or even just remember the offset, as
4515 finish_dynamic_symbol would use that as offset into
4516 .got. */
4517 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4518 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4519 base_got = htab->elf.sgotplt;
4520 }
4521
4522 dyn = htab->elf.dynamic_sections_created;
4523
4524 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4525 || (bfd_link_pic (info)
4526 && SYMBOL_REFERENCES_LOCAL (info, h))
4527 || (ELF_ST_VISIBILITY (h->other)
4528 && h->root.type == bfd_link_hash_undefweak))
4529 {
4530 /* This is actually a static link, or it is a -Bsymbolic
4531 link and the symbol is defined locally, or the symbol
4532 was forced to be local because of a version file. We
4533 must initialize this entry in the global offset table.
4534 Since the offset must always be a multiple of 8, we
4535 use the least significant bit to record whether we
4536 have initialized it already.
4537
4538 When doing a dynamic link, we create a .rela.got
4539 relocation entry to initialize the value. This is
4540 done in the finish_dynamic_symbol routine. */
4541 if ((off & 1) != 0)
4542 off &= ~1;
4543 else
4544 {
4545 bfd_put_64 (output_bfd, relocation,
4546 base_got->contents + off);
4547 /* Note that this is harmless for the GOTPLT64 case,
4548 as -1 | 1 still is -1. */
4549 h->got.offset |= 1;
4550 }
4551 }
4552 else
4553 unresolved_reloc = FALSE;
4554 }
4555 else
4556 {
4557 if (local_got_offsets == NULL)
4558 abort ();
4559
4560 off = local_got_offsets[r_symndx];
4561
4562 /* The offset must always be a multiple of 8. We use
4563 the least significant bit to record whether we have
4564 already generated the necessary reloc. */
4565 if ((off & 1) != 0)
4566 off &= ~1;
4567 else
4568 {
4569 bfd_put_64 (output_bfd, relocation,
4570 base_got->contents + off);
4571
4572 if (bfd_link_pic (info))
4573 {
4574 asection *s;
4575 Elf_Internal_Rela outrel;
4576
4577 /* We need to generate a R_X86_64_RELATIVE reloc
4578 for the dynamic linker. */
4579 s = htab->elf.srelgot;
4580 if (s == NULL)
4581 abort ();
4582
4583 outrel.r_offset = (base_got->output_section->vma
4584 + base_got->output_offset
4585 + off);
4586 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4587 outrel.r_addend = relocation;
4588 elf_append_rela (output_bfd, s, &outrel);
4589 }
4590
4591 local_got_offsets[r_symndx] |= 1;
4592 }
4593 }
4594
4595 if (off >= (bfd_vma) -2)
4596 abort ();
4597
4598 relocation = base_got->output_section->vma
4599 + base_got->output_offset + off;
4600 if (r_type != R_X86_64_GOTPCREL
4601 && r_type != R_X86_64_GOTPCRELX
4602 && r_type != R_X86_64_REX_GOTPCRELX
4603 && r_type != R_X86_64_GOTPCREL64)
4604 relocation -= htab->elf.sgotplt->output_section->vma
4605 - htab->elf.sgotplt->output_offset;
4606
4607 break;
4608
4609 case R_X86_64_GOTOFF64:
4610 /* Relocation is relative to the start of the global offset
4611 table. */
4612
4613 /* Check that it isn't a protected function or data symbol for a
4614 shared library, since it may not be local when used as a
4615 function address or with a copy relocation. We also need to
4616 make sure that the symbol is referenced locally. */
4617 if (bfd_link_pic (info) && h)
4618 {
4619 if (!h->def_regular)
4620 {
4621 const char *v;
4622
4623 switch (ELF_ST_VISIBILITY (h->other))
4624 {
4625 case STV_HIDDEN:
4626 v = _("hidden symbol");
4627 break;
4628 case STV_INTERNAL:
4629 v = _("internal symbol");
4630 break;
4631 case STV_PROTECTED:
4632 v = _("protected symbol");
4633 break;
4634 default:
4635 v = _("symbol");
4636 break;
4637 }
4638
4639 _bfd_error_handler
4640 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4641 input_bfd, v, h->root.root.string);
4642 bfd_set_error (bfd_error_bad_value);
4643 return FALSE;
4644 }
4645 else if (!bfd_link_executable (info)
4646 && !SYMBOL_REFERENCES_LOCAL (info, h)
4647 && (h->type == STT_FUNC
4648 || h->type == STT_OBJECT)
4649 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4650 {
4651 _bfd_error_handler
4652 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4653 input_bfd,
4654 h->type == STT_FUNC ? "function" : "data",
4655 h->root.root.string);
4656 bfd_set_error (bfd_error_bad_value);
4657 return FALSE;
4658 }
4659 }
4660
4661 /* Note that sgot is not involved in this
4662 calculation. We always want the start of .got.plt. If we
4663 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4664 permitted by the ABI, we might have to change this
4665 calculation. */
4666 relocation -= htab->elf.sgotplt->output_section->vma
4667 + htab->elf.sgotplt->output_offset;
4668 break;
4669
4670 case R_X86_64_GOTPC32:
4671 case R_X86_64_GOTPC64:
4672 /* Use global offset table as symbol value. */
4673 relocation = htab->elf.sgotplt->output_section->vma
4674 + htab->elf.sgotplt->output_offset;
4675 unresolved_reloc = FALSE;
4676 break;
4677
4678 case R_X86_64_PLTOFF64:
4679 /* Relocation is PLT entry relative to GOT. For local
4680 symbols it's the symbol itself relative to GOT. */
4681 if (h != NULL
4682 /* See PLT32 handling. */
4683 && h->plt.offset != (bfd_vma) -1
4684 && htab->elf.splt != NULL)
4685 {
4686 if (htab->plt_bnd != NULL)
4687 {
4688 resolved_plt = htab->plt_bnd;
4689 plt_offset = eh->plt_bnd.offset;
4690 }
4691 else
4692 {
4693 resolved_plt = htab->elf.splt;
4694 plt_offset = h->plt.offset;
4695 }
4696
4697 relocation = (resolved_plt->output_section->vma
4698 + resolved_plt->output_offset
4699 + plt_offset);
4700 unresolved_reloc = FALSE;
4701 }
4702
4703 relocation -= htab->elf.sgotplt->output_section->vma
4704 + htab->elf.sgotplt->output_offset;
4705 break;
4706
4707 case R_X86_64_PLT32:
4708 case R_X86_64_PLT32_BND:
4709 /* Relocation is to the entry for this symbol in the
4710 procedure linkage table. */
4711
4712 /* Resolve a PLT32 reloc against a local symbol directly,
4713 without using the procedure linkage table. */
4714 if (h == NULL)
4715 break;
4716
4717 if ((h->plt.offset == (bfd_vma) -1
4718 && eh->plt_got.offset == (bfd_vma) -1)
4719 || htab->elf.splt == NULL)
4720 {
4721 /* We didn't make a PLT entry for this symbol. This
4722 happens when statically linking PIC code, or when
4723 using -Bsymbolic. */
4724 break;
4725 }
4726
4727 if (h->plt.offset != (bfd_vma) -1)
4728 {
4729 if (htab->plt_bnd != NULL)
4730 {
4731 resolved_plt = htab->plt_bnd;
4732 plt_offset = eh->plt_bnd.offset;
4733 }
4734 else
4735 {
4736 resolved_plt = htab->elf.splt;
4737 plt_offset = h->plt.offset;
4738 }
4739 }
4740 else
4741 {
4742 /* Use the GOT PLT. */
4743 resolved_plt = htab->plt_got;
4744 plt_offset = eh->plt_got.offset;
4745 }
4746
4747 relocation = (resolved_plt->output_section->vma
4748 + resolved_plt->output_offset
4749 + plt_offset);
4750 unresolved_reloc = FALSE;
4751 break;
4752
4753 case R_X86_64_SIZE32:
4754 case R_X86_64_SIZE64:
4755 /* Set to symbol size. */
4756 relocation = st_size;
4757 goto direct;
4758
4759 case R_X86_64_PC8:
4760 case R_X86_64_PC16:
4761 case R_X86_64_PC32:
4762 case R_X86_64_PC32_BND:
4763 /* Don't complain about -fPIC if the symbol is undefined when
4764 building an executable, unless it is an unresolved weak symbol. */
4765 if ((input_section->flags & SEC_ALLOC) != 0
4766 && (input_section->flags & SEC_READONLY) != 0
4767 && h != NULL
4768 && ((bfd_link_executable (info)
4769 && h->root.type == bfd_link_hash_undefweak
4770 && !resolved_to_zero)
4771 || (bfd_link_pic (info)
4772 && !(bfd_link_pie (info)
4773 && h->root.type == bfd_link_hash_undefined))))
4774 {
4775 bfd_boolean fail = FALSE;
4776 bfd_boolean branch
4777 = ((r_type == R_X86_64_PC32
4778 || r_type == R_X86_64_PC32_BND)
4779 && is_32bit_relative_branch (contents, rel->r_offset));
4780
4781 if (SYMBOL_REFERENCES_LOCAL (info, h))
4782 {
4783 /* Symbol is referenced locally. Make sure it is
4784 defined locally or for a branch. */
4785 fail = !h->def_regular && !branch;
4786 }
4787 else if (!(bfd_link_pie (info)
4788 && (h->needs_copy || eh->needs_copy)))
4789 {
4790 /* Symbol doesn't need copy reloc and isn't referenced
4791 locally. We only allow branch to symbol with
4792 non-default visibility. */
4793 fail = (!branch
4794 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4795 }
4796
4797 if (fail)
4798 return elf_x86_64_need_pic (input_bfd, input_section,
4799 h, NULL, NULL, howto);
4800 }
4801 /* Fall through. */
4802
4803 case R_X86_64_8:
4804 case R_X86_64_16:
4805 case R_X86_64_32:
4806 case R_X86_64_PC64:
4807 case R_X86_64_64:
4808 /* FIXME: The ABI says the linker should make sure the value is
4809 the same when it's zero-extended to 64 bits. */
4810
4811 direct:
4812 if ((input_section->flags & SEC_ALLOC) == 0)
4813 break;
4814
4815 /* Don't copy a pc-relative relocation into the output file
4816 if the symbol needs a copy reloc or the symbol is undefined
4817 when building an executable. Copy dynamic function pointer
4818 relocations. Don't generate dynamic relocations against
4819 resolved undefined weak symbols in PIE. */
4820 if ((bfd_link_pic (info)
4821 && !(bfd_link_pie (info)
4822 && h != NULL
4823 && (h->needs_copy
4824 || eh->needs_copy
4825 || h->root.type == bfd_link_hash_undefined)
4826 && (IS_X86_64_PCREL_TYPE (r_type)
4827 || r_type == R_X86_64_SIZE32
4828 || r_type == R_X86_64_SIZE64))
4829 && (h == NULL
4830 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4831 && !resolved_to_zero)
4832 || h->root.type != bfd_link_hash_undefweak))
4833 && ((! IS_X86_64_PCREL_TYPE (r_type)
4834 && r_type != R_X86_64_SIZE32
4835 && r_type != R_X86_64_SIZE64)
4836 || ! SYMBOL_CALLS_LOCAL (info, h)))
4837 || (ELIMINATE_COPY_RELOCS
4838 && !bfd_link_pic (info)
4839 && h != NULL
4840 && h->dynindx != -1
4841 && (!h->non_got_ref
4842 || eh->func_pointer_refcount > 0
4843 || (h->root.type == bfd_link_hash_undefweak
4844 && !resolved_to_zero))
4845 && ((h->def_dynamic && !h->def_regular)
4846 /* Undefined weak symbol is bound locally when
4847 PIC is false. */
4848 || h->root.type == bfd_link_hash_undefined)))
4849 {
4850 Elf_Internal_Rela outrel;
4851 bfd_boolean skip, relocate;
4852 asection *sreloc;
4853
4854 /* When generating a shared object, these relocations
4855 are copied into the output file to be resolved at run
4856 time. */
4857 skip = FALSE;
4858 relocate = FALSE;
4859
4860 outrel.r_offset =
4861 _bfd_elf_section_offset (output_bfd, info, input_section,
4862 rel->r_offset);
4863 if (outrel.r_offset == (bfd_vma) -1)
4864 skip = TRUE;
4865 else if (outrel.r_offset == (bfd_vma) -2)
4866 skip = TRUE, relocate = TRUE;
4867
4868 outrel.r_offset += (input_section->output_section->vma
4869 + input_section->output_offset);
4870
4871 if (skip)
4872 memset (&outrel, 0, sizeof outrel);
4873
4874 /* h->dynindx may be -1 if this symbol was marked to
4875 become local. */
4876 else if (h != NULL
4877 && h->dynindx != -1
4878 && (IS_X86_64_PCREL_TYPE (r_type)
4879 || !(bfd_link_executable (info)
4880 || SYMBOLIC_BIND (info, h))
4881 || ! h->def_regular))
4882 {
4883 outrel.r_info = htab->r_info (h->dynindx, r_type);
4884 outrel.r_addend = rel->r_addend;
4885 }
4886 else
4887 {
4888 /* This symbol is local, or marked to become local.
4889 When relocation overflow check is disabled, we
4890 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4891 if (r_type == htab->pointer_r_type
4892 || (r_type == R_X86_64_32
4893 && info->no_reloc_overflow_check))
4894 {
4895 relocate = TRUE;
4896 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4897 outrel.r_addend = relocation + rel->r_addend;
4898 }
4899 else if (r_type == R_X86_64_64
4900 && !ABI_64_P (output_bfd))
4901 {
4902 relocate = TRUE;
4903 outrel.r_info = htab->r_info (0,
4904 R_X86_64_RELATIVE64);
4905 outrel.r_addend = relocation + rel->r_addend;
4906 /* Check addend overflow. */
4907 if ((outrel.r_addend & 0x80000000)
4908 != (rel->r_addend & 0x80000000))
4909 {
4910 const char *name;
4911 int addend = rel->r_addend;
4912 if (h && h->root.root.string)
4913 name = h->root.root.string;
4914 else
4915 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4916 sym, NULL);
4917 if (addend < 0)
4918 _bfd_error_handler
4919 (_("%B: addend -0x%x in relocation %s against "
4920 "symbol `%s' at 0x%lx in section `%A' is "
4921 "out of range"),
4922 input_bfd, input_section, addend,
4923 howto->name, name,
4924 (unsigned long) rel->r_offset);
4925 else
4926 _bfd_error_handler
4927 (_("%B: addend 0x%x in relocation %s against "
4928 "symbol `%s' at 0x%lx in section `%A' is "
4929 "out of range"),
4930 input_bfd, input_section, addend,
4931 howto->name, name,
4932 (unsigned long) rel->r_offset);
4933 bfd_set_error (bfd_error_bad_value);
4934 return FALSE;
4935 }
4936 }
4937 else
4938 {
4939 long sindx;
4940
4941 if (bfd_is_abs_section (sec))
4942 sindx = 0;
4943 else if (sec == NULL || sec->owner == NULL)
4944 {
4945 bfd_set_error (bfd_error_bad_value);
4946 return FALSE;
4947 }
4948 else
4949 {
4950 asection *osec;
4951
4952 /* We are turning this relocation into one
4953 against a section symbol. It would be
4954 proper to subtract the symbol's value,
4955 osec->vma, from the emitted reloc addend,
4956 but ld.so expects buggy relocs. */
4957 osec = sec->output_section;
4958 sindx = elf_section_data (osec)->dynindx;
4959 if (sindx == 0)
4960 {
4961 asection *oi = htab->elf.text_index_section;
4962 sindx = elf_section_data (oi)->dynindx;
4963 }
4964 BFD_ASSERT (sindx != 0);
4965 }
4966
4967 outrel.r_info = htab->r_info (sindx, r_type);
4968 outrel.r_addend = relocation + rel->r_addend;
4969 }
4970 }
4971
4972 sreloc = elf_section_data (input_section)->sreloc;
4973
4974 if (sreloc == NULL || sreloc->contents == NULL)
4975 {
4976 r = bfd_reloc_notsupported;
4977 goto check_relocation_error;
4978 }
4979
4980 elf_append_rela (output_bfd, sreloc, &outrel);
4981
4982 /* If this reloc is against an external symbol, we do
4983 not want to fiddle with the addend. Otherwise, we
4984 need to include the symbol value so that it becomes
4985 an addend for the dynamic reloc. */
4986 if (! relocate)
4987 continue;
4988 }
4989
4990 break;
4991
4992 case R_X86_64_TLSGD:
4993 case R_X86_64_GOTPC32_TLSDESC:
4994 case R_X86_64_TLSDESC_CALL:
4995 case R_X86_64_GOTTPOFF:
4996 tls_type = GOT_UNKNOWN;
4997 if (h == NULL && local_got_offsets)
4998 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4999 else if (h != NULL)
5000 tls_type = elf_x86_64_hash_entry (h)->tls_type;
5001
5002 if (! elf_x86_64_tls_transition (info, input_bfd,
5003 input_section, contents,
5004 symtab_hdr, sym_hashes,
5005 &r_type, tls_type, rel,
5006 relend, h, r_symndx, TRUE))
5007 return FALSE;
5008
5009 if (r_type == R_X86_64_TPOFF32)
5010 {
5011 bfd_vma roff = rel->r_offset;
5012
5013 BFD_ASSERT (! unresolved_reloc);
5014
5015 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5016 {
5017 /* GD->LE transition. For 64bit, change
5018 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5019 .word 0x6666; rex64; call __tls_get_addr@PLT
5020 or
5021 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5022 .byte 0x66; rex64
5023 call *__tls_get_addr@GOTPCREL(%rip)
5024 which may be converted to
5025 addr32 call __tls_get_addr
5026 into:
5027 movq %fs:0, %rax
5028 leaq foo@tpoff(%rax), %rax
5029 For 32bit, change
5030 leaq foo@tlsgd(%rip), %rdi
5031 .word 0x6666; rex64; call __tls_get_addr@PLT
5032 or
5033 leaq foo@tlsgd(%rip), %rdi
5034 .byte 0x66; rex64
5035 call *__tls_get_addr@GOTPCREL(%rip)
5036 which may be converted to
5037 addr32 call __tls_get_addr
5038 into:
5039 movl %fs:0, %eax
5040 leaq foo@tpoff(%rax), %rax
5041 For largepic, change:
5042 leaq foo@tlsgd(%rip), %rdi
5043 movabsq $__tls_get_addr@pltoff, %rax
5044 addq %r15, %rax
5045 call *%rax
5046 into:
5047 movq %fs:0, %rax
5048 leaq foo@tpoff(%rax), %rax
5049 nopw 0x0(%rax,%rax,1) */
5050 int largepic = 0;
5051 if (ABI_64_P (output_bfd))
5052 {
5053 if (contents[roff + 5] == 0xb8)
5054 {
5055 memcpy (contents + roff - 3,
5056 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
5057 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5058 largepic = 1;
5059 }
5060 else
5061 memcpy (contents + roff - 4,
5062 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5063 16);
5064 }
5065 else
5066 memcpy (contents + roff - 3,
5067 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5068 15);
5069 bfd_put_32 (output_bfd,
5070 elf_x86_64_tpoff (info, relocation),
5071 contents + roff + 8 + largepic);
5072 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5073 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5074 rel++;
5075 wrel++;
5076 continue;
5077 }
5078 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5079 {
5080 /* GDesc -> LE transition.
5081 It's originally something like:
5082 leaq x@tlsdesc(%rip), %rax
5083
5084 Change it to:
5085 movl $x@tpoff, %rax. */
5086
5087 unsigned int val, type;
5088
5089 type = bfd_get_8 (input_bfd, contents + roff - 3);
5090 val = bfd_get_8 (input_bfd, contents + roff - 1);
5091 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5092 contents + roff - 3);
5093 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5094 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5095 contents + roff - 1);
5096 bfd_put_32 (output_bfd,
5097 elf_x86_64_tpoff (info, relocation),
5098 contents + roff);
5099 continue;
5100 }
5101 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5102 {
5103 /* GDesc -> LE transition.
5104 It's originally:
5105 call *(%rax)
5106 Turn it into:
5107 xchg %ax,%ax. */
5108 bfd_put_8 (output_bfd, 0x66, contents + roff);
5109 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5110 continue;
5111 }
5112 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5113 {
5114 /* IE->LE transition:
5115 For 64bit, originally it can be one of:
5116 movq foo@gottpoff(%rip), %reg
5117 addq foo@gottpoff(%rip), %reg
5118 We change it into:
5119 movq $foo, %reg
5120 leaq foo(%reg), %reg
5121 addq $foo, %reg.
5122 For 32bit, originally it can be one of:
5123 movq foo@gottpoff(%rip), %reg
5124 addl foo@gottpoff(%rip), %reg
5125 We change it into:
5126 movq $foo, %reg
5127 leal foo(%reg), %reg
5128 addl $foo, %reg. */
5129
5130 unsigned int val, type, reg;
5131
5132 if (roff >= 3)
5133 val = bfd_get_8 (input_bfd, contents + roff - 3);
5134 else
5135 val = 0;
5136 type = bfd_get_8 (input_bfd, contents + roff - 2);
5137 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5138 reg >>= 3;
5139 if (type == 0x8b)
5140 {
5141 /* movq */
5142 if (val == 0x4c)
5143 bfd_put_8 (output_bfd, 0x49,
5144 contents + roff - 3);
5145 else if (!ABI_64_P (output_bfd) && val == 0x44)
5146 bfd_put_8 (output_bfd, 0x41,
5147 contents + roff - 3);
5148 bfd_put_8 (output_bfd, 0xc7,
5149 contents + roff - 2);
5150 bfd_put_8 (output_bfd, 0xc0 | reg,
5151 contents + roff - 1);
5152 }
5153 else if (reg == 4)
5154 {
5155 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5156 is special */
5157 if (val == 0x4c)
5158 bfd_put_8 (output_bfd, 0x49,
5159 contents + roff - 3);
5160 else if (!ABI_64_P (output_bfd) && val == 0x44)
5161 bfd_put_8 (output_bfd, 0x41,
5162 contents + roff - 3);
5163 bfd_put_8 (output_bfd, 0x81,
5164 contents + roff - 2);
5165 bfd_put_8 (output_bfd, 0xc0 | reg,
5166 contents + roff - 1);
5167 }
5168 else
5169 {
5170 /* addq/addl -> leaq/leal */
5171 if (val == 0x4c)
5172 bfd_put_8 (output_bfd, 0x4d,
5173 contents + roff - 3);
5174 else if (!ABI_64_P (output_bfd) && val == 0x44)
5175 bfd_put_8 (output_bfd, 0x45,
5176 contents + roff - 3);
5177 bfd_put_8 (output_bfd, 0x8d,
5178 contents + roff - 2);
5179 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5180 contents + roff - 1);
5181 }
5182 bfd_put_32 (output_bfd,
5183 elf_x86_64_tpoff (info, relocation),
5184 contents + roff);
5185 continue;
5186 }
5187 else
5188 BFD_ASSERT (FALSE);
5189 }
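
/* A byte-level example of the IE->LE rewrite above for
   "movq foo@gottpoff(%rip), %rax" ("foo" hypothetical; other
   registers and the add/lea cases differ as handled above):

       48 8b 05 xx xx xx xx    movq foo@gottpoff(%rip), %rax
   becomes
       48 c7 c0 yy yy yy yy    movq $foo@tpoff, %rax

   where the new immediate is the (negative, sign-extended) value
   returned by elf_x86_64_tpoff.  */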
5190
5191 if (htab->elf.sgot == NULL)
5192 abort ();
5193
5194 if (h != NULL)
5195 {
5196 off = h->got.offset;
5197 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5198 }
5199 else
5200 {
5201 if (local_got_offsets == NULL)
5202 abort ();
5203
5204 off = local_got_offsets[r_symndx];
5205 offplt = local_tlsdesc_gotents[r_symndx];
5206 }
5207
5208 if ((off & 1) != 0)
5209 off &= ~1;
5210 else
5211 {
5212 Elf_Internal_Rela outrel;
5213 int dr_type, indx;
5214 asection *sreloc;
5215
5216 if (htab->elf.srelgot == NULL)
5217 abort ();
5218
5219 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5220
5221 if (GOT_TLS_GDESC_P (tls_type))
5222 {
5223 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5224 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5225 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5226 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5227 + htab->elf.sgotplt->output_offset
5228 + offplt
5229 + htab->sgotplt_jump_table_size);
5230 sreloc = htab->elf.srelplt;
5231 if (indx == 0)
5232 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5233 else
5234 outrel.r_addend = 0;
5235 elf_append_rela (output_bfd, sreloc, &outrel);
5236 }
5237
5238 sreloc = htab->elf.srelgot;
5239
5240 outrel.r_offset = (htab->elf.sgot->output_section->vma
5241 + htab->elf.sgot->output_offset + off);
5242
5243 if (GOT_TLS_GD_P (tls_type))
5244 dr_type = R_X86_64_DTPMOD64;
5245 else if (GOT_TLS_GDESC_P (tls_type))
5246 goto dr_done;
5247 else
5248 dr_type = R_X86_64_TPOFF64;
5249
5250 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5251 outrel.r_addend = 0;
5252 if ((dr_type == R_X86_64_TPOFF64
5253 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5254 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5255 outrel.r_info = htab->r_info (indx, dr_type);
5256
5257 elf_append_rela (output_bfd, sreloc, &outrel);
5258
5259 if (GOT_TLS_GD_P (tls_type))
5260 {
5261 if (indx == 0)
5262 {
5263 BFD_ASSERT (! unresolved_reloc);
5264 bfd_put_64 (output_bfd,
5265 relocation - elf_x86_64_dtpoff_base (info),
5266 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5267 }
5268 else
5269 {
5270 bfd_put_64 (output_bfd, 0,
5271 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5272 outrel.r_info = htab->r_info (indx,
5273 R_X86_64_DTPOFF64);
5274 outrel.r_offset += GOT_ENTRY_SIZE;
5275 elf_append_rela (output_bfd, sreloc,
5276 &outrel);
5277 }
5278 }
5279
5280 dr_done:
5281 if (h != NULL)
5282 h->got.offset |= 1;
5283 else
5284 local_got_offsets[r_symndx] |= 1;
5285 }
5286
5287 if (off >= (bfd_vma) -2
5288 && ! GOT_TLS_GDESC_P (tls_type))
5289 abort ();
5290 if (r_type == ELF32_R_TYPE (rel->r_info))
5291 {
5292 if (r_type == R_X86_64_GOTPC32_TLSDESC
5293 || r_type == R_X86_64_TLSDESC_CALL)
5294 relocation = htab->elf.sgotplt->output_section->vma
5295 + htab->elf.sgotplt->output_offset
5296 + offplt + htab->sgotplt_jump_table_size;
5297 else
5298 relocation = htab->elf.sgot->output_section->vma
5299 + htab->elf.sgot->output_offset + off;
5300 unresolved_reloc = FALSE;
5301 }
5302 else
5303 {
5304 bfd_vma roff = rel->r_offset;
5305
5306 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5307 {
5308 /* GD->IE transition. For 64bit, change
5309 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5310 .word 0x6666; rex64; call __tls_get_addr@PLT
5311 or
5312 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5313 .byte 0x66; rex64
5314 call *__tls_get_addr@GOTPCREL(%rip)
5315 which may be converted to
5316 addr32 call __tls_get_addr
5317 into:
5318 movq %fs:0, %rax
5319 addq foo@gottpoff(%rip), %rax
5320 For 32bit, change
5321 leaq foo@tlsgd(%rip), %rdi
5322 .word 0x6666; rex64; call __tls_get_addr@PLT
5323 or
5324 leaq foo@tlsgd(%rip), %rdi
5325 .byte 0x66; rex64;
5326 call *__tls_get_addr@GOTPCREL(%rip)
5327 which may be converted to
5328 addr32 call __tls_get_addr
5329 into:
5330 movl %fs:0, %eax
5331 addq foo@gottpoff(%rip), %rax
5332 For largepic, change:
5333 leaq foo@tlsgd(%rip), %rdi
5334 movabsq $__tls_get_addr@pltoff, %rax
5335 addq %r15, %rax
5336 call *%rax
5337 into:
5338 movq %fs:0, %rax
5339 addq foo@gottpoff(%rax), %rax
5340 nopw 0x0(%rax,%rax,1) */
5341 int largepic = 0;
5342 if (ABI_64_P (output_bfd))
5343 {
5344 if (contents[roff + 5] == 0xb8)
5345 {
5346 memcpy (contents + roff - 3,
5347 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5348 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5349 largepic = 1;
5350 }
5351 else
5352 memcpy (contents + roff - 4,
5353 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5354 16);
5355 }
5356 else
5357 memcpy (contents + roff - 3,
5358 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5359 15);
5360
5361 relocation = (htab->elf.sgot->output_section->vma
5362 + htab->elf.sgot->output_offset + off
5363 - roff
5364 - largepic
5365 - input_section->output_section->vma
5366 - input_section->output_offset
5367 - 12);
5368 bfd_put_32 (output_bfd, relocation,
5369 contents + roff + 8 + largepic);
5370 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5371 rel++;
5372 wrel++;
5373 continue;
5374 }
5375 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5376 {
5377 /* GDesc -> IE transition.
5378 It's originally something like:
5379 leaq x@tlsdesc(%rip), %rax
5380
5381 Change it to:
5382 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5383
5384 /* Now modify the instruction as appropriate. To
5385 turn a leaq into a movq in the form we use it, it
5386 suffices to change the second byte from 0x8d to
5387 0x8b. */
5388 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5389
5390 bfd_put_32 (output_bfd,
5391 htab->elf.sgot->output_section->vma
5392 + htab->elf.sgot->output_offset + off
5393 - rel->r_offset
5394 - input_section->output_section->vma
5395 - input_section->output_offset
5396 - 4,
5397 contents + roff);
5398 continue;
5399 }
5400 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5401 {
5402 /* GDesc -> IE transition.
5403 It's originally:
5404 call *(%rax)
5405
5406 Change it to:
5407 xchg %ax, %ax. */
5408
5409 bfd_put_8 (output_bfd, 0x66, contents + roff);
5410 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5411 continue;
5412 }
5413 else
5414 BFD_ASSERT (FALSE);
5415 }
5416 break;
5417
5418 case R_X86_64_TLSLD:
5419 if (! elf_x86_64_tls_transition (info, input_bfd,
5420 input_section, contents,
5421 symtab_hdr, sym_hashes,
5422 &r_type, GOT_UNKNOWN, rel,
5423 relend, h, r_symndx, TRUE))
5424 return FALSE;
5425
5426 if (r_type != R_X86_64_TLSLD)
5427 {
5428 /* LD->LE transition:
5429 leaq foo@tlsld(%rip), %rdi
5430 call __tls_get_addr@PLT
5431 For 64bit, we change it into:
5432 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5433 For 32bit, we change it into:
5434 nopl 0x0(%rax); movl %fs:0, %eax
5435 Or
5436 leaq foo@tlsld(%rip), %rdi;
5437 call *__tls_get_addr@GOTPCREL(%rip)
5438 which may be converted to
5439 addr32 call __tls_get_addr
5440 For 64bit, we change it into:
5441 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5442 For 32bit, we change it into:
5443 nopw 0x0(%rax); movl %fs:0, %eax
5444 For largepic, change:
5445 leaq foo@tlsgd(%rip), %rdi
5446 movabsq $__tls_get_addr@pltoff, %rax
5447 addq %rbx, %rax
5448 call *%rax
5449 into
5450 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5451 movq %fs:0, %rax */
5452
5453 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5454 if (ABI_64_P (output_bfd))
5455 {
5456 if (contents[rel->r_offset + 5] == 0xb8)
5457 memcpy (contents + rel->r_offset - 3,
5458 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5459 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5460 else if (contents[rel->r_offset + 4] == 0xff
5461 || contents[rel->r_offset + 4] == 0x67)
5462 memcpy (contents + rel->r_offset - 3,
5463 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5464 13);
5465 else
5466 memcpy (contents + rel->r_offset - 3,
5467 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5468 }
5469 else
5470 {
5471 if (contents[rel->r_offset + 4] == 0xff)
5472 memcpy (contents + rel->r_offset - 3,
5473 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5474 13);
5475 else
5476 memcpy (contents + rel->r_offset - 3,
5477 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5478 }
5479 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5480 and R_X86_64_PLTOFF64. */
5481 rel++;
5482 wrel++;
5483 continue;
5484 }
5485
5486 if (htab->elf.sgot == NULL)
5487 abort ();
5488
5489 off = htab->tls_ld_got.offset;
5490 if (off & 1)
5491 off &= ~1;
5492 else
5493 {
5494 Elf_Internal_Rela outrel;
5495
5496 if (htab->elf.srelgot == NULL)
5497 abort ();
5498
5499 outrel.r_offset = (htab->elf.sgot->output_section->vma
5500 + htab->elf.sgot->output_offset + off);
5501
5502 bfd_put_64 (output_bfd, 0,
5503 htab->elf.sgot->contents + off);
5504 bfd_put_64 (output_bfd, 0,
5505 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5506 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5507 outrel.r_addend = 0;
5508 elf_append_rela (output_bfd, htab->elf.srelgot,
5509 &outrel);
5510 htab->tls_ld_got.offset |= 1;
5511 }
5512 relocation = htab->elf.sgot->output_section->vma
5513 + htab->elf.sgot->output_offset + off;
5514 unresolved_reloc = FALSE;
5515 break;
5516
5517 case R_X86_64_DTPOFF32:
5518 if (!bfd_link_executable (info)
5519 || (input_section->flags & SEC_CODE) == 0)
5520 relocation -= elf_x86_64_dtpoff_base (info);
5521 else
5522 relocation = elf_x86_64_tpoff (info, relocation);
5523 break;
5524
5525 case R_X86_64_TPOFF32:
5526 case R_X86_64_TPOFF64:
5527 BFD_ASSERT (bfd_link_executable (info));
5528 relocation = elf_x86_64_tpoff (info, relocation);
5529 break;
5530
5531 case R_X86_64_DTPOFF64:
5532 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5533 relocation -= elf_x86_64_dtpoff_base (info);
5534 break;
5535
5536 default:
5537 break;
5538 }
5539
5540 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5541 because such sections are not SEC_ALLOC and thus ld.so will
5542 not process them. */
5543 if (unresolved_reloc
5544 && !((input_section->flags & SEC_DEBUGGING) != 0
5545 && h->def_dynamic)
5546 && _bfd_elf_section_offset (output_bfd, info, input_section,
5547 rel->r_offset) != (bfd_vma) -1)
5548 {
5549 _bfd_error_handler
5550 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5551 input_bfd,
5552 input_section,
5553 (long) rel->r_offset,
5554 howto->name,
5555 h->root.root.string);
5556 return FALSE;
5557 }
5558
5559 do_relocation:
5560 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5561 contents, rel->r_offset,
5562 relocation, rel->r_addend);
5563
5564 check_relocation_error:
5565 if (r != bfd_reloc_ok)
5566 {
5567 const char *name;
5568
5569 if (h != NULL)
5570 name = h->root.root.string;
5571 else
5572 {
5573 name = bfd_elf_string_from_elf_section (input_bfd,
5574 symtab_hdr->sh_link,
5575 sym->st_name);
5576 if (name == NULL)
5577 return FALSE;
5578 if (*name == '\0')
5579 name = bfd_section_name (input_bfd, sec);
5580 }
5581
5582 if (r == bfd_reloc_overflow)
5583 (*info->callbacks->reloc_overflow)
5584 (info, (h ? &h->root : NULL), name, howto->name,
5585 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5586 else
5587 {
5588 _bfd_error_handler
5589 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5590 input_bfd, input_section,
5591 (long) rel->r_offset, name, (int) r);
5592 return FALSE;
5593 }
5594 }
5595
5596 if (wrel != rel)
5597 *wrel = *rel;
5598 }
5599
5600 if (wrel != rel)
5601 {
5602 Elf_Internal_Shdr *rel_hdr;
5603 size_t deleted = rel - wrel;
5604
5605 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5606 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5607 if (rel_hdr->sh_size == 0)
5608 {
5609 /* It is too late to remove an empty reloc section. Leave
5610 one NONE reloc.
5611 ??? What is wrong with an empty section??? */
5612 rel_hdr->sh_size = rel_hdr->sh_entsize;
5613 deleted -= 1;
5614 }
5615 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5616 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5617 input_section->reloc_count -= deleted;
5618 }
5619
5620 return TRUE;
5621 }
5622
5623 /* Finish up dynamic symbol handling. We set the contents of various
5624 dynamic sections here. */
5625
5626 static bfd_boolean
5627 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5628 struct bfd_link_info *info,
5629 struct elf_link_hash_entry *h,
5630 Elf_Internal_Sym *sym)
5631 {
5632 struct elf_x86_64_link_hash_table *htab;
5633 const struct elf_x86_64_backend_data *abed;
5634 bfd_boolean use_plt_bnd;
5635 struct elf_x86_64_link_hash_entry *eh;
5636 bfd_boolean local_undefweak;
5637
5638 htab = elf_x86_64_hash_table (info);
5639 if (htab == NULL)
5640 return FALSE;
5641
5642 /* Use MPX backend data in case of BND relocations. Use the .plt_bnd
5643 section only if there is a .plt section. */
5644 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5645 abed = (use_plt_bnd
5646 ? &elf_x86_64_bnd_arch_bed
5647 : get_elf_x86_64_backend_data (output_bfd));
5648
5649 eh = (struct elf_x86_64_link_hash_entry *) h;
5650
5651 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5652 resolved undefined weak symbols in an executable so that their
5653 references have value 0 at run-time. */
5654 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
5655 eh->has_got_reloc,
5656 eh);
5657
5658 if (h->plt.offset != (bfd_vma) -1)
5659 {
5660 bfd_vma plt_index;
5661 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5662 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5663 Elf_Internal_Rela rela;
5664 bfd_byte *loc;
5665 asection *plt, *gotplt, *relplt, *resolved_plt;
5666 const struct elf_backend_data *bed;
5667 bfd_vma plt_got_pcrel_offset;
5668
5669 /* When building a static executable, use .iplt, .igot.plt and
5670 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5671 if (htab->elf.splt != NULL)
5672 {
5673 plt = htab->elf.splt;
5674 gotplt = htab->elf.sgotplt;
5675 relplt = htab->elf.srelplt;
5676 }
5677 else
5678 {
5679 plt = htab->elf.iplt;
5680 gotplt = htab->elf.igotplt;
5681 relplt = htab->elf.irelplt;
5682 }
5683
5684 /* This symbol has an entry in the procedure linkage table. Set
5685 it up. */
5686 if ((h->dynindx == -1
5687 && !local_undefweak
5688 && !((h->forced_local || bfd_link_executable (info))
5689 && h->def_regular
5690 && h->type == STT_GNU_IFUNC))
5691 || plt == NULL
5692 || gotplt == NULL
5693 || relplt == NULL)
5694 abort ();
5695
5696 /* Get the index in the procedure linkage table which
5697 corresponds to this symbol. This is the index of this symbol
5698 in all the symbols for which we are making plt entries. The
5699 first entry in the procedure linkage table is reserved.
5700
5701 Get the offset into the .got table of the entry that
5702 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5703 bytes. The first three are reserved for the dynamic linker.
5704
5705 For static executables, we don't reserve anything. */
5706
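/* As a sketch of the arithmetic, assuming the standard layout of
   16-byte PLT entries and 8-byte GOT entries (the BND and NaCl
   layouts differ): the first real PLT entry is at h->plt.offset == 16,
   so got_offset = 16 / 16 - 1 = 0 and (0 + 3) * GOT_ENTRY_SIZE == 24,
   the first .got.plt slot after the three reserved ones.  */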
5707 if (plt == htab->elf.splt)
5708 {
5709 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5710 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5711 }
5712 else
5713 {
5714 got_offset = h->plt.offset / abed->plt_entry_size;
5715 got_offset = got_offset * GOT_ENTRY_SIZE;
5716 }
5717
5718 plt_plt_insn_end = abed->plt_plt_insn_end;
5719 plt_plt_offset = abed->plt_plt_offset;
5720 plt_got_insn_size = abed->plt_got_insn_size;
5721 plt_got_offset = abed->plt_got_offset;
5722 if (use_plt_bnd)
5723 {
5724 /* Use the second PLT with BND relocations. */
5725 const bfd_byte *plt_entry, *plt2_entry;
5726
5727 if (eh->has_bnd_reloc)
5728 {
5729 plt_entry = elf_x86_64_bnd_plt_entry;
5730 plt2_entry = elf_x86_64_bnd_plt2_entry;
5731 }
5732 else
5733 {
5734 plt_entry = elf_x86_64_legacy_plt_entry;
5735 plt2_entry = elf_x86_64_legacy_plt2_entry;
5736
5737 /* Subtract 1 since there is no BND prefix. */
5738 plt_plt_insn_end -= 1;
5739 plt_plt_offset -= 1;
5740 plt_got_insn_size -= 1;
5741 plt_got_offset -= 1;
5742 }
5743
5744 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5745 == sizeof (elf_x86_64_legacy_plt_entry));
5746
5747 /* Fill in the entry in the procedure linkage table. */
5748 memcpy (plt->contents + h->plt.offset,
5749 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5750 /* Fill in the entry in the second PLT. */
5751 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5752 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5753
5754 resolved_plt = htab->plt_bnd;
5755 plt_offset = eh->plt_bnd.offset;
5756 }
5757 else
5758 {
5759 /* Fill in the entry in the procedure linkage table. */
5760 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5761 abed->plt_entry_size);
5762
5763 resolved_plt = plt;
5764 plt_offset = h->plt.offset;
5765 }
5766
5767 /* Fill in the fields of the PLT entry that need relocated values. */
5768
5769 /* Store the PC-relative offset to the GOT entry, subtracting the size
5770 of the referring instruction since the offset is relative to its end. */
5771 plt_got_pcrel_offset = (gotplt->output_section->vma
5772 + gotplt->output_offset
5773 + got_offset
5774 - resolved_plt->output_section->vma
5775 - resolved_plt->output_offset
5776 - plt_offset
5777 - plt_got_insn_size);
5778
5779 /* Check PC-relative offset overflow in PLT entry. */
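/* The displacement field is a signed 32-bit value, so only offsets in
   [-0x80000000, 0x7fffffff] can be encoded.  Adding 0x80000000 maps
   exactly that range onto [0, 0xffffffff]; anything larger means the
   offset cannot be represented.  */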
5780 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5781 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5782 output_bfd, h->root.root.string);
5783
5784 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5785 resolved_plt->contents + plt_offset + plt_got_offset);
5786
5787 /* Fill in the entry in the global offset table; initially this
5788 points to the second part of the PLT entry. Leave the entry
5789 as zero for an undefined weak symbol in a PIE, since no PLT
5790 relocation is emitted against such a symbol. */
5791 if (!local_undefweak)
5792 {
5793 bfd_put_64 (output_bfd, (plt->output_section->vma
5794 + plt->output_offset
5795 + h->plt.offset
5796 + abed->plt_lazy_offset),
5797 gotplt->contents + got_offset);
5798
5799 /* Fill in the entry in the .rela.plt section. */
5800 rela.r_offset = (gotplt->output_section->vma
5801 + gotplt->output_offset
5802 + got_offset);
5803 if (h->dynindx == -1
5804 || ((bfd_link_executable (info)
5805 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5806 && h->def_regular
5807 && h->type == STT_GNU_IFUNC))
5808 {
5809 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5810 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5811 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5812 rela.r_addend = (h->root.u.def.value
5813 + h->root.u.def.section->output_section->vma
5814 + h->root.u.def.section->output_offset);
5815 /* R_X86_64_IRELATIVE comes last. */
5816 plt_index = htab->next_irelative_index--;
5817 }
5818 else
5819 {
5820 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5821 rela.r_addend = 0;
5822 plt_index = htab->next_jump_slot_index++;
5823 }
5824
5825 /* Don't fill the lazy-binding fields of the PLT entry for static executables. */
5826 if (plt == htab->elf.splt)
5827 {
5828 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5829
5830 /* Put relocation index. */
5831 bfd_put_32 (output_bfd, plt_index,
5832 (plt->contents + h->plt.offset
5833 + abed->plt_reloc_offset));
5834
5835 /* Put in the offset for jmp .PLT0 and check for overflow. We don't
5836 check the relocation index for overflow since the branch displacement
5837 will overflow first. */
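/* The jmp back to PLT0 uses a displacement of -plt0_offset, measured
   from the end of the instruction, which fits in the signed 32-bit
   field only while plt0_offset <= 0x80000000.  */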
5838 if (plt0_offset > 0x80000000)
5839 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5840 output_bfd, h->root.root.string);
5841 bfd_put_32 (output_bfd, - plt0_offset,
5842 plt->contents + h->plt.offset + plt_plt_offset);
5843 }
5844
5845 bed = get_elf_backend_data (output_bfd);
5846 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5847 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5848 }
5849 }
5850 else if (eh->plt_got.offset != (bfd_vma) -1)
5851 {
5852 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5853 asection *plt, *got;
5854 bfd_boolean got_after_plt;
5855 int32_t got_pcrel_offset;
5856 const bfd_byte *got_plt_entry;
5857
5858 /* Set the entry in the GOT procedure linkage table. */
5859 plt = htab->plt_got;
5860 got = htab->elf.sgot;
5861 got_offset = h->got.offset;
5862
5863 if (got_offset == (bfd_vma) -1
5864 || h->type == STT_GNU_IFUNC
5865 || plt == NULL
5866 || got == NULL)
5867 abort ();
5868
5869 /* Use the second PLT entry template for the GOT PLT since they
5870 are identical. */
5871 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5872 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5873 if (eh->has_bnd_reloc)
5874 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5875 else
5876 {
5877 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5878
5879 /* Subtract 1 since there is no BND prefix. */
5880 plt_got_insn_size -= 1;
5881 plt_got_offset -= 1;
5882 }
5883
5884 /* Fill in the entry in the GOT procedure linkage table. */
5885 plt_offset = eh->plt_got.offset;
5886 memcpy (plt->contents + plt_offset,
5887 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5888
5889 /* Store the PC-relative offset to the GOT entry, subtracting the size
5890 of the referring instruction since the offset is relative to its end. */
5891 got_pcrel_offset = (got->output_section->vma
5892 + got->output_offset
5893 + got_offset
5894 - plt->output_section->vma
5895 - plt->output_offset
5896 - plt_offset
5897 - plt_got_insn_size);
5898
5899 /* Check PC-relative offset overflow in GOT PLT entry. */
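/* got_pcrel_offset is only 32 bits wide, so if the real displacement
   does not fit, the truncated value ends up with a sign that
   contradicts the known ordering of the GOT and PLT output sections;
   that is what the sign test below catches.  */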
5900 got_after_plt = got->output_section->vma > plt->output_section->vma;
5901 if ((got_after_plt && got_pcrel_offset < 0)
5902 || (!got_after_plt && got_pcrel_offset > 0))
5903 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5904 output_bfd, h->root.root.string);
5905
5906 bfd_put_32 (output_bfd, got_pcrel_offset,
5907 plt->contents + plt_offset + plt_got_offset);
5908 }
5909
5910 if (!local_undefweak
5911 && !h->def_regular
5912 && (h->plt.offset != (bfd_vma) -1
5913 || eh->plt_got.offset != (bfd_vma) -1))
5914 {
5915 /* Mark the symbol as undefined, rather than as defined in
5916 the .plt section. Leave the value if there were any
5917 relocations where pointer equality matters (this is a clue
5918 for the dynamic linker, to make function pointer
5919 comparisons work between an application and shared
5920 library), otherwise set it to zero. If a function is only
5921 called from a binary, there is no need to slow down
5922 shared libraries because of that. */
5923 sym->st_shndx = SHN_UNDEF;
5924 if (!h->pointer_equality_needed)
5925 sym->st_value = 0;
5926 }
5927
5928 /* Don't generate a dynamic GOT relocation against an undefined weak
5929 symbol in an executable. */
5930 if (h->got.offset != (bfd_vma) -1
5931 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5932 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
5933 && !local_undefweak)
5934 {
5935 Elf_Internal_Rela rela;
5936 asection *relgot = htab->elf.srelgot;
5937
5938 /* This symbol has an entry in the global offset table. Set it
5939 up. */
5940 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5941 abort ();
5942
5943 rela.r_offset = (htab->elf.sgot->output_section->vma
5944 + htab->elf.sgot->output_offset
5945 + (h->got.offset &~ (bfd_vma) 1));
5946
5947 /* If this is a static link, or it is a -Bsymbolic link and the
5948 symbol is defined locally or was forced to be local because
5949 of a version file, we just want to emit a RELATIVE reloc.
5950 The entry in the global offset table will already have been
5951 initialized in the relocate_section function. */
5952 if (h->def_regular
5953 && h->type == STT_GNU_IFUNC)
5954 {
5955 if (h->plt.offset == (bfd_vma) -1)
5956 {
5957 /* STT_GNU_IFUNC is referenced without PLT. */
5958 if (htab->elf.splt == NULL)
5959 {
5960 /* Use the .rel[a].iplt section to store .got relocations
5961 in a static executable. */
5962 relgot = htab->elf.irelplt;
5963 }
5964 if (SYMBOL_REFERENCES_LOCAL (info, h))
5965 {
5966 rela.r_info = htab->r_info (0,
5967 R_X86_64_IRELATIVE);
5968 rela.r_addend = (h->root.u.def.value
5969 + h->root.u.def.section->output_section->vma
5970 + h->root.u.def.section->output_offset);
5971 }
5972 else
5973 goto do_glob_dat;
5974 }
5975 else if (bfd_link_pic (info))
5976 {
5977 /* Generate R_X86_64_GLOB_DAT. */
5978 goto do_glob_dat;
5979 }
5980 else
5981 {
5982 asection *plt;
5983
5984 if (!h->pointer_equality_needed)
5985 abort ();
5986
5987 /* For a non-shared object we can't use .got.plt, which holds the
5988 real function address, when pointer equality is needed. Load the
5989 GOT entry with the address of the PLT entry instead. */
5990 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5991 bfd_put_64 (output_bfd, (plt->output_section->vma
5992 + plt->output_offset
5993 + h->plt.offset),
5994 htab->elf.sgot->contents + h->got.offset);
5995 return TRUE;
5996 }
5997 }
5998 else if (bfd_link_pic (info)
5999 && SYMBOL_REFERENCES_LOCAL (info, h))
6000 {
6001 if (!h->def_regular)
6002 return FALSE;
6003 BFD_ASSERT((h->got.offset & 1) != 0);
6004 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
6005 rela.r_addend = (h->root.u.def.value
6006 + h->root.u.def.section->output_section->vma
6007 + h->root.u.def.section->output_offset);
6008 }
6009 else
6010 {
6011 BFD_ASSERT((h->got.offset & 1) == 0);
6012 do_glob_dat:
6013 bfd_put_64 (output_bfd, (bfd_vma) 0,
6014 htab->elf.sgot->contents + h->got.offset);
6015 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
6016 rela.r_addend = 0;
6017 }
6018
6019 elf_append_rela (output_bfd, relgot, &rela);
6020 }
6021
6022 if (h->needs_copy)
6023 {
6024 Elf_Internal_Rela rela;
6025
6026 /* This symbol needs a copy reloc. Set it up. */
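/* R_X86_64_COPY tells the dynamic linker to copy the variable's
   initial value from the shared object into the space reserved for it
   in the executable (h->root.u.def), so that all references resolve
   to that single address.  */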
6027
6028 if (h->dynindx == -1
6029 || (h->root.type != bfd_link_hash_defined
6030 && h->root.type != bfd_link_hash_defweak)
6031 || htab->srelbss == NULL)
6032 abort ();
6033
6034 rela.r_offset = (h->root.u.def.value
6035 + h->root.u.def.section->output_section->vma
6036 + h->root.u.def.section->output_offset);
6037 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
6038 rela.r_addend = 0;
6039 elf_append_rela (output_bfd, htab->srelbss, &rela);
6040 }
6041
6042 return TRUE;
6043 }
6044
6045 /* Finish up local dynamic symbol handling. We set the contents of
6046 various dynamic sections here. */
6047
6048 static bfd_boolean
6049 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
6050 {
6051 struct elf_link_hash_entry *h
6052 = (struct elf_link_hash_entry *) *slot;
6053 struct bfd_link_info *info
6054 = (struct bfd_link_info *) inf;
6055
6056 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6057 info, h, NULL);
6058 }
6059
6060 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
6061 here since an undefined weak symbol may not be dynamic, in which case
6062 elf_x86_64_finish_dynamic_symbol will not be called for it. */
6063
6064 static bfd_boolean
6065 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
6066 void *inf)
6067 {
6068 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
6069 struct bfd_link_info *info = (struct bfd_link_info *) inf;
6070
6071 if (h->root.type != bfd_link_hash_undefweak
6072 || h->dynindx != -1)
6073 return TRUE;
6074
6075 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6076 info, h, NULL);
6077 }
6078
6079 /* Used to decide how to sort relocs in an optimal manner for the
6080 dynamic linker, before writing them out. */
6081
6082 static enum elf_reloc_type_class
6083 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6084 const asection *rel_sec ATTRIBUTE_UNUSED,
6085 const Elf_Internal_Rela *rela)
6086 {
6087 bfd *abfd = info->output_bfd;
6088 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6089 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6090
6091 if (htab->elf.dynsym != NULL
6092 && htab->elf.dynsym->contents != NULL)
6093 {
6094 /* Check relocation against STT_GNU_IFUNC symbol if there are
6095 dynamic symbols. */
6096 unsigned long r_symndx = htab->r_sym (rela->r_info);
6097 if (r_symndx != STN_UNDEF)
6098 {
6099 Elf_Internal_Sym sym;
6100 if (!bed->s->swap_symbol_in (abfd,
6101 (htab->elf.dynsym->contents
6102 + r_symndx * bed->s->sizeof_sym),
6103 0, &sym))
6104 abort ();
6105
6106 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6107 return reloc_class_ifunc;
6108 }
6109 }
6110
6111 switch ((int) ELF32_R_TYPE (rela->r_info))
6112 {
6113 case R_X86_64_IRELATIVE:
6114 return reloc_class_ifunc;
6115 case R_X86_64_RELATIVE:
6116 case R_X86_64_RELATIVE64:
6117 return reloc_class_relative;
6118 case R_X86_64_JUMP_SLOT:
6119 return reloc_class_plt;
6120 case R_X86_64_COPY:
6121 return reloc_class_copy;
6122 default:
6123 return reloc_class_normal;
6124 }
6125 }
6126
6127 /* Finish up the dynamic sections. */
6128
6129 static bfd_boolean
6130 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6131 struct bfd_link_info *info)
6132 {
6133 struct elf_x86_64_link_hash_table *htab;
6134 bfd *dynobj;
6135 asection *sdyn;
6136 const struct elf_x86_64_backend_data *abed;
6137
6138 htab = elf_x86_64_hash_table (info);
6139 if (htab == NULL)
6140 return FALSE;
6141
6142 /* Use MPX backend data in case of BND relocations. Use the .plt_bnd
6143 section only if there is a .plt section. */
6144 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6145 ? &elf_x86_64_bnd_arch_bed
6146 : get_elf_x86_64_backend_data (output_bfd));
6147
6148 dynobj = htab->elf.dynobj;
6149 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6150
6151 if (htab->elf.dynamic_sections_created)
6152 {
6153 bfd_byte *dyncon, *dynconend;
6154 const struct elf_backend_data *bed;
6155 bfd_size_type sizeof_dyn;
6156
6157 if (sdyn == NULL || htab->elf.sgot == NULL)
6158 abort ();
6159
6160 bed = get_elf_backend_data (dynobj);
6161 sizeof_dyn = bed->s->sizeof_dyn;
6162 dyncon = sdyn->contents;
6163 dynconend = sdyn->contents + sdyn->size;
6164 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6165 {
6166 Elf_Internal_Dyn dyn;
6167 asection *s;
6168
6169 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6170
6171 switch (dyn.d_tag)
6172 {
6173 default:
6174 continue;
6175
6176 case DT_PLTGOT:
6177 s = htab->elf.sgotplt;
6178 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6179 break;
6180
6181 case DT_JMPREL:
6182 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6183 break;
6184
6185 case DT_PLTRELSZ:
6186 s = htab->elf.srelplt->output_section;
6187 dyn.d_un.d_val = s->size;
6188 break;
6189
6190 case DT_RELASZ:
6191 /* The procedure linkage table relocs (DT_JMPREL) should
6192 not be included in the overall relocs (DT_RELA).
6193 Therefore, we override the DT_RELASZ entry here to
6194 make it not include the JMPREL relocs. Since the
6195 linker script arranges for .rela.plt to follow all
6196 other relocation sections, we don't have to worry
6197 about changing the DT_RELA entry. */
6198 if (htab->elf.srelplt != NULL)
6199 {
6200 s = htab->elf.srelplt->output_section;
6201 dyn.d_un.d_val -= s->size;
6202 }
6203 break;
6204
6205 case DT_TLSDESC_PLT:
6206 s = htab->elf.splt;
6207 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6208 + htab->tlsdesc_plt;
6209 break;
6210
6211 case DT_TLSDESC_GOT:
6212 s = htab->elf.sgot;
6213 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6214 + htab->tlsdesc_got;
6215 break;
6216 }
6217
6218 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6219 }
6220
6221 /* Fill in the special first entry in the procedure linkage table. */
6222 if (htab->elf.splt && htab->elf.splt->size > 0)
6223 {
6224 /* Fill in the first entry in the procedure linkage table. */
6225 memcpy (htab->elf.splt->contents,
6226 abed->plt0_entry, abed->plt_entry_size);
6227 /* Fill in the offset for pushq GOT+8(%rip); the instruction is
6228 6 bytes long, so subtract this value. */
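/* The pushq GOT+8(%rip) instruction is encoded as ff 35 <disp32> and
   is 6 bytes long; the disp32 stored at plt0_got1_offset is resolved
   relative to the end of that instruction, i.e. the start of PLT0
   plus 6, hence the "- 6" below.  */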
6229 bfd_put_32 (output_bfd,
6230 (htab->elf.sgotplt->output_section->vma
6231 + htab->elf.sgotplt->output_offset
6232 + 8
6233 - htab->elf.splt->output_section->vma
6234 - htab->elf.splt->output_offset
6235 - 6),
6236 htab->elf.splt->contents + abed->plt0_got1_offset);
6237 /* Add offset for the PC-relative instruction accessing GOT+16,
6238 subtracting the offset to the end of that instruction. */
6239 bfd_put_32 (output_bfd,
6240 (htab->elf.sgotplt->output_section->vma
6241 + htab->elf.sgotplt->output_offset
6242 + 16
6243 - htab->elf.splt->output_section->vma
6244 - htab->elf.splt->output_offset
6245 - abed->plt0_got2_insn_end),
6246 htab->elf.splt->contents + abed->plt0_got2_offset);
6247
6248 elf_section_data (htab->elf.splt->output_section)
6249 ->this_hdr.sh_entsize = abed->plt_entry_size;
6250
6251 if (htab->tlsdesc_plt)
6252 {
6253 bfd_put_64 (output_bfd, (bfd_vma) 0,
6254 htab->elf.sgot->contents + htab->tlsdesc_got);
6255
6256 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6257 abed->plt0_entry, abed->plt_entry_size);
6258
6259 /* Fill in the offset for pushq GOT+8(%rip); the
6260 instruction is 6 bytes long, so subtract this value. */
6261 bfd_put_32 (output_bfd,
6262 (htab->elf.sgotplt->output_section->vma
6263 + htab->elf.sgotplt->output_offset
6264 + 8
6265 - htab->elf.splt->output_section->vma
6266 - htab->elf.splt->output_offset
6267 - htab->tlsdesc_plt
6268 - 6),
6269 htab->elf.splt->contents
6270 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6271 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
6272 where TDG stands for htab->tlsdesc_got, subtracting the offset
6273 to the end of that instruction. */
6274 bfd_put_32 (output_bfd,
6275 (htab->elf.sgot->output_section->vma
6276 + htab->elf.sgot->output_offset
6277 + htab->tlsdesc_got
6278 - htab->elf.splt->output_section->vma
6279 - htab->elf.splt->output_offset
6280 - htab->tlsdesc_plt
6281 - abed->plt0_got2_insn_end),
6282 htab->elf.splt->contents
6283 + htab->tlsdesc_plt + abed->plt0_got2_offset);
6284 }
6285 }
6286 }
6287
6288 if (htab->plt_bnd != NULL)
6289 elf_section_data (htab->plt_bnd->output_section)
6290 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6291
6292 if (htab->elf.sgotplt)
6293 {
6294 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6295 {
6296 _bfd_error_handler
6297 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6298 return FALSE;
6299 }
6300
6301 /* Fill in the first three entries in the global offset table. */
6302 if (htab->elf.sgotplt->size > 0)
6303 {
6304 /* Set the first entry in the global offset table to the address of
6305 the dynamic section. */
6306 if (sdyn == NULL)
6307 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6308 else
6309 bfd_put_64 (output_bfd,
6310 sdyn->output_section->vma + sdyn->output_offset,
6311 htab->elf.sgotplt->contents);
6312 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
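/* These two slots are filled in at run time by the dynamic linker;
   conventionally GOT[1] receives the link map pointer and GOT[2] the
   address of its lazy resolution entry point.  */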
6313 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6314 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
6315 }
6316
6317 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6318 GOT_ENTRY_SIZE;
6319 }
6320
6321 /* Adjust .eh_frame for .plt section. */
6322 if (htab->plt_eh_frame != NULL
6323 && htab->plt_eh_frame->contents != NULL)
6324 {
6325 if (htab->elf.splt != NULL
6326 && htab->elf.splt->size != 0
6327 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6328 && htab->elf.splt->output_section != NULL
6329 && htab->plt_eh_frame->output_section != NULL)
6330 {
6331 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6332 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6333 + htab->plt_eh_frame->output_offset
6334 + PLT_FDE_START_OFFSET;
6335 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6336 htab->plt_eh_frame->contents
6337 + PLT_FDE_START_OFFSET);
6338 }
6339 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6340 {
6341 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6342 htab->plt_eh_frame,
6343 htab->plt_eh_frame->contents))
6344 return FALSE;
6345 }
6346 }
6347
6348 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6349 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6350 = GOT_ENTRY_SIZE;
6351
6352 /* Fill PLT entries for undefined weak symbols in PIE. */
6353 if (bfd_link_pie (info))
6354 bfd_hash_traverse (&info->hash->table,
6355 elf_x86_64_pie_finish_undefweak_symbol,
6356 info);
6357
6358 return TRUE;
6359 }
6360
6361 /* Fill PLT/GOT entries and allocate dynamic relocations for local
6362 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
6363 It has to be done before elf_link_sort_relocs is called so that
6364 dynamic relocations are properly sorted. */
6365
6366 static bfd_boolean
6367 elf_x86_64_output_arch_local_syms
6368 (bfd *output_bfd ATTRIBUTE_UNUSED,
6369 struct bfd_link_info *info,
6370 void *flaginfo ATTRIBUTE_UNUSED,
6371 int (*func) (void *, const char *,
6372 Elf_Internal_Sym *,
6373 asection *,
6374 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
6375 {
6376 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6377 if (htab == NULL)
6378 return FALSE;
6379
6380 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6381 htab_traverse (htab->loc_hash_table,
6382 elf_x86_64_finish_local_dynamic_symbol,
6383 info);
6384
6385 return TRUE;
6386 }
6387
6388 /* Return an array of PLT entry symbol values. */
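/* The array is indexed by the position of the matching reloc in
   .rela.plt; each entry holds the address a synthetic "name@plt"
   symbol should get (the .plt.bnd stub when a second PLT is present).
   It is consumed by _bfd_elf_ifunc_get_synthetic_symtab via
   elf_x86_64_get_synthetic_symtab below.  */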
6389
6390 static bfd_vma *
6391 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
6392 asection *relplt)
6393 {
6394 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
6395 arelent *p;
6396 long count, i;
6397 bfd_vma *plt_sym_val;
6398 bfd_vma plt_offset;
6399 bfd_byte *plt_contents;
6400 const struct elf_x86_64_backend_data *bed;
6401 Elf_Internal_Shdr *hdr;
6402 asection *plt_bnd;
6403
6404 /* Get the .plt section contents. The PLT passed down may point to
6405 the .plt.bnd section. Make sure that PLT always points to the
6406 .plt section. */
6407 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6408 if (plt_bnd)
6409 {
6410 if (plt != plt_bnd)
6411 abort ();
6412 plt = bfd_get_section_by_name (abfd, ".plt");
6413 if (plt == NULL)
6414 abort ();
6415 bed = &elf_x86_64_bnd_arch_bed;
6416 }
6417 else
6418 bed = get_elf_x86_64_backend_data (abfd);
6419
6420 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6421 if (plt_contents == NULL)
6422 return NULL;
6423 if (!bfd_get_section_contents (abfd, (asection *) plt,
6424 plt_contents, 0, plt->size))
6425 {
6426 bad_return:
6427 free (plt_contents);
6428 return NULL;
6429 }
6430
6431 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6432 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6433 goto bad_return;
6434
6435 hdr = &elf_section_data (relplt)->this_hdr;
6436 count = relplt->size / hdr->sh_entsize;
6437
6438 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6439 if (plt_sym_val == NULL)
6440 goto bad_return;
6441
6442 for (i = 0; i < count; i++)
6443 plt_sym_val[i] = -1;
6444
6445 plt_offset = bed->plt_entry_size;
6446 p = relplt->relocation;
6447 for (i = 0; i < count; i++, p++)
6448 {
6449 long reloc_index;
6450
6451 /* Skip unknown relocation. */
6452 if (p->howto == NULL)
6453 continue;
6454
6455 if (p->howto->type != R_X86_64_JUMP_SLOT
6456 && p->howto->type != R_X86_64_IRELATIVE)
6457 continue;
6458
6459 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6460 + bed->plt_reloc_offset));
6461 if (reloc_index < count)
6462 {
6463 if (plt_bnd)
6464 {
6465 /* This is the index in the .plt section. */
6466 long plt_index = plt_offset / bed->plt_entry_size;
6467 /* Store the .plt.bnd VMA plus the offset within it. */
6468 plt_sym_val[reloc_index] =
6469 (plt_bnd->vma
6470 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
6471 }
6472 else
6473 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6474 }
6475 plt_offset += bed->plt_entry_size;
6476
6477 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6478 section. */
6479 if (plt_offset >= plt->size)
6480 break;
6481 }
6482
6483 free (plt_contents);
6484
6485 return plt_sym_val;
6486 }
6487
6488 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6489 support. */
6490
6491 static long
6492 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6493 long symcount,
6494 asymbol **syms,
6495 long dynsymcount,
6496 asymbol **dynsyms,
6497 asymbol **ret)
6498 {
6499 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6500 as PLT if it exists. */
6501 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6502 if (plt == NULL)
6503 plt = bfd_get_section_by_name (abfd, ".plt");
6504 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6505 dynsymcount, dynsyms, ret,
6506 plt,
6507 elf_x86_64_get_plt_sym_val);
6508 }
6509
6510 /* Handle an x86-64 specific section when reading an object file. This
6511 is called when elfcode.h finds a section with an unknown type. */
6512
6513 static bfd_boolean
6514 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6515 const char *name, int shindex)
6516 {
6517 if (hdr->sh_type != SHT_X86_64_UNWIND)
6518 return FALSE;
6519
6520 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6521 return FALSE;
6522
6523 return TRUE;
6524 }
6525
6526 /* Hook called by the linker routine which adds symbols from an object
6527 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6528 of .bss. */
6529
6530 static bfd_boolean
6531 elf_x86_64_add_symbol_hook (bfd *abfd,
6532 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6533 Elf_Internal_Sym *sym,
6534 const char **namep ATTRIBUTE_UNUSED,
6535 flagword *flagsp ATTRIBUTE_UNUSED,
6536 asection **secp,
6537 bfd_vma *valp)
6538 {
6539 asection *lcomm;
6540
6541 switch (sym->st_shndx)
6542 {
6543 case SHN_X86_64_LCOMMON:
6544 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6545 if (lcomm == NULL)
6546 {
6547 lcomm = bfd_make_section_with_flags (abfd,
6548 "LARGE_COMMON",
6549 (SEC_ALLOC
6550 | SEC_IS_COMMON
6551 | SEC_LINKER_CREATED));
6552 if (lcomm == NULL)
6553 return FALSE;
6554 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6555 }
6556 *secp = lcomm;
6557 *valp = sym->st_size;
6558 return TRUE;
6559 }
6560
6561 return TRUE;
6562 }
6563
6564
6565 /* Given a BFD section, try to locate the corresponding ELF section
6566 index. */
6567
6568 static bfd_boolean
6569 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6570 asection *sec, int *index_return)
6571 {
6572 if (sec == &_bfd_elf_large_com_section)
6573 {
6574 *index_return = SHN_X86_64_LCOMMON;
6575 return TRUE;
6576 }
6577 return FALSE;
6578 }
6579
6580 /* Process a symbol. */
6581
6582 static void
6583 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6584 asymbol *asym)
6585 {
6586 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6587
6588 switch (elfsym->internal_elf_sym.st_shndx)
6589 {
6590 case SHN_X86_64_LCOMMON:
6591 asym->section = &_bfd_elf_large_com_section;
6592 asym->value = elfsym->internal_elf_sym.st_size;
6593 /* Common symbol doesn't set BSF_GLOBAL. */
6594 asym->flags &= ~BSF_GLOBAL;
6595 break;
6596 }
6597 }
6598
6599 static bfd_boolean
6600 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6601 {
6602 return (sym->st_shndx == SHN_COMMON
6603 || sym->st_shndx == SHN_X86_64_LCOMMON);
6604 }
6605
6606 static unsigned int
6607 elf_x86_64_common_section_index (asection *sec)
6608 {
6609 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6610 return SHN_COMMON;
6611 else
6612 return SHN_X86_64_LCOMMON;
6613 }
6614
6615 static asection *
6616 elf_x86_64_common_section (asection *sec)
6617 {
6618 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6619 return bfd_com_section_ptr;
6620 else
6621 return &_bfd_elf_large_com_section;
6622 }
6623
6624 static bfd_boolean
6625 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6626 const Elf_Internal_Sym *sym,
6627 asection **psec,
6628 bfd_boolean newdef,
6629 bfd_boolean olddef,
6630 bfd *oldbfd,
6631 const asection *oldsec)
6632 {
6633 /* A normal common symbol and a large common symbol result in a
6634 normal common symbol. We turn the large common symbol into a
6635 normal one. */
6636 if (!olddef
6637 && h->root.type == bfd_link_hash_common
6638 && !newdef
6639 && bfd_is_com_section (*psec)
6640 && oldsec != *psec)
6641 {
6642 if (sym->st_shndx == SHN_COMMON
6643 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6644 {
6645 h->root.u.c.p->section
6646 = bfd_make_section_old_way (oldbfd, "COMMON");
6647 h->root.u.c.p->section->flags = SEC_ALLOC;
6648 }
6649 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6650 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6651 *psec = bfd_com_section_ptr;
6652 }
6653
6654 return TRUE;
6655 }
6656
6657 static int
6658 elf_x86_64_additional_program_headers (bfd *abfd,
6659 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6660 {
6661 asection *s;
6662 int count = 0;
6663
6664 /* Check to see if we need a large readonly segment. */
6665 s = bfd_get_section_by_name (abfd, ".lrodata");
6666 if (s && (s->flags & SEC_LOAD))
6667 count++;
6668
6669 /* Check to see if we need a large data segment. Since the .lbss
6670 section is placed right after the .bss section, there should be no
6671 need for a large data segment just because of .lbss. */
6672 s = bfd_get_section_by_name (abfd, ".ldata");
6673 if (s && (s->flags & SEC_LOAD))
6674 count++;
6675
6676 return count;
6677 }
6678
6679 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6680
6681 static bfd_boolean
6682 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6683 {
6684 if (h->plt.offset != (bfd_vma) -1
6685 && !h->def_regular
6686 && !h->pointer_equality_needed)
6687 return FALSE;
6688
6689 return _bfd_elf_hash_symbol (h);
6690 }
6691
6692 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6693
6694 static bfd_boolean
6695 elf_x86_64_relocs_compatible (const bfd_target *input,
6696 const bfd_target *output)
6697 {
6698 return ((xvec_get_elf_backend_data (input)->s->elfclass
6699 == xvec_get_elf_backend_data (output)->s->elfclass)
6700 && _bfd_elf_relocs_compatible (input, output));
6701 }
6702
6703 static const struct bfd_elf_special_section
6704 elf_x86_64_special_sections[]=
6705 {
6706 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6707 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6708 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6709 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6710 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6711 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6712 { NULL, 0, 0, 0, 0 }
6713 };
6714
6715 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6716 #define TARGET_LITTLE_NAME "elf64-x86-64"
6717 #define ELF_ARCH bfd_arch_i386
6718 #define ELF_TARGET_ID X86_64_ELF_DATA
6719 #define ELF_MACHINE_CODE EM_X86_64
6720 #define ELF_MAXPAGESIZE 0x200000
6721 #define ELF_MINPAGESIZE 0x1000
6722 #define ELF_COMMONPAGESIZE 0x1000
6723
6724 #define elf_backend_can_gc_sections 1
6725 #define elf_backend_can_refcount 1
6726 #define elf_backend_want_got_plt 1
6727 #define elf_backend_plt_readonly 1
6728 #define elf_backend_want_plt_sym 0
6729 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6730 #define elf_backend_rela_normal 1
6731 #define elf_backend_plt_alignment 4
6732 #define elf_backend_extern_protected_data 1
6733 #define elf_backend_caches_rawsize 1
6734
6735 #define elf_info_to_howto elf_x86_64_info_to_howto
6736
6737 #define bfd_elf64_bfd_link_hash_table_create \
6738 elf_x86_64_link_hash_table_create
6739 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6740 #define bfd_elf64_bfd_reloc_name_lookup \
6741 elf_x86_64_reloc_name_lookup
6742
6743 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6744 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6745 #define elf_backend_check_relocs elf_x86_64_check_relocs
6746 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6747 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6748 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6749 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6750 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
6751 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6752 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6753 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6754 #ifdef CORE_HEADER
6755 #define elf_backend_write_core_note elf_x86_64_write_core_note
6756 #endif
6757 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6758 #define elf_backend_relocate_section elf_x86_64_relocate_section
6759 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6760 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6761 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6762 #define elf_backend_object_p elf64_x86_64_elf_object_p
6763 #define bfd_elf64_mkobject elf_x86_64_mkobject
6764 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6765
6766 #define elf_backend_section_from_shdr \
6767 elf_x86_64_section_from_shdr
6768
6769 #define elf_backend_section_from_bfd_section \
6770 elf_x86_64_elf_section_from_bfd_section
6771 #define elf_backend_add_symbol_hook \
6772 elf_x86_64_add_symbol_hook
6773 #define elf_backend_symbol_processing \
6774 elf_x86_64_symbol_processing
6775 #define elf_backend_common_section_index \
6776 elf_x86_64_common_section_index
6777 #define elf_backend_common_section \
6778 elf_x86_64_common_section
6779 #define elf_backend_common_definition \
6780 elf_x86_64_common_definition
6781 #define elf_backend_merge_symbol \
6782 elf_x86_64_merge_symbol
6783 #define elf_backend_special_sections \
6784 elf_x86_64_special_sections
6785 #define elf_backend_additional_program_headers \
6786 elf_x86_64_additional_program_headers
6787 #define elf_backend_hash_symbol \
6788 elf_x86_64_hash_symbol
6789 #define elf_backend_omit_section_dynsym \
6790 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6791 #define elf_backend_fixup_symbol \
6792 elf_x86_64_fixup_symbol
6793
6794 #include "elf64-target.h"
6795
6796 /* CloudABI support. */
6797
6798 #undef TARGET_LITTLE_SYM
6799 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6800 #undef TARGET_LITTLE_NAME
6801 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6802
6803 #undef ELF_OSABI
6804 #define ELF_OSABI ELFOSABI_CLOUDABI
6805
6806 #undef elf64_bed
6807 #define elf64_bed elf64_x86_64_cloudabi_bed
6808
6809 #include "elf64-target.h"
6810
6811 /* FreeBSD support. */
6812
6813 #undef TARGET_LITTLE_SYM
6814 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6815 #undef TARGET_LITTLE_NAME
6816 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6817
6818 #undef ELF_OSABI
6819 #define ELF_OSABI ELFOSABI_FREEBSD
6820
6821 #undef elf64_bed
6822 #define elf64_bed elf64_x86_64_fbsd_bed
6823
6824 #include "elf64-target.h"
6825
6826 /* Solaris 2 support. */
6827
6828 #undef TARGET_LITTLE_SYM
6829 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6830 #undef TARGET_LITTLE_NAME
6831 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6832
6833 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6834 objects won't be recognized. */
6835 #undef ELF_OSABI
6836
6837 #undef elf64_bed
6838 #define elf64_bed elf64_x86_64_sol2_bed
6839
6840 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6841 boundary. */
6842 #undef elf_backend_static_tls_alignment
6843 #define elf_backend_static_tls_alignment 16
6844
6845 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6846
6847 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6848 File, p.63. */
6849 #undef elf_backend_want_plt_sym
6850 #define elf_backend_want_plt_sym 1
6851
6852 #undef elf_backend_strtab_flags
6853 #define elf_backend_strtab_flags SHF_STRINGS
6854
6855 static bfd_boolean
6856 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
6857 bfd *obfd ATTRIBUTE_UNUSED,
6858 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
6859 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
6860 {
6861 /* PR 19938: FIXME: Need to add code for setting the sh_info
6862 and sh_link fields of Solaris specific section types. */
6863 return FALSE;
6864 }
6865
6866 #undef elf_backend_copy_special_section_fields
6867 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6868
6869 #include "elf64-target.h"
6870
6871 /* Native Client support. */
6872
6873 static bfd_boolean
6874 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6875 {
6876 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6877 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6878 return TRUE;
6879 }
6880
6881 #undef TARGET_LITTLE_SYM
6882 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6883 #undef TARGET_LITTLE_NAME
6884 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6885 #undef elf64_bed
6886 #define elf64_bed elf64_x86_64_nacl_bed
6887
6888 #undef ELF_MAXPAGESIZE
6889 #undef ELF_MINPAGESIZE
6890 #undef ELF_COMMONPAGESIZE
6891 #define ELF_MAXPAGESIZE 0x10000
6892 #define ELF_MINPAGESIZE 0x10000
6893 #define ELF_COMMONPAGESIZE 0x10000
6894
6895 /* Restore defaults. */
6896 #undef ELF_OSABI
6897 #undef elf_backend_static_tls_alignment
6898 #undef elf_backend_want_plt_sym
6899 #define elf_backend_want_plt_sym 0
6900 #undef elf_backend_strtab_flags
6901 #undef elf_backend_copy_special_section_fields
6902
6903 /* NaCl uses substantially different PLT entries for the same effects. */
6904
6905 #undef elf_backend_plt_alignment
6906 #define elf_backend_plt_alignment 5
6907 #define NACL_PLT_ENTRY_SIZE 64
6908 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6909
6910 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6911 {
6912 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6913 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6914 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6915 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6916 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6917
6918 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6919 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6920
6921 /* 32 bytes of nop to pad out to the standard size. */
6922 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6923 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6924 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6925 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6926 0x66, /* excess data16 prefix */
6927 0x90 /* nop */
6928 };
6929
6930 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6931 {
6932 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6933 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6934 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6935 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6936
6937 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6938 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6939 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6940
6941 /* Lazy GOT entries point here (32-byte aligned). */
6942 0x68, /* pushq immediate */
6943 0, 0, 0, 0, /* replaced with index into relocation table. */
6944 0xe9, /* jmp relative */
6945 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6946
6947 /* 22 bytes of nop to pad out to the standard size. */
6948 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6949 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6950 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6951 };
6952
6953 /* .eh_frame covering the .plt section. */
6954
6955 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6956 {
6957 #if (PLT_CIE_LENGTH != 20 \
6958 || PLT_FDE_LENGTH != 36 \
6959 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6960 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6961 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6962 #endif
6963 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6964 0, 0, 0, 0, /* CIE ID */
6965 1, /* CIE version */
6966 'z', 'R', 0, /* Augmentation string */
6967 1, /* Code alignment factor */
6968 0x78, /* Data alignment factor */
6969 16, /* Return address column */
6970 1, /* Augmentation size */
6971 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6972 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6973 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6974 DW_CFA_nop, DW_CFA_nop,
6975
6976 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6977 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6978 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6979 0, 0, 0, 0, /* .plt size goes here */
6980 0, /* Augmentation size */
6981 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6982 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6983 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6984 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6985 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6986 13, /* Block length */
6987 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6988 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6989 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6990 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6991 DW_CFA_nop, DW_CFA_nop
6992 };
6993
6994 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6995 {
6996 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6997 elf_x86_64_nacl_plt_entry, /* plt_entry */
6998 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6999 2, /* plt0_got1_offset */
7000 9, /* plt0_got2_offset */
7001 13, /* plt0_got2_insn_end */
7002 3, /* plt_got_offset */
7003 33, /* plt_reloc_offset */
7004 38, /* plt_plt_offset */
7005 7, /* plt_got_insn_size */
7006 42, /* plt_plt_insn_end */
7007 32, /* plt_lazy_offset */
7008 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
7009 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
7010 };
7011
7012 #undef elf_backend_arch_data
7013 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
7014
7015 #undef elf_backend_object_p
7016 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
7017 #undef elf_backend_modify_segment_map
7018 #define elf_backend_modify_segment_map nacl_modify_segment_map
7019 #undef elf_backend_modify_program_headers
7020 #define elf_backend_modify_program_headers nacl_modify_program_headers
7021 #undef elf_backend_final_write_processing
7022 #define elf_backend_final_write_processing nacl_final_write_processing
7023
7024 #include "elf64-target.h"
7025
7026 /* Native Client x32 support. */
7027
7028 static bfd_boolean
7029 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
7030 {
7031 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
7032 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
7033 return TRUE;
7034 }
7035
7036 #undef TARGET_LITTLE_SYM
7037 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
7038 #undef TARGET_LITTLE_NAME
7039 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
7040 #undef elf32_bed
7041 #define elf32_bed elf32_x86_64_nacl_bed
7042
7043 #define bfd_elf32_bfd_link_hash_table_create \
7044 elf_x86_64_link_hash_table_create
7045 #define bfd_elf32_bfd_reloc_type_lookup \
7046 elf_x86_64_reloc_type_lookup
7047 #define bfd_elf32_bfd_reloc_name_lookup \
7048 elf_x86_64_reloc_name_lookup
7049 #define bfd_elf32_mkobject \
7050 elf_x86_64_mkobject
7051 #define bfd_elf32_get_synthetic_symtab \
7052 elf_x86_64_get_synthetic_symtab
7053
7054 #undef elf_backend_object_p
7055 #define elf_backend_object_p \
7056 elf32_x86_64_nacl_elf_object_p
7057
7058 #undef elf_backend_bfd_from_remote_memory
7059 #define elf_backend_bfd_from_remote_memory \
7060 _bfd_elf32_bfd_from_remote_memory
7061
7062 #undef elf_backend_size_info
7063 #define elf_backend_size_info \
7064 _bfd_elf32_size_info
7065
7066 #include "elf32-target.h"
7067
7068 /* Restore defaults. */
7069 #undef elf_backend_object_p
7070 #define elf_backend_object_p elf64_x86_64_elf_object_p
7071 #undef elf_backend_bfd_from_remote_memory
7072 #undef elf_backend_size_info
7073 #undef elf_backend_modify_segment_map
7074 #undef elf_backend_modify_program_headers
7075 #undef elf_backend_final_write_processing
7076
7077 /* Intel L1OM support. */
7078
7079 static bfd_boolean
7080 elf64_l1om_elf_object_p (bfd *abfd)
7081 {
7082 /* Set the right machine number for an L1OM elf64 file. */
7083 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
7084 return TRUE;
7085 }
7086
7087 #undef TARGET_LITTLE_SYM
7088 #define TARGET_LITTLE_SYM l1om_elf64_vec
7089 #undef TARGET_LITTLE_NAME
7090 #define TARGET_LITTLE_NAME "elf64-l1om"
7091 #undef ELF_ARCH
7092 #define ELF_ARCH bfd_arch_l1om
7093
7094 #undef ELF_MACHINE_CODE
7095 #define ELF_MACHINE_CODE EM_L1OM
7096
7097 #undef ELF_OSABI
7098
7099 #undef elf64_bed
7100 #define elf64_bed elf64_l1om_bed
7101
7102 #undef elf_backend_object_p
7103 #define elf_backend_object_p elf64_l1om_elf_object_p
7104
7105 /* Restore defaults. */
7106 #undef ELF_MAXPAGESIZE
7107 #undef ELF_MINPAGESIZE
7108 #undef ELF_COMMONPAGESIZE
7109 #define ELF_MAXPAGESIZE 0x200000
7110 #define ELF_MINPAGESIZE 0x1000
7111 #define ELF_COMMONPAGESIZE 0x1000
7112 #undef elf_backend_plt_alignment
7113 #define elf_backend_plt_alignment 4
7114 #undef elf_backend_arch_data
7115 #define elf_backend_arch_data &elf_x86_64_arch_bed
7116
7117 #include "elf64-target.h"
7118
7119 /* FreeBSD L1OM support. */
7120
7121 #undef TARGET_LITTLE_SYM
7122 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7123 #undef TARGET_LITTLE_NAME
7124 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7125
7126 #undef ELF_OSABI
7127 #define ELF_OSABI ELFOSABI_FREEBSD
7128
7129 #undef elf64_bed
7130 #define elf64_bed elf64_l1om_fbsd_bed
7131
7132 #include "elf64-target.h"
7133
7134 /* Intel K1OM support. */
7135
7136 static bfd_boolean
7137 elf64_k1om_elf_object_p (bfd *abfd)
7138 {
7139 /* Set the right machine number for a K1OM elf64 file. */
7140 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7141 return TRUE;
7142 }
7143
7144 #undef TARGET_LITTLE_SYM
7145 #define TARGET_LITTLE_SYM k1om_elf64_vec
7146 #undef TARGET_LITTLE_NAME
7147 #define TARGET_LITTLE_NAME "elf64-k1om"
7148 #undef ELF_ARCH
7149 #define ELF_ARCH bfd_arch_k1om
7150
7151 #undef ELF_MACHINE_CODE
7152 #define ELF_MACHINE_CODE EM_K1OM
7153
7154 #undef ELF_OSABI
7155
7156 #undef elf64_bed
7157 #define elf64_bed elf64_k1om_bed
7158
7159 #undef elf_backend_object_p
7160 #define elf_backend_object_p elf64_k1om_elf_object_p
7161
7162 #undef elf_backend_static_tls_alignment
7163
7164 #undef elf_backend_want_plt_sym
7165 #define elf_backend_want_plt_sym 0
7166
7167 #include "elf64-target.h"
7168
7169 /* FreeBSD K1OM support. */
7170
7171 #undef TARGET_LITTLE_SYM
7172 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7173 #undef TARGET_LITTLE_NAME
7174 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7175
7176 #undef ELF_OSABI
7177 #define ELF_OSABI ELFOSABI_FREEBSD
7178
7179 #undef elf64_bed
7180 #define elf64_bed elf64_k1om_fbsd_bed
7181
7182 #include "elf64-target.h"
7183
7184 /* 32bit x86-64 support. */
7185
7186 #undef TARGET_LITTLE_SYM
7187 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7188 #undef TARGET_LITTLE_NAME
7189 #define TARGET_LITTLE_NAME "elf32-x86-64"
7190 #undef elf32_bed
7191
7192 #undef ELF_ARCH
7193 #define ELF_ARCH bfd_arch_i386
7194
7195 #undef ELF_MACHINE_CODE
7196 #define ELF_MACHINE_CODE EM_X86_64
7197
7198 #undef ELF_OSABI
7199
7200 #undef elf_backend_object_p
7201 #define elf_backend_object_p \
7202 elf32_x86_64_elf_object_p
7203
7204 #undef elf_backend_bfd_from_remote_memory
7205 #define elf_backend_bfd_from_remote_memory \
7206 _bfd_elf32_bfd_from_remote_memory
7207
7208 #undef elf_backend_size_info
7209 #define elf_backend_size_info \
7210 _bfd_elf32_size_info
7211
7212 #include "elf32-target.h"