1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39    identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40    the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41    since they are the same.  */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86   HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
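  /* The GNU vtable relocs have type numbers above 0x7f, so clearing
     R_X86_64_converted_reloc_bit would corrupt them; leave them alone.  */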
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366     case 296:           /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
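        /* 216 bytes covers the 27 64-bit general registers of
           struct user_regs_struct; x32 core dumps use the same
           64-bit register layout as x86-64.  */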
376
377 break;
378
379     case 336:           /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426   /* Note that some implementations (at least one, anyway) tack a
427      spurious space onto the end of the args, so strip it off if it
428      exists.  */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545    See the SVR4 ABI i386 supplement and the x86-64 ABI for details on
546    how this works.  */
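/* GOT+8 holds the link map of this object and GOT+16 the address of the
   dynamic linker's lazy resolver; both slots are filled in by the dynamic
   linker at load time.  */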
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
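/* On the first call, the GOT slot still holds the address of the pushq
   above, so the indirect jmp lands there; the entry then jumps to PLT0,
   which enters the dynamic linker's resolver, and the resolver rewrites
   the GOT slot with the real target address.  */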
566
567 /* The first entry in a lazy procedure linkage table with BND prefix
568    looks like this.  */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588    the same as the lazy PLT with BND prefix, so that bound registers are
589    preserved when control is passed to the dynamic linker.  Subsequent
590    entries for an IBT-enabled lazy procedure linkage table look like
591 this. */
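/* endbr64 marks a valid indirect branch target when Intel CET indirect
   branch tracking (IBT) is enforced.  */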
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
613 /* Entries in the non-lazy procedure linkage table look like this.  */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
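/* The FDE's DW_CFA_def_cfa_expression below computes
     CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0),
   i.e. it accounts for the extra 8 bytes on the stack once the pushq
   inside a lazy PLT entry has executed.  */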
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT enabled.  */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT enabled.  */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
825 {
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
843 };
844
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
846 {
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
854 };
855
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
857 {
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
878 {
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
886 };
887
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
889 {
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
928 };
929
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
931 {
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
939 };
940
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
942 {
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
953 {
954 is_normal /* os */
955 };
956
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
958
959 static bfd_boolean
960 elf64_x86_64_elf_object_p (bfd *abfd)
961 {
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
964 return TRUE;
965 }
966
967 static bfd_boolean
968 elf32_x86_64_elf_object_p (bfd *abfd)
969 {
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
972 return TRUE;
973 }
974
975 /* Return TRUE if the TLS access code sequence supports transition
976 from R_TYPE. */
977
978 static bfd_boolean
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
981 asection *sec,
982 bfd_byte *contents,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
985 unsigned int r_type,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
988 {
989 unsigned int val;
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
993 bfd_vma offset;
994 struct elf_x86_link_hash_table *htab;
995 bfd_byte *call;
996 bfd_boolean indirect_call;
997
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1000 switch (r_type)
1001 {
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1005 return FALSE;
1006
1007 if (r_type == R_X86_64_TLSGD)
1008 {
1009          /* Check transition from GD access model.  For 64-bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1012 or
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .byte 0x66; rex64
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
1018             can transition to a different access model.  For 32-bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1021 or
1022 leaq foo@tlsgd(%rip), %rdi
1023 .byte 0x66; rex64
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
1027             can transition to a different access model.  For largepic,
1028 we also support:
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
1031 addq $r15, %rax
1032 call *%rax
1033 or
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
1036 addq $rbx, %rax
1037 call *%rax */
1038
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1040
1041 if ((offset + 12) > sec->size)
1042 return FALSE;
1043
1044 call = contents + offset + 4;
1045 if (call[0] != 0x66
1046 || !((call[1] == 0x48
1047 && call[2] == 0xff
1048 && call[3] == 0x15)
1049 || (call[1] == 0x48
1050 && call[2] == 0x67
1051 && call[3] == 0xe8)
1052 || (call[1] == 0x66
1053 && call[2] == 0x48
1054 && call[3] == 0xe8)))
1055 {
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1058 || offset < 3
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1061 || call[11] != 0x01
1062 || call[13] != 0xff
1063 || call[14] != 0xd0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1066 return FALSE;
1067 largepic = TRUE;
1068 }
1069 else if (ABI_64_P (abfd))
1070 {
1071 if (offset < 4
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1073 return FALSE;
1074 }
1075 else
1076 {
1077 if (offset < 3
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1079 return FALSE;
1080 }
1081 indirect_call = call[2] == 0xff;
1082 }
1083 else
1084 {
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1088 or
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
1093             can transition to a different access model.  For largepic
1094 we also support:
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
1097 addq $r15, %rax
1098 call *%rax
1099 or
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
1102 addq $rbx, %rax
1103 call *%rax */
1104
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1106
1107 if (offset < 3 || (offset + 9) > sec->size)
1108 return FALSE;
1109
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1111 return FALSE;
1112
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1117 {
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1121 || call[11] != 0x01
1122 || call[13] != 0xff
1123 || call[14] != 0xd0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1126 return FALSE;
1127 largepic = TRUE;
1128 }
1129 indirect_call = call[0] == 0xff;
1130 }
1131
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1134 return FALSE;
1135
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1137 if (h == NULL
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1139 return FALSE;
1140 else
1141 {
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1144 if (largepic)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1148 else
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1150 }
1151
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1156 */
1157
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1160 {
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1163 {
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1166 return FALSE;
1167 }
1168 }
1169 else
1170 {
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 if (offset < 2 || (offset + 3) > sec->size)
1175 return FALSE;
1176 }
1177
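      /* The opcode must be mov (0x8b) or add (0x03), and the ModRM byte
         must have mod == 0 and r/m == 5, i.e. RIP-relative addressing.  */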
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1180 return FALSE;
1181
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1184
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1188
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1191 going to be rax. */
1192
1193 if (offset < 3 || (offset + 4) > sec->size)
1194 return FALSE;
1195
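      /* The REX prefix must be 0x48 or 0x4c (REX.W, optionally with
         REX.R), followed by the lea opcode 0x8d and a RIP-relative
         ModRM byte.  */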
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1198 return FALSE;
1199
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1205
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1209 */
1210 if (offset + 2 <= sec->size)
1211 {
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1215 }
1216
1217 return FALSE;
1218
1219 default:
1220 abort ();
1221 }
1222 }
1223
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
1226
1227 static bfd_boolean
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1238 {
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1242
1243 /* Skip TLS transition for functions. */
1244 if (h != NULL
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1247 return TRUE;
1248
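  /* When building an executable, GD, GDesc and IE accesses to local
     symbols go straight to LE (TPOFF32); accesses to global symbols are
     relaxed to IE (GOTTPOFF) here and may be relaxed further to LE from
     relocate_section.  */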
1249 switch (from_type)
1250 {
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1256 {
1257 if (h == NULL)
1258 to_type = R_X86_64_TPOFF32;
1259 else
1260 to_type = R_X86_64_GOTTPOFF;
1261 }
1262
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1266 {
1267 unsigned int new_to_type = to_type;
1268
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1271
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1275 {
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1278 }
1279
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1285 }
1286
1287 break;
1288
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1292 break;
1293
1294 default:
1295 return TRUE;
1296 }
1297
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1300 return TRUE;
1301
1302 /* Check if the transition can be performed. */
1303 if (check
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1307 {
1308 reloc_howto_type *from, *to;
1309 const char *name;
1310
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1313
1314 if (h)
1315 name = h->root.root.string;
1316 else
1317 {
1318 struct elf_x86_link_hash_table *htab;
1319
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1321 if (htab == NULL)
1322 name = "*unknown*";
1323 else
1324 {
1325 Elf_Internal_Sym *isym;
1326
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1328 abfd, r_symndx);
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1330 }
1331 }
1332
1333 _bfd_error_handler
1334 /* xgettext:c-format */
1335 (_("%pB: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%pA' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
1339 return FALSE;
1340 }
1341
1342 *r_type = to_type;
1343 return TRUE;
1344 }
1345
1346 /* Rename some of the generic section flags to better document how they
1347 are used here. */
1348 #define check_relocs_failed sec_flg0
1349
1350 static bfd_boolean
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1357 {
1358 const char *v = "";
1359 const char *und = "";
1360 const char *pic = "";
1361 const char *object;
1362
1363 const char *name;
1364 if (h)
1365 {
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1368 {
1369 case STV_HIDDEN:
1370 v = _("hidden symbol ");
1371 break;
1372 case STV_INTERNAL:
1373 v = _("internal symbol ");
1374 break;
1375 case STV_PROTECTED:
1376 v = _("protected symbol ");
1377 break;
1378 default:
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1381 else
1382 v = _("symbol ");
1383 pic = _("; recompile with -fPIC");
1384 break;
1385 }
1386
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
1389 }
1390 else
1391 {
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1394 }
1395
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1400 else
1401 object = _("a PDE object");
1402
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1407 object, pic);
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
1410 return FALSE;
1411 }
1412
1413 /* With a local symbol, foo, we convert
1414 mov foo@GOTPCREL(%rip), %reg
1415 to
1416 lea foo(%rip), %reg
1417 and convert
1418 call/jmp *foo@GOTPCREL(%rip)
1419 to
1420 nop call foo/jmp foo nop
1421 When PIC is false, convert
1422 test %reg, foo@GOTPCREL(%rip)
1423 to
1424 test $foo, %reg
1425 and convert
1426 binop foo@GOTPCREL(%rip), %reg
1427 to
1428 binop $foo, %reg
1429 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1430 instructions. */
1431
1432 static bfd_boolean
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1434 bfd_byte *contents,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1440 {
1441 struct elf_x86_link_hash_table *htab;
1442 bfd_boolean is_pic;
1443 bfd_boolean no_overflow;
1444 bfd_boolean relocx;
1445 bfd_boolean to_reloc_pc32;
1446 asection *tsec;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1449 unsigned int modrm;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
1453
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1455 return TRUE;
1456
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1459 if (raddend != -4)
1460 return TRUE;
1461
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1464
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1467
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1470
1471 r_symndx = htab->r_sym (irel->r_info);
1472
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1474
1475   /* Convert mov to lea even for R_X86_64_GOTPCREL; that conversion has been supported for a while.  */
1476 if (opcode != 0x8b)
1477 {
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
1481 if (!relocx)
1482 return TRUE;
1483 }
1484
1485 /* We convert only to R_X86_64_PC32:
1486 1. Branch.
1487 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1488 3. no_overflow is true.
1489 4. PIC.
1490 */
1491 to_reloc_pc32 = (opcode == 0xff
1492 || !relocx
1493 || no_overflow
1494 || is_pic);
1495
1496 /* Get the symbol referred to by the reloc. */
1497 if (h == NULL)
1498 {
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1501
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1504 return TRUE;
1505
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1512 else
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1514 }
1515 else
1516 {
1517       /* An undefined weak symbol is only bound locally in an executable,
1518          and its reference is resolved as 0 without relocation
1519          overflow.  We can only perform this optimization for
1520          GOTPCRELX relocations since we need to modify the REX byte.
1521          It is OK to convert mov with R_X86_64_GOTPCREL to
1522          R_X86_64_PC32.  */
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1525
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
1530 && !eh->linker_def
1531 && local_ref))
1532 {
1533 if (opcode == 0xff)
1534 {
1535 /* Skip for branch instructions since R_X86_64_PC32
1536 may overflow. */
1537 if (no_overflow)
1538 return TRUE;
1539 }
1540 else if (relocx)
1541 {
1542 /* For non-branch instructions, we can convert to
1543 R_X86_64_32/R_X86_64_32S since we know if there
1544 is a REX byte. */
1545 to_reloc_pc32 = FALSE;
1546 }
1547
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
1551 return TRUE;
1552
1553 goto convert;
1554 }
1555       /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1556 ld.so may use its link-time address. */
1557 else if (h->start_stop
1558 || eh->linker_def
1559 || ((h->def_regular
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1563 && local_ref))
1564 {
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
1568 on __start_SECNAME/__stop_SECNAME which mark section
1569 SECNAME. */
1570 if (h->start_stop
1571 || eh->linker_def
1572 || (h->def_regular
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1578 {
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1580 if (no_overflow)
1581 return TRUE;
1582 goto convert;
1583 }
1584 tsec = h->root.u.def.section;
1585 }
1586 else
1587 return TRUE;
1588 }
1589
1590 /* Don't convert GOTPCREL relocation against large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1593 return TRUE;
1594
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1596 if (no_overflow)
1597 return TRUE;
1598
1599 convert:
1600 if (opcode == 0xff)
1601 {
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1603 unsigned int nop;
1604 unsigned int disp;
1605 bfd_vma nop_offset;
1606
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1608 R_X86_64_PC32. */
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1610 if (modrm == 0x25)
1611 {
1612 /* Convert to "jmp foo nop". */
1613 modrm = 0xe9;
1614 nop = NOP_OPCODE;
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1619 }
1620 else
1621 {
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
1624
1625 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1626 is a nop prefix. */
1627 modrm = 0xe8;
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1631 {
1632 nop = 0x67;
1633 nop_offset = irel->r_offset - 2;
1634 }
1635 else
1636 {
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1639 {
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1644 }
1645 else
1646 nop_offset = irel->r_offset - 2;
1647 }
1648 }
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1652 }
1653 else
1654 {
1655 unsigned int rex;
1656 unsigned int rex_mask = REX_R;
1657
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1660 else
1661 rex = 0;
1662
1663 if (opcode == 0x8b)
1664 {
1665 if (to_reloc_pc32)
1666 {
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1669 opcode = 0x8d;
1670 r_type = R_X86_64_PC32;
1671 }
1672 else
1673 {
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
1676 opcode = 0xc7;
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
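              /* Move the reg field of the original ModRM byte into the
                 r/m field and set mod to 3 (register-direct), making the
                 target register the destination of the immediate-form
                 instruction.  */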
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1681 {
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1685 }
1686 else
1687 {
1688 /* If the REX_W bit in REX byte isn't needed,
1689 use R_X86_64_32 and clear the W bit to avoid
1690 sign-extend imm32 to imm64. */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1693 rex_mask |= REX_W;
1694 goto rewrite_modrm_rex;
1695 }
1696 }
1697 }
1698 else
1699 {
1700 /* R_X86_64_PC32 isn't supported. */
1701 if (to_reloc_pc32)
1702 return TRUE;
1703
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1705 if (opcode == 0x85)
1706 {
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1710 opcode = 0xf7;
1711 }
1712 else
1713 {
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1717 opcode = 0x81;
1718 }
1719
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1723
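/* A worked example of the shared rewrite below, with illustrative
   bytes: "mov foo@GOTPCREL(%rip), %r8" is 4c 8b 05 <disp32>
   (REX.WR, opcode 8b, rip-relative ModRM 05).  The immediate form
   needs the register in the ModRM r/m field, so the ModRM byte
   becomes c0 and the REX.R bit moves to REX.B, giving
   49 c7 c0 <imm32>, i.e. "mov $foo, %r8" with a sign-extended
   32-bit immediate.  */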
1724 rewrite_modrm_rex:
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1726
1727 if (rex)
1728 {
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1732 }
1733
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1735 irel->r_addend = 0;
1736 }
1737
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
1739 }
1740
1741 *r_type_p = r_type;
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
1744
1745 *converted = TRUE;
1746
1747 return TRUE;
1748 }
1749
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1753
1754 static bfd_boolean
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1756 asection *sec,
1757 const Elf_Internal_Rela *relocs)
1758 {
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1764 asection *sreloc;
1765 bfd_byte *contents;
1766 bfd_boolean converted;
1767
1768 if (bfd_link_relocatable (info))
1769 return TRUE;
1770
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1778 return TRUE;
1779
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1781 if (htab == NULL)
1782 {
1783 sec->check_relocs_failed = 1;
1784 return FALSE;
1785 }
1786
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1788
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1793 {
1794 sec->check_relocs_failed = 1;
1795 return FALSE;
1796 }
1797
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1800
1801 converted = FALSE;
1802
1803 sreloc = NULL;
1804
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1807 {
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1813 const char *name;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1816
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1819
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1821 {
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1824 abfd, r_symndx);
1825 goto error_return;
1826 }
1827
1828 if (r_symndx < symtab_hdr->sh_info)
1829 {
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1832 abfd, r_symndx);
1833 if (isym == NULL)
1834 goto error_return;
1835
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1838 {
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1840 TRUE);
1841 if (h == NULL)
1842 goto error_return;
1843
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1846 isym, NULL);
1847 h->type = STT_GNU_IFUNC;
1848 h->def_regular = 1;
1849 h->ref_regular = 1;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
1852 }
1853 else
1854 h = NULL;
1855 }
1856 else
1857 {
1858 isym = NULL;
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1863 }
1864
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1867 switch (r_type)
1868 {
1869 default:
1870 break;
1871
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1874 case R_X86_64_PC64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1881 {
1882 if (h)
1883 name = h->root.root.string;
1884 else
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1886 NULL);
1887 _bfd_error_handler
1888 /* xgettext:c-format */
1889 (_("%pB: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1893 goto error_return;
1894 }
1895 break;
1896 }
1897
1898 if (h != NULL)
1899 {
1900 /* It is referenced by a non-shared object. */
1901 h->ref_regular = 1;
1902
1903 if (h->type == STT_GNU_IFUNC)
1904 elf_tdata (info->output_bfd)->has_gnu_symbols
1905 |= elf_gnu_symbol_ifunc;
1906 }
1907
1908 converted_reloc = FALSE;
1909 if ((r_type == R_X86_64_GOTPCREL
1910 || r_type == R_X86_64_GOTPCRELX
1911 || r_type == R_X86_64_REX_GOTPCRELX)
1912 && (h == NULL || h->type != STT_GNU_IFUNC))
1913 {
1914 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1915 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1916 irel, h, &converted_reloc,
1917 info))
1918 goto error_return;
1919
1920 if (converted_reloc)
1921 converted = TRUE;
1922 }
1923
1924 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1925 symtab_hdr, sym_hashes,
1926 &r_type, GOT_UNKNOWN,
1927 rel, rel_end, h, r_symndx, FALSE))
1928 goto error_return;
1929
1930 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1931 if (h == htab->elf.hgot)
1932 htab->got_referenced = TRUE;
1933
1934 eh = (struct elf_x86_link_hash_entry *) h;
1935 switch (r_type)
1936 {
1937 case R_X86_64_TLSLD:
1938 htab->tls_ld_or_ldm_got.refcount = 1;
1939 goto create_got;
1940
1941 case R_X86_64_TPOFF32:
1942 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1943 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1944 &x86_64_elf_howto_table[r_type]);
1945 if (eh != NULL)
1946 eh->zero_undefweak &= 0x2;
1947 break;
1948
1949 case R_X86_64_GOTTPOFF:
1950 if (!bfd_link_executable (info))
1951 info->flags |= DF_STATIC_TLS;
1952 /* Fall through */
1953
1954 case R_X86_64_GOT32:
1955 case R_X86_64_GOTPCREL:
1956 case R_X86_64_GOTPCRELX:
1957 case R_X86_64_REX_GOTPCRELX:
1958 case R_X86_64_TLSGD:
1959 case R_X86_64_GOT64:
1960 case R_X86_64_GOTPCREL64:
1961 case R_X86_64_GOTPLT64:
1962 case R_X86_64_GOTPC32_TLSDESC:
1963 case R_X86_64_TLSDESC_CALL:
1964 /* This symbol requires a global offset table entry. */
1965 {
1966 int tls_type, old_tls_type;
1967
1968 switch (r_type)
1969 {
1970 default: tls_type = GOT_NORMAL; break;
1971 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1972 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1973 case R_X86_64_GOTPC32_TLSDESC:
1974 case R_X86_64_TLSDESC_CALL:
1975 tls_type = GOT_TLS_GDESC; break;
1976 }
1977
1978 if (h != NULL)
1979 {
1980 h->got.refcount = 1;
1981 old_tls_type = eh->tls_type;
1982 }
1983 else
1984 {
1985 bfd_signed_vma *local_got_refcounts;
1986
1987 /* This is a global offset table entry for a local symbol. */
1988 local_got_refcounts = elf_local_got_refcounts (abfd);
1989 if (local_got_refcounts == NULL)
1990 {
1991 bfd_size_type size;
1992
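/* A single allocation below holds three per-local-symbol arrays back
   to back: sh_info GOT reference counts (bfd_signed_vma), sh_info
   TLSDESC GOT offsets (bfd_vma) and sh_info GOT TLS-type bytes
   (char); the assignments that follow carve it up accordingly.  */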
1993 size = symtab_hdr->sh_info;
1994 size *= sizeof (bfd_signed_vma)
1995 + sizeof (bfd_vma) + sizeof (char);
1996 local_got_refcounts = ((bfd_signed_vma *)
1997 bfd_zalloc (abfd, size));
1998 if (local_got_refcounts == NULL)
1999 goto error_return;
2000 elf_local_got_refcounts (abfd) = local_got_refcounts;
2001 elf_x86_local_tlsdesc_gotent (abfd)
2002 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2003 elf_x86_local_got_tls_type (abfd)
2004 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2005 }
2006 local_got_refcounts[r_symndx] = 1;
2007 old_tls_type
2008 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2009 }
2010
2011 /* If a TLS symbol is accessed using IE at least once,
2012 there is no point in using a dynamic model for it. */
2013 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2014 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2015 || tls_type != GOT_TLS_IE))
2016 {
2017 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2018 tls_type = old_tls_type;
2019 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2020 && GOT_TLS_GD_ANY_P (tls_type))
2021 tls_type |= old_tls_type;
2022 else
2023 {
2024 if (h)
2025 name = h->root.root.string;
2026 else
2027 name = bfd_elf_sym_name (abfd, symtab_hdr,
2028 isym, NULL);
2029 _bfd_error_handler
2030 /* xgettext:c-format */
2031 (_("%pB: '%s' accessed both as normal and"
2032 " thread local symbol"),
2033 abfd, name);
2034 bfd_set_error (bfd_error_bad_value);
2035 goto error_return;
2036 }
2037 }
2038
2039 if (old_tls_type != tls_type)
2040 {
2041 if (eh != NULL)
2042 eh->tls_type = tls_type;
2043 else
2044 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2045 }
2046 }
2047 /* Fall through */
2048
2049 case R_X86_64_GOTOFF64:
2050 case R_X86_64_GOTPC32:
2051 case R_X86_64_GOTPC64:
2052 create_got:
2053 if (eh != NULL)
2054 eh->zero_undefweak &= 0x2;
2055 break;
2056
2057 case R_X86_64_PLT32:
2058 case R_X86_64_PLT32_BND:
2059 /* This symbol requires a procedure linkage table entry. We
2060 actually build the entry in adjust_dynamic_symbol,
2061 because this might be a case of linking PIC code which is
2062 never referenced by a dynamic object, in which case we
2063 don't need to generate a procedure linkage table entry
2064 after all. */
2065
2066 /* If this is a local symbol, we resolve it directly without
2067 creating a procedure linkage table entry. */
2068 if (h == NULL)
2069 continue;
2070
2071 eh->zero_undefweak &= 0x2;
2072 h->needs_plt = 1;
2073 h->plt.refcount = 1;
2074 break;
2075
2076 case R_X86_64_PLTOFF64:
2077 /* This tries to form the 'address' of a function relative
2078 to GOT. For global symbols we need a PLT entry. */
2079 if (h != NULL)
2080 {
2081 h->needs_plt = 1;
2082 h->plt.refcount = 1;
2083 }
2084 goto create_got;
2085
2086 case R_X86_64_SIZE32:
2087 case R_X86_64_SIZE64:
2088 size_reloc = TRUE;
2089 goto do_size;
2090
2091 case R_X86_64_32:
2092 if (!ABI_64_P (abfd))
2093 goto pointer;
2094 /* Fall through. */
2095 case R_X86_64_8:
2096 case R_X86_64_16:
2097 case R_X86_64_32S:
2098 /* Check relocation overflow as these relocs may lead to
2099 run-time relocation overflow. Don't error out for
2100 sections we don't care about, such as debug sections or
2101 when relocation overflow check is disabled. */
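/* For instance, an R_X86_64_32S in a shared object would need the
   symbol to stay within the +/-2GB range representable by a
   sign-extended 32-bit field, which the dynamic linker cannot
   guarantee; such cases are reported via elf_x86_64_need_pic
   below.  */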
2102 if (!info->no_reloc_overflow_check
2103 && !converted_reloc
2104 && (bfd_link_pic (info)
2105 || (bfd_link_executable (info)
2106 && h != NULL
2107 && !h->def_regular
2108 && h->def_dynamic
2109 && (sec->flags & SEC_READONLY) == 0)))
2110 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2111 &x86_64_elf_howto_table[r_type]);
2112 /* Fall through. */
2113
2114 case R_X86_64_PC8:
2115 case R_X86_64_PC16:
2116 case R_X86_64_PC32:
2117 case R_X86_64_PC32_BND:
2118 case R_X86_64_PC64:
2119 case R_X86_64_64:
2120 pointer:
2121 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2122 eh->zero_undefweak |= 0x2;
2123 /* We are called after all symbols have been resolved. Only
2124 relocations against STT_GNU_IFUNC symbols must go through
2125 the PLT. */
2126 if (h != NULL
2127 && (bfd_link_executable (info)
2128 || h->type == STT_GNU_IFUNC))
2129 {
2130 bfd_boolean func_pointer_ref = FALSE;
2131
2132 if (r_type == R_X86_64_PC32)
2133 {
2134 /* Since something like ".long foo - ." may be used
2135 as pointer, make sure that PLT is used if foo is
2136 a function defined in a shared library. */
2137 if ((sec->flags & SEC_CODE) == 0)
2138 {
2139 h->pointer_equality_needed = 1;
2140 if (bfd_link_pie (info)
2141 && h->type == STT_FUNC
2142 && !h->def_regular
2143 && h->def_dynamic)
2144 {
2145 h->needs_plt = 1;
2146 h->plt.refcount = 1;
2147 }
2148 }
2149 }
2150 else if (r_type != R_X86_64_PC32_BND
2151 && r_type != R_X86_64_PC64)
2152 {
2153 h->pointer_equality_needed = 1;
2154 /* At run-time, R_X86_64_64 can be resolved for both
2155 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2156 can only be resolved for x32. */
2157 if ((sec->flags & SEC_READONLY) == 0
2158 && (r_type == R_X86_64_64
2159 || (!ABI_64_P (abfd)
2160 && (r_type == R_X86_64_32
2161 || r_type == R_X86_64_32S))))
2162 func_pointer_ref = TRUE;
2163 }
2164
2165 if (!func_pointer_ref)
2166 {
2167 /* If this reloc is in a read-only section, we might
2168 need a copy reloc. We can't check reliably at this
2169 stage whether the section is read-only, as input
2170 sections have not yet been mapped to output sections.
2171 Tentatively set the flag for now, and correct in
2172 adjust_dynamic_symbol. */
2173 h->non_got_ref = 1;
2174
2175 /* We may need a .plt entry if the symbol is a function
2176 defined in a shared lib or is a function referenced
2177 from the code or read-only section. */
2178 if (!h->def_regular
2179 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2180 h->plt.refcount = 1;
2181 }
2182 }
2183
2184 size_reloc = FALSE;
2185 do_size:
2186 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2187 htab->pointer_r_type))
2188 {
2189 struct elf_dyn_relocs *p;
2190 struct elf_dyn_relocs **head;
2191
2192 /* We must copy these reloc types into the output file.
2193 Create a reloc section in dynobj and make room for
2194 this reloc. */
2195 if (sreloc == NULL)
2196 {
2197 sreloc = _bfd_elf_make_dynamic_reloc_section
2198 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2199 abfd, /*rela?*/ TRUE);
2200
2201 if (sreloc == NULL)
2202 goto error_return;
2203 }
2204
2205 /* If this is a global symbol, we count the number of
2206 relocations we need for this symbol. */
2207 if (h != NULL)
2208 head = &eh->dyn_relocs;
2209 else
2210 {
2211 /* Track dynamic relocs needed for local syms too.
2212 We really need local syms available to do this
2213 easily. Oh well. */
2214 asection *s;
2215 void **vpp;
2216
2217 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2218 abfd, r_symndx);
2219 if (isym == NULL)
2220 goto error_return;
2221
2222 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2223 if (s == NULL)
2224 s = sec;
2225
2226 /* Beware of type punned pointers vs strict aliasing
2227 rules. */
2228 vpp = &(elf_section_data (s)->local_dynrel);
2229 head = (struct elf_dyn_relocs **)vpp;
2230 }
2231
2232 p = *head;
2233 if (p == NULL || p->sec != sec)
2234 {
2235 bfd_size_type amt = sizeof *p;
2236
2237 p = ((struct elf_dyn_relocs *)
2238 bfd_alloc (htab->elf.dynobj, amt));
2239 if (p == NULL)
2240 goto error_return;
2241 p->next = *head;
2242 *head = p;
2243 p->sec = sec;
2244 p->count = 0;
2245 p->pc_count = 0;
2246 }
2247
2248 p->count += 1;
2249 /* Count size relocation as PC-relative relocation. */
2250 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2251 p->pc_count += 1;
2252 }
2253 break;
2254
2255 /* This relocation describes the C++ object vtable hierarchy.
2256 Reconstruct it for later use during GC. */
2257 case R_X86_64_GNU_VTINHERIT:
2258 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2259 goto error_return;
2260 break;
2261
2262 /* This relocation describes which C++ vtable entries are actually
2263 used. Record for later use during GC. */
2264 case R_X86_64_GNU_VTENTRY:
2265 BFD_ASSERT (h != NULL);
2266 if (h != NULL
2267 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2268 goto error_return;
2269 break;
2270
2271 default:
2272 break;
2273 }
2274 }
2275
2276 if (elf_section_data (sec)->this_hdr.contents != contents)
2277 {
2278 if (!converted && !info->keep_memory)
2279 free (contents);
2280 else
2281 {
2282 /* Cache the section contents for elf_link_input_bfd if any
2283 load is converted or --no-keep-memory isn't used. */
2284 elf_section_data (sec)->this_hdr.contents = contents;
2285 }
2286 }
2287
2288 /* Cache relocations if any load is converted. */
2289 if (elf_section_data (sec)->relocs != relocs && converted)
2290 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2291
2292 return TRUE;
2293
2294 error_return:
2295 if (elf_section_data (sec)->this_hdr.contents != contents)
2296 free (contents);
2297 sec->check_relocs_failed = 1;
2298 return FALSE;
2299 }
2300
2301 /* Return the relocation value for @tpoff relocation
2302 if STT_TLS virtual address is ADDRESS. */
2303
2304 static bfd_vma
2305 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2306 {
2307 struct elf_link_hash_table *htab = elf_hash_table (info);
2308 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2309 bfd_vma static_tls_size;
2310
2311 /* If tls_sec is NULL, we should have signalled an error already. */
2312 if (htab->tls_sec == NULL)
2313 return 0;
2314
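/* x86-64 uses TLS variant II: the thread pointer (%fs base) sits just
   past the static TLS block, so the @tpoff value computed here is a
   negative offset.  With illustrative numbers, tls_sec->vma 0x601000
   and an aligned static TLS size of 0x10 give a variable at 0x601008
   the tpoff 0x601008 - 0x10 - 0x601000 = -8.  */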
2315 /* Consider special static TLS alignment requirements. */
2316 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2317 return address - static_tls_size - htab->tls_sec->vma;
2318 }
2319
2320 /* Relocate an x86_64 ELF section. */
2321
2322 static bfd_boolean
2323 elf_x86_64_relocate_section (bfd *output_bfd,
2324 struct bfd_link_info *info,
2325 bfd *input_bfd,
2326 asection *input_section,
2327 bfd_byte *contents,
2328 Elf_Internal_Rela *relocs,
2329 Elf_Internal_Sym *local_syms,
2330 asection **local_sections)
2331 {
2332 struct elf_x86_link_hash_table *htab;
2333 Elf_Internal_Shdr *symtab_hdr;
2334 struct elf_link_hash_entry **sym_hashes;
2335 bfd_vma *local_got_offsets;
2336 bfd_vma *local_tlsdesc_gotents;
2337 Elf_Internal_Rela *rel;
2338 Elf_Internal_Rela *wrel;
2339 Elf_Internal_Rela *relend;
2340 unsigned int plt_entry_size;
2341
2342 /* Skip if check_relocs failed. */
2343 if (input_section->check_relocs_failed)
2344 return FALSE;
2345
2346 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2347 if (htab == NULL)
2348 return FALSE;
2349
2350 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2351
2352 plt_entry_size = htab->plt.plt_entry_size;
2353 symtab_hdr = &elf_symtab_hdr (input_bfd);
2354 sym_hashes = elf_sym_hashes (input_bfd);
2355 local_got_offsets = elf_local_got_offsets (input_bfd);
2356 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2357
2358 _bfd_x86_elf_set_tls_module_base (info);
2359
2360 rel = wrel = relocs;
2361 relend = relocs + input_section->reloc_count;
2362 for (; rel < relend; wrel++, rel++)
2363 {
2364 unsigned int r_type, r_type_tls;
2365 reloc_howto_type *howto;
2366 unsigned long r_symndx;
2367 struct elf_link_hash_entry *h;
2368 struct elf_x86_link_hash_entry *eh;
2369 Elf_Internal_Sym *sym;
2370 asection *sec;
2371 bfd_vma off, offplt, plt_offset;
2372 bfd_vma relocation;
2373 bfd_boolean unresolved_reloc;
2374 bfd_reloc_status_type r;
2375 int tls_type;
2376 asection *base_got, *resolved_plt;
2377 bfd_vma st_size;
2378 bfd_boolean resolved_to_zero;
2379 bfd_boolean relative_reloc;
2380 bfd_boolean converted_reloc;
2381 bfd_boolean need_copy_reloc_in_pie;
2382
2383 r_type = ELF32_R_TYPE (rel->r_info);
2384 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2385 || r_type == (int) R_X86_64_GNU_VTENTRY)
2386 {
2387 if (wrel != rel)
2388 *wrel = *rel;
2389 continue;
2390 }
2391
2392 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2393 r_type &= ~R_X86_64_converted_reloc_bit;
2394
2395 if (r_type >= (int) R_X86_64_standard)
2396 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2397
2398 if (r_type != (int) R_X86_64_32
2399 || ABI_64_P (output_bfd))
2400 howto = x86_64_elf_howto_table + r_type;
2401 else
2402 howto = (x86_64_elf_howto_table
2403 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2404 r_symndx = htab->r_sym (rel->r_info);
2405 h = NULL;
2406 sym = NULL;
2407 sec = NULL;
2408 unresolved_reloc = FALSE;
2409 if (r_symndx < symtab_hdr->sh_info)
2410 {
2411 sym = local_syms + r_symndx;
2412 sec = local_sections[r_symndx];
2413
2414 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2415 &sec, rel);
2416 st_size = sym->st_size;
2417
2418 /* Relocate against local STT_GNU_IFUNC symbol. */
2419 if (!bfd_link_relocatable (info)
2420 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2421 {
2422 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2423 rel, FALSE);
2424 if (h == NULL)
2425 abort ();
2426
2427 /* Set STT_GNU_IFUNC symbol value. */
2428 h->root.u.def.value = sym->st_value;
2429 h->root.u.def.section = sec;
2430 }
2431 }
2432 else
2433 {
2434 bfd_boolean warned ATTRIBUTE_UNUSED;
2435 bfd_boolean ignored ATTRIBUTE_UNUSED;
2436
2437 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2438 r_symndx, symtab_hdr, sym_hashes,
2439 h, sec, relocation,
2440 unresolved_reloc, warned, ignored);
2441 st_size = h->size;
2442 }
2443
2444 if (sec != NULL && discarded_section (sec))
2445 {
2446 _bfd_clear_contents (howto, input_bfd, input_section,
2447 contents + rel->r_offset);
2448 wrel->r_offset = rel->r_offset;
2449 wrel->r_info = 0;
2450 wrel->r_addend = 0;
2451
2452 /* For ld -r, remove relocations in debug sections against
2453 sections defined in discarded sections. Not done for
2454 eh_frame, whose editing code expects relocs to be present. */
2455 if (bfd_link_relocatable (info)
2456 && (input_section->flags & SEC_DEBUGGING))
2457 wrel--;
2458
2459 continue;
2460 }
2461
2462 if (bfd_link_relocatable (info))
2463 {
2464 if (wrel != rel)
2465 *wrel = *rel;
2466 continue;
2467 }
2468
2469 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2470 {
2471 if (r_type == R_X86_64_64)
2472 {
2473 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2474 zero-extend it to 64bit if addend is zero. */
2475 r_type = R_X86_64_32;
2476 memset (contents + rel->r_offset + 4, 0, 4);
2477 }
2478 else if (r_type == R_X86_64_SIZE64)
2479 {
2480 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2481 zero-extend it to 64bit if addend is zero. */
2482 r_type = R_X86_64_SIZE32;
2483 memset (contents + rel->r_offset + 4, 0, 4);
2484 }
2485 }
2486
2487 eh = (struct elf_x86_link_hash_entry *) h;
2488
2489 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2490 it here if it is defined in a non-shared object. */
2491 if (h != NULL
2492 && h->type == STT_GNU_IFUNC
2493 && h->def_regular)
2494 {
2495 bfd_vma plt_index;
2496 const char *name;
2497
2498 if ((input_section->flags & SEC_ALLOC) == 0)
2499 {
2500 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2501 sections because such sections are not SEC_ALLOC and
2502 thus ld.so will not process them. */
2503 if ((input_section->flags & SEC_DEBUGGING) != 0)
2504 continue;
2505 abort ();
2506 }
2507
2508 switch (r_type)
2509 {
2510 default:
2511 break;
2512
2513 case R_X86_64_GOTPCREL:
2514 case R_X86_64_GOTPCRELX:
2515 case R_X86_64_REX_GOTPCRELX:
2516 case R_X86_64_GOTPCREL64:
2517 base_got = htab->elf.sgot;
2518 off = h->got.offset;
2519
2520 if (base_got == NULL)
2521 abort ();
2522
2523 if (off == (bfd_vma) -1)
2524 {
2525 /* We can't use h->got.offset here to save state, or
2526 even just remember the offset, as finish_dynamic_symbol
2527 would use that as offset into .got. */
2528
2529 if (h->plt.offset == (bfd_vma) -1)
2530 abort ();
2531
2532 if (htab->elf.splt != NULL)
2533 {
2534 plt_index = (h->plt.offset / plt_entry_size
2535 - htab->plt.has_plt0);
2536 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2537 base_got = htab->elf.sgotplt;
2538 }
2539 else
2540 {
2541 plt_index = h->plt.offset / plt_entry_size;
2542 off = plt_index * GOT_ENTRY_SIZE;
2543 base_got = htab->elf.igotplt;
2544 }
2545
2546 if (h->dynindx == -1
2547 || h->forced_local
2548 || info->symbolic)
2549 {
2550 /* This references the local definition. We must
2551 initialize this entry in the global offset table.
2552 Since the offset must always be a multiple of 8,
2553 we use the least significant bit to record
2554 whether we have initialized it already.
2555
2556 When doing a dynamic link, we create a .rela.got
2557 relocation entry to initialize the value. This
2558 is done in the finish_dynamic_symbol routine. */
2559 if ((off & 1) != 0)
2560 off &= ~1;
2561 else
2562 {
2563 bfd_put_64 (output_bfd, relocation,
2564 base_got->contents + off);
2565 /* Note that this is harmless for the GOTPLT64
2566 case, as -1 | 1 still is -1. */
2567 h->got.offset |= 1;
2568 }
2569 }
2570 }
2571
2572 relocation = (base_got->output_section->vma
2573 + base_got->output_offset + off);
2574
2575 goto do_relocation;
2576 }
2577
2578 if (h->plt.offset == (bfd_vma) -1)
2579 {
2580 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2581 if (r_type == htab->pointer_r_type
2582 && (input_section->flags & SEC_CODE) == 0)
2583 goto do_ifunc_pointer;
2584 goto bad_ifunc_reloc;
2585 }
2586
2587 /* STT_GNU_IFUNC symbol must go through PLT. */
2588 if (htab->elf.splt != NULL)
2589 {
2590 if (htab->plt_second != NULL)
2591 {
2592 resolved_plt = htab->plt_second;
2593 plt_offset = eh->plt_second.offset;
2594 }
2595 else
2596 {
2597 resolved_plt = htab->elf.splt;
2598 plt_offset = h->plt.offset;
2599 }
2600 }
2601 else
2602 {
2603 resolved_plt = htab->elf.iplt;
2604 plt_offset = h->plt.offset;
2605 }
2606
2607 relocation = (resolved_plt->output_section->vma
2608 + resolved_plt->output_offset + plt_offset);
2609
2610 switch (r_type)
2611 {
2612 default:
2613 bad_ifunc_reloc:
2614 if (h->root.root.string)
2615 name = h->root.root.string;
2616 else
2617 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2618 NULL);
2619 _bfd_error_handler
2620 /* xgettext:c-format */
2621 (_("%pB: relocation %s against STT_GNU_IFUNC "
2622 "symbol `%s' isn't supported"), input_bfd,
2623 howto->name, name);
2624 bfd_set_error (bfd_error_bad_value);
2625 return FALSE;
2626
2627 case R_X86_64_32S:
2628 if (bfd_link_pic (info))
2629 abort ();
2630 goto do_relocation;
2631
2632 case R_X86_64_32:
2633 if (ABI_64_P (output_bfd))
2634 goto do_relocation;
2635 /* FALLTHROUGH */
2636 case R_X86_64_64:
2637 do_ifunc_pointer:
2638 if (rel->r_addend != 0)
2639 {
2640 if (h->root.root.string)
2641 name = h->root.root.string;
2642 else
2643 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2644 sym, NULL);
2645 _bfd_error_handler
2646 /* xgettext:c-format */
2647 (_("%pB: relocation %s against STT_GNU_IFUNC "
2648 "symbol `%s' has non-zero addend: %Ld"),
2649 input_bfd, howto->name, name, rel->r_addend);
2650 bfd_set_error (bfd_error_bad_value);
2651 return FALSE;
2652 }
2653
2654 /* Generate dynamic relocation only when there is a
2655 non-GOT reference in a shared object or there is no
2656 PLT. */
2657 if ((bfd_link_pic (info) && h->non_got_ref)
2658 || h->plt.offset == (bfd_vma) -1)
2659 {
2660 Elf_Internal_Rela outrel;
2661 asection *sreloc;
2662
2663 /* Need a dynamic relocation to get the real function
2664 address. */
2665 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2666 info,
2667 input_section,
2668 rel->r_offset);
2669 if (outrel.r_offset == (bfd_vma) -1
2670 || outrel.r_offset == (bfd_vma) -2)
2671 abort ();
2672
2673 outrel.r_offset += (input_section->output_section->vma
2674 + input_section->output_offset);
2675
2676 if (POINTER_LOCAL_IFUNC_P (info, h))
2677 {
2678 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2679 h->root.root.string,
2680 h->root.u.def.section->owner);
2681
2682 /* This symbol is resolved locally. */
2683 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2684 outrel.r_addend = (h->root.u.def.value
2685 + h->root.u.def.section->output_section->vma
2686 + h->root.u.def.section->output_offset);
2687 }
2688 else
2689 {
2690 outrel.r_info = htab->r_info (h->dynindx, r_type);
2691 outrel.r_addend = 0;
2692 }
2693
2694 /* Dynamic relocations are stored in
2695 1. .rela.ifunc section in PIC object.
2696 2. .rela.got section in dynamic executable.
2697 3. .rela.iplt section in static executable. */
2698 if (bfd_link_pic (info))
2699 sreloc = htab->elf.irelifunc;
2700 else if (htab->elf.splt != NULL)
2701 sreloc = htab->elf.srelgot;
2702 else
2703 sreloc = htab->elf.irelplt;
2704 elf_append_rela (output_bfd, sreloc, &outrel);
2705
2706 /* If this reloc is against an external symbol, we
2707 do not want to fiddle with the addend. Otherwise,
2708 we need to include the symbol value so that it
2709 becomes an addend for the dynamic reloc. For an
2710 internal symbol, we have updated addend. */
2711 continue;
2712 }
2713 /* FALLTHROUGH */
2714 case R_X86_64_PC32:
2715 case R_X86_64_PC32_BND:
2716 case R_X86_64_PC64:
2717 case R_X86_64_PLT32:
2718 case R_X86_64_PLT32_BND:
2719 goto do_relocation;
2720 }
2721 }
2722
2723 resolved_to_zero = (eh != NULL
2724 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2725
2726 /* When generating a shared object, the relocations handled here are
2727 copied into the output file to be resolved at run time. */
2728 switch (r_type)
2729 {
2730 case R_X86_64_GOT32:
2731 case R_X86_64_GOT64:
2732 /* Relocation is to the entry for this symbol in the global
2733 offset table. */
2734 case R_X86_64_GOTPCREL:
2735 case R_X86_64_GOTPCRELX:
2736 case R_X86_64_REX_GOTPCRELX:
2737 case R_X86_64_GOTPCREL64:
2738 /* Use global offset table entry as symbol value. */
2739 case R_X86_64_GOTPLT64:
2740 /* This is obsolete and treated the same as GOT64. */
2741 base_got = htab->elf.sgot;
2742
2743 if (htab->elf.sgot == NULL)
2744 abort ();
2745
2746 relative_reloc = FALSE;
2747 if (h != NULL)
2748 {
2749 off = h->got.offset;
2750 if (h->needs_plt
2751 && h->plt.offset != (bfd_vma)-1
2752 && off == (bfd_vma)-1)
2753 {
2754 /* We can't use h->got.offset here to save
2755 state, or even just remember the offset, as
2756 finish_dynamic_symbol would use that as offset into
2757 .got. */
2758 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2759 - htab->plt.has_plt0);
2760 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2761 base_got = htab->elf.sgotplt;
2762 }
2763
2764 if (RESOLVED_LOCALLY_P (info, h, htab))
2765 {
2766 /* We must initialize this entry in the global offset
2767 table. Since the offset must always be a multiple
2768 of 8, we use the least significant bit to record
2769 whether we have initialized it already.
2770
2771 When doing a dynamic link, we create a .rela.got
2772 relocation entry to initialize the value. This is
2773 done in the finish_dynamic_symbol routine. */
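/* For example (illustrative offset): a GOT slot recorded as 0x18 is
   still uninitialized; once its value is written, the offset is
   stored back as 0x19, so a later reloc against the same symbol sees
   the low bit set, skips the store, and masks the bit off to recover
   0x18.  */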
2774 if ((off & 1) != 0)
2775 off &= ~1;
2776 else
2777 {
2778 bfd_put_64 (output_bfd, relocation,
2779 base_got->contents + off);
2780 /* Note that this is harmless for the GOTPLT64 case,
2781 as -1 | 1 still is -1. */
2782 h->got.offset |= 1;
2783
2784 if (GENERATE_RELATIVE_RELOC_P (info, h))
2785 {
2786 /* If this symbol isn't dynamic in PIC,
2787 generate R_X86_64_RELATIVE here. */
2788 eh->no_finish_dynamic_symbol = 1;
2789 relative_reloc = TRUE;
2790 }
2791 }
2792 }
2793 else
2794 unresolved_reloc = FALSE;
2795 }
2796 else
2797 {
2798 if (local_got_offsets == NULL)
2799 abort ();
2800
2801 off = local_got_offsets[r_symndx];
2802
2803 /* The offset must always be a multiple of 8. We use
2804 the least significant bit to record whether we have
2805 already generated the necessary reloc. */
2806 if ((off & 1) != 0)
2807 off &= ~1;
2808 else
2809 {
2810 bfd_put_64 (output_bfd, relocation,
2811 base_got->contents + off);
2812 local_got_offsets[r_symndx] |= 1;
2813
2814 if (bfd_link_pic (info))
2815 relative_reloc = TRUE;
2816 }
2817 }
2818
2819 if (relative_reloc)
2820 {
2821 asection *s;
2822 Elf_Internal_Rela outrel;
2823
2824 /* We need to generate a R_X86_64_RELATIVE reloc
2825 for the dynamic linker. */
2826 s = htab->elf.srelgot;
2827 if (s == NULL)
2828 abort ();
2829
2830 outrel.r_offset = (base_got->output_section->vma
2831 + base_got->output_offset
2832 + off);
2833 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2834 outrel.r_addend = relocation;
2835 elf_append_rela (output_bfd, s, &outrel);
2836 }
2837
2838 if (off >= (bfd_vma) -2)
2839 abort ();
2840
2841 relocation = base_got->output_section->vma
2842 + base_got->output_offset + off;
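/* R_X86_64_GOT32/GOT64 (and the obsolete GOTPLT64) express the GOT
   entry relative to the GOT base, i.e. the start of .got.plt, while
   the GOTPCREL flavours are PC-relative to the GOT slot itself, so
   only the former are adjusted by the GOT base here.  */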
2843 if (r_type != R_X86_64_GOTPCREL
2844 && r_type != R_X86_64_GOTPCRELX
2845 && r_type != R_X86_64_REX_GOTPCRELX
2846 && r_type != R_X86_64_GOTPCREL64)
2847 relocation -= htab->elf.sgotplt->output_section->vma
2848 - htab->elf.sgotplt->output_offset;
2849
2850 break;
2851
2852 case R_X86_64_GOTOFF64:
2853 /* Relocation is relative to the start of the global offset
2854 table. */
2855
2856 /* Check to make sure it isn't a protected function or data
2857 symbol for a shared library, since it may not be local when
2858 used as a function address or with a copy relocation. We also
2859 need to make sure that the symbol is referenced locally. */
2860 if (bfd_link_pic (info) && h)
2861 {
2862 if (!h->def_regular)
2863 {
2864 const char *v;
2865
2866 switch (ELF_ST_VISIBILITY (h->other))
2867 {
2868 case STV_HIDDEN:
2869 v = _("hidden symbol");
2870 break;
2871 case STV_INTERNAL:
2872 v = _("internal symbol");
2873 break;
2874 case STV_PROTECTED:
2875 v = _("protected symbol");
2876 break;
2877 default:
2878 v = _("symbol");
2879 break;
2880 }
2881
2882 _bfd_error_handler
2883 /* xgettext:c-format */
2884 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2885 " `%s' can not be used when making a shared object"),
2886 input_bfd, v, h->root.root.string);
2887 bfd_set_error (bfd_error_bad_value);
2888 return FALSE;
2889 }
2890 else if (!bfd_link_executable (info)
2891 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2892 && (h->type == STT_FUNC
2893 || h->type == STT_OBJECT)
2894 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2895 {
2896 _bfd_error_handler
2897 /* xgettext:c-format */
2898 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2899 " `%s' can not be used when making a shared object"),
2900 input_bfd,
2901 h->type == STT_FUNC ? "function" : "data",
2902 h->root.root.string);
2903 bfd_set_error (bfd_error_bad_value);
2904 return FALSE;
2905 }
2906 }
2907
2908 /* Note that sgot is not involved in this
2909 calculation. We always want the start of .got.plt. If we
2910 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2911 permitted by the ABI, we might have to change this
2912 calculation. */
2913 relocation -= htab->elf.sgotplt->output_section->vma
2914 + htab->elf.sgotplt->output_offset;
2915 break;
2916
2917 case R_X86_64_GOTPC32:
2918 case R_X86_64_GOTPC64:
2919 /* Use global offset table as symbol value. */
2920 relocation = htab->elf.sgotplt->output_section->vma
2921 + htab->elf.sgotplt->output_offset;
2922 unresolved_reloc = FALSE;
2923 break;
2924
2925 case R_X86_64_PLTOFF64:
2926 /* Relocation is PLT entry relative to GOT. For local
2927 symbols it's the symbol itself relative to GOT. */
2928 if (h != NULL
2929 /* See PLT32 handling. */
2930 && (h->plt.offset != (bfd_vma) -1
2931 || eh->plt_got.offset != (bfd_vma) -1)
2932 && htab->elf.splt != NULL)
2933 {
2934 if (eh->plt_got.offset != (bfd_vma) -1)
2935 {
2936 /* Use the GOT PLT. */
2937 resolved_plt = htab->plt_got;
2938 plt_offset = eh->plt_got.offset;
2939 }
2940 else if (htab->plt_second != NULL)
2941 {
2942 resolved_plt = htab->plt_second;
2943 plt_offset = eh->plt_second.offset;
2944 }
2945 else
2946 {
2947 resolved_plt = htab->elf.splt;
2948 plt_offset = h->plt.offset;
2949 }
2950
2951 relocation = (resolved_plt->output_section->vma
2952 + resolved_plt->output_offset
2953 + plt_offset);
2954 unresolved_reloc = FALSE;
2955 }
2956
2957 relocation -= htab->elf.sgotplt->output_section->vma
2958 + htab->elf.sgotplt->output_offset;
2959 break;
2960
2961 case R_X86_64_PLT32:
2962 case R_X86_64_PLT32_BND:
2963 /* Relocation is to the entry for this symbol in the
2964 procedure linkage table. */
2965
2966 /* Resolve a PLT32 reloc against a local symbol directly,
2967 without using the procedure linkage table. */
2968 if (h == NULL)
2969 break;
2970
2971 if ((h->plt.offset == (bfd_vma) -1
2972 && eh->plt_got.offset == (bfd_vma) -1)
2973 || htab->elf.splt == NULL)
2974 {
2975 /* We didn't make a PLT entry for this symbol. This
2976 happens when statically linking PIC code, or when
2977 using -Bsymbolic. */
2978 break;
2979 }
2980
2981 use_plt:
2982 if (h->plt.offset != (bfd_vma) -1)
2983 {
2984 if (htab->plt_second != NULL)
2985 {
2986 resolved_plt = htab->plt_second;
2987 plt_offset = eh->plt_second.offset;
2988 }
2989 else
2990 {
2991 resolved_plt = htab->elf.splt;
2992 plt_offset = h->plt.offset;
2993 }
2994 }
2995 else
2996 {
2997 /* Use the GOT PLT. */
2998 resolved_plt = htab->plt_got;
2999 plt_offset = eh->plt_got.offset;
3000 }
3001
3002 relocation = (resolved_plt->output_section->vma
3003 + resolved_plt->output_offset
3004 + plt_offset);
3005 unresolved_reloc = FALSE;
3006 break;
3007
3008 case R_X86_64_SIZE32:
3009 case R_X86_64_SIZE64:
3010 /* Set to symbol size. */
3011 relocation = st_size;
3012 goto direct;
3013
3014 case R_X86_64_PC8:
3015 case R_X86_64_PC16:
3016 case R_X86_64_PC32:
3017 case R_X86_64_PC32_BND:
3018 /* Don't complain about -fPIC if the symbol is undefined when
3019 building an executable, unless it is an unresolved weak symbol,
3020 references a dynamic definition in a PIE, or -z nocopyreloc
3021 is used. */
3022 if ((input_section->flags & SEC_ALLOC) != 0
3023 && (input_section->flags & SEC_READONLY) != 0
3024 && h != NULL
3025 && ((bfd_link_executable (info)
3026 && ((h->root.type == bfd_link_hash_undefweak
3027 && !resolved_to_zero)
3028 || (bfd_link_pie (info)
3029 && !h->def_regular
3030 && h->def_dynamic)
3031 || ((info->nocopyreloc
3032 || (eh->def_protected
3033 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3034 && h->def_dynamic
3035 && !(h->root.u.def.section->flags & SEC_CODE))))
3036 || bfd_link_dll (info)))
3037 {
3038 bfd_boolean fail = FALSE;
3039 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3040 {
3041 /* Symbol is referenced locally. Make sure it is
3042 defined locally. */
3043 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3044 }
3045 else if (!(bfd_link_pie (info)
3046 && (h->needs_copy || eh->needs_copy)))
3047 {
3048 /* Symbol doesn't need copy reloc and isn't referenced
3049 locally. Address of protected function may not be
3050 reachable at run-time. */
3051 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3052 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3053 && h->type == STT_FUNC));
3054 }
3055
3056 if (fail)
3057 return elf_x86_64_need_pic (info, input_bfd, input_section,
3058 h, NULL, NULL, howto);
3059 }
3060 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3061 as function address. */
3062 else if (h != NULL
3063 && (input_section->flags & SEC_CODE) == 0
3064 && bfd_link_pie (info)
3065 && h->type == STT_FUNC
3066 && !h->def_regular
3067 && h->def_dynamic)
3068 goto use_plt;
3069 /* Fall through. */
3070
3071 case R_X86_64_8:
3072 case R_X86_64_16:
3073 case R_X86_64_32:
3074 case R_X86_64_PC64:
3075 case R_X86_64_64:
3076 /* FIXME: The ABI says the linker should make sure the value is
3077 the same when it's zero-extended to 64 bits. */
3078
3079 direct:
3080 if ((input_section->flags & SEC_ALLOC) == 0)
3081 break;
3082
3083 need_copy_reloc_in_pie = (bfd_link_pie (info)
3084 && h != NULL
3085 && (h->needs_copy
3086 || eh->needs_copy
3087 || (h->root.type
3088 == bfd_link_hash_undefined))
3089 && (X86_PCREL_TYPE_P (r_type)
3090 || X86_SIZE_TYPE_P (r_type)));
3091
3092 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3093 need_copy_reloc_in_pie,
3094 resolved_to_zero, FALSE))
3095 {
3096 Elf_Internal_Rela outrel;
3097 bfd_boolean skip, relocate;
3098 asection *sreloc;
3099
3100 /* When generating a shared object, these relocations
3101 are copied into the output file to be resolved at run
3102 time. */
3103 skip = FALSE;
3104 relocate = FALSE;
3105
3106 outrel.r_offset =
3107 _bfd_elf_section_offset (output_bfd, info, input_section,
3108 rel->r_offset);
3109 if (outrel.r_offset == (bfd_vma) -1)
3110 skip = TRUE;
3111 else if (outrel.r_offset == (bfd_vma) -2)
3112 skip = TRUE, relocate = TRUE;
3113
3114 outrel.r_offset += (input_section->output_section->vma
3115 + input_section->output_offset);
3116
3117 if (skip)
3118 memset (&outrel, 0, sizeof outrel);
3119
3120 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3121 {
3122 outrel.r_info = htab->r_info (h->dynindx, r_type);
3123 outrel.r_addend = rel->r_addend;
3124 }
3125 else
3126 {
3127 /* This symbol is local, or marked to become local.
3128 When relocation overflow check is disabled, we
3129 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3130 if (r_type == htab->pointer_r_type
3131 || (r_type == R_X86_64_32
3132 && info->no_reloc_overflow_check))
3133 {
3134 relocate = TRUE;
3135 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3136 outrel.r_addend = relocation + rel->r_addend;
3137 }
3138 else if (r_type == R_X86_64_64
3139 && !ABI_64_P (output_bfd))
3140 {
3141 relocate = TRUE;
3142 outrel.r_info = htab->r_info (0,
3143 R_X86_64_RELATIVE64);
3144 outrel.r_addend = relocation + rel->r_addend;
3145 /* Check addend overflow. */
3146 if ((outrel.r_addend & 0x80000000)
3147 != (rel->r_addend & 0x80000000))
3148 {
3149 const char *name;
3150 int addend = rel->r_addend;
3151 if (h && h->root.root.string)
3152 name = h->root.root.string;
3153 else
3154 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3155 sym, NULL);
3156 _bfd_error_handler
3157 /* xgettext:c-format */
3158 (_("%pB: addend %s%#x in relocation %s against "
3159 "symbol `%s' at %#Lx in section `%pA' is "
3160 "out of range"),
3161 input_bfd, addend < 0 ? "-" : "", addend,
3162 howto->name, name, rel->r_offset, input_section);
3163 bfd_set_error (bfd_error_bad_value);
3164 return FALSE;
3165 }
3166 }
3167 else
3168 {
3169 long sindx;
3170
3171 if (bfd_is_abs_section (sec))
3172 sindx = 0;
3173 else if (sec == NULL || sec->owner == NULL)
3174 {
3175 bfd_set_error (bfd_error_bad_value);
3176 return FALSE;
3177 }
3178 else
3179 {
3180 asection *osec;
3181
3182 /* We are turning this relocation into one
3183 against a section symbol. It would be
3184 proper to subtract the symbol's value,
3185 osec->vma, from the emitted reloc addend,
3186 but ld.so expects buggy relocs. */
3187 osec = sec->output_section;
3188 sindx = elf_section_data (osec)->dynindx;
3189 if (sindx == 0)
3190 {
3191 asection *oi = htab->elf.text_index_section;
3192 sindx = elf_section_data (oi)->dynindx;
3193 }
3194 BFD_ASSERT (sindx != 0);
3195 }
3196
3197 outrel.r_info = htab->r_info (sindx, r_type);
3198 outrel.r_addend = relocation + rel->r_addend;
3199 }
3200 }
3201
3202 sreloc = elf_section_data (input_section)->sreloc;
3203
3204 if (sreloc == NULL || sreloc->contents == NULL)
3205 {
3206 r = bfd_reloc_notsupported;
3207 goto check_relocation_error;
3208 }
3209
3210 elf_append_rela (output_bfd, sreloc, &outrel);
3211
3212 /* If this reloc is against an external symbol, we do
3213 not want to fiddle with the addend. Otherwise, we
3214 need to include the symbol value so that it becomes
3215 an addend for the dynamic reloc. */
3216 if (! relocate)
3217 continue;
3218 }
3219
3220 break;
3221
3222 case R_X86_64_TLSGD:
3223 case R_X86_64_GOTPC32_TLSDESC:
3224 case R_X86_64_TLSDESC_CALL:
3225 case R_X86_64_GOTTPOFF:
3226 tls_type = GOT_UNKNOWN;
3227 if (h == NULL && local_got_offsets)
3228 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3229 else if (h != NULL)
3230 tls_type = elf_x86_hash_entry (h)->tls_type;
3231
3232 r_type_tls = r_type;
3233 if (! elf_x86_64_tls_transition (info, input_bfd,
3234 input_section, contents,
3235 symtab_hdr, sym_hashes,
3236 &r_type_tls, tls_type, rel,
3237 relend, h, r_symndx, TRUE))
3238 return FALSE;
3239
3240 if (r_type_tls == R_X86_64_TPOFF32)
3241 {
3242 bfd_vma roff = rel->r_offset;
3243
3244 BFD_ASSERT (! unresolved_reloc);
3245
3246 if (r_type == R_X86_64_TLSGD)
3247 {
3248 /* GD->LE transition. For 64bit, change
3249 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3250 .word 0x6666; rex64; call __tls_get_addr@PLT
3251 or
3252 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3253 .byte 0x66; rex64
3254 call *__tls_get_addr@GOTPCREL(%rip)
3255 which may be converted to
3256 addr32 call __tls_get_addr
3257 into:
3258 movq %fs:0, %rax
3259 leaq foo@tpoff(%rax), %rax
3260 For 32bit, change
3261 leaq foo@tlsgd(%rip), %rdi
3262 .word 0x6666; rex64; call __tls_get_addr@PLT
3263 or
3264 leaq foo@tlsgd(%rip), %rdi
3265 .byte 0x66; rex64
3266 call *__tls_get_addr@GOTPCREL(%rip)
3267 which may be converted to
3268 addr32 call __tls_get_addr
3269 into:
3270 movl %fs:0, %eax
3271 leaq foo@tpoff(%rax), %rax
3272 For largepic, change:
3273 leaq foo@tlsgd(%rip), %rdi
3274 movabsq $__tls_get_addr@pltoff, %rax
3275 addq %r15, %rax
3276 call *%rax
3277 into:
3278 movq %fs:0, %rax
3279 leaq foo@tpoff(%rax), %rax
3280 nopw 0x0(%rax,%rax,1) */
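/* The 0xb8 check below identifies the large-PIC sequence: the
   instruction following the leaq starts right after the 4-byte
   displacement, at roff + 4, and "movabsq $__tls_get_addr@pltoff,
   %rax" encodes as 48 b8 <imm64>, putting 0xb8 at roff + 5.  */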
3281 int largepic = 0;
3282 if (ABI_64_P (output_bfd))
3283 {
3284 if (contents[roff + 5] == 0xb8)
3285 {
3286 memcpy (contents + roff - 3,
3287 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3288 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3289 largepic = 1;
3290 }
3291 else
3292 memcpy (contents + roff - 4,
3293 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3294 16);
3295 }
3296 else
3297 memcpy (contents + roff - 3,
3298 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3299 15);
3300 bfd_put_32 (output_bfd,
3301 elf_x86_64_tpoff (info, relocation),
3302 contents + roff + 8 + largepic);
3303 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3304 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3305 rel++;
3306 wrel++;
3307 continue;
3308 }
3309 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3310 {
3311 /* GDesc -> LE transition.
3312 It's originally something like:
3313 leaq x@tlsdesc(%rip), %rax
3314
3315 Change it to:
3316 movl $x@tpoff, %rax. */
3317
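/* In bytes (illustrative): "leaq x@tlsdesc(%rip), %rax" is
   48 8d 05 <disp32>; the stores below turn it into 48 c7 c0 <imm32>,
   a REX.W mov of a sign-extended 32-bit immediate, carrying any
   REX.R bit of the original over to REX.B and moving the ModRM reg
   field into r/m for destinations other than %rax.  */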
3318 unsigned int val, type;
3319
3320 type = bfd_get_8 (input_bfd, contents + roff - 3);
3321 val = bfd_get_8 (input_bfd, contents + roff - 1);
3322 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3323 contents + roff - 3);
3324 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3325 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3326 contents + roff - 1);
3327 bfd_put_32 (output_bfd,
3328 elf_x86_64_tpoff (info, relocation),
3329 contents + roff);
3330 continue;
3331 }
3332 else if (r_type == R_X86_64_TLSDESC_CALL)
3333 {
3334 /* GDesc -> LE transition.
3335 It's originally:
3336 call *(%rax)
3337 Turn it into:
3338 xchg %ax,%ax. */
3339 bfd_put_8 (output_bfd, 0x66, contents + roff);
3340 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3341 continue;
3342 }
3343 else if (r_type == R_X86_64_GOTTPOFF)
3344 {
3345 /* IE->LE transition:
3346 For 64bit, originally it can be one of:
3347 movq foo@gottpoff(%rip), %reg
3348 addq foo@gottpoff(%rip), %reg
3349 We change it into:
3350 movq $foo, %reg
3351 leaq foo(%reg), %reg
3352 addq $foo, %reg.
3353 For 32bit, originally it can be one of:
3354 movq foo@gottpoff(%rip), %reg
3355 addl foo@gottpoff(%rip), %reg
3356 We change it into:
3357 movq $foo, %reg
3358 leal foo(%reg), %reg
3359 addl $foo, %reg. */
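/* A concrete illustration: "movq foo@gottpoff(%rip), %r12" is
   4c 8b 25 <disp32> (val 0x4c, type 0x8b, ModRM reg field 4); the
   stores below produce 49 c7 c4 <imm32>, i.e. "movq $foo, %r12".  */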
3360
3361 unsigned int val, type, reg;
3362
3363 if (roff >= 3)
3364 val = bfd_get_8 (input_bfd, contents + roff - 3);
3365 else
3366 val = 0;
3367 type = bfd_get_8 (input_bfd, contents + roff - 2);
3368 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3369 reg >>= 3;
3370 if (type == 0x8b)
3371 {
3372 /* movq */
3373 if (val == 0x4c)
3374 bfd_put_8 (output_bfd, 0x49,
3375 contents + roff - 3);
3376 else if (!ABI_64_P (output_bfd) && val == 0x44)
3377 bfd_put_8 (output_bfd, 0x41,
3378 contents + roff - 3);
3379 bfd_put_8 (output_bfd, 0xc7,
3380 contents + roff - 2);
3381 bfd_put_8 (output_bfd, 0xc0 | reg,
3382 contents + roff - 1);
3383 }
3384 else if (reg == 4)
3385 {
3386 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3387 is special */
3388 if (val == 0x4c)
3389 bfd_put_8 (output_bfd, 0x49,
3390 contents + roff - 3);
3391 else if (!ABI_64_P (output_bfd) && val == 0x44)
3392 bfd_put_8 (output_bfd, 0x41,
3393 contents + roff - 3);
3394 bfd_put_8 (output_bfd, 0x81,
3395 contents + roff - 2);
3396 bfd_put_8 (output_bfd, 0xc0 | reg,
3397 contents + roff - 1);
3398 }
3399 else
3400 {
3401 /* addq/addl -> leaq/leal */
3402 if (val == 0x4c)
3403 bfd_put_8 (output_bfd, 0x4d,
3404 contents + roff - 3);
3405 else if (!ABI_64_P (output_bfd) && val == 0x44)
3406 bfd_put_8 (output_bfd, 0x45,
3407 contents + roff - 3);
3408 bfd_put_8 (output_bfd, 0x8d,
3409 contents + roff - 2);
3410 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3411 contents + roff - 1);
3412 }
3413 bfd_put_32 (output_bfd,
3414 elf_x86_64_tpoff (info, relocation),
3415 contents + roff);
3416 continue;
3417 }
3418 else
3419 BFD_ASSERT (FALSE);
3420 }
3421
3422 if (htab->elf.sgot == NULL)
3423 abort ();
3424
3425 if (h != NULL)
3426 {
3427 off = h->got.offset;
3428 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3429 }
3430 else
3431 {
3432 if (local_got_offsets == NULL)
3433 abort ();
3434
3435 off = local_got_offsets[r_symndx];
3436 offplt = local_tlsdesc_gotents[r_symndx];
3437 }
3438
3439 if ((off & 1) != 0)
3440 off &= ~1;
3441 else
3442 {
3443 Elf_Internal_Rela outrel;
3444 int dr_type, indx;
3445 asection *sreloc;
3446
3447 if (htab->elf.srelgot == NULL)
3448 abort ();
3449
3450 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3451
3452 if (GOT_TLS_GDESC_P (tls_type))
3453 {
3454 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3455 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3456 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3457 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3458 + htab->elf.sgotplt->output_offset
3459 + offplt
3460 + htab->sgotplt_jump_table_size);
3461 sreloc = htab->elf.srelplt;
3462 if (indx == 0)
3463 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3464 else
3465 outrel.r_addend = 0;
3466 elf_append_rela (output_bfd, sreloc, &outrel);
3467 }
3468
3469 sreloc = htab->elf.srelgot;
3470
3471 outrel.r_offset = (htab->elf.sgot->output_section->vma
3472 + htab->elf.sgot->output_offset + off);
3473
3474 if (GOT_TLS_GD_P (tls_type))
3475 dr_type = R_X86_64_DTPMOD64;
3476 else if (GOT_TLS_GDESC_P (tls_type))
3477 goto dr_done;
3478 else
3479 dr_type = R_X86_64_TPOFF64;
3480
3481 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3482 outrel.r_addend = 0;
3483 if ((dr_type == R_X86_64_TPOFF64
3484 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3485 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3486 outrel.r_info = htab->r_info (indx, dr_type);
3487
3488 elf_append_rela (output_bfd, sreloc, &outrel);
3489
3490 if (GOT_TLS_GD_P (tls_type))
3491 {
3492 if (indx == 0)
3493 {
3494 BFD_ASSERT (! unresolved_reloc);
3495 bfd_put_64 (output_bfd,
3496 relocation - _bfd_x86_elf_dtpoff_base (info),
3497 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3498 }
3499 else
3500 {
3501 bfd_put_64 (output_bfd, 0,
3502 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3503 outrel.r_info = htab->r_info (indx,
3504 R_X86_64_DTPOFF64);
3505 outrel.r_offset += GOT_ENTRY_SIZE;
3506 elf_append_rela (output_bfd, sreloc,
3507 &outrel);
3508 }
3509 }
3510
3511 dr_done:
3512 if (h != NULL)
3513 h->got.offset |= 1;
3514 else
3515 local_got_offsets[r_symndx] |= 1;
3516 }
3517
3518 if (off >= (bfd_vma) -2
3519 && ! GOT_TLS_GDESC_P (tls_type))
3520 abort ();
3521 if (r_type_tls == r_type)
3522 {
3523 if (r_type == R_X86_64_GOTPC32_TLSDESC
3524 || r_type == R_X86_64_TLSDESC_CALL)
3525 relocation = htab->elf.sgotplt->output_section->vma
3526 + htab->elf.sgotplt->output_offset
3527 + offplt + htab->sgotplt_jump_table_size;
3528 else
3529 relocation = htab->elf.sgot->output_section->vma
3530 + htab->elf.sgot->output_offset + off;
3531 unresolved_reloc = FALSE;
3532 }
3533 else
3534 {
3535 bfd_vma roff = rel->r_offset;
3536
3537 if (r_type == R_X86_64_TLSGD)
3538 {
3539 /* GD->IE transition. For 64bit, change
3540 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3541 .word 0x6666; rex64; call __tls_get_addr@PLT
3542 or
3543 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3544 .byte 0x66; rex64
3545 call *__tls_get_addr@GOTPCREL(%rip)
3546 which may be converted to
3547 addr32 call __tls_get_addr
3548 into:
3549 movq %fs:0, %rax
3550 addq foo@gottpoff(%rip), %rax
3551 For 32bit, change
3552 leaq foo@tlsgd(%rip), %rdi
3553 .word 0x6666; rex64; call __tls_get_addr@PLT
3554 or
3555 leaq foo@tlsgd(%rip), %rdi
3556 .byte 0x66; rex64
3557 call *__tls_get_addr@GOTPCREL(%rip)
3558 which may be converted to
3559 addr32 call __tls_get_addr
3560 into:
3561 movl %fs:0, %eax
3562 addq foo@gottpoff(%rip), %rax
3563 For largepic, change:
3564 leaq foo@tlsgd(%rip), %rdi
3565 movabsq $__tls_get_addr@pltoff, %rax
3566 addq %r15, %rax
3567 call *%rax
3568 into:
3569 movq %fs:0, %rax
3570 addq foo@gottpoff(%rax), %rax
3571 nopw 0x0(%rax,%rax,1) */
3572 int largepic = 0;
3573 if (ABI_64_P (output_bfd))
3574 {
3575 if (contents[roff + 5] == 0xb8)
3576 {
3577 memcpy (contents + roff - 3,
3578 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3579 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3580 largepic = 1;
3581 }
3582 else
3583 memcpy (contents + roff - 4,
3584 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3585 16);
3586 }
3587 else
3588 memcpy (contents + roff - 3,
3589 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3590 15);
3591
3592 relocation = (htab->elf.sgot->output_section->vma
3593 + htab->elf.sgot->output_offset + off
3594 - roff
3595 - largepic
3596 - input_section->output_section->vma
3597 - input_section->output_offset
3598 - 12);
3599 bfd_put_32 (output_bfd, relocation,
3600 contents + roff + 8 + largepic);
3601 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3602 rel++;
3603 wrel++;
3604 continue;
3605 }
3606 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3607 {
3608 /* GDesc -> IE transition.
3609 It's originally something like:
3610 leaq x@tlsdesc(%rip), %rax
3611
3612 Change it to:
3613 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3614
3615 /* Now modify the instruction as appropriate. To
3616 turn a leaq into a movq in the form we use it, it
3617 suffices to change the second byte from 0x8d to
3618 0x8b. */
3619 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3620
3621 bfd_put_32 (output_bfd,
3622 htab->elf.sgot->output_section->vma
3623 + htab->elf.sgot->output_offset + off
3624 - rel->r_offset
3625 - input_section->output_section->vma
3626 - input_section->output_offset
3627 - 4,
3628 contents + roff);
3629 continue;
3630 }
3631 else if (r_type == R_X86_64_TLSDESC_CALL)
3632 {
3633 /* GDesc -> IE transition.
3634 It's originally:
3635 call *(%rax)
3636
3637 Change it to:
3638 xchg %ax, %ax. */
3639
3640 bfd_put_8 (output_bfd, 0x66, contents + roff);
3641 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3642 continue;
3643 }
3644 else
3645 BFD_ASSERT (FALSE);
3646 }
3647 break;
3648
3649 case R_X86_64_TLSLD:
3650 if (! elf_x86_64_tls_transition (info, input_bfd,
3651 input_section, contents,
3652 symtab_hdr, sym_hashes,
3653 &r_type, GOT_UNKNOWN, rel,
3654 relend, h, r_symndx, TRUE))
3655 return FALSE;
3656
3657 if (r_type != R_X86_64_TLSLD)
3658 {
3659 /* LD->LE transition:
3660 leaq foo@tlsld(%rip), %rdi
3661 call __tls_get_addr@PLT
3662 For 64bit, we change it into:
3663 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3664 For 32bit, we change it into:
3665 nopl 0x0(%rax); movl %fs:0, %eax
3666 Or
3667 leaq foo@tlsld(%rip), %rdi;
3668 call *__tls_get_addr@GOTPCREL(%rip)
3669 which may be converted to
3670 addr32 call __tls_get_addr
3671 For 64bit, we change it into:
3672 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3673 For 32bit, we change it into:
3674 nopw 0x0(%rax); movl %fs:0, %eax
3675 For largepic, change:
3676 leaq foo@tlsgd(%rip), %rdi
3677 movabsq $__tls_get_addr@pltoff, %rax
3678 addq %rbx, %rax
3679 call *%rax
3680 into
3681 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3682 movq %fs:0, %rax */
3683
3684 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3685 if (ABI_64_P (output_bfd))
3686 {
3687 if (contents[rel->r_offset + 5] == 0xb8)
3688 memcpy (contents + rel->r_offset - 3,
3689 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3690 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3691 else if (contents[rel->r_offset + 4] == 0xff
3692 || contents[rel->r_offset + 4] == 0x67)
3693 memcpy (contents + rel->r_offset - 3,
3694 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3695 13);
3696 else
3697 memcpy (contents + rel->r_offset - 3,
3698 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3699 }
3700 else
3701 {
3702 if (contents[rel->r_offset + 4] == 0xff)
3703 memcpy (contents + rel->r_offset - 3,
3704 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3705 13);
3706 else
3707 memcpy (contents + rel->r_offset - 3,
3708 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3709 }
3710 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3711 and R_X86_64_PLTOFF64. */
3712 rel++;
3713 wrel++;
3714 continue;
3715 }
3716
3717 if (htab->elf.sgot == NULL)
3718 abort ();
3719
3720 off = htab->tls_ld_or_ldm_got.offset;
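/* The low bit of tls_ld_or_ldm_got.offset records whether the shared
   LD/LDM GOT entry and its R_X86_64_DTPMOD64 relocation have already
   been emitted; the entry itself lives at the offset with that bit
   masked off.  */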
3721 if (off & 1)
3722 off &= ~1;
3723 else
3724 {
3725 Elf_Internal_Rela outrel;
3726
3727 if (htab->elf.srelgot == NULL)
3728 abort ();
3729
3730 outrel.r_offset = (htab->elf.sgot->output_section->vma
3731 + htab->elf.sgot->output_offset + off);
3732
3733 bfd_put_64 (output_bfd, 0,
3734 htab->elf.sgot->contents + off);
3735 bfd_put_64 (output_bfd, 0,
3736 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3737 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3738 outrel.r_addend = 0;
3739 elf_append_rela (output_bfd, htab->elf.srelgot,
3740 &outrel);
3741 htab->tls_ld_or_ldm_got.offset |= 1;
3742 }
3743 relocation = htab->elf.sgot->output_section->vma
3744 + htab->elf.sgot->output_offset + off;
3745 unresolved_reloc = FALSE;
3746 break;
3747
3748 case R_X86_64_DTPOFF32:
3749 if (!bfd_link_executable (info)
3750 || (input_section->flags & SEC_CODE) == 0)
3751 relocation -= _bfd_x86_elf_dtpoff_base (info);
3752 else
3753 relocation = elf_x86_64_tpoff (info, relocation);
3754 break;
3755
3756 case R_X86_64_TPOFF32:
3757 case R_X86_64_TPOFF64:
3758 BFD_ASSERT (bfd_link_executable (info));
3759 relocation = elf_x86_64_tpoff (info, relocation);
3760 break;
3761
3762 case R_X86_64_DTPOFF64:
3763 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3764 relocation -= _bfd_x86_elf_dtpoff_base (info);
3765 break;
3766
3767 default:
3768 break;
3769 }
3770
3771 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3772 because such sections are not SEC_ALLOC and thus ld.so will
3773 not process them. */
3774 if (unresolved_reloc
3775 && !((input_section->flags & SEC_DEBUGGING) != 0
3776 && h->def_dynamic)
3777 && _bfd_elf_section_offset (output_bfd, info, input_section,
3778 rel->r_offset) != (bfd_vma) -1)
3779 {
3780 switch (r_type)
3781 {
3782 case R_X86_64_32S:
3783 sec = h->root.u.def.section;
3784 if ((info->nocopyreloc
3785 || (eh->def_protected
3786 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3787 && !(h->root.u.def.section->flags & SEC_CODE))
3788 return elf_x86_64_need_pic (info, input_bfd, input_section,
3789 h, NULL, NULL, howto);
3790 /* Fall through. */
3791
3792 default:
3793 _bfd_error_handler
3794 /* xgettext:c-format */
3795 (_("%pB(%pA+%#Lx): unresolvable %s relocation against symbol `%s'"),
3796 input_bfd,
3797 input_section,
3798 rel->r_offset,
3799 howto->name,
3800 h->root.root.string);
3801 return FALSE;
3802 }
3803 }
3804
3805 do_relocation:
3806 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3807 contents, rel->r_offset,
3808 relocation, rel->r_addend);
3809
3810 check_relocation_error:
3811 if (r != bfd_reloc_ok)
3812 {
3813 const char *name;
3814
3815 if (h != NULL)
3816 name = h->root.root.string;
3817 else
3818 {
3819 name = bfd_elf_string_from_elf_section (input_bfd,
3820 symtab_hdr->sh_link,
3821 sym->st_name);
3822 if (name == NULL)
3823 return FALSE;
3824 if (*name == '\0')
3825 name = bfd_section_name (input_bfd, sec);
3826 }
3827
3828 if (r == bfd_reloc_overflow)
3829 {
3830 if (converted_reloc)
3831 {
3832 info->callbacks->einfo
3833 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3834 return FALSE;
3835 }
3836 (*info->callbacks->reloc_overflow)
3837 (info, (h ? &h->root : NULL), name, howto->name,
3838 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3839 }
3840 else
3841 {
3842 _bfd_error_handler
3843 /* xgettext:c-format */
3844 (_("%pB(%pA+%#Lx): reloc against `%s': error %d"),
3845 input_bfd, input_section,
3846 rel->r_offset, name, (int) r);
3847 return FALSE;
3848 }
3849 }
3850
3851 if (wrel != rel)
3852 *wrel = *rel;
3853 }
3854
3855 if (wrel != rel)
3856 {
3857 Elf_Internal_Shdr *rel_hdr;
3858 size_t deleted = rel - wrel;
3859
3860 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3861 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3862 if (rel_hdr->sh_size == 0)
3863 {
3864 /* It is too late to remove an empty reloc section. Leave
3865 one NONE reloc.
3866 ??? What is wrong with an empty section??? */
3867 rel_hdr->sh_size = rel_hdr->sh_entsize;
3868 deleted -= 1;
3869 }
3870 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3871 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3872 input_section->reloc_count -= deleted;
3873 }
3874
3875 return TRUE;
3876 }
3877
3878 /* Finish up dynamic symbol handling. We set the contents of various
3879 dynamic sections here. */
3880
3881 static bfd_boolean
3882 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3883 struct bfd_link_info *info,
3884 struct elf_link_hash_entry *h,
3885 Elf_Internal_Sym *sym)
3886 {
3887 struct elf_x86_link_hash_table *htab;
3888 bfd_boolean use_plt_second;
3889 struct elf_x86_link_hash_entry *eh;
3890 bfd_boolean local_undefweak;
3891
3892 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3893 if (htab == NULL)
3894 return FALSE;
3895
3896 /* Use the second PLT section only if there is a .plt section. */
3897 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3898
3899 eh = (struct elf_x86_link_hash_entry *) h;
3900 if (eh->no_finish_dynamic_symbol)
3901 abort ();
3902
3903 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3904 resolved undefined weak symbols in an executable so that their
3905 references have value 0 at run-time. */
3906 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3907
3908 if (h->plt.offset != (bfd_vma) -1)
3909 {
3910 bfd_vma plt_index;
3911 bfd_vma got_offset, plt_offset;
3912 Elf_Internal_Rela rela;
3913 bfd_byte *loc;
3914 asection *plt, *gotplt, *relplt, *resolved_plt;
3915 const struct elf_backend_data *bed;
3916 bfd_vma plt_got_pcrel_offset;
3917
3918 /* When building a static executable, use .iplt, .igot.plt and
3919 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3920 if (htab->elf.splt != NULL)
3921 {
3922 plt = htab->elf.splt;
3923 gotplt = htab->elf.sgotplt;
3924 relplt = htab->elf.srelplt;
3925 }
3926 else
3927 {
3928 plt = htab->elf.iplt;
3929 gotplt = htab->elf.igotplt;
3930 relplt = htab->elf.irelplt;
3931 }
3932
3933 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3934
3935 /* Get the index in the procedure linkage table which
3936 corresponds to this symbol. This is the index of this symbol
3937 in all the symbols for which we are making plt entries. The
3938 first entry in the procedure linkage table is reserved.
3939
3940 Get the offset into the .got table of the entry that
3941 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3942 bytes. The first three are reserved for the dynamic linker.
3943
3944 For static executables, we don't reserve anything. */
3945
3946 if (plt == htab->elf.splt)
3947 {
3948 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3949 - htab->plt.has_plt0);
3950 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3951 }
3952 else
3953 {
3954 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3955 got_offset = got_offset * GOT_ENTRY_SIZE;
3956 }
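/* For example (hypothetical values): with a 16-byte PLT entry,
   has_plt0 set and GOT_ENTRY_SIZE of 8, a symbol whose PLT entry is at
   h->plt.offset == 0x10 (the first entry after PLT0) gets
   got_offset == (0x10 / 16 - 1 + 3) * 8 == 24, i.e. the first .got.plt
   slot after the three reserved for the dynamic linker.  In the .iplt
   case nothing is reserved, so the index is used directly.  */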
3957
3958 /* Fill in the entry in the procedure linkage table. */
3959 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3960 htab->plt.plt_entry_size);
3961 if (use_plt_second)
3962 {
3963 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3964 htab->non_lazy_plt->plt_entry,
3965 htab->non_lazy_plt->plt_entry_size);
3966
3967 resolved_plt = htab->plt_second;
3968 plt_offset = eh->plt_second.offset;
3969 }
3970 else
3971 {
3972 resolved_plt = plt;
3973 plt_offset = h->plt.offset;
3974 }
3975
3976 /* Fill in the variable parts of the PLT entry. */
3977
3978 /* Store the PC-relative offset to the GOT entry, measured from the end
3979 of the instruction in the PLT entry that references it. */
3980 plt_got_pcrel_offset = (gotplt->output_section->vma
3981 + gotplt->output_offset
3982 + got_offset
3983 - resolved_plt->output_section->vma
3984 - resolved_plt->output_offset
3985 - plt_offset
3986 - htab->plt.plt_got_insn_size);
3987
3988 /* Check PC-relative offset overflow in PLT entry. */
3989 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3990 /* xgettext:c-format */
3991 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
3992 output_bfd, h->root.root.string);
3993
3994 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3995 (resolved_plt->contents + plt_offset
3996 + htab->plt.plt_got_offset));
3997
3998 /* Fill in the entry in the global offset table; initially it
3999 points to the second part of the PLT entry. Leave the entry
4000 as zero for an undefined weak symbol in PIE, since no PLT
4001 relocation is emitted against an undefined weak symbol in PIE. */
4002 if (!local_undefweak)
4003 {
4004 if (htab->plt.has_plt0)
4005 bfd_put_64 (output_bfd, (plt->output_section->vma
4006 + plt->output_offset
4007 + h->plt.offset
4008 + htab->lazy_plt->plt_lazy_offset),
4009 gotplt->contents + got_offset);
4010
4011 /* Fill in the entry in the .rela.plt section. */
4012 rela.r_offset = (gotplt->output_section->vma
4013 + gotplt->output_offset
4014 + got_offset);
4015 if (PLT_LOCAL_IFUNC_P (info, h))
4016 {
4017 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4018 h->root.root.string,
4019 h->root.u.def.section->owner);
4020
4021 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4022 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4023 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4024 rela.r_addend = (h->root.u.def.value
4025 + h->root.u.def.section->output_section->vma
4026 + h->root.u.def.section->output_offset);
4027 /* R_X86_64_IRELATIVE comes last. */
4028 plt_index = htab->next_irelative_index--;
4029 }
4030 else
4031 {
4032 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4033 rela.r_addend = 0;
4034 plt_index = htab->next_jump_slot_index++;
4035 }
4036
4037 /* Don't fill in the second and third slots of the PLT entry for
4038 static executables or when there is no PLT0. */
4039 if (plt == htab->elf.splt && htab->plt.has_plt0)
4040 {
4041 bfd_vma plt0_offset
4042 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4043
4044 /* Put relocation index. */
4045 bfd_put_32 (output_bfd, plt_index,
4046 (plt->contents + h->plt.offset
4047 + htab->lazy_plt->plt_reloc_offset));
4048
4049 /* Put in the offset for the jmp back to .PLT0 and check for
4050 overflow. We don't check the relocation index for overflow
4051 since the branch displacement will overflow first. */
4052 if (plt0_offset > 0x80000000)
4053 /* xgettext:c-format */
4054 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4055 output_bfd, h->root.root.string);
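/* The jmp back to .PLT0 always branches backwards: its target is
   offset 0 in .plt and the displacement is taken from the end of the
   jmp (h->plt.offset + plt_plt_insn_end), so the stored value is
   simply the negated plt0_offset.  */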
4056 bfd_put_32 (output_bfd, - plt0_offset,
4057 (plt->contents + h->plt.offset
4058 + htab->lazy_plt->plt_plt_offset));
4059 }
4060
4061 bed = get_elf_backend_data (output_bfd);
4062 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4063 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4064 }
4065 }
4066 else if (eh->plt_got.offset != (bfd_vma) -1)
4067 {
4068 bfd_vma got_offset, plt_offset;
4069 asection *plt, *got;
4070 bfd_boolean got_after_plt;
4071 int32_t got_pcrel_offset;
4072
4073 /* Set the entry in the GOT procedure linkage table. */
4074 plt = htab->plt_got;
4075 got = htab->elf.sgot;
4076 got_offset = h->got.offset;
4077
4078 if (got_offset == (bfd_vma) -1
4079 || (h->type == STT_GNU_IFUNC && h->def_regular)
4080 || plt == NULL
4081 || got == NULL)
4082 abort ();
4083
4084 /* Use the non-lazy PLT entry template for the GOT PLT since they
4085 are identical. */
4086 /* Fill in the entry in the GOT procedure linkage table. */
4087 plt_offset = eh->plt_got.offset;
4088 memcpy (plt->contents + plt_offset,
4089 htab->non_lazy_plt->plt_entry,
4090 htab->non_lazy_plt->plt_entry_size);
4091
4092 /* Store the PC-relative offset to the GOT entry, measured from the
4093 end of the instruction in the entry that references it. */
4094 got_pcrel_offset = (got->output_section->vma
4095 + got->output_offset
4096 + got_offset
4097 - plt->output_section->vma
4098 - plt->output_offset
4099 - plt_offset
4100 - htab->non_lazy_plt->plt_got_insn_size);
4101
4102 /* Check PC-relative offset overflow in GOT PLT entry. */
4103 got_after_plt = got->output_section->vma > plt->output_section->vma;
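/* got_pcrel_offset is a 32-bit signed value.  If the GOT lies above
   the PLT the true displacement is positive, so a negative result
   means the value wrapped around, and vice versa when the GOT lies
   below the PLT.  */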
4104 if ((got_after_plt && got_pcrel_offset < 0)
4105 || (!got_after_plt && got_pcrel_offset > 0))
4106 /* xgettext:c-format */
4107 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4108 output_bfd, h->root.root.string);
4109
4110 bfd_put_32 (output_bfd, got_pcrel_offset,
4111 (plt->contents + plt_offset
4112 + htab->non_lazy_plt->plt_got_offset));
4113 }
4114
4115 if (!local_undefweak
4116 && !h->def_regular
4117 && (h->plt.offset != (bfd_vma) -1
4118 || eh->plt_got.offset != (bfd_vma) -1))
4119 {
4120 /* Mark the symbol as undefined, rather than as defined in
4121 the .plt section. Leave the value if there were any
4122 relocations where pointer equality matters (this is a clue
4123 for the dynamic linker, to make function pointer
4124 comparisons work between an application and shared
4125 library), otherwise set it to zero. If a function is only
4126 called from a binary, there is no need to slow down
4127 shared libraries because of that. */
4128 sym->st_shndx = SHN_UNDEF;
4129 if (!h->pointer_equality_needed)
4130 sym->st_value = 0;
4131 }
4132
4133 /* Don't generate dynamic GOT relocation against undefined weak
4134 symbol in executable. */
4135 if (h->got.offset != (bfd_vma) -1
4136 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4137 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4138 && !local_undefweak)
4139 {
4140 Elf_Internal_Rela rela;
4141 asection *relgot = htab->elf.srelgot;
4142
4143 /* This symbol has an entry in the global offset table. Set it
4144 up. */
4145 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4146 abort ();
4147
4148 rela.r_offset = (htab->elf.sgot->output_section->vma
4149 + htab->elf.sgot->output_offset
4150 + (h->got.offset &~ (bfd_vma) 1));
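/* The low bit of h->got.offset flags a GOT entry whose contents were
   already initialized while relocating (the local/RELATIVE case
   asserted below); masking it off yields the real offset.  */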
4151
4152 /* If this is a static link, or it is a -Bsymbolic link and the
4153 symbol is defined locally or was forced to be local because
4154 of a version file, we just want to emit a RELATIVE reloc.
4155 The entry in the global offset table will already have been
4156 initialized in the relocate_section function. */
4157 if (h->def_regular
4158 && h->type == STT_GNU_IFUNC)
4159 {
4160 if (h->plt.offset == (bfd_vma) -1)
4161 {
4162 /* STT_GNU_IFUNC is referenced without PLT. */
4163 if (htab->elf.splt == NULL)
4164 {
4165 /* Use the .rel[a].iplt section to store .got relocations
4166 in a static executable. */
4167 relgot = htab->elf.irelplt;
4168 }
4169 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4170 {
4171 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4172 h->root.root.string,
4173 h->root.u.def.section->owner);
4174
4175 rela.r_info = htab->r_info (0,
4176 R_X86_64_IRELATIVE);
4177 rela.r_addend = (h->root.u.def.value
4178 + h->root.u.def.section->output_section->vma
4179 + h->root.u.def.section->output_offset);
4180 }
4181 else
4182 goto do_glob_dat;
4183 }
4184 else if (bfd_link_pic (info))
4185 {
4186 /* Generate R_X86_64_GLOB_DAT. */
4187 goto do_glob_dat;
4188 }
4189 else
4190 {
4191 asection *plt;
4192 bfd_vma plt_offset;
4193
4194 if (!h->pointer_equality_needed)
4195 abort ();
4196
4197 /* For a non-shared object, we can't use .got.plt, which holds
4198 the real function address, if we need pointer equality.
4199 Load the GOT entry with the address of the PLT entry instead. */
4200 if (htab->plt_second != NULL)
4201 {
4202 plt = htab->plt_second;
4203 plt_offset = eh->plt_second.offset;
4204 }
4205 else
4206 {
4207 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4208 plt_offset = h->plt.offset;
4209 }
4210 bfd_put_64 (output_bfd, (plt->output_section->vma
4211 + plt->output_offset
4212 + plt_offset),
4213 htab->elf.sgot->contents + h->got.offset);
4214 return TRUE;
4215 }
4216 }
4217 else if (bfd_link_pic (info)
4218 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4219 {
4220 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4221 return FALSE;
4222 BFD_ASSERT((h->got.offset & 1) != 0);
4223 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4224 rela.r_addend = (h->root.u.def.value
4225 + h->root.u.def.section->output_section->vma
4226 + h->root.u.def.section->output_offset);
4227 }
4228 else
4229 {
4230 BFD_ASSERT((h->got.offset & 1) == 0);
4231 do_glob_dat:
4232 bfd_put_64 (output_bfd, (bfd_vma) 0,
4233 htab->elf.sgot->contents + h->got.offset);
4234 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4235 rela.r_addend = 0;
4236 }
4237
4238 elf_append_rela (output_bfd, relgot, &rela);
4239 }
4240
4241 if (h->needs_copy)
4242 {
4243 Elf_Internal_Rela rela;
4244 asection *s;
4245
4246 /* This symbol needs a copy reloc. Set it up. */
4247 VERIFY_COPY_RELOC (h, htab)
4248
4249 rela.r_offset = (h->root.u.def.value
4250 + h->root.u.def.section->output_section->vma
4251 + h->root.u.def.section->output_offset);
4252 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4253 rela.r_addend = 0;
4254 if (h->root.u.def.section == htab->elf.sdynrelro)
4255 s = htab->elf.sreldynrelro;
4256 else
4257 s = htab->elf.srelbss;
4258 elf_append_rela (output_bfd, s, &rela);
4259 }
4260
4261 return TRUE;
4262 }
4263
4264 /* Finish up local dynamic symbol handling. We set the contents of
4265 various dynamic sections here. */
4266
4267 static bfd_boolean
4268 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4269 {
4270 struct elf_link_hash_entry *h
4271 = (struct elf_link_hash_entry *) *slot;
4272 struct bfd_link_info *info
4273 = (struct bfd_link_info *) inf;
4274
4275 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4276 info, h, NULL);
4277 }
4278
4279 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4280 here, since an undefined weak symbol may not be dynamic and
4281 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4282
4283 static bfd_boolean
4284 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4285 void *inf)
4286 {
4287 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4288 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4289
4290 if (h->root.type != bfd_link_hash_undefweak
4291 || h->dynindx != -1)
4292 return TRUE;
4293
4294 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4295 info, h, NULL);
4296 }
4297
4298 /* Used to decide how to sort relocs in an optimal manner for the
4299 dynamic linker, before writing them out. */
4300
4301 static enum elf_reloc_type_class
4302 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4303 const asection *rel_sec ATTRIBUTE_UNUSED,
4304 const Elf_Internal_Rela *rela)
4305 {
4306 bfd *abfd = info->output_bfd;
4307 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4308 struct elf_x86_link_hash_table *htab
4309 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4310
4311 if (htab->elf.dynsym != NULL
4312 && htab->elf.dynsym->contents != NULL)
4313 {
4314 /* Check relocation against STT_GNU_IFUNC symbol if there are
4315 dynamic symbols. */
4316 unsigned long r_symndx = htab->r_sym (rela->r_info);
4317 if (r_symndx != STN_UNDEF)
4318 {
4319 Elf_Internal_Sym sym;
4320 if (!bed->s->swap_symbol_in (abfd,
4321 (htab->elf.dynsym->contents
4322 + r_symndx * bed->s->sizeof_sym),
4323 0, &sym))
4324 abort ();
4325
4326 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4327 return reloc_class_ifunc;
4328 }
4329 }
4330
4331 switch ((int) ELF32_R_TYPE (rela->r_info))
4332 {
4333 case R_X86_64_IRELATIVE:
4334 return reloc_class_ifunc;
4335 case R_X86_64_RELATIVE:
4336 case R_X86_64_RELATIVE64:
4337 return reloc_class_relative;
4338 case R_X86_64_JUMP_SLOT:
4339 return reloc_class_plt;
4340 case R_X86_64_COPY:
4341 return reloc_class_copy;
4342 default:
4343 return reloc_class_normal;
4344 }
4345 }
4346
4347 /* Finish up the dynamic sections. */
4348
4349 static bfd_boolean
4350 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4351 struct bfd_link_info *info)
4352 {
4353 struct elf_x86_link_hash_table *htab;
4354
4355 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4356 if (htab == NULL)
4357 return FALSE;
4358
4359 if (! htab->elf.dynamic_sections_created)
4360 return TRUE;
4361
4362 if (htab->elf.splt && htab->elf.splt->size > 0)
4363 {
4364 elf_section_data (htab->elf.splt->output_section)
4365 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4366
4367 if (htab->plt.has_plt0)
4368 {
4369 /* Fill in the special first entry in the procedure linkage
4370 table. */
4371 memcpy (htab->elf.splt->contents,
4372 htab->lazy_plt->plt0_entry,
4373 htab->lazy_plt->plt0_entry_size);
4374 /* Fill in the offset for pushq GOT+8(%rip). The displacement is
4375 relative to the end of the 6-byte instruction, hence the - 6. */
4376 bfd_put_32 (output_bfd,
4377 (htab->elf.sgotplt->output_section->vma
4378 + htab->elf.sgotplt->output_offset
4379 + 8
4380 - htab->elf.splt->output_section->vma
4381 - htab->elf.splt->output_offset
4382 - 6),
4383 (htab->elf.splt->contents
4384 + htab->lazy_plt->plt0_got1_offset));
4385 /* Add offset for the PC-relative instruction accessing
4386 GOT+16, subtracting the offset to the end of that
4387 instruction. */
4388 bfd_put_32 (output_bfd,
4389 (htab->elf.sgotplt->output_section->vma
4390 + htab->elf.sgotplt->output_offset
4391 + 16
4392 - htab->elf.splt->output_section->vma
4393 - htab->elf.splt->output_offset
4394 - htab->lazy_plt->plt0_got2_insn_end),
4395 (htab->elf.splt->contents
4396 + htab->lazy_plt->plt0_got2_offset));
4397 }
4398
4399 if (htab->tlsdesc_plt)
4400 {
4401 bfd_put_64 (output_bfd, (bfd_vma) 0,
4402 htab->elf.sgot->contents + htab->tlsdesc_got);
4403
4404 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4405 htab->lazy_plt->plt0_entry,
4406 htab->lazy_plt->plt0_entry_size);
4407
4408 /* Fill in the offset for pushq GOT+8(%rip). The displacement
4409 is relative to the end of the 6-byte instruction, hence the - 6. */
4410 bfd_put_32 (output_bfd,
4411 (htab->elf.sgotplt->output_section->vma
4412 + htab->elf.sgotplt->output_offset
4413 + 8
4414 - htab->elf.splt->output_section->vma
4415 - htab->elf.splt->output_offset
4416 - htab->tlsdesc_plt
4417 - 6),
4418 (htab->elf.splt->contents
4419 + htab->tlsdesc_plt
4420 + htab->lazy_plt->plt0_got1_offset));
4421 /* Add offset for the PC-relative instruction accessing
4422 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4423 subtracting the offset to the end of that
4424 instruction. */
4425 bfd_put_32 (output_bfd,
4426 (htab->elf.sgot->output_section->vma
4427 + htab->elf.sgot->output_offset
4428 + htab->tlsdesc_got
4429 - htab->elf.splt->output_section->vma
4430 - htab->elf.splt->output_offset
4431 - htab->tlsdesc_plt
4432 - htab->lazy_plt->plt0_got2_insn_end),
4433 (htab->elf.splt->contents
4434 + htab->tlsdesc_plt
4435 + htab->lazy_plt->plt0_got2_offset));
4436 }
4437 }
4438
4439 /* Fill PLT entries for undefined weak symbols in PIE. */
4440 if (bfd_link_pie (info))
4441 bfd_hash_traverse (&info->hash->table,
4442 elf_x86_64_pie_finish_undefweak_symbol,
4443 info);
4444
4445 return TRUE;
4446 }
4447
4448 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4449 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4450 It has to be done before elf_link_sort_relocs is called so that
4451 dynamic relocations are properly sorted. */
4452
4453 static bfd_boolean
4454 elf_x86_64_output_arch_local_syms
4455 (bfd *output_bfd ATTRIBUTE_UNUSED,
4456 struct bfd_link_info *info,
4457 void *flaginfo ATTRIBUTE_UNUSED,
4458 int (*func) (void *, const char *,
4459 Elf_Internal_Sym *,
4460 asection *,
4461 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4462 {
4463 struct elf_x86_link_hash_table *htab
4464 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4465 if (htab == NULL)
4466 return FALSE;
4467
4468 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4469 htab_traverse (htab->loc_hash_table,
4470 elf_x86_64_finish_local_dynamic_symbol,
4471 info);
4472
4473 return TRUE;
4474 }
4475
4476 /* Forward declaration. */
4477 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4478
4479 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4480 dynamic relocations. */
4481
4482 static long
4483 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4484 long symcount ATTRIBUTE_UNUSED,
4485 asymbol **syms ATTRIBUTE_UNUSED,
4486 long dynsymcount,
4487 asymbol **dynsyms,
4488 asymbol **ret)
4489 {
4490 long count, i, n;
4491 int j;
4492 bfd_byte *plt_contents;
4493 long relsize;
4494 const struct elf_x86_lazy_plt_layout *lazy_plt;
4495 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4496 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4497 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4498 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4499 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4500 asection *plt;
4501 enum elf_x86_plt_type plt_type;
4502 struct elf_x86_plt plts[] =
4503 {
4504 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4505 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4506 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4507 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4508 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4509 };
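/* Each entry names a candidate PLT section; the NULL/0 fields (section,
   contents, GOT offset, GOT insn size, entry size and entry count) are
   filled in below once the section has been identified.  */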
4510
4511 *ret = NULL;
4512
4513 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4514 return 0;
4515
4516 if (dynsymcount <= 0)
4517 return 0;
4518
4519 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4520 if (relsize <= 0)
4521 return -1;
4522
4523 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4524 {
4525 lazy_plt = &elf_x86_64_lazy_plt;
4526 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4527 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4528 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4529 if (ABI_64_P (abfd))
4530 {
4531 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4532 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4533 }
4534 else
4535 {
4536 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4537 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4538 }
4539 }
4540 else
4541 {
4542 lazy_plt = &elf_x86_64_nacl_plt;
4543 non_lazy_plt = NULL;
4544 lazy_bnd_plt = NULL;
4545 non_lazy_bnd_plt = NULL;
4546 lazy_ibt_plt = NULL;
4547 non_lazy_ibt_plt = NULL;
4548 }
4549
4550 count = 0;
4551 for (j = 0; plts[j].name != NULL; j++)
4552 {
4553 plt = bfd_get_section_by_name (abfd, plts[j].name);
4554 if (plt == NULL || plt->size == 0)
4555 continue;
4556
4557 /* Get the PLT section contents. */
4558 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4559 if (plt_contents == NULL)
4560 break;
4561 if (!bfd_get_section_contents (abfd, (asection *) plt,
4562 plt_contents, 0, plt->size))
4563 {
4564 free (plt_contents);
4565 break;
4566 }
4567
4568 /* Check what kind of PLT it is. */
4569 plt_type = plt_unknown;
4570 if (plts[j].type == plt_unknown
4571 && (plt->size >= (lazy_plt->plt_entry_size
4572 + lazy_plt->plt_entry_size)))
4573 {
4574 /* Match lazy PLT first. Need to check the first two
4575 instructions. */
4576 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4577 lazy_plt->plt0_got1_offset) == 0)
4578 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4579 2) == 0))
4580 plt_type = plt_lazy;
4581 else if (lazy_bnd_plt != NULL
4582 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4583 lazy_bnd_plt->plt0_got1_offset) == 0)
4584 && (memcmp (plt_contents + 6,
4585 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4586 {
4587 plt_type = plt_lazy | plt_second;
4588 /* The first entry in the lazy IBT PLT is the same as in the
4589 lazy BND PLT. */
4590 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4591 lazy_ibt_plt->plt_entry,
4592 lazy_ibt_plt->plt_got_offset) == 0))
4593 lazy_plt = lazy_ibt_plt;
4594 else
4595 lazy_plt = lazy_bnd_plt;
4596 }
4597 }
4598
4599 if (non_lazy_plt != NULL
4600 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4601 && plt->size >= non_lazy_plt->plt_entry_size)
4602 {
4603 /* Match non-lazy PLT. */
4604 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4605 non_lazy_plt->plt_got_offset) == 0)
4606 plt_type = plt_non_lazy;
4607 }
4608
4609 if (plt_type == plt_unknown || plt_type == plt_second)
4610 {
4611 if (non_lazy_bnd_plt != NULL
4612 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4613 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4614 non_lazy_bnd_plt->plt_got_offset) == 0))
4615 {
4616 /* Match BND PLT. */
4617 plt_type = plt_second;
4618 non_lazy_plt = non_lazy_bnd_plt;
4619 }
4620 else if (non_lazy_ibt_plt != NULL
4621 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4622 && (memcmp (plt_contents,
4623 non_lazy_ibt_plt->plt_entry,
4624 non_lazy_ibt_plt->plt_got_offset) == 0))
4625 {
4626 /* Match IBT PLT. */
4627 plt_type = plt_second;
4628 non_lazy_plt = non_lazy_ibt_plt;
4629 }
4630 }
4631
4632 if (plt_type == plt_unknown)
4633 {
4634 free (plt_contents);
4635 continue;
4636 }
4637
4638 plts[j].sec = plt;
4639 plts[j].type = plt_type;
4640
4641 if ((plt_type & plt_lazy))
4642 {
4643 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4644 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4645 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4646 /* Skip PLT0 in lazy PLT. */
4647 i = 1;
4648 }
4649 else
4650 {
4651 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4652 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4653 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4654 i = 0;
4655 }
4656
4657 /* Skip lazy PLT when the second PLT is used. */
4658 if (plt_type == (plt_lazy | plt_second))
4659 plts[j].count = 0;
4660 else
4661 {
4662 n = plt->size / plts[j].plt_entry_size;
4663 plts[j].count = n;
4664 count += n - i;
4665 }
4666
4667 plts[j].contents = plt_contents;
4668 }
4669
4670 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4671 (bfd_vma) 0, plts, dynsyms,
4672 ret);
4673 }
4674
4675 /* Handle an x86-64 specific section when reading an object file. This
4676 is called when elfcode.h finds a section with an unknown type. */
4677
4678 static bfd_boolean
4679 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4680 const char *name, int shindex)
4681 {
4682 if (hdr->sh_type != SHT_X86_64_UNWIND)
4683 return FALSE;
4684
4685 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4686 return FALSE;
4687
4688 return TRUE;
4689 }
4690
4691 /* Hook called by the linker routine which adds symbols from an object
4692 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4693 of .bss. */
4694
4695 static bfd_boolean
4696 elf_x86_64_add_symbol_hook (bfd *abfd,
4697 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4698 Elf_Internal_Sym *sym,
4699 const char **namep ATTRIBUTE_UNUSED,
4700 flagword *flagsp ATTRIBUTE_UNUSED,
4701 asection **secp,
4702 bfd_vma *valp)
4703 {
4704 asection *lcomm;
4705
4706 switch (sym->st_shndx)
4707 {
4708 case SHN_X86_64_LCOMMON:
4709 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4710 if (lcomm == NULL)
4711 {
4712 lcomm = bfd_make_section_with_flags (abfd,
4713 "LARGE_COMMON",
4714 (SEC_ALLOC
4715 | SEC_IS_COMMON
4716 | SEC_LINKER_CREATED));
4717 if (lcomm == NULL)
4718 return FALSE;
4719 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4720 }
4721 *secp = lcomm;
4722 *valp = sym->st_size;
4723 return TRUE;
4724 }
4725
4726 return TRUE;
4727 }
4728
4729
4730 /* Given a BFD section, try to locate the corresponding ELF section
4731 index. */
4732
4733 static bfd_boolean
4734 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4735 asection *sec, int *index_return)
4736 {
4737 if (sec == &_bfd_elf_large_com_section)
4738 {
4739 *index_return = SHN_X86_64_LCOMMON;
4740 return TRUE;
4741 }
4742 return FALSE;
4743 }
4744
4745 /* Process a symbol. */
4746
4747 static void
4748 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4749 asymbol *asym)
4750 {
4751 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4752
4753 switch (elfsym->internal_elf_sym.st_shndx)
4754 {
4755 case SHN_X86_64_LCOMMON:
4756 asym->section = &_bfd_elf_large_com_section;
4757 asym->value = elfsym->internal_elf_sym.st_size;
4758 /* Common symbol doesn't set BSF_GLOBAL. */
4759 asym->flags &= ~BSF_GLOBAL;
4760 break;
4761 }
4762 }
4763
4764 static bfd_boolean
4765 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4766 {
4767 return (sym->st_shndx == SHN_COMMON
4768 || sym->st_shndx == SHN_X86_64_LCOMMON);
4769 }
4770
4771 static unsigned int
4772 elf_x86_64_common_section_index (asection *sec)
4773 {
4774 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4775 return SHN_COMMON;
4776 else
4777 return SHN_X86_64_LCOMMON;
4778 }
4779
4780 static asection *
4781 elf_x86_64_common_section (asection *sec)
4782 {
4783 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4784 return bfd_com_section_ptr;
4785 else
4786 return &_bfd_elf_large_com_section;
4787 }
4788
4789 static bfd_boolean
4790 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4791 const Elf_Internal_Sym *sym,
4792 asection **psec,
4793 bfd_boolean newdef,
4794 bfd_boolean olddef,
4795 bfd *oldbfd,
4796 const asection *oldsec)
4797 {
4798 /* A normal common symbol and a large common symbol result in a
4799 normal common symbol. We turn the large common symbol into a
4800 normal one. */
4801 if (!olddef
4802 && h->root.type == bfd_link_hash_common
4803 && !newdef
4804 && bfd_is_com_section (*psec)
4805 && oldsec != *psec)
4806 {
4807 if (sym->st_shndx == SHN_COMMON
4808 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4809 {
4810 h->root.u.c.p->section
4811 = bfd_make_section_old_way (oldbfd, "COMMON");
4812 h->root.u.c.p->section->flags = SEC_ALLOC;
4813 }
4814 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4815 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4816 *psec = bfd_com_section_ptr;
4817 }
4818
4819 return TRUE;
4820 }
4821
4822 static int
4823 elf_x86_64_additional_program_headers (bfd *abfd,
4824 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4825 {
4826 asection *s;
4827 int count = 0;
4828
4829 /* Check to see if we need a large readonly segment. */
4830 s = bfd_get_section_by_name (abfd, ".lrodata");
4831 if (s && (s->flags & SEC_LOAD))
4832 count++;
4833
4834 /* Check to see if we need a large data segment. Since the .lbss
4835 section is placed right after the .bss section, there should be no
4836 need for a large data segment just because of .lbss. */
4837 s = bfd_get_section_by_name (abfd, ".ldata");
4838 if (s && (s->flags & SEC_LOAD))
4839 count++;
4840
4841 return count;
4842 }
4843
4844 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4845
4846 static bfd_boolean
4847 elf_x86_64_relocs_compatible (const bfd_target *input,
4848 const bfd_target *output)
4849 {
4850 return ((xvec_get_elf_backend_data (input)->s->elfclass
4851 == xvec_get_elf_backend_data (output)->s->elfclass)
4852 && _bfd_elf_relocs_compatible (input, output));
4853 }
4854
4855 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4856 with GNU properties if found. Otherwise, return NULL. */
4857
4858 static bfd *
4859 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4860 {
4861 struct elf_x86_init_table init_table;
4862
4863 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4864 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4865 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4866 != (int) R_X86_64_GNU_VTINHERIT)
4867 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4868 != (int) R_X86_64_GNU_VTENTRY))
4869 abort ();
4870
4871 /* The PLT0 pad byte is not used for x86-64. */
4872 init_table.plt0_pad_byte = 0x90;
4873
4874 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4875 {
4876 if (info->bndplt)
4877 {
4878 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4879 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4880 }
4881 else
4882 {
4883 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4884 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4885 }
4886
4887 if (ABI_64_P (info->output_bfd))
4888 {
4889 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4890 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4891 }
4892 else
4893 {
4894 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4895 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4896 }
4897 }
4898 else
4899 {
4900 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4901 init_table.non_lazy_plt = NULL;
4902 init_table.lazy_ibt_plt = NULL;
4903 init_table.non_lazy_ibt_plt = NULL;
4904 }
4905
4906 if (ABI_64_P (info->output_bfd))
4907 {
4908 init_table.r_info = elf64_r_info;
4909 init_table.r_sym = elf64_r_sym;
4910 }
4911 else
4912 {
4913 init_table.r_info = elf32_r_info;
4914 init_table.r_sym = elf32_r_sym;
4915 }
4916
4917 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4918 }
4919
4920 static const struct bfd_elf_special_section
4921 elf_x86_64_special_sections[]=
4922 {
4923 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4924 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4925 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4926 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4927 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4928 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4929 { NULL, 0, 0, 0, 0 }
4930 };
4931
4932 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4933 #define TARGET_LITTLE_NAME "elf64-x86-64"
4934 #define ELF_ARCH bfd_arch_i386
4935 #define ELF_TARGET_ID X86_64_ELF_DATA
4936 #define ELF_MACHINE_CODE EM_X86_64
4937 #define ELF_MAXPAGESIZE 0x200000
4938 #define ELF_MINPAGESIZE 0x1000
4939 #define ELF_COMMONPAGESIZE 0x1000
4940
4941 #define elf_backend_can_gc_sections 1
4942 #define elf_backend_can_refcount 1
4943 #define elf_backend_want_got_plt 1
4944 #define elf_backend_plt_readonly 1
4945 #define elf_backend_want_plt_sym 0
4946 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
4947 #define elf_backend_rela_normal 1
4948 #define elf_backend_plt_alignment 4
4949 #define elf_backend_extern_protected_data 1
4950 #define elf_backend_caches_rawsize 1
4951 #define elf_backend_dtrel_excludes_plt 1
4952 #define elf_backend_want_dynrelro 1
4953
4954 #define elf_info_to_howto elf_x86_64_info_to_howto
4955
4956 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4957 #define bfd_elf64_bfd_reloc_name_lookup \
4958 elf_x86_64_reloc_name_lookup
4959
4960 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4961 #define elf_backend_check_relocs elf_x86_64_check_relocs
4962 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4963 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4964 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4965 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4966 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4967 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4968 #ifdef CORE_HEADER
4969 #define elf_backend_write_core_note elf_x86_64_write_core_note
4970 #endif
4971 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4972 #define elf_backend_relocate_section elf_x86_64_relocate_section
4973 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4974 #define elf_backend_object_p elf64_x86_64_elf_object_p
4975 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4976
4977 #define elf_backend_section_from_shdr \
4978 elf_x86_64_section_from_shdr
4979
4980 #define elf_backend_section_from_bfd_section \
4981 elf_x86_64_elf_section_from_bfd_section
4982 #define elf_backend_add_symbol_hook \
4983 elf_x86_64_add_symbol_hook
4984 #define elf_backend_symbol_processing \
4985 elf_x86_64_symbol_processing
4986 #define elf_backend_common_section_index \
4987 elf_x86_64_common_section_index
4988 #define elf_backend_common_section \
4989 elf_x86_64_common_section
4990 #define elf_backend_common_definition \
4991 elf_x86_64_common_definition
4992 #define elf_backend_merge_symbol \
4993 elf_x86_64_merge_symbol
4994 #define elf_backend_special_sections \
4995 elf_x86_64_special_sections
4996 #define elf_backend_additional_program_headers \
4997 elf_x86_64_additional_program_headers
4998 #define elf_backend_setup_gnu_properties \
4999 elf_x86_64_link_setup_gnu_properties
5000 #define elf_backend_hide_symbol \
5001 _bfd_x86_elf_hide_symbol
5002
5003 #include "elf64-target.h"
5004
5005 /* CloudABI support. */
5006
5007 #undef TARGET_LITTLE_SYM
5008 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5009 #undef TARGET_LITTLE_NAME
5010 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5011
5012 #undef ELF_OSABI
5013 #define ELF_OSABI ELFOSABI_CLOUDABI
5014
5015 #undef elf64_bed
5016 #define elf64_bed elf64_x86_64_cloudabi_bed
5017
5018 #include "elf64-target.h"
5019
5020 /* FreeBSD support. */
5021
5022 #undef TARGET_LITTLE_SYM
5023 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5024 #undef TARGET_LITTLE_NAME
5025 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5026
5027 #undef ELF_OSABI
5028 #define ELF_OSABI ELFOSABI_FREEBSD
5029
5030 #undef elf64_bed
5031 #define elf64_bed elf64_x86_64_fbsd_bed
5032
5033 #include "elf64-target.h"
5034
5035 /* Solaris 2 support. */
5036
5037 #undef TARGET_LITTLE_SYM
5038 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5039 #undef TARGET_LITTLE_NAME
5040 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5041
5042 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5043 {
5044 is_solaris /* os */
5045 };
5046
5047 #undef elf_backend_arch_data
5048 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5049
5050 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5051 objects won't be recognized. */
5052 #undef ELF_OSABI
5053
5054 #undef elf64_bed
5055 #define elf64_bed elf64_x86_64_sol2_bed
5056
5057 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5058 boundary. */
5059 #undef elf_backend_static_tls_alignment
5060 #define elf_backend_static_tls_alignment 16
5061
5062 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5063
5064 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5065 File, p.63. */
5066 #undef elf_backend_want_plt_sym
5067 #define elf_backend_want_plt_sym 1
5068
5069 #undef elf_backend_strtab_flags
5070 #define elf_backend_strtab_flags SHF_STRINGS
5071
5072 static bfd_boolean
5073 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5074 bfd *obfd ATTRIBUTE_UNUSED,
5075 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5076 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5077 {
5078 /* PR 19938: FIXME: Need to add code for setting the sh_info
5079 and sh_link fields of Solaris specific section types. */
5080 return FALSE;
5081 }
5082
5083 #undef elf_backend_copy_special_section_fields
5084 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5085
5086 #include "elf64-target.h"
5087
5088 /* Native Client support. */
5089
5090 static bfd_boolean
5091 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5092 {
5093 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5094 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5095 return TRUE;
5096 }
5097
5098 #undef TARGET_LITTLE_SYM
5099 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5100 #undef TARGET_LITTLE_NAME
5101 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5102 #undef elf64_bed
5103 #define elf64_bed elf64_x86_64_nacl_bed
5104
5105 #undef ELF_MAXPAGESIZE
5106 #undef ELF_MINPAGESIZE
5107 #undef ELF_COMMONPAGESIZE
5108 #define ELF_MAXPAGESIZE 0x10000
5109 #define ELF_MINPAGESIZE 0x10000
5110 #define ELF_COMMONPAGESIZE 0x10000
5111
5112 /* Restore defaults. */
5113 #undef ELF_OSABI
5114 #undef elf_backend_static_tls_alignment
5115 #undef elf_backend_want_plt_sym
5116 #define elf_backend_want_plt_sym 0
5117 #undef elf_backend_strtab_flags
5118 #undef elf_backend_copy_special_section_fields
5119
5120 /* NaCl uses substantially different PLT entries for the same effects. */
5121
5122 #undef elf_backend_plt_alignment
5123 #define elf_backend_plt_alignment 5
5124 #define NACL_PLT_ENTRY_SIZE 64
5125 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
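/* In "and $NACLMASK, %r11d" the 0xe0 immediate is sign-extended to
   0xffffffe0 (-32), clearing the low five bits so that indirect branch
   targets stay aligned to NaCl's 32-byte bundles.  */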
5126
5127 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5128 {
5129 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5130 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5131 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5132 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5133 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5134
5135 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5136 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5137
5138 /* 32 bytes of nop to pad out to the standard size. */
5139 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5140 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5141 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5142 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5143 0x66, /* excess data16 prefix */
5144 0x90 /* nop */
5145 };
5146
5147 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5148 {
5149 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5150 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5151 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5152 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5153
5154 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5155 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5156 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5157
5158 /* Lazy GOT entries point here (32-byte aligned). */
5159 0x68, /* pushq immediate */
5160 0, 0, 0, 0, /* replaced with index into relocation table. */
5161 0xe9, /* jmp relative */
5162 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5163
5164 /* 22 bytes of nop to pad out to the standard size. */
5165 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5166 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5167 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5168 };
5169
5170 /* .eh_frame covering the .plt section. */
5171
5172 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5173 {
5174 #if (PLT_CIE_LENGTH != 20 \
5175 || PLT_FDE_LENGTH != 36 \
5176 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5177 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5178 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5179 #endif
5180 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5181 0, 0, 0, 0, /* CIE ID */
5182 1, /* CIE version */
5183 'z', 'R', 0, /* Augmentation string */
5184 1, /* Code alignment factor */
5185 0x78, /* Data alignment factor */
5186 16, /* Return address column */
5187 1, /* Augmentation size */
5188 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5189 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5190 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5191 DW_CFA_nop, DW_CFA_nop,
5192
5193 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5194 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5195 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5196 0, 0, 0, 0, /* .plt size goes here */
5197 0, /* Augmentation size */
5198 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5199 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5200 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5201 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5202 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5203 13, /* Block length */
5204 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5205 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5206 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5207 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5208 DW_CFA_nop, DW_CFA_nop
5209 };
5210
5211 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5212 {
5213 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5214 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5215 elf_x86_64_nacl_plt_entry, /* plt_entry */
5216 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5217 2, /* plt0_got1_offset */
5218 9, /* plt0_got2_offset */
5219 13, /* plt0_got2_insn_end */
5220 3, /* plt_got_offset */
5221 33, /* plt_reloc_offset */
5222 38, /* plt_plt_offset */
5223 7, /* plt_got_insn_size */
5224 42, /* plt_plt_insn_end */
5225 32, /* plt_lazy_offset */
5226 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5227 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5228 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5229 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5230 };
5231
5232 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5233 {
5234 is_nacl /* os */
5235 };
5236
5237 #undef elf_backend_arch_data
5238 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5239
5240 #undef elf_backend_object_p
5241 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5242 #undef elf_backend_modify_segment_map
5243 #define elf_backend_modify_segment_map nacl_modify_segment_map
5244 #undef elf_backend_modify_program_headers
5245 #define elf_backend_modify_program_headers nacl_modify_program_headers
5246 #undef elf_backend_final_write_processing
5247 #define elf_backend_final_write_processing nacl_final_write_processing
5248
5249 #include "elf64-target.h"
5250
5251 /* Native Client x32 support. */
5252
5253 static bfd_boolean
5254 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5255 {
5256 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5257 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5258 return TRUE;
5259 }
5260
5261 #undef TARGET_LITTLE_SYM
5262 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5263 #undef TARGET_LITTLE_NAME
5264 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5265 #undef elf32_bed
5266 #define elf32_bed elf32_x86_64_nacl_bed
5267
5268 #define bfd_elf32_bfd_reloc_type_lookup \
5269 elf_x86_64_reloc_type_lookup
5270 #define bfd_elf32_bfd_reloc_name_lookup \
5271 elf_x86_64_reloc_name_lookup
5272 #define bfd_elf32_get_synthetic_symtab \
5273 elf_x86_64_get_synthetic_symtab
5274
5275 #undef elf_backend_object_p
5276 #define elf_backend_object_p \
5277 elf32_x86_64_nacl_elf_object_p
5278
5279 #undef elf_backend_bfd_from_remote_memory
5280 #define elf_backend_bfd_from_remote_memory \
5281 _bfd_elf32_bfd_from_remote_memory
5282
5283 #undef elf_backend_size_info
5284 #define elf_backend_size_info \
5285 _bfd_elf32_size_info
5286
5287 #include "elf32-target.h"
5288
5289 /* Restore defaults. */
5290 #undef elf_backend_object_p
5291 #define elf_backend_object_p elf64_x86_64_elf_object_p
5292 #undef elf_backend_bfd_from_remote_memory
5293 #undef elf_backend_size_info
5294 #undef elf_backend_modify_segment_map
5295 #undef elf_backend_modify_program_headers
5296 #undef elf_backend_final_write_processing
5297
5298 /* Intel L1OM support. */
5299
5300 static bfd_boolean
5301 elf64_l1om_elf_object_p (bfd *abfd)
5302 {
5303 /* Set the right machine number for an L1OM elf64 file. */
5304 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5305 return TRUE;
5306 }
5307
5308 #undef TARGET_LITTLE_SYM
5309 #define TARGET_LITTLE_SYM l1om_elf64_vec
5310 #undef TARGET_LITTLE_NAME
5311 #define TARGET_LITTLE_NAME "elf64-l1om"
5312 #undef ELF_ARCH
5313 #define ELF_ARCH bfd_arch_l1om
5314
5315 #undef ELF_MACHINE_CODE
5316 #define ELF_MACHINE_CODE EM_L1OM
5317
5318 #undef ELF_OSABI
5319
5320 #undef elf64_bed
5321 #define elf64_bed elf64_l1om_bed
5322
5323 #undef elf_backend_object_p
5324 #define elf_backend_object_p elf64_l1om_elf_object_p
5325
5326 /* Restore defaults. */
5327 #undef ELF_MAXPAGESIZE
5328 #undef ELF_MINPAGESIZE
5329 #undef ELF_COMMONPAGESIZE
5330 #define ELF_MAXPAGESIZE 0x200000
5331 #define ELF_MINPAGESIZE 0x1000
5332 #define ELF_COMMONPAGESIZE 0x1000
5333 #undef elf_backend_plt_alignment
5334 #define elf_backend_plt_alignment 4
5335 #undef elf_backend_arch_data
5336 #define elf_backend_arch_data &elf_x86_64_arch_bed
5337
5338 #include "elf64-target.h"
5339
5340 /* FreeBSD L1OM support. */
5341
5342 #undef TARGET_LITTLE_SYM
5343 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5344 #undef TARGET_LITTLE_NAME
5345 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5346
5347 #undef ELF_OSABI
5348 #define ELF_OSABI ELFOSABI_FREEBSD
5349
5350 #undef elf64_bed
5351 #define elf64_bed elf64_l1om_fbsd_bed
5352
5353 #include "elf64-target.h"
5354
5355 /* Intel K1OM support. */
5356
5357 static bfd_boolean
5358 elf64_k1om_elf_object_p (bfd *abfd)
5359 {
5360 /* Set the right machine number for a K1OM elf64 file. */
5361 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5362 return TRUE;
5363 }
5364
5365 #undef TARGET_LITTLE_SYM
5366 #define TARGET_LITTLE_SYM k1om_elf64_vec
5367 #undef TARGET_LITTLE_NAME
5368 #define TARGET_LITTLE_NAME "elf64-k1om"
5369 #undef ELF_ARCH
5370 #define ELF_ARCH bfd_arch_k1om
5371
5372 #undef ELF_MACHINE_CODE
5373 #define ELF_MACHINE_CODE EM_K1OM
5374
5375 #undef ELF_OSABI
5376
5377 #undef elf64_bed
5378 #define elf64_bed elf64_k1om_bed
5379
5380 #undef elf_backend_object_p
5381 #define elf_backend_object_p elf64_k1om_elf_object_p
5382
5383 #undef elf_backend_static_tls_alignment
5384
5385 #undef elf_backend_want_plt_sym
5386 #define elf_backend_want_plt_sym 0
5387
5388 #include "elf64-target.h"
5389
5390 /* FreeBSD K1OM support. */
5391
5392 #undef TARGET_LITTLE_SYM
5393 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5394 #undef TARGET_LITTLE_NAME
5395 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5396
5397 #undef ELF_OSABI
5398 #define ELF_OSABI ELFOSABI_FREEBSD
5399
5400 #undef elf64_bed
5401 #define elf64_bed elf64_k1om_fbsd_bed
5402
5403 #include "elf64-target.h"
5404
5405 /* 32bit x86-64 support. */
5406
5407 #undef TARGET_LITTLE_SYM
5408 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5409 #undef TARGET_LITTLE_NAME
5410 #define TARGET_LITTLE_NAME "elf32-x86-64"
5411 #undef elf32_bed
5412
5413 #undef ELF_ARCH
5414 #define ELF_ARCH bfd_arch_i386
5415
5416 #undef ELF_MACHINE_CODE
5417 #define ELF_MACHINE_CODE EM_X86_64
5418
5419 #undef ELF_OSABI
5420
5421 #undef elf_backend_object_p
5422 #define elf_backend_object_p \
5423 elf32_x86_64_elf_object_p
5424
5425 #undef elf_backend_bfd_from_remote_memory
5426 #define elf_backend_bfd_from_remote_memory \
5427 _bfd_elf32_bfd_from_remote_memory
5428
5429 #undef elf_backend_size_info
5430 #define elf_backend_size_info \
5431 _bfd_elf32_size_info
5432
5433 #include "elf32-target.h"