1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
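/* TRUE if TYPE is one of the direct PC-relative relocations
   (PC8/PC16/PC32/PC32_BND/PC64).  */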
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
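/* Return the howto table entry for relocation number R_TYPE, selecting
   the x32 variant of R_X86_64_32 when ABFD isn't 64-bit and folding the
   GNU vtable relocations across the gap in relocation numbers.  */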
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
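/* Fill in the core file pid, program and command line from an
   NT_PRPSINFO note.  */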
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args by some (at least one, anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
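/* Write PRPSINFO or PRSTATUS core notes, using the 32-bit (x32) or
   64-bit note layout selected by the output ELF class.  */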
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
546 works. */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
566
567 /* The first entry in a lazy procedure linkage table with BND prefix
568 looks like this. */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588 the same as the lazy PLT with BND prefix so that bound registers are
589 preserved when control is passed to the dynamic linker. Subsequent
590 entries for an IBT-enabled lazy procedure linkage table look like
591 this. */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
613 /* Entries in the non-lazy procedure linkage table look like this. */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
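/* The DW_CFA_def_cfa_expression in the FDE below computes the CFA as
   %rsp + 8, plus a further 8 once execution within a 16-byte lazy PLT
   entry has passed the pushq of the relocation index (the low bits of
   %rip are tested against the DW_OP_lit constant), so unwinding works
   from any point inside the entry.  */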
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* Architecture-specific backend data for x86-64. */
824
825 struct elf_x86_64_backend_data
826 {
827 /* Target system. */
828 enum
829 {
830 is_normal,
831 is_nacl
832 } os;
833 };
834
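/* Fetch the x86-64 specific backend data, either from the ELF backend
   data BED or from the backend attached to ABFD.  */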
835 #define get_elf_x86_64_arch_data(bed) \
836 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
837
838 #define get_elf_x86_64_backend_data(abfd) \
839 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
840
841 /* These are the standard parameters. */
842 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
843 {
844 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
845 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
846 elf_x86_64_lazy_plt_entry, /* plt_entry */
847 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
848 2, /* plt0_got1_offset */
849 8, /* plt0_got2_offset */
850 12, /* plt0_got2_insn_end */
851 2, /* plt_got_offset */
852 7, /* plt_reloc_offset */
853 12, /* plt_plt_offset */
854 6, /* plt_got_insn_size */
855 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
856 6, /* plt_lazy_offset */
857 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
858 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
859 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
860 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
861 };
862
863 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
864 {
865 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
866 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
867 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
868 2, /* plt_got_offset */
869 6, /* plt_got_insn_size */
870 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
871 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
872 };
873
874 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
875 {
876 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
877 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
878 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
879 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
880 2, /* plt0_got1_offset */
881 1+8, /* plt0_got2_offset */
882 1+12, /* plt0_got2_insn_end */
883 1+2, /* plt_got_offset */
884 1, /* plt_reloc_offset */
885 7, /* plt_plt_offset */
886 1+6, /* plt_got_insn_size */
887 11, /* plt_plt_insn_end */
888 0, /* plt_lazy_offset */
889 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
890 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
891 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
892 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
893 };
894
895 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
896 {
897 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
898 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
899 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
900 1+2, /* plt_got_offset */
901 1+6, /* plt_got_insn_size */
902 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
903 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
904 };
905
906 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
907 {
908 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
909 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
910 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
911 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
912 2, /* plt0_got1_offset */
913 1+8, /* plt0_got2_offset */
914 1+12, /* plt0_got2_insn_end */
915 4+1+2, /* plt_got_offset */
916 4+1, /* plt_reloc_offset */
917 4+1+6, /* plt_plt_offset */
918 4+1+6, /* plt_got_insn_size */
919 4+1+5+5, /* plt_plt_insn_end */
920 0, /* plt_lazy_offset */
921 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
922 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
923 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
924 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
925 };
926
927 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
928 {
929 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
930 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
931 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
932 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
933 2, /* plt0_got1_offset */
934 8, /* plt0_got2_offset */
935 12, /* plt0_got2_insn_end */
936 4+2, /* plt_got_offset */
937 4+1, /* plt_reloc_offset */
938 4+6, /* plt_plt_offset */
939 4+6, /* plt_got_insn_size */
940 4+5+5, /* plt_plt_insn_end */
941 0, /* plt_lazy_offset */
942 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
943 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
944 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
945 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
946 };
947
948 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
949 {
950 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
951 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
952 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
953 4+1+2, /* plt_got_offset */
954 4+1+6, /* plt_got_insn_size */
955 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
956 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
957 };
958
959 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
960 {
961 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
962 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
963 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
964 4+2, /* plt_got_offset */
965 4+6, /* plt_got_insn_size */
966 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
967 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
968 };
969
970 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
971 {
972 is_normal /* os */
973 };
974
975 #define elf_backend_arch_data &elf_x86_64_arch_bed
976
977 static bfd_boolean
978 elf64_x86_64_elf_object_p (bfd *abfd)
979 {
980 /* Set the right machine number for an x86-64 elf64 file. */
981 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
982 return TRUE;
983 }
984
985 static bfd_boolean
986 elf32_x86_64_elf_object_p (bfd *abfd)
987 {
988 /* Set the right machine number for an x86-64 elf32 file. */
989 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
990 return TRUE;
991 }
992
993 /* Return TRUE if the TLS access code sequence supports the transition
994 from R_TYPE. */
995
996 static bfd_boolean
997 elf_x86_64_check_tls_transition (bfd *abfd,
998 struct bfd_link_info *info,
999 asection *sec,
1000 bfd_byte *contents,
1001 Elf_Internal_Shdr *symtab_hdr,
1002 struct elf_link_hash_entry **sym_hashes,
1003 unsigned int r_type,
1004 const Elf_Internal_Rela *rel,
1005 const Elf_Internal_Rela *relend)
1006 {
1007 unsigned int val;
1008 unsigned long r_symndx;
1009 bfd_boolean largepic = FALSE;
1010 struct elf_link_hash_entry *h;
1011 bfd_vma offset;
1012 struct elf_x86_link_hash_table *htab;
1013 bfd_byte *call;
1014 bfd_boolean indirect_call;
1015
1016 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1017 offset = rel->r_offset;
1018 switch (r_type)
1019 {
1020 case R_X86_64_TLSGD:
1021 case R_X86_64_TLSLD:
1022 if ((rel + 1) >= relend)
1023 return FALSE;
1024
1025 if (r_type == R_X86_64_TLSGD)
1026 {
1027 /* Check transition from GD access model. For 64bit, only
1028 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1029 .word 0x6666; rex64; call __tls_get_addr@PLT
1030 or
1031 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1032 .byte 0x66; rex64
1033 call *__tls_get_addr@GOTPCREL(%rip)
1034 which may be converted to
1035 addr32 call __tls_get_addr
1036 can transition to a different access model. For 32bit, only
1037 leaq foo@tlsgd(%rip), %rdi
1038 .word 0x6666; rex64; call __tls_get_addr@PLT
1039 or
1040 leaq foo@tlsgd(%rip), %rdi
1041 .byte 0x66; rex64
1042 call *__tls_get_addr@GOTPCREL(%rip)
1043 which may be converted to
1044 addr32 call __tls_get_addr
1045 can transition to a different access model. For largepic,
1046 we also support:
1047 leaq foo@tlsgd(%rip), %rdi
1048 movabsq $__tls_get_addr@pltoff, %rax
1049 addq %r15, %rax
1050 call *%rax
1051 or
1052 leaq foo@tlsgd(%rip), %rdi
1053 movabsq $__tls_get_addr@pltoff, %rax
1054 addq %rbx, %rax
1055 call *%rax */
1056
1057 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1058
1059 if ((offset + 12) > sec->size)
1060 return FALSE;
1061
1062 call = contents + offset + 4;
1063 if (call[0] != 0x66
1064 || !((call[1] == 0x48
1065 && call[2] == 0xff
1066 && call[3] == 0x15)
1067 || (call[1] == 0x48
1068 && call[2] == 0x67
1069 && call[3] == 0xe8)
1070 || (call[1] == 0x66
1071 && call[2] == 0x48
1072 && call[3] == 0xe8)))
1073 {
1074 if (!ABI_64_P (abfd)
1075 || (offset + 19) > sec->size
1076 || offset < 3
1077 || memcmp (call - 7, leaq + 1, 3) != 0
1078 || memcmp (call, "\x48\xb8", 2) != 0
1079 || call[11] != 0x01
1080 || call[13] != 0xff
1081 || call[14] != 0xd0
1082 || !((call[10] == 0x48 && call[12] == 0xd8)
1083 || (call[10] == 0x4c && call[12] == 0xf8)))
1084 return FALSE;
1085 largepic = TRUE;
1086 }
1087 else if (ABI_64_P (abfd))
1088 {
1089 if (offset < 4
1090 || memcmp (contents + offset - 4, leaq, 4) != 0)
1091 return FALSE;
1092 }
1093 else
1094 {
1095 if (offset < 3
1096 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1097 return FALSE;
1098 }
1099 indirect_call = call[2] == 0xff;
1100 }
1101 else
1102 {
1103 /* Check transition from LD access model. Only
1104 leaq foo@tlsld(%rip), %rdi;
1105 call __tls_get_addr@PLT
1106 or
1107 leaq foo@tlsld(%rip), %rdi;
1108 call *__tls_get_addr@GOTPCREL(%rip)
1109 which may be converted to
1110 addr32 call __tls_get_addr
1111 can transition to a different access model. For largepic,
1112 we also support:
1113 leaq foo@tlsld(%rip), %rdi
1114 movabsq $__tls_get_addr@pltoff, %rax
1115 addq %r15, %rax
1116 call *%rax
1117 or
1118 leaq foo@tlsld(%rip), %rdi
1119 movabsq $__tls_get_addr@pltoff, %rax
1120 addq %rbx, %rax
1121 call *%rax */
1122
1123 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1124
1125 if (offset < 3 || (offset + 9) > sec->size)
1126 return FALSE;
1127
1128 if (memcmp (contents + offset - 3, lea, 3) != 0)
1129 return FALSE;
1130
1131 call = contents + offset + 4;
1132 if (!(call[0] == 0xe8
1133 || (call[0] == 0xff && call[1] == 0x15)
1134 || (call[0] == 0x67 && call[1] == 0xe8)))
1135 {
1136 if (!ABI_64_P (abfd)
1137 || (offset + 19) > sec->size
1138 || memcmp (call, "\x48\xb8", 2) != 0
1139 || call[11] != 0x01
1140 || call[13] != 0xff
1141 || call[14] != 0xd0
1142 || !((call[10] == 0x48 && call[12] == 0xd8)
1143 || (call[10] == 0x4c && call[12] == 0xf8)))
1144 return FALSE;
1145 largepic = TRUE;
1146 }
1147 indirect_call = call[0] == 0xff;
1148 }
1149
1150 r_symndx = htab->r_sym (rel[1].r_info);
1151 if (r_symndx < symtab_hdr->sh_info)
1152 return FALSE;
1153
1154 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1155 if (h == NULL
1156 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1157 return FALSE;
1158 else
1159 {
1160 r_type = (ELF32_R_TYPE (rel[1].r_info)
1161 & ~R_X86_64_converted_reloc_bit);
1162 if (largepic)
1163 return r_type == R_X86_64_PLTOFF64;
1164 else if (indirect_call)
1165 return r_type == R_X86_64_GOTPCRELX;
1166 else
1167 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1168 }
1169
1170 case R_X86_64_GOTTPOFF:
1171 /* Check transition from IE access model:
1172 mov foo@gottpoff(%rip), %reg
1173 add foo@gottpoff(%rip), %reg
1174 */
1175
1176 /* Check REX prefix first. */
1177 if (offset >= 3 && (offset + 4) <= sec->size)
1178 {
1179 val = bfd_get_8 (abfd, contents + offset - 3);
1180 if (val != 0x48 && val != 0x4c)
1181 {
1182 /* X32 may have 0x44 REX prefix or no REX prefix. */
1183 if (ABI_64_P (abfd))
1184 return FALSE;
1185 }
1186 }
1187 else
1188 {
1189 /* X32 may not have any REX prefix. */
1190 if (ABI_64_P (abfd))
1191 return FALSE;
1192 if (offset < 2 || (offset + 3) > sec->size)
1193 return FALSE;
1194 }
1195
1196 val = bfd_get_8 (abfd, contents + offset - 2);
1197 if (val != 0x8b && val != 0x03)
1198 return FALSE;
1199
1200 val = bfd_get_8 (abfd, contents + offset - 1);
1201 return (val & 0xc7) == 5;
1202
1203 case R_X86_64_GOTPC32_TLSDESC:
1204 /* Check transition from GDesc access model:
1205 leaq x@tlsdesc(%rip), %rax
1206
1207 Make sure it's a leaq adding rip to a 32-bit offset
1208 into any register, although it's probably almost always
1209 going to be rax. */
1210
1211 if (offset < 3 || (offset + 4) > sec->size)
1212 return FALSE;
1213
1214 val = bfd_get_8 (abfd, contents + offset - 3);
1215 if ((val & 0xfb) != 0x48)
1216 return FALSE;
1217
1218 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 0x05;
1223
1224 case R_X86_64_TLSDESC_CALL:
1225 /* Check transition from GDesc access model:
1226 call *x@tlsdesc(%rax)
1227 */
1228 if (offset + 2 <= sec->size)
1229 {
1230 /* Make sure that it's a call *x@tlsdesc(%rax). */
1231 call = contents + offset;
1232 return call[0] == 0xff && call[1] == 0x10;
1233 }
1234
1235 return FALSE;
1236
1237 default:
1238 abort ();
1239 }
1240 }
1241
1242 /* Return TRUE if the TLS access transition is OK or no transition
1243 will be performed. Update R_TYPE if there is a transition. */
1244
1245 static bfd_boolean
1246 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1247 asection *sec, bfd_byte *contents,
1248 Elf_Internal_Shdr *symtab_hdr,
1249 struct elf_link_hash_entry **sym_hashes,
1250 unsigned int *r_type, int tls_type,
1251 const Elf_Internal_Rela *rel,
1252 const Elf_Internal_Rela *relend,
1253 struct elf_link_hash_entry *h,
1254 unsigned long r_symndx,
1255 bfd_boolean from_relocate_section)
1256 {
1257 unsigned int from_type = *r_type;
1258 unsigned int to_type = from_type;
1259 bfd_boolean check = TRUE;
1260
1261 /* Skip TLS transition for functions. */
1262 if (h != NULL
1263 && (h->type == STT_FUNC
1264 || h->type == STT_GNU_IFUNC))
1265 return TRUE;
1266
1267 switch (from_type)
1268 {
1269 case R_X86_64_TLSGD:
1270 case R_X86_64_GOTPC32_TLSDESC:
1271 case R_X86_64_TLSDESC_CALL:
1272 case R_X86_64_GOTTPOFF:
1273 if (bfd_link_executable (info))
1274 {
1275 if (h == NULL)
1276 to_type = R_X86_64_TPOFF32;
1277 else
1278 to_type = R_X86_64_GOTTPOFF;
1279 }
1280
1281 /* When we are called from elf_x86_64_relocate_section, there may
1282 be additional transitions based on TLS_TYPE. */
1283 if (from_relocate_section)
1284 {
1285 unsigned int new_to_type = to_type;
1286
1287 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1288 new_to_type = R_X86_64_TPOFF32;
1289
1290 if (to_type == R_X86_64_TLSGD
1291 || to_type == R_X86_64_GOTPC32_TLSDESC
1292 || to_type == R_X86_64_TLSDESC_CALL)
1293 {
1294 if (tls_type == GOT_TLS_IE)
1295 new_to_type = R_X86_64_GOTTPOFF;
1296 }
1297
1298 /* We checked the transition before when we were called from
1299 elf_x86_64_check_relocs. We only want to check the new
1300 transition which hasn't been checked before. */
1301 check = new_to_type != to_type && from_type == to_type;
1302 to_type = new_to_type;
1303 }
1304
1305 break;
1306
1307 case R_X86_64_TLSLD:
1308 if (bfd_link_executable (info))
1309 to_type = R_X86_64_TPOFF32;
1310 break;
1311
1312 default:
1313 return TRUE;
1314 }
1315
1316 /* Return TRUE if there is no transition. */
1317 if (from_type == to_type)
1318 return TRUE;
1319
1320 /* Check if the transition can be performed. */
1321 if (check
1322 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1323 symtab_hdr, sym_hashes,
1324 from_type, rel, relend))
1325 {
1326 reloc_howto_type *from, *to;
1327 const char *name;
1328
1329 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1330 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1331
1332 if (h)
1333 name = h->root.root.string;
1334 else
1335 {
1336 struct elf_x86_link_hash_table *htab;
1337
1338 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1339 if (htab == NULL)
1340 name = "*unknown*";
1341 else
1342 {
1343 Elf_Internal_Sym *isym;
1344
1345 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1346 abfd, r_symndx);
1347 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1348 }
1349 }
1350
1351 _bfd_error_handler
1352 /* xgettext:c-format */
1353 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1354 "in section `%A' failed"),
1355 abfd, from->name, to->name, name, rel->r_offset, sec);
1356 bfd_set_error (bfd_error_bad_value);
1357 return FALSE;
1358 }
1359
1360 *r_type = to_type;
1361 return TRUE;
1362 }
1363
1364 /* Rename some of the generic section flags to better document how they
1365 are used here. */
1366 #define check_relocs_failed sec_flg0
1367
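/* Report a relocation that cannot be used when making a shared object,
   PIE or PDE, mark the section so the failure is remembered, and return
   FALSE.  */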
1368 static bfd_boolean
1369 elf_x86_64_need_pic (struct bfd_link_info *info,
1370 bfd *input_bfd, asection *sec,
1371 struct elf_link_hash_entry *h,
1372 Elf_Internal_Shdr *symtab_hdr,
1373 Elf_Internal_Sym *isym,
1374 reloc_howto_type *howto)
1375 {
1376 const char *v = "";
1377 const char *und = "";
1378 const char *pic = "";
1379 const char *object;
1380
1381 const char *name;
1382 if (h)
1383 {
1384 name = h->root.root.string;
1385 switch (ELF_ST_VISIBILITY (h->other))
1386 {
1387 case STV_HIDDEN:
1388 v = _("hidden symbol ");
1389 break;
1390 case STV_INTERNAL:
1391 v = _("internal symbol ");
1392 break;
1393 case STV_PROTECTED:
1394 v = _("protected symbol ");
1395 break;
1396 default:
1397 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1398 v = _("protected symbol ");
1399 else
1400 v = _("symbol ");
1401 pic = _("; recompile with -fPIC");
1402 break;
1403 }
1404
1405 if (!h->def_regular && !h->def_dynamic)
1406 und = _("undefined ");
1407 }
1408 else
1409 {
1410 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1411 pic = _("; recompile with -fPIC");
1412 }
1413
1414 if (bfd_link_dll (info))
1415 object = _("a shared object");
1416 else if (bfd_link_pie (info))
1417 object = _("a PIE object");
1418 else
1419 object = _("a PDE object");
1420
1421 /* xgettext:c-format */
1422 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1423 "not be used when making %s%s"),
1424 input_bfd, howto->name, und, v, name,
1425 object, pic);
1426 bfd_set_error (bfd_error_bad_value);
1427 sec->check_relocs_failed = 1;
1428 return FALSE;
1429 }
1430
1431 /* With the local symbol, foo, we convert
1432 mov foo@GOTPCREL(%rip), %reg
1433 to
1434 lea foo(%rip), %reg
1435 and convert
1436 call/jmp *foo@GOTPCREL(%rip)
1437 to
1438 nop call foo/jmp foo nop
1439 When PIC is false, convert
1440 test %reg, foo@GOTPCREL(%rip)
1441 to
1442 test $foo, %reg
1443 and convert
1444 binop foo@GOTPCREL(%rip), %reg
1445 to
1446 binop $foo, %reg
1447 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1448 instructions. */
1449
1450 static bfd_boolean
1451 elf_x86_64_convert_load_reloc (bfd *abfd,
1452 bfd_byte *contents,
1453 unsigned int *r_type_p,
1454 Elf_Internal_Rela *irel,
1455 struct elf_link_hash_entry *h,
1456 bfd_boolean *converted,
1457 struct bfd_link_info *link_info)
1458 {
1459 struct elf_x86_link_hash_table *htab;
1460 bfd_boolean is_pic;
1461 bfd_boolean no_overflow;
1462 bfd_boolean relocx;
1463 bfd_boolean to_reloc_pc32;
1464 asection *tsec;
1465 bfd_signed_vma raddend;
1466 unsigned int opcode;
1467 unsigned int modrm;
1468 unsigned int r_type = *r_type_p;
1469 unsigned int r_symndx;
1470 bfd_vma roff = irel->r_offset;
1471
1472 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1473 return TRUE;
1474
1475 raddend = irel->r_addend;
1476 /* Addend for 32-bit PC-relative relocation must be -4. */
1477 if (raddend != -4)
1478 return TRUE;
1479
1480 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1481 is_pic = bfd_link_pic (link_info);
1482
1483 relocx = (r_type == R_X86_64_GOTPCRELX
1484 || r_type == R_X86_64_REX_GOTPCRELX);
1485
1486 /* TRUE if --no-relax is used. */
1487 no_overflow = link_info->disable_target_specific_optimizations > 1;
1488
1489 r_symndx = htab->r_sym (irel->r_info);
1490
1491 opcode = bfd_get_8 (abfd, contents + roff - 2);
1492
1493 /* mov is converted to lea even for plain R_X86_64_GOTPCREL, since that conversion has been done for a while. */
1494 if (opcode != 0x8b)
1495 {
1496 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1497 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1498 test, xor instructions. */
1499 if (!relocx)
1500 return TRUE;
1501 }
1502
1503 /* We convert only to R_X86_64_PC32:
1504 1. Branch.
1505 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1506 3. no_overflow is true.
1507 4. PIC.
1508 */
1509 to_reloc_pc32 = (opcode == 0xff
1510 || !relocx
1511 || no_overflow
1512 || is_pic);
1513
1514 /* Get the symbol referred to by the reloc. */
1515 if (h == NULL)
1516 {
1517 Elf_Internal_Sym *isym
1518 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1519
1520 /* Skip relocation against undefined symbols. */
1521 if (isym->st_shndx == SHN_UNDEF)
1522 return TRUE;
1523
1524 if (isym->st_shndx == SHN_ABS)
1525 tsec = bfd_abs_section_ptr;
1526 else if (isym->st_shndx == SHN_COMMON)
1527 tsec = bfd_com_section_ptr;
1528 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1529 tsec = &_bfd_elf_large_com_section;
1530 else
1531 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1532 }
1533 else
1534 {
1535 /* An undefined weak symbol is only bound locally in an executable
1536 and its reference is resolved as 0 without relocation
1537 overflow. We can only perform this optimization for
1538 GOTPCRELX relocations since we need to modify the REX byte.
1539 It is OK to convert mov with R_X86_64_GOTPCREL to
1540 R_X86_64_PC32. */
1541 bfd_boolean local_ref;
1542 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1543
1544 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1545 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1546 if ((relocx || opcode == 0x8b)
1547 && (h->root.type == bfd_link_hash_undefweak
1548 && !eh->linker_def
1549 && local_ref))
1550 {
1551 if (opcode == 0xff)
1552 {
1553 /* Skip for branch instructions since R_X86_64_PC32
1554 may overflow. */
1555 if (no_overflow)
1556 return TRUE;
1557 }
1558 else if (relocx)
1559 {
1560 /* For non-branch instructions, we can convert to
1561 R_X86_64_32/R_X86_64_32S since we know if there
1562 is a REX byte. */
1563 to_reloc_pc32 = FALSE;
1564 }
1565
1566 /* Since we don't know the current PC when PIC is true,
1567 we can't convert to R_X86_64_PC32. */
1568 if (to_reloc_pc32 && is_pic)
1569 return TRUE;
1570
1571 goto convert;
1572 }
1573 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1574 ld.so may use its link-time address. */
1575 else if (h->start_stop
1576 || eh->linker_def
1577 || ((h->def_regular
1578 || h->root.type == bfd_link_hash_defined
1579 || h->root.type == bfd_link_hash_defweak)
1580 && h != htab->elf.hdynamic
1581 && local_ref))
1582 {
1583 /* bfd_link_hash_new or bfd_link_hash_undefined is
1584 set by an assignment in a linker script in
1585 bfd_elf_record_link_assignment. start_stop is set
1586 on __start_SECNAME/__stop_SECNAME which mark section
1587 SECNAME. */
1588 if (h->start_stop
1589 || eh->linker_def
1590 || (h->def_regular
1591 && (h->root.type == bfd_link_hash_new
1592 || h->root.type == bfd_link_hash_undefined
1593 || ((h->root.type == bfd_link_hash_defined
1594 || h->root.type == bfd_link_hash_defweak)
1595 && h->root.u.def.section == bfd_und_section_ptr))))
1596 {
1597 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1598 if (no_overflow)
1599 return TRUE;
1600 goto convert;
1601 }
1602 tsec = h->root.u.def.section;
1603 }
1604 else
1605 return TRUE;
1606 }
1607
1608 /* Don't convert GOTPCREL relocation against large section. */
1609 if (elf_section_data (tsec) != NULL
1610 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1611 return TRUE;
1612
1613 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1614 if (no_overflow)
1615 return TRUE;
1616
1617 convert:
1618 if (opcode == 0xff)
1619 {
1620 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1621 unsigned int nop;
1622 unsigned int disp;
1623 bfd_vma nop_offset;
1624
1625 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1626 R_X86_64_PC32. */
1627 modrm = bfd_get_8 (abfd, contents + roff - 1);
1628 if (modrm == 0x25)
1629 {
1630 /* Convert to "jmp foo nop". */
1631 modrm = 0xe9;
1632 nop = NOP_OPCODE;
1633 nop_offset = irel->r_offset + 3;
1634 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1635 irel->r_offset -= 1;
1636 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1637 }
1638 else
1639 {
1640 struct elf_x86_link_hash_entry *eh
1641 = (struct elf_x86_link_hash_entry *) h;
1642
1643 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1644 is a nop prefix. */
1645 modrm = 0xe8;
1646 /* To support TLS optimization, always use addr32 prefix for
1647 "call *__tls_get_addr@GOTPCREL(%rip)". */
1648 if (eh && eh->tls_get_addr)
1649 {
1650 nop = 0x67;
1651 nop_offset = irel->r_offset - 2;
1652 }
1653 else
1654 {
1655 nop = link_info->call_nop_byte;
1656 if (link_info->call_nop_as_suffix)
1657 {
1658 nop_offset = irel->r_offset + 3;
1659 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1660 irel->r_offset -= 1;
1661 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1662 }
1663 else
1664 nop_offset = irel->r_offset - 2;
1665 }
1666 }
1667 bfd_put_8 (abfd, nop, contents + nop_offset);
1668 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1669 r_type = R_X86_64_PC32;
1670 }
1671 else
1672 {
1673 unsigned int rex;
1674 unsigned int rex_mask = REX_R;
1675
1676 if (r_type == R_X86_64_REX_GOTPCRELX)
1677 rex = bfd_get_8 (abfd, contents + roff - 3);
1678 else
1679 rex = 0;
1680
1681 if (opcode == 0x8b)
1682 {
1683 if (to_reloc_pc32)
1684 {
1685 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1686 "lea foo(%rip), %reg". */
1687 opcode = 0x8d;
1688 r_type = R_X86_64_PC32;
1689 }
1690 else
1691 {
1692 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1693 "mov $foo, %reg". */
1694 opcode = 0xc7;
1695 modrm = bfd_get_8 (abfd, contents + roff - 1);
1696 modrm = 0xc0 | (modrm & 0x38) >> 3;
1697 if ((rex & REX_W) != 0
1698 && ABI_64_P (link_info->output_bfd))
1699 {
1700 /* Keep the REX_W bit in REX byte for LP64. */
1701 r_type = R_X86_64_32S;
1702 goto rewrite_modrm_rex;
1703 }
1704 else
1705 {
1706 /* If the REX_W bit in REX byte isn't needed,
1707 use R_X86_64_32 and clear the W bit to avoid
1708 sign-extend imm32 to imm64. */
1709 r_type = R_X86_64_32;
1710 /* Clear the W bit in REX byte. */
1711 rex_mask |= REX_W;
1712 goto rewrite_modrm_rex;
1713 }
1714 }
1715 }
1716 else
1717 {
1718 /* R_X86_64_PC32 isn't supported. */
1719 if (to_reloc_pc32)
1720 return TRUE;
1721
1722 modrm = bfd_get_8 (abfd, contents + roff - 1);
1723 if (opcode == 0x85)
1724 {
1725 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1726 "test $foo, %reg". */
1727 modrm = 0xc0 | (modrm & 0x38) >> 3;
1728 opcode = 0xf7;
1729 }
1730 else
1731 {
1732 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1733 "binop $foo, %reg". */
1734 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1735 opcode = 0x81;
1736 }
1737
1738 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1739 overflow when sign-extending imm32 to imm64. */
1740 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1741
1742 rewrite_modrm_rex:
1743 bfd_put_8 (abfd, modrm, contents + roff - 1);
1744
1745 if (rex)
1746 {
1747 /* Move the R bit to the B bit in REX byte. */
1748 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1749 bfd_put_8 (abfd, rex, contents + roff - 3);
1750 }
1751
1752 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1753 irel->r_addend = 0;
1754 }
1755
1756 bfd_put_8 (abfd, opcode, contents + roff - 2);
1757 }
1758
1759 *r_type_p = r_type;
1760 irel->r_info = htab->r_info (r_symndx,
1761 r_type | R_X86_64_converted_reloc_bit);
1762
1763 *converted = TRUE;
1764
1765 return TRUE;
1766 }
1767
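/* Illustrative example (not part of the original source): when the
   reference can be converted to a PC-relative one, the load

       48 8b 05 00 00 00 00	movq   foo@GOTPCREL(%rip), %rax

   is rewritten in place as

       48 8d 05 00 00 00 00	leaq   foo(%rip), %rax

   (opcode 0x8b -> 0x8d, relocation retyped to R_X86_64_PC32), while

       ff 15 00 00 00 00	call   *foo@GOTPCREL(%rip)

   becomes a direct "call foo", with the byte freed by dropping the
   ModRM byte filled by an addr32 prefix or the configured NOP.  */
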
1768 /* Look through the relocs for a section during the first phase, and
1769 calculate needed space in the global offset table, procedure
1770 linkage table, and dynamic reloc sections. */
1771
1772 static bfd_boolean
1773 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1774 asection *sec,
1775 const Elf_Internal_Rela *relocs)
1776 {
1777 struct elf_x86_link_hash_table *htab;
1778 Elf_Internal_Shdr *symtab_hdr;
1779 struct elf_link_hash_entry **sym_hashes;
1780 const Elf_Internal_Rela *rel;
1781 const Elf_Internal_Rela *rel_end;
1782 asection *sreloc;
1783 bfd_byte *contents;
1784 bfd_boolean converted;
1785
1786 if (bfd_link_relocatable (info))
1787 return TRUE;
1788
1789 /* Don't do anything special with non-loaded, non-alloced sections.
1790 In particular, any relocs in such sections should not affect GOT
1791 and PLT reference counting (ie. we don't allow them to create GOT
1792 or PLT entries), there's no possibility or desire to optimize TLS
1793 relocs, and there's not much point in propagating relocs to shared
1794 libs that the dynamic linker won't relocate. */
1795 if ((sec->flags & SEC_ALLOC) == 0)
1796 return TRUE;
1797
1798 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1799 if (htab == NULL)
1800 {
1801 sec->check_relocs_failed = 1;
1802 return FALSE;
1803 }
1804
1805 BFD_ASSERT (is_x86_elf (abfd, htab));
1806
1807 /* Get the section contents. */
1808 if (elf_section_data (sec)->this_hdr.contents != NULL)
1809 contents = elf_section_data (sec)->this_hdr.contents;
1810 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1811 {
1812 sec->check_relocs_failed = 1;
1813 return FALSE;
1814 }
1815
1816 symtab_hdr = &elf_symtab_hdr (abfd);
1817 sym_hashes = elf_sym_hashes (abfd);
1818
1819 converted = FALSE;
1820
1821 sreloc = NULL;
1822
1823 rel_end = relocs + sec->reloc_count;
1824 for (rel = relocs; rel < rel_end; rel++)
1825 {
1826 unsigned int r_type;
1827 unsigned int r_symndx;
1828 struct elf_link_hash_entry *h;
1829 struct elf_x86_link_hash_entry *eh;
1830 Elf_Internal_Sym *isym;
1831 const char *name;
1832 bfd_boolean size_reloc;
1833 bfd_boolean converted_reloc;
1834
1835 r_symndx = htab->r_sym (rel->r_info);
1836 r_type = ELF32_R_TYPE (rel->r_info);
1837
1838 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1839 {
1840 /* xgettext:c-format */
1841 _bfd_error_handler (_("%B: bad symbol index: %d"),
1842 abfd, r_symndx);
1843 goto error_return;
1844 }
1845
1846 if (r_symndx < symtab_hdr->sh_info)
1847 {
1848 /* A local symbol. */
1849 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1850 abfd, r_symndx);
1851 if (isym == NULL)
1852 goto error_return;
1853
1854 /* Check relocation against local STT_GNU_IFUNC symbol. */
1855 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1856 {
1857 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1858 TRUE);
1859 if (h == NULL)
1860 goto error_return;
1861
1862 /* Fake a STT_GNU_IFUNC symbol. */
1863 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1864 isym, NULL);
1865 h->type = STT_GNU_IFUNC;
1866 h->def_regular = 1;
1867 h->ref_regular = 1;
1868 h->forced_local = 1;
1869 h->root.type = bfd_link_hash_defined;
1870 }
1871 else
1872 h = NULL;
1873 }
1874 else
1875 {
1876 isym = NULL;
1877 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1878 while (h->root.type == bfd_link_hash_indirect
1879 || h->root.type == bfd_link_hash_warning)
1880 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1881 }
1882
1883 /* Check invalid x32 relocations. */
1884 if (!ABI_64_P (abfd))
1885 switch (r_type)
1886 {
1887 default:
1888 break;
1889
1890 case R_X86_64_DTPOFF64:
1891 case R_X86_64_TPOFF64:
1892 case R_X86_64_PC64:
1893 case R_X86_64_GOTOFF64:
1894 case R_X86_64_GOT64:
1895 case R_X86_64_GOTPCREL64:
1896 case R_X86_64_GOTPC64:
1897 case R_X86_64_GOTPLT64:
1898 case R_X86_64_PLTOFF64:
1899 {
1900 if (h)
1901 name = h->root.root.string;
1902 else
1903 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1904 NULL);
1905 _bfd_error_handler
1906 /* xgettext:c-format */
1907 (_("%B: relocation %s against symbol `%s' isn't "
1908 "supported in x32 mode"), abfd,
1909 x86_64_elf_howto_table[r_type].name, name);
1910 bfd_set_error (bfd_error_bad_value);
1911 goto error_return;
1912 }
1913 break;
1914 }
1915
1916 if (h != NULL)
1917 {
1918 /* It is referenced by a non-shared object. */
1919 h->ref_regular = 1;
1920 h->root.non_ir_ref_regular = 1;
1921
1922 if (h->type == STT_GNU_IFUNC)
1923 elf_tdata (info->output_bfd)->has_gnu_symbols
1924 |= elf_gnu_symbol_ifunc;
1925 }
1926
1927 converted_reloc = FALSE;
1928 if ((r_type == R_X86_64_GOTPCREL
1929 || r_type == R_X86_64_GOTPCRELX
1930 || r_type == R_X86_64_REX_GOTPCRELX)
1931 && (h == NULL || h->type != STT_GNU_IFUNC))
1932 {
1933 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1934 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1935 irel, h, &converted_reloc,
1936 info))
1937 goto error_return;
1938
1939 if (converted_reloc)
1940 converted = TRUE;
1941 }
1942
1943 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1944 symtab_hdr, sym_hashes,
1945 &r_type, GOT_UNKNOWN,
1946 rel, rel_end, h, r_symndx, FALSE))
1947 goto error_return;
1948
1949 eh = (struct elf_x86_link_hash_entry *) h;
1950 switch (r_type)
1951 {
1952 case R_X86_64_TLSLD:
1953 htab->tls_ld_or_ldm_got.refcount += 1;
1954 goto create_got;
1955
1956 case R_X86_64_TPOFF32:
1957 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1958 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1959 &x86_64_elf_howto_table[r_type]);
1960 if (eh != NULL)
1961 eh->zero_undefweak &= 0x2;
1962 break;
1963
1964 case R_X86_64_GOTTPOFF:
1965 if (!bfd_link_executable (info))
1966 info->flags |= DF_STATIC_TLS;
1967 /* Fall through */
1968
1969 case R_X86_64_GOT32:
1970 case R_X86_64_GOTPCREL:
1971 case R_X86_64_GOTPCRELX:
1972 case R_X86_64_REX_GOTPCRELX:
1973 case R_X86_64_TLSGD:
1974 case R_X86_64_GOT64:
1975 case R_X86_64_GOTPCREL64:
1976 case R_X86_64_GOTPLT64:
1977 case R_X86_64_GOTPC32_TLSDESC:
1978 case R_X86_64_TLSDESC_CALL:
1979 /* This symbol requires a global offset table entry. */
1980 {
1981 int tls_type, old_tls_type;
1982
1983 switch (r_type)
1984 {
1985 default: tls_type = GOT_NORMAL; break;
1986 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1987 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1988 case R_X86_64_GOTPC32_TLSDESC:
1989 case R_X86_64_TLSDESC_CALL:
1990 tls_type = GOT_TLS_GDESC; break;
1991 }
1992
1993 if (h != NULL)
1994 {
1995 h->got.refcount += 1;
1996 old_tls_type = eh->tls_type;
1997 }
1998 else
1999 {
2000 bfd_signed_vma *local_got_refcounts;
2001
2002 /* This is a global offset table entry for a local symbol. */
2003 local_got_refcounts = elf_local_got_refcounts (abfd);
2004 if (local_got_refcounts == NULL)
2005 {
2006 bfd_size_type size;
2007
2008 size = symtab_hdr->sh_info;
2009 size *= sizeof (bfd_signed_vma)
2010 + sizeof (bfd_vma) + sizeof (char);
2011 local_got_refcounts = ((bfd_signed_vma *)
2012 bfd_zalloc (abfd, size));
2013 if (local_got_refcounts == NULL)
2014 goto error_return;
2015 elf_local_got_refcounts (abfd) = local_got_refcounts;
2016 elf_x86_local_tlsdesc_gotent (abfd)
2017 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2018 elf_x86_local_got_tls_type (abfd)
2019 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2020 }
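	      /* Layout of the single block allocated above (a sketch,
		 not part of the original source), with N
		 == symtab_hdr->sh_info:
		   [0, N)    bfd_signed_vma  GOT reference counts
		   [N, 2N)   bfd_vma         TLSDESC GOT entry offsets
		   [2N, 3N)  char            per-symbol TLS types  */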
2021 local_got_refcounts[r_symndx] += 1;
2022 old_tls_type
2023 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2024 }
2025
2026 /* If a TLS symbol is accessed using IE at least once,
2027 	     there is no point in using the dynamic model for it.  */
2028 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2029 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2030 || tls_type != GOT_TLS_IE))
2031 {
2032 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2033 tls_type = old_tls_type;
2034 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2035 && GOT_TLS_GD_ANY_P (tls_type))
2036 tls_type |= old_tls_type;
2037 else
2038 {
2039 if (h)
2040 name = h->root.root.string;
2041 else
2042 name = bfd_elf_sym_name (abfd, symtab_hdr,
2043 isym, NULL);
2044 _bfd_error_handler
2045 /* xgettext:c-format */
2046 (_("%B: '%s' accessed both as normal and"
2047 " thread local symbol"),
2048 abfd, name);
2049 bfd_set_error (bfd_error_bad_value);
2050 goto error_return;
2051 }
2052 }
2053
2054 if (old_tls_type != tls_type)
2055 {
2056 if (eh != NULL)
2057 eh->tls_type = tls_type;
2058 else
2059 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2060 }
2061 }
2062 /* Fall through */
2063
2064 case R_X86_64_GOTOFF64:
2065 case R_X86_64_GOTPC32:
2066 case R_X86_64_GOTPC64:
2067 create_got:
2068 if (eh != NULL)
2069 eh->zero_undefweak &= 0x2;
2070 break;
2071
2072 case R_X86_64_PLT32:
2073 case R_X86_64_PLT32_BND:
2074 /* This symbol requires a procedure linkage table entry. We
2075 actually build the entry in adjust_dynamic_symbol,
2076 because this might be a case of linking PIC code which is
2077 never referenced by a dynamic object, in which case we
2078 don't need to generate a procedure linkage table entry
2079 after all. */
2080
2081 /* If this is a local symbol, we resolve it directly without
2082 creating a procedure linkage table entry. */
2083 if (h == NULL)
2084 continue;
2085
2086 eh->zero_undefweak &= 0x2;
2087 h->needs_plt = 1;
2088 h->plt.refcount += 1;
2089 break;
2090
2091 case R_X86_64_PLTOFF64:
2092 /* This tries to form the 'address' of a function relative
2093 to GOT. For global symbols we need a PLT entry. */
2094 if (h != NULL)
2095 {
2096 h->needs_plt = 1;
2097 h->plt.refcount += 1;
2098 }
2099 goto create_got;
2100
2101 case R_X86_64_SIZE32:
2102 case R_X86_64_SIZE64:
2103 size_reloc = TRUE;
2104 goto do_size;
2105
2106 case R_X86_64_32:
2107 if (!ABI_64_P (abfd))
2108 goto pointer;
2109 /* Fall through. */
2110 case R_X86_64_8:
2111 case R_X86_64_16:
2112 case R_X86_64_32S:
2113 /* Check relocation overflow as these relocs may lead to
2114 run-time relocation overflow. Don't error out for
2115 sections we don't care about, such as debug sections or
2116 when relocation overflow check is disabled. */
2117 if (!info->no_reloc_overflow_check
2118 && !converted_reloc
2119 && (bfd_link_pic (info)
2120 || (bfd_link_executable (info)
2121 && h != NULL
2122 && !h->def_regular
2123 && h->def_dynamic
2124 && (sec->flags & SEC_READONLY) == 0)))
2125 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2126 &x86_64_elf_howto_table[r_type]);
2127 /* Fall through. */
2128
2129 case R_X86_64_PC8:
2130 case R_X86_64_PC16:
2131 case R_X86_64_PC32:
2132 case R_X86_64_PC32_BND:
2133 case R_X86_64_PC64:
2134 case R_X86_64_64:
2135 pointer:
2136 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2137 eh->zero_undefweak |= 0x2;
2138 /* We are called after all symbols have been resolved. Only
2139 	   relocations against STT_GNU_IFUNC symbols must go through
2140 	   the PLT.  */
2141 if (h != NULL
2142 && (bfd_link_executable (info)
2143 || h->type == STT_GNU_IFUNC))
2144 {
2145 /* If this reloc is in a read-only section, we might
2146 need a copy reloc. We can't check reliably at this
2147 stage whether the section is read-only, as input
2148 sections have not yet been mapped to output sections.
2149 Tentatively set the flag for now, and correct in
2150 adjust_dynamic_symbol. */
2151 h->non_got_ref = 1;
2152
2153 /* We may need a .plt entry if the symbol is a function
2154 defined in a shared lib or is a STT_GNU_IFUNC function
2155 referenced from the code or read-only section. */
2156 if (!h->def_regular
2157 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2158 h->plt.refcount += 1;
2159
2160 if (r_type == R_X86_64_PC32)
2161 {
2162 /* Since something like ".long foo - ." may be used
2163 as pointer, make sure that PLT is used if foo is
2164 a function defined in a shared library. */
2165 if ((sec->flags & SEC_CODE) == 0)
2166 h->pointer_equality_needed = 1;
2167 }
2168 else if (r_type != R_X86_64_PC32_BND
2169 && r_type != R_X86_64_PC64)
2170 {
2171 h->pointer_equality_needed = 1;
2172 /* At run-time, R_X86_64_64 can be resolved for both
2173 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2174 can only be resolved for x32. */
2175 if ((sec->flags & SEC_READONLY) == 0
2176 && (r_type == R_X86_64_64
2177 || (!ABI_64_P (abfd)
2178 && (r_type == R_X86_64_32
2179 || r_type == R_X86_64_32S))))
2180 eh->func_pointer_refcount += 1;
2181 }
2182 }
2183
2184 size_reloc = FALSE;
2185 do_size:
2186 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2187 htab->pointer_r_type))
2188 {
2189 struct elf_dyn_relocs *p;
2190 struct elf_dyn_relocs **head;
2191
2192 /* We must copy these reloc types into the output file.
2193 Create a reloc section in dynobj and make room for
2194 this reloc. */
2195 if (sreloc == NULL)
2196 {
2197 sreloc = _bfd_elf_make_dynamic_reloc_section
2198 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2199 abfd, /*rela?*/ TRUE);
2200
2201 if (sreloc == NULL)
2202 goto error_return;
2203 }
2204
2205 /* If this is a global symbol, we count the number of
2206 relocations we need for this symbol. */
2207 if (h != NULL)
2208 head = &eh->dyn_relocs;
2209 else
2210 {
2211 /* Track dynamic relocs needed for local syms too.
2212 We really need local syms available to do this
2213 easily. Oh well. */
2214 asection *s;
2215 void **vpp;
2216
2217 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2218 abfd, r_symndx);
2219 if (isym == NULL)
2220 goto error_return;
2221
2222 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2223 if (s == NULL)
2224 s = sec;
2225
2226 /* Beware of type punned pointers vs strict aliasing
2227 rules. */
2228 vpp = &(elf_section_data (s)->local_dynrel);
2229 head = (struct elf_dyn_relocs **)vpp;
2230 }
2231
2232 p = *head;
2233 if (p == NULL || p->sec != sec)
2234 {
2235 bfd_size_type amt = sizeof *p;
2236
2237 p = ((struct elf_dyn_relocs *)
2238 bfd_alloc (htab->elf.dynobj, amt));
2239 if (p == NULL)
2240 goto error_return;
2241 p->next = *head;
2242 *head = p;
2243 p->sec = sec;
2244 p->count = 0;
2245 p->pc_count = 0;
2246 }
2247
2248 p->count += 1;
2249 /* Count size relocation as PC-relative relocation. */
2250 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2251 p->pc_count += 1;
2252 }
2253 break;
2254
2255 /* This relocation describes the C++ object vtable hierarchy.
2256 Reconstruct it for later use during GC. */
2257 case R_X86_64_GNU_VTINHERIT:
2258 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2259 goto error_return;
2260 break;
2261
2262 /* This relocation describes which C++ vtable entries are actually
2263 used. Record for later use during GC. */
2264 case R_X86_64_GNU_VTENTRY:
2265 BFD_ASSERT (h != NULL);
2266 if (h != NULL
2267 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2268 goto error_return;
2269 break;
2270
2271 default:
2272 break;
2273 }
2274 }
2275
2276 if (elf_section_data (sec)->this_hdr.contents != contents)
2277 {
2278 if (!converted && !info->keep_memory)
2279 free (contents);
2280 else
2281 {
2282 /* Cache the section contents for elf_link_input_bfd if any
2283 load is converted or --no-keep-memory isn't used. */
2284 elf_section_data (sec)->this_hdr.contents = contents;
2285 }
2286 }
2287
2288 /* Cache relocations if any load is converted. */
2289 if (elf_section_data (sec)->relocs != relocs && converted)
2290 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2291
2292 return TRUE;
2293
2294 error_return:
2295 if (elf_section_data (sec)->this_hdr.contents != contents)
2296 free (contents);
2297 sec->check_relocs_failed = 1;
2298 return FALSE;
2299 }
2300
2301 /* Return the relocation value for @tpoff relocation
2302 if STT_TLS virtual address is ADDRESS. */
2303
2304 static bfd_vma
2305 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2306 {
2307 struct elf_link_hash_table *htab = elf_hash_table (info);
2308 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2309 bfd_vma static_tls_size;
2310
2311   /* If tls_sec is NULL, we should have signalled an error already.  */
2312 if (htab->tls_sec == NULL)
2313 return 0;
2314
2315 /* Consider special static TLS alignment requirements. */
2316 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2317 return address - static_tls_size - htab->tls_sec->vma;
2318 }
2319
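/* Worked example (illustrative only): if the TLS segment starts at
   vma 0x1000, the aligned static TLS size is 0x20, and the variable
   lives at vma 0x1010, then the @tpoff value returned above is
   0x1010 - 0x20 - 0x1000 = -0x10, i.e. a small negative offset from
   the thread pointer in %fs.  */
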
2320 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
2321 branch? */
2322
2323 static bfd_boolean
2324 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2325 {
2326 /* Opcode Instruction
2327 0xe8 call
2328 0xe9 jump
2329 0x0f 0x8x conditional jump */
2330 return ((offset > 0
2331 && (contents [offset - 1] == 0xe8
2332 || contents [offset - 1] == 0xe9))
2333 || (offset > 1
2334 && contents [offset - 2] == 0x0f
2335 && (contents [offset - 1] & 0xf0) == 0x80));
2336 }
2337
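/* For example (illustrative only): with OFFSET pointing at the rel32
   field of "e8 00 00 00 00" (call rel32), contents[offset - 1] is
   0xe8 and the function above returns TRUE; for "0f 84" (je rel32)
   the 0x0f/0x8x check fires instead.  */
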
2338 /* Relocate an x86_64 ELF section. */
2339
2340 static bfd_boolean
2341 elf_x86_64_relocate_section (bfd *output_bfd,
2342 struct bfd_link_info *info,
2343 bfd *input_bfd,
2344 asection *input_section,
2345 bfd_byte *contents,
2346 Elf_Internal_Rela *relocs,
2347 Elf_Internal_Sym *local_syms,
2348 asection **local_sections)
2349 {
2350 struct elf_x86_link_hash_table *htab;
2351 Elf_Internal_Shdr *symtab_hdr;
2352 struct elf_link_hash_entry **sym_hashes;
2353 bfd_vma *local_got_offsets;
2354 bfd_vma *local_tlsdesc_gotents;
2355 Elf_Internal_Rela *rel;
2356 Elf_Internal_Rela *wrel;
2357 Elf_Internal_Rela *relend;
2358 unsigned int plt_entry_size;
2359
2360 /* Skip if check_relocs failed. */
2361 if (input_section->check_relocs_failed)
2362 return FALSE;
2363
2364 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2365 if (htab == NULL)
2366 return FALSE;
2367
2368 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2369
2370 plt_entry_size = htab->plt.plt_entry_size;
2371 symtab_hdr = &elf_symtab_hdr (input_bfd);
2372 sym_hashes = elf_sym_hashes (input_bfd);
2373 local_got_offsets = elf_local_got_offsets (input_bfd);
2374 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2375
2376 _bfd_x86_elf_set_tls_module_base (info);
2377
2378 rel = wrel = relocs;
2379 relend = relocs + input_section->reloc_count;
2380 for (; rel < relend; wrel++, rel++)
2381 {
2382 unsigned int r_type, r_type_tls;
2383 reloc_howto_type *howto;
2384 unsigned long r_symndx;
2385 struct elf_link_hash_entry *h;
2386 struct elf_x86_link_hash_entry *eh;
2387 Elf_Internal_Sym *sym;
2388 asection *sec;
2389 bfd_vma off, offplt, plt_offset;
2390 bfd_vma relocation;
2391 bfd_boolean unresolved_reloc;
2392 bfd_reloc_status_type r;
2393 int tls_type;
2394 asection *base_got, *resolved_plt;
2395 bfd_vma st_size;
2396 bfd_boolean resolved_to_zero;
2397 bfd_boolean relative_reloc;
2398 bfd_boolean converted_reloc;
2399 bfd_boolean need_copy_reloc_in_pie;
2400
2401 r_type = ELF32_R_TYPE (rel->r_info);
2402 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2403 || r_type == (int) R_X86_64_GNU_VTENTRY)
2404 {
2405 if (wrel != rel)
2406 *wrel = *rel;
2407 continue;
2408 }
2409
2410 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2411 r_type &= ~R_X86_64_converted_reloc_bit;
2412
2413 if (r_type >= (int) R_X86_64_standard)
2414 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2415
2416 if (r_type != (int) R_X86_64_32
2417 || ABI_64_P (output_bfd))
2418 howto = x86_64_elf_howto_table + r_type;
2419 else
2420 howto = (x86_64_elf_howto_table
2421 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2422 r_symndx = htab->r_sym (rel->r_info);
2423 h = NULL;
2424 sym = NULL;
2425 sec = NULL;
2426 unresolved_reloc = FALSE;
2427 if (r_symndx < symtab_hdr->sh_info)
2428 {
2429 sym = local_syms + r_symndx;
2430 sec = local_sections[r_symndx];
2431
2432 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2433 &sec, rel);
2434 st_size = sym->st_size;
2435
2436 /* Relocate against local STT_GNU_IFUNC symbol. */
2437 if (!bfd_link_relocatable (info)
2438 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2439 {
2440 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2441 rel, FALSE);
2442 if (h == NULL)
2443 abort ();
2444
2445 /* Set STT_GNU_IFUNC symbol value. */
2446 h->root.u.def.value = sym->st_value;
2447 h->root.u.def.section = sec;
2448 }
2449 }
2450 else
2451 {
2452 bfd_boolean warned ATTRIBUTE_UNUSED;
2453 bfd_boolean ignored ATTRIBUTE_UNUSED;
2454
2455 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2456 r_symndx, symtab_hdr, sym_hashes,
2457 h, sec, relocation,
2458 unresolved_reloc, warned, ignored);
2459 st_size = h->size;
2460 }
2461
2462 if (sec != NULL && discarded_section (sec))
2463 {
2464 _bfd_clear_contents (howto, input_bfd, input_section,
2465 contents + rel->r_offset);
2466 wrel->r_offset = rel->r_offset;
2467 wrel->r_info = 0;
2468 wrel->r_addend = 0;
2469
2470 /* For ld -r, remove relocations in debug sections against
2471 sections defined in discarded sections. Not done for
2472 	     eh_frame, whose editing code expects the relocs to be present.  */
2473 if (bfd_link_relocatable (info)
2474 && (input_section->flags & SEC_DEBUGGING))
2475 wrel--;
2476
2477 continue;
2478 }
2479
2480 if (bfd_link_relocatable (info))
2481 {
2482 if (wrel != rel)
2483 *wrel = *rel;
2484 continue;
2485 }
2486
2487 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2488 {
2489 if (r_type == R_X86_64_64)
2490 {
2491 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2492 zero-extend it to 64bit if addend is zero. */
2493 r_type = R_X86_64_32;
2494 memset (contents + rel->r_offset + 4, 0, 4);
2495 }
2496 else if (r_type == R_X86_64_SIZE64)
2497 {
2498 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2499 zero-extend it to 64bit if addend is zero. */
2500 r_type = R_X86_64_SIZE32;
2501 memset (contents + rel->r_offset + 4, 0, 4);
2502 }
2503 }
2504
2505 eh = (struct elf_x86_link_hash_entry *) h;
2506
2507 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2508 it here if it is defined in a non-shared object. */
2509 if (h != NULL
2510 && h->type == STT_GNU_IFUNC
2511 && h->def_regular)
2512 {
2513 bfd_vma plt_index;
2514 const char *name;
2515
2516 if ((input_section->flags & SEC_ALLOC) == 0)
2517 {
2518 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2519 sections because such sections are not SEC_ALLOC and
2520 thus ld.so will not process them. */
2521 if ((input_section->flags & SEC_DEBUGGING) != 0)
2522 continue;
2523 abort ();
2524 }
2525
2526 switch (r_type)
2527 {
2528 default:
2529 break;
2530
2531 case R_X86_64_GOTPCREL:
2532 case R_X86_64_GOTPCRELX:
2533 case R_X86_64_REX_GOTPCRELX:
2534 case R_X86_64_GOTPCREL64:
2535 base_got = htab->elf.sgot;
2536 off = h->got.offset;
2537
2538 if (base_got == NULL)
2539 abort ();
2540
2541 if (off == (bfd_vma) -1)
2542 {
2543 /* We can't use h->got.offset here to save state, or
2544 even just remember the offset, as finish_dynamic_symbol
2545 would use that as offset into .got. */
2546
2547 if (h->plt.offset == (bfd_vma) -1)
2548 abort ();
2549
2550 if (htab->elf.splt != NULL)
2551 {
2552 plt_index = (h->plt.offset / plt_entry_size
2553 - htab->plt.has_plt0);
2554 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2555 base_got = htab->elf.sgotplt;
2556 }
2557 else
2558 {
2559 plt_index = h->plt.offset / plt_entry_size;
2560 off = plt_index * GOT_ENTRY_SIZE;
2561 base_got = htab->elf.igotplt;
2562 }
2563
2564 if (h->dynindx == -1
2565 || h->forced_local
2566 || info->symbolic)
2567 {
2568 		  /* This references the local definition.  We must
2569 initialize this entry in the global offset table.
2570 Since the offset must always be a multiple of 8,
2571 we use the least significant bit to record
2572 whether we have initialized it already.
2573
2574 When doing a dynamic link, we create a .rela.got
2575 relocation entry to initialize the value. This
2576 is done in the finish_dynamic_symbol routine. */
2577 if ((off & 1) != 0)
2578 off &= ~1;
2579 else
2580 {
2581 bfd_put_64 (output_bfd, relocation,
2582 base_got->contents + off);
2583 /* Note that this is harmless for the GOTPLT64
2584 case, as -1 | 1 still is -1. */
2585 h->got.offset |= 1;
2586 }
2587 }
2588 }
2589
2590 relocation = (base_got->output_section->vma
2591 + base_got->output_offset + off);
2592
2593 goto do_relocation;
2594 }
2595
2596 if (h->plt.offset == (bfd_vma) -1)
2597 {
2598 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2599 if (r_type == htab->pointer_r_type
2600 && (input_section->flags & SEC_CODE) == 0)
2601 goto do_ifunc_pointer;
2602 goto bad_ifunc_reloc;
2603 }
2604
2605 /* STT_GNU_IFUNC symbol must go through PLT. */
2606 if (htab->elf.splt != NULL)
2607 {
2608 if (htab->plt_second != NULL)
2609 {
2610 resolved_plt = htab->plt_second;
2611 plt_offset = eh->plt_second.offset;
2612 }
2613 else
2614 {
2615 resolved_plt = htab->elf.splt;
2616 plt_offset = h->plt.offset;
2617 }
2618 }
2619 else
2620 {
2621 resolved_plt = htab->elf.iplt;
2622 plt_offset = h->plt.offset;
2623 }
2624
2625 relocation = (resolved_plt->output_section->vma
2626 + resolved_plt->output_offset + plt_offset);
2627
2628 switch (r_type)
2629 {
2630 default:
2631 bad_ifunc_reloc:
2632 if (h->root.root.string)
2633 name = h->root.root.string;
2634 else
2635 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2636 NULL);
2637 _bfd_error_handler
2638 /* xgettext:c-format */
2639 (_("%B: relocation %s against STT_GNU_IFUNC "
2640 "symbol `%s' isn't supported"), input_bfd,
2641 howto->name, name);
2642 bfd_set_error (bfd_error_bad_value);
2643 return FALSE;
2644
2645 case R_X86_64_32S:
2646 if (bfd_link_pic (info))
2647 abort ();
2648 goto do_relocation;
2649
2650 case R_X86_64_32:
2651 if (ABI_64_P (output_bfd))
2652 goto do_relocation;
2653 /* FALLTHROUGH */
2654 case R_X86_64_64:
2655 do_ifunc_pointer:
2656 if (rel->r_addend != 0)
2657 {
2658 if (h->root.root.string)
2659 name = h->root.root.string;
2660 else
2661 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2662 sym, NULL);
2663 _bfd_error_handler
2664 /* xgettext:c-format */
2665 (_("%B: relocation %s against STT_GNU_IFUNC "
2666 "symbol `%s' has non-zero addend: %Ld"),
2667 input_bfd, howto->name, name, rel->r_addend);
2668 bfd_set_error (bfd_error_bad_value);
2669 return FALSE;
2670 }
2671
2672 	      /* Generate dynamic relocation only when there is a
2673 non-GOT reference in a shared object or there is no
2674 PLT. */
2675 if ((bfd_link_pic (info) && h->non_got_ref)
2676 || h->plt.offset == (bfd_vma) -1)
2677 {
2678 Elf_Internal_Rela outrel;
2679 asection *sreloc;
2680
2681 /* Need a dynamic relocation to get the real function
2682 address. */
2683 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2684 info,
2685 input_section,
2686 rel->r_offset);
2687 if (outrel.r_offset == (bfd_vma) -1
2688 || outrel.r_offset == (bfd_vma) -2)
2689 abort ();
2690
2691 outrel.r_offset += (input_section->output_section->vma
2692 + input_section->output_offset);
2693
2694 if (POINTER_LOCAL_IFUNC_P (info, h))
2695 {
2696 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2697 h->root.root.string,
2698 h->root.u.def.section->owner);
2699
2700 /* This symbol is resolved locally. */
2701 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2702 outrel.r_addend = (h->root.u.def.value
2703 + h->root.u.def.section->output_section->vma
2704 + h->root.u.def.section->output_offset);
2705 }
2706 else
2707 {
2708 outrel.r_info = htab->r_info (h->dynindx, r_type);
2709 outrel.r_addend = 0;
2710 }
2711
2712 /* Dynamic relocations are stored in
2713 1. .rela.ifunc section in PIC object.
2714 2. .rela.got section in dynamic executable.
2715 3. .rela.iplt section in static executable. */
2716 if (bfd_link_pic (info))
2717 sreloc = htab->elf.irelifunc;
2718 else if (htab->elf.splt != NULL)
2719 sreloc = htab->elf.srelgot;
2720 else
2721 sreloc = htab->elf.irelplt;
2722 elf_append_rela (output_bfd, sreloc, &outrel);
2723
2724 /* If this reloc is against an external symbol, we
2725 do not want to fiddle with the addend. Otherwise,
2726 we need to include the symbol value so that it
2727 becomes an addend for the dynamic reloc. For an
2728 internal symbol, we have updated addend. */
2729 continue;
2730 }
2731 /* FALLTHROUGH */
2732 case R_X86_64_PC32:
2733 case R_X86_64_PC32_BND:
2734 case R_X86_64_PC64:
2735 case R_X86_64_PLT32:
2736 case R_X86_64_PLT32_BND:
2737 goto do_relocation;
2738 }
2739 }
2740
2741 resolved_to_zero = (eh != NULL
2742 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2743
2744 /* When generating a shared object, the relocations handled here are
2745 copied into the output file to be resolved at run time. */
2746 switch (r_type)
2747 {
2748 case R_X86_64_GOT32:
2749 case R_X86_64_GOT64:
2750 /* Relocation is to the entry for this symbol in the global
2751 offset table. */
2752 case R_X86_64_GOTPCREL:
2753 case R_X86_64_GOTPCRELX:
2754 case R_X86_64_REX_GOTPCRELX:
2755 case R_X86_64_GOTPCREL64:
2756 /* Use global offset table entry as symbol value. */
2757 case R_X86_64_GOTPLT64:
2758 /* This is obsolete and treated the same as GOT64. */
2759 base_got = htab->elf.sgot;
2760
2761 if (htab->elf.sgot == NULL)
2762 abort ();
2763
2764 relative_reloc = FALSE;
2765 if (h != NULL)
2766 {
2767 off = h->got.offset;
2768 if (h->needs_plt
2769 && h->plt.offset != (bfd_vma)-1
2770 && off == (bfd_vma)-1)
2771 {
2772 /* We can't use h->got.offset here to save
2773 state, or even just remember the offset, as
2774 finish_dynamic_symbol would use that as offset into
2775 .got. */
2776 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2777 - htab->plt.has_plt0);
2778 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2779 base_got = htab->elf.sgotplt;
2780 }
2781
2782 if (RESOLVED_LOCALLY_P (info, h, htab))
2783 {
2784 /* We must initialize this entry in the global offset
2785 table. Since the offset must always be a multiple
2786 of 8, we use the least significant bit to record
2787 whether we have initialized it already.
2788
2789 When doing a dynamic link, we create a .rela.got
2790 relocation entry to initialize the value. This is
2791 done in the finish_dynamic_symbol routine. */
2792 if ((off & 1) != 0)
2793 off &= ~1;
2794 else
2795 {
2796 bfd_put_64 (output_bfd, relocation,
2797 base_got->contents + off);
2798 /* Note that this is harmless for the GOTPLT64 case,
2799 as -1 | 1 still is -1. */
2800 h->got.offset |= 1;
2801
2802 if (GENERATE_RELATIVE_RELOC_P (info, h))
2803 {
2804 /* If this symbol isn't dynamic in PIC,
2805 generate R_X86_64_RELATIVE here. */
2806 eh->no_finish_dynamic_symbol = 1;
2807 relative_reloc = TRUE;
2808 }
2809 }
2810 }
2811 else
2812 unresolved_reloc = FALSE;
2813 }
2814 else
2815 {
2816 if (local_got_offsets == NULL)
2817 abort ();
2818
2819 off = local_got_offsets[r_symndx];
2820
2821 /* The offset must always be a multiple of 8. We use
2822 the least significant bit to record whether we have
2823 already generated the necessary reloc. */
2824 if ((off & 1) != 0)
2825 off &= ~1;
2826 else
2827 {
2828 bfd_put_64 (output_bfd, relocation,
2829 base_got->contents + off);
2830 local_got_offsets[r_symndx] |= 1;
2831
2832 if (bfd_link_pic (info))
2833 relative_reloc = TRUE;
2834 }
2835 }
2836
2837 if (relative_reloc)
2838 {
2839 asection *s;
2840 Elf_Internal_Rela outrel;
2841
2842 /* We need to generate a R_X86_64_RELATIVE reloc
2843 for the dynamic linker. */
2844 s = htab->elf.srelgot;
2845 if (s == NULL)
2846 abort ();
2847
2848 outrel.r_offset = (base_got->output_section->vma
2849 + base_got->output_offset
2850 + off);
2851 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2852 outrel.r_addend = relocation;
2853 elf_append_rela (output_bfd, s, &outrel);
2854 }
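	      /* At run time the dynamic linker resolves the
		 R_X86_64_RELATIVE entry emitted above by storing
		 load base + r_addend at r_offset; no symbol lookup
		 is needed, which is why it can be used for symbols
		 that are not exported to the dynamic symbol table.  */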
2855
2856 if (off >= (bfd_vma) -2)
2857 abort ();
2858
2859 relocation = base_got->output_section->vma
2860 + base_got->output_offset + off;
2861 if (r_type != R_X86_64_GOTPCREL
2862 && r_type != R_X86_64_GOTPCRELX
2863 && r_type != R_X86_64_REX_GOTPCRELX
2864 && r_type != R_X86_64_GOTPCREL64)
2865 relocation -= htab->elf.sgotplt->output_section->vma
2866 - htab->elf.sgotplt->output_offset;
2867
2868 break;
2869
2870 case R_X86_64_GOTOFF64:
2871 /* Relocation is relative to the start of the global offset
2872 table. */
2873
2874 /* Check to make sure it isn't a protected function or data
2875 	   symbol for a shared library, since it may not be local when
2876 	   used as a function address or with a copy relocation.  We also
2877 	   need to make sure that the symbol is referenced locally.  */
2878 if (bfd_link_pic (info) && h)
2879 {
2880 if (!h->def_regular)
2881 {
2882 const char *v;
2883
2884 switch (ELF_ST_VISIBILITY (h->other))
2885 {
2886 case STV_HIDDEN:
2887 v = _("hidden symbol");
2888 break;
2889 case STV_INTERNAL:
2890 v = _("internal symbol");
2891 break;
2892 case STV_PROTECTED:
2893 v = _("protected symbol");
2894 break;
2895 default:
2896 v = _("symbol");
2897 break;
2898 }
2899
2900 _bfd_error_handler
2901 /* xgettext:c-format */
2902 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2903 " `%s' can not be used when making a shared object"),
2904 input_bfd, v, h->root.root.string);
2905 bfd_set_error (bfd_error_bad_value);
2906 return FALSE;
2907 }
2908 else if (!bfd_link_executable (info)
2909 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2910 && (h->type == STT_FUNC
2911 || h->type == STT_OBJECT)
2912 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2913 {
2914 _bfd_error_handler
2915 /* xgettext:c-format */
2916 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2917 " `%s' can not be used when making a shared object"),
2918 input_bfd,
2919 h->type == STT_FUNC ? "function" : "data",
2920 h->root.root.string);
2921 bfd_set_error (bfd_error_bad_value);
2922 return FALSE;
2923 }
2924 }
2925
2926 /* Note that sgot is not involved in this
2927 calculation. We always want the start of .got.plt. If we
2928 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2929 permitted by the ABI, we might have to change this
2930 calculation. */
2931 relocation -= htab->elf.sgotplt->output_section->vma
2932 + htab->elf.sgotplt->output_offset;
2933 break;
2934
2935 case R_X86_64_GOTPC32:
2936 case R_X86_64_GOTPC64:
2937 /* Use global offset table as symbol value. */
2938 relocation = htab->elf.sgotplt->output_section->vma
2939 + htab->elf.sgotplt->output_offset;
2940 unresolved_reloc = FALSE;
2941 break;
2942
2943 case R_X86_64_PLTOFF64:
2944 /* Relocation is PLT entry relative to GOT. For local
2945 symbols it's the symbol itself relative to GOT. */
2946 if (h != NULL
2947 /* See PLT32 handling. */
2948 && (h->plt.offset != (bfd_vma) -1
2949 || eh->plt_got.offset != (bfd_vma) -1)
2950 && htab->elf.splt != NULL)
2951 {
2952 if (eh->plt_got.offset != (bfd_vma) -1)
2953 {
2954 /* Use the GOT PLT. */
2955 resolved_plt = htab->plt_got;
2956 plt_offset = eh->plt_got.offset;
2957 }
2958 else if (htab->plt_second != NULL)
2959 {
2960 resolved_plt = htab->plt_second;
2961 plt_offset = eh->plt_second.offset;
2962 }
2963 else
2964 {
2965 resolved_plt = htab->elf.splt;
2966 plt_offset = h->plt.offset;
2967 }
2968
2969 relocation = (resolved_plt->output_section->vma
2970 + resolved_plt->output_offset
2971 + plt_offset);
2972 unresolved_reloc = FALSE;
2973 }
2974
2975 relocation -= htab->elf.sgotplt->output_section->vma
2976 + htab->elf.sgotplt->output_offset;
2977 break;
2978
2979 case R_X86_64_PLT32:
2980 case R_X86_64_PLT32_BND:
2981 /* Relocation is to the entry for this symbol in the
2982 procedure linkage table. */
2983
2984 /* Resolve a PLT32 reloc against a local symbol directly,
2985 without using the procedure linkage table. */
2986 if (h == NULL)
2987 break;
2988
2989 if ((h->plt.offset == (bfd_vma) -1
2990 && eh->plt_got.offset == (bfd_vma) -1)
2991 || htab->elf.splt == NULL)
2992 {
2993 /* We didn't make a PLT entry for this symbol. This
2994 happens when statically linking PIC code, or when
2995 using -Bsymbolic. */
2996 break;
2997 }
2998
2999 if (h->plt.offset != (bfd_vma) -1)
3000 {
3001 if (htab->plt_second != NULL)
3002 {
3003 resolved_plt = htab->plt_second;
3004 plt_offset = eh->plt_second.offset;
3005 }
3006 else
3007 {
3008 resolved_plt = htab->elf.splt;
3009 plt_offset = h->plt.offset;
3010 }
3011 }
3012 else
3013 {
3014 /* Use the GOT PLT. */
3015 resolved_plt = htab->plt_got;
3016 plt_offset = eh->plt_got.offset;
3017 }
3018
3019 relocation = (resolved_plt->output_section->vma
3020 + resolved_plt->output_offset
3021 + plt_offset);
3022 unresolved_reloc = FALSE;
3023 break;
3024
3025 case R_X86_64_SIZE32:
3026 case R_X86_64_SIZE64:
3027 /* Set to symbol size. */
3028 relocation = st_size;
3029 goto direct;
3030
3031 case R_X86_64_PC8:
3032 case R_X86_64_PC16:
3033 case R_X86_64_PC32:
3034 case R_X86_64_PC32_BND:
3035 /* Don't complain about -fPIC if the symbol is undefined when
3036 	     building an executable, unless it is an unresolved weak symbol or
3037 -z nocopyreloc is used. */
3038 if ((input_section->flags & SEC_ALLOC) != 0
3039 && (input_section->flags & SEC_READONLY) != 0
3040 && h != NULL
3041 && ((bfd_link_executable (info)
3042 && ((h->root.type == bfd_link_hash_undefweak
3043 && !resolved_to_zero)
3044 || ((info->nocopyreloc
3045 || (eh->def_protected
3046 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3047 && h->def_dynamic
3048 && !(h->root.u.def.section->flags & SEC_CODE))))
3049 || bfd_link_dll (info)))
3050 {
3051 bfd_boolean fail = FALSE;
3052 bfd_boolean branch
3053 = ((r_type == R_X86_64_PC32
3054 || r_type == R_X86_64_PC32_BND)
3055 && is_32bit_relative_branch (contents, rel->r_offset));
3056
3057 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3058 {
3059 /* Symbol is referenced locally. Make sure it is
3060 		 defined locally, or that this is a branch.  */
3061 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3062 && !branch);
3063 }
3064 else if (!(bfd_link_pie (info)
3065 && (h->needs_copy || eh->needs_copy)))
3066 {
3067 /* Symbol doesn't need copy reloc and isn't referenced
3068 locally. We only allow branch to symbol with
3069 non-default visibility. */
3070 fail = (!branch
3071 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3072 }
3073
3074 if (fail)
3075 return elf_x86_64_need_pic (info, input_bfd, input_section,
3076 h, NULL, NULL, howto);
3077 }
3078 /* Fall through. */
3079
3080 case R_X86_64_8:
3081 case R_X86_64_16:
3082 case R_X86_64_32:
3083 case R_X86_64_PC64:
3084 case R_X86_64_64:
3085 /* FIXME: The ABI says the linker should make sure the value is
3086 	     the same when it's zero-extended to 64 bit.  */
3087
3088 direct:
3089 if ((input_section->flags & SEC_ALLOC) == 0)
3090 break;
3091
3092 need_copy_reloc_in_pie = (bfd_link_pie (info)
3093 && h != NULL
3094 && (h->needs_copy
3095 || eh->needs_copy
3096 || (h->root.type
3097 == bfd_link_hash_undefined))
3098 && (X86_PCREL_TYPE_P (r_type)
3099 || X86_SIZE_TYPE_P (r_type)));
3100
3101 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3102 need_copy_reloc_in_pie,
3103 resolved_to_zero, FALSE))
3104 {
3105 Elf_Internal_Rela outrel;
3106 bfd_boolean skip, relocate;
3107 asection *sreloc;
3108
3109 /* When generating a shared object, these relocations
3110 are copied into the output file to be resolved at run
3111 time. */
3112 skip = FALSE;
3113 relocate = FALSE;
3114
3115 outrel.r_offset =
3116 _bfd_elf_section_offset (output_bfd, info, input_section,
3117 rel->r_offset);
3118 if (outrel.r_offset == (bfd_vma) -1)
3119 skip = TRUE;
3120 else if (outrel.r_offset == (bfd_vma) -2)
3121 skip = TRUE, relocate = TRUE;
3122
3123 outrel.r_offset += (input_section->output_section->vma
3124 + input_section->output_offset);
3125
3126 if (skip)
3127 memset (&outrel, 0, sizeof outrel);
3128
3129 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3130 {
3131 outrel.r_info = htab->r_info (h->dynindx, r_type);
3132 outrel.r_addend = rel->r_addend;
3133 }
3134 else
3135 {
3136 /* This symbol is local, or marked to become local.
3137 When relocation overflow check is disabled, we
3138 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3139 if (r_type == htab->pointer_r_type
3140 || (r_type == R_X86_64_32
3141 && info->no_reloc_overflow_check))
3142 {
3143 relocate = TRUE;
3144 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3145 outrel.r_addend = relocation + rel->r_addend;
3146 }
3147 else if (r_type == R_X86_64_64
3148 && !ABI_64_P (output_bfd))
3149 {
3150 relocate = TRUE;
3151 outrel.r_info = htab->r_info (0,
3152 R_X86_64_RELATIVE64);
3153 outrel.r_addend = relocation + rel->r_addend;
3154 /* Check addend overflow. */
3155 if ((outrel.r_addend & 0x80000000)
3156 != (rel->r_addend & 0x80000000))
3157 {
3158 const char *name;
3159 int addend = rel->r_addend;
3160 if (h && h->root.root.string)
3161 name = h->root.root.string;
3162 else
3163 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3164 sym, NULL);
3165 _bfd_error_handler
3166 /* xgettext:c-format */
3167 (_("%B: addend %s%#x in relocation %s against "
3168 "symbol `%s' at %#Lx in section `%A' is "
3169 "out of range"),
3170 input_bfd, addend < 0 ? "-" : "", addend,
3171 howto->name, name, rel->r_offset, input_section);
3172 bfd_set_error (bfd_error_bad_value);
3173 return FALSE;
3174 }
3175 }
3176 else
3177 {
3178 long sindx;
3179
3180 if (bfd_is_abs_section (sec))
3181 sindx = 0;
3182 else if (sec == NULL || sec->owner == NULL)
3183 {
3184 bfd_set_error (bfd_error_bad_value);
3185 return FALSE;
3186 }
3187 else
3188 {
3189 asection *osec;
3190
3191 /* We are turning this relocation into one
3192 against a section symbol. It would be
3193 proper to subtract the symbol's value,
3194 osec->vma, from the emitted reloc addend,
3195 but ld.so expects buggy relocs. */
3196 osec = sec->output_section;
3197 sindx = elf_section_data (osec)->dynindx;
3198 if (sindx == 0)
3199 {
3200 asection *oi = htab->elf.text_index_section;
3201 sindx = elf_section_data (oi)->dynindx;
3202 }
3203 BFD_ASSERT (sindx != 0);
3204 }
3205
3206 outrel.r_info = htab->r_info (sindx, r_type);
3207 outrel.r_addend = relocation + rel->r_addend;
3208 }
3209 }
3210
3211 sreloc = elf_section_data (input_section)->sreloc;
3212
3213 if (sreloc == NULL || sreloc->contents == NULL)
3214 {
3215 r = bfd_reloc_notsupported;
3216 goto check_relocation_error;
3217 }
3218
3219 elf_append_rela (output_bfd, sreloc, &outrel);
3220
3221 /* If this reloc is against an external symbol, we do
3222 not want to fiddle with the addend. Otherwise, we
3223 need to include the symbol value so that it becomes
3224 an addend for the dynamic reloc. */
3225 if (! relocate)
3226 continue;
3227 }
3228
3229 break;
3230
3231 case R_X86_64_TLSGD:
3232 case R_X86_64_GOTPC32_TLSDESC:
3233 case R_X86_64_TLSDESC_CALL:
3234 case R_X86_64_GOTTPOFF:
3235 tls_type = GOT_UNKNOWN;
3236 if (h == NULL && local_got_offsets)
3237 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3238 else if (h != NULL)
3239 tls_type = elf_x86_hash_entry (h)->tls_type;
3240
3241 r_type_tls = r_type;
3242 if (! elf_x86_64_tls_transition (info, input_bfd,
3243 input_section, contents,
3244 symtab_hdr, sym_hashes,
3245 &r_type_tls, tls_type, rel,
3246 relend, h, r_symndx, TRUE))
3247 return FALSE;
3248
3249 if (r_type_tls == R_X86_64_TPOFF32)
3250 {
3251 bfd_vma roff = rel->r_offset;
3252
3253 BFD_ASSERT (! unresolved_reloc);
3254
3255 if (r_type == R_X86_64_TLSGD)
3256 {
3257 /* GD->LE transition. For 64bit, change
3258 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3259 .word 0x6666; rex64; call __tls_get_addr@PLT
3260 or
3261 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3262 .byte 0x66; rex64
3263 call *__tls_get_addr@GOTPCREL(%rip)
3264 which may be converted to
3265 addr32 call __tls_get_addr
3266 into:
3267 movq %fs:0, %rax
3268 leaq foo@tpoff(%rax), %rax
3269 For 32bit, change
3270 leaq foo@tlsgd(%rip), %rdi
3271 .word 0x6666; rex64; call __tls_get_addr@PLT
3272 or
3273 leaq foo@tlsgd(%rip), %rdi
3274 .byte 0x66; rex64
3275 call *__tls_get_addr@GOTPCREL(%rip)
3276 which may be converted to
3277 addr32 call __tls_get_addr
3278 into:
3279 movl %fs:0, %eax
3280 leaq foo@tpoff(%rax), %rax
3281 For largepic, change:
3282 leaq foo@tlsgd(%rip), %rdi
3283 movabsq $__tls_get_addr@pltoff, %rax
3284 addq %r15, %rax
3285 call *%rax
3286 into:
3287 movq %fs:0, %rax
3288 leaq foo@tpoff(%rax), %rax
3289 nopw 0x0(%rax,%rax,1) */
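		      /* Byte accounting for the non-largepic 64-bit
			 case (a sketch, not part of the original
			 comment): the 16-byte input
			   66 48 8d 3d <disp32>  66 66 48 e8 <rel32>
			 is overwritten with the 16-byte
			   64 48 8b 04 25 00 00 00 00  movq %fs:0, %rax
			   48 8d 80 <imm32>            leaq x@tpoff(%rax), %rax
			 so no surrounding code moves; bfd_put_32 below
			 patches @tpoff into the final imm32.  */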
3290 int largepic = 0;
3291 if (ABI_64_P (output_bfd))
3292 {
3293 if (contents[roff + 5] == 0xb8)
3294 {
3295 memcpy (contents + roff - 3,
3296 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3297 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3298 largepic = 1;
3299 }
3300 else
3301 memcpy (contents + roff - 4,
3302 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3303 16);
3304 }
3305 else
3306 memcpy (contents + roff - 3,
3307 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3308 15);
3309 bfd_put_32 (output_bfd,
3310 elf_x86_64_tpoff (info, relocation),
3311 contents + roff + 8 + largepic);
3312 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3313 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3314 rel++;
3315 wrel++;
3316 continue;
3317 }
3318 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3319 {
3320 /* GDesc -> LE transition.
3321 It's originally something like:
3322 leaq x@tlsdesc(%rip), %rax
3323
3324 Change it to:
3325 movl $x@tpoff, %rax. */
3326
3327 unsigned int val, type;
3328
3329 type = bfd_get_8 (input_bfd, contents + roff - 3);
3330 val = bfd_get_8 (input_bfd, contents + roff - 1);
3331 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3332 contents + roff - 3);
3333 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3334 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3335 contents + roff - 1);
3336 bfd_put_32 (output_bfd,
3337 elf_x86_64_tpoff (info, relocation),
3338 contents + roff);
3339 continue;
3340 }
3341 else if (r_type == R_X86_64_TLSDESC_CALL)
3342 {
3343 /* GDesc -> LE transition.
3344 It's originally:
3345 call *(%rax)
3346 Turn it into:
3347 xchg %ax,%ax. */
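		      /* Both forms are exactly two bytes: "call *(%rax)"
			 is 0xff 0x10 and "xchg %ax,%ax" is 0x66 0x90 (a
			 two-byte NOP), so the two bfd_put_8 calls below
			 overwrite the call without moving any other
			 code.  */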
3348 bfd_put_8 (output_bfd, 0x66, contents + roff);
3349 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3350 continue;
3351 }
3352 else if (r_type == R_X86_64_GOTTPOFF)
3353 {
3354 /* IE->LE transition:
3355 For 64bit, originally it can be one of:
3356 movq foo@gottpoff(%rip), %reg
3357 addq foo@gottpoff(%rip), %reg
3358 We change it into:
3359 movq $foo, %reg
3360 leaq foo(%reg), %reg
3361 addq $foo, %reg.
3362 For 32bit, originally it can be one of:
3363 movq foo@gottpoff(%rip), %reg
3364 addl foo@gottpoff(%rip), %reg
3365 We change it into:
3366 movq $foo, %reg
3367 leal foo(%reg), %reg
3368 addl $foo, %reg. */
3369
3370 unsigned int val, type, reg;
3371
3372 if (roff >= 3)
3373 val = bfd_get_8 (input_bfd, contents + roff - 3);
3374 else
3375 val = 0;
3376 type = bfd_get_8 (input_bfd, contents + roff - 2);
3377 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3378 reg >>= 3;
3379 if (type == 0x8b)
3380 {
3381 /* movq */
3382 if (val == 0x4c)
3383 bfd_put_8 (output_bfd, 0x49,
3384 contents + roff - 3);
3385 else if (!ABI_64_P (output_bfd) && val == 0x44)
3386 bfd_put_8 (output_bfd, 0x41,
3387 contents + roff - 3);
3388 bfd_put_8 (output_bfd, 0xc7,
3389 contents + roff - 2);
3390 bfd_put_8 (output_bfd, 0xc0 | reg,
3391 contents + roff - 1);
3392 }
3393 else if (reg == 4)
3394 {
3395 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3396 is special */
3397 if (val == 0x4c)
3398 bfd_put_8 (output_bfd, 0x49,
3399 contents + roff - 3);
3400 else if (!ABI_64_P (output_bfd) && val == 0x44)
3401 bfd_put_8 (output_bfd, 0x41,
3402 contents + roff - 3);
3403 bfd_put_8 (output_bfd, 0x81,
3404 contents + roff - 2);
3405 bfd_put_8 (output_bfd, 0xc0 | reg,
3406 contents + roff - 1);
3407 }
3408 else
3409 {
3410 /* addq/addl -> leaq/leal */
3411 if (val == 0x4c)
3412 bfd_put_8 (output_bfd, 0x4d,
3413 contents + roff - 3);
3414 else if (!ABI_64_P (output_bfd) && val == 0x44)
3415 bfd_put_8 (output_bfd, 0x45,
3416 contents + roff - 3);
3417 bfd_put_8 (output_bfd, 0x8d,
3418 contents + roff - 2);
3419 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3420 contents + roff - 1);
3421 }
3422 bfd_put_32 (output_bfd,
3423 elf_x86_64_tpoff (info, relocation),
3424 contents + roff);
3425 continue;
3426 }
3427 else
3428 BFD_ASSERT (FALSE);
3429 }
3430
3431 if (htab->elf.sgot == NULL)
3432 abort ();
3433
3434 if (h != NULL)
3435 {
3436 off = h->got.offset;
3437 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3438 }
3439 else
3440 {
3441 if (local_got_offsets == NULL)
3442 abort ();
3443
3444 off = local_got_offsets[r_symndx];
3445 offplt = local_tlsdesc_gotents[r_symndx];
3446 }
3447
3448 if ((off & 1) != 0)
3449 off &= ~1;
3450 else
3451 {
3452 Elf_Internal_Rela outrel;
3453 int dr_type, indx;
3454 asection *sreloc;
3455
3456 if (htab->elf.srelgot == NULL)
3457 abort ();
3458
3459 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3460
3461 if (GOT_TLS_GDESC_P (tls_type))
3462 {
3463 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3464 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3465 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3466 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3467 + htab->elf.sgotplt->output_offset
3468 + offplt
3469 + htab->sgotplt_jump_table_size);
3470 sreloc = htab->elf.srelplt;
3471 if (indx == 0)
3472 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3473 else
3474 outrel.r_addend = 0;
3475 elf_append_rela (output_bfd, sreloc, &outrel);
3476 }
3477
3478 sreloc = htab->elf.srelgot;
3479
3480 outrel.r_offset = (htab->elf.sgot->output_section->vma
3481 + htab->elf.sgot->output_offset + off);
3482
3483 if (GOT_TLS_GD_P (tls_type))
3484 dr_type = R_X86_64_DTPMOD64;
3485 else if (GOT_TLS_GDESC_P (tls_type))
3486 goto dr_done;
3487 else
3488 dr_type = R_X86_64_TPOFF64;
3489
3490 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3491 outrel.r_addend = 0;
3492 if ((dr_type == R_X86_64_TPOFF64
3493 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3494 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3495 outrel.r_info = htab->r_info (indx, dr_type);
3496
3497 elf_append_rela (output_bfd, sreloc, &outrel);
3498
3499 if (GOT_TLS_GD_P (tls_type))
3500 {
3501 if (indx == 0)
3502 {
3503 BFD_ASSERT (! unresolved_reloc);
3504 bfd_put_64 (output_bfd,
3505 relocation - _bfd_x86_elf_dtpoff_base (info),
3506 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3507 }
3508 else
3509 {
3510 bfd_put_64 (output_bfd, 0,
3511 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3512 outrel.r_info = htab->r_info (indx,
3513 R_X86_64_DTPOFF64);
3514 outrel.r_offset += GOT_ENTRY_SIZE;
3515 elf_append_rela (output_bfd, sreloc,
3516 &outrel);
3517 }
3518 }
3519
3520 dr_done:
3521 if (h != NULL)
3522 h->got.offset |= 1;
3523 else
3524 local_got_offsets[r_symndx] |= 1;
3525 }
3526
3527 if (off >= (bfd_vma) -2
3528 && ! GOT_TLS_GDESC_P (tls_type))
3529 abort ();
3530 if (r_type_tls == r_type)
3531 {
3532 if (r_type == R_X86_64_GOTPC32_TLSDESC
3533 || r_type == R_X86_64_TLSDESC_CALL)
3534 relocation = htab->elf.sgotplt->output_section->vma
3535 + htab->elf.sgotplt->output_offset
3536 + offplt + htab->sgotplt_jump_table_size;
3537 else
3538 relocation = htab->elf.sgot->output_section->vma
3539 + htab->elf.sgot->output_offset + off;
3540 unresolved_reloc = FALSE;
3541 }
3542 else
3543 {
3544 bfd_vma roff = rel->r_offset;
3545
3546 if (r_type == R_X86_64_TLSGD)
3547 {
3548 /* GD->IE transition. For 64bit, change
3549 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3550 .word 0x6666; rex64; call __tls_get_addr@PLT
3551 or
3552 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3553 .byte 0x66; rex64
3554 		     call *__tls_get_addr@GOTPCREL(%rip)
3555 which may be converted to
3556 addr32 call __tls_get_addr
3557 into:
3558 movq %fs:0, %rax
3559 addq foo@gottpoff(%rip), %rax
3560 For 32bit, change
3561 leaq foo@tlsgd(%rip), %rdi
3562 .word 0x6666; rex64; call __tls_get_addr@PLT
3563 or
3564 leaq foo@tlsgd(%rip), %rdi
3565 .byte 0x66; rex64;
3566 call *__tls_get_addr@GOTPCREL(%rip)
3567 which may be converted to
3568 addr32 call __tls_get_addr
3569 into:
3570 movl %fs:0, %eax
3571 addq foo@gottpoff(%rip), %rax
3572 For largepic, change:
3573 leaq foo@tlsgd(%rip), %rdi
3574 movabsq $__tls_get_addr@pltoff, %rax
3575 addq %r15, %rax
3576 call *%rax
3577 into:
3578 movq %fs:0, %rax
3579 addq foo@gottpoff(%rax), %rax
3580 nopw 0x0(%rax,%rax,1) */
3581 int largepic = 0;
3582 if (ABI_64_P (output_bfd))
3583 {
3584 if (contents[roff + 5] == 0xb8)
3585 {
3586 memcpy (contents + roff - 3,
3587 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3588 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3589 largepic = 1;
3590 }
3591 else
3592 memcpy (contents + roff - 4,
3593 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3594 16);
3595 }
3596 else
3597 memcpy (contents + roff - 3,
3598 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3599 15);
3600
3601 relocation = (htab->elf.sgot->output_section->vma
3602 + htab->elf.sgot->output_offset + off
3603 - roff
3604 - largepic
3605 - input_section->output_section->vma
3606 - input_section->output_offset
3607 - 12);
3608 bfd_put_32 (output_bfd, relocation,
3609 contents + roff + 8 + largepic);
3610 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3611 rel++;
3612 wrel++;
3613 continue;
3614 }
3615 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3616 {
3617 /* GDesc -> IE transition.
3618 It's originally something like:
3619 leaq x@tlsdesc(%rip), %rax
3620
3621 Change it to:
3622 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3623
3624 /* Now modify the instruction as appropriate. To
3625 turn a leaq into a movq in the form we use it, it
3626 suffices to change the second byte from 0x8d to
3627 0x8b. */
3628 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3629
3630 bfd_put_32 (output_bfd,
3631 htab->elf.sgot->output_section->vma
3632 + htab->elf.sgot->output_offset + off
3633 - rel->r_offset
3634 - input_section->output_section->vma
3635 - input_section->output_offset
3636 - 4,
3637 contents + roff);
3638 continue;
3639 }
3640 else if (r_type == R_X86_64_TLSDESC_CALL)
3641 {
3642 /* GDesc -> IE transition.
3643 It's originally:
3644 call *(%rax)
3645
3646 Change it to:
3647 xchg %ax, %ax. */
3648
3649 bfd_put_8 (output_bfd, 0x66, contents + roff);
3650 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3651 continue;
3652 }
3653 else
3654 BFD_ASSERT (FALSE);
3655 }
3656 break;
3657
3658 case R_X86_64_TLSLD:
3659 if (! elf_x86_64_tls_transition (info, input_bfd,
3660 input_section, contents,
3661 symtab_hdr, sym_hashes,
3662 &r_type, GOT_UNKNOWN, rel,
3663 relend, h, r_symndx, TRUE))
3664 return FALSE;
3665
3666 if (r_type != R_X86_64_TLSLD)
3667 {
3668 /* LD->LE transition:
3669 leaq foo@tlsld(%rip), %rdi
3670 call __tls_get_addr@PLT
3671 For 64bit, we change it into:
3672 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3673 For 32bit, we change it into:
3674 nopl 0x0(%rax); movl %fs:0, %eax
3675 Or
3676 leaq foo@tlsld(%rip), %rdi;
3677 call *__tls_get_addr@GOTPCREL(%rip)
3678 which may be converted to
3679 addr32 call __tls_get_addr
3680 For 64bit, we change it into:
3681 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3682 For 32bit, we change it into:
3683 nopw 0x0(%rax); movl %fs:0, %eax
3684 For largepic, change:
3685 		 leaq foo@tlsld(%rip), %rdi
3686 movabsq $__tls_get_addr@pltoff, %rax
3687 addq %rbx, %rax
3688 call *%rax
3689 into
3690 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3691 movq %fs:0, %rax */
3692
3693 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3694 if (ABI_64_P (output_bfd))
3695 {
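/* Distinguish the call forms by the bytes that follow the leaq:
0xb8 at r_offset + 5 is the large-model movabsq, 0xff or 0x67 at
r_offset + 4 is the indirect call via the GOT or the addr32-prefixed
direct call, anything else is the plain direct call. */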
3696 if (contents[rel->r_offset + 5] == 0xb8)
3697 memcpy (contents + rel->r_offset - 3,
3698 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3699 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3700 else if (contents[rel->r_offset + 4] == 0xff
3701 || contents[rel->r_offset + 4] == 0x67)
3702 memcpy (contents + rel->r_offset - 3,
3703 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3704 13);
3705 else
3706 memcpy (contents + rel->r_offset - 3,
3707 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3708 }
3709 else
3710 {
3711 if (contents[rel->r_offset + 4] == 0xff)
3712 memcpy (contents + rel->r_offset - 3,
3713 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3714 13);
3715 else
3716 memcpy (contents + rel->r_offset - 3,
3717 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3718 }
3719 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3720 and R_X86_64_PLTOFF64. */
3721 rel++;
3722 wrel++;
3723 continue;
3724 }
3725
3726 if (htab->elf.sgot == NULL)
3727 abort ();
3728
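/* The low bit of tls_ld_or_ldm_got.offset records whether the shared
LD GOT entry has already been initialized; the R_X86_64_DTPMOD64
relocation is emitted only the first time. */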
3729 off = htab->tls_ld_or_ldm_got.offset;
3730 if (off & 1)
3731 off &= ~1;
3732 else
3733 {
3734 Elf_Internal_Rela outrel;
3735
3736 if (htab->elf.srelgot == NULL)
3737 abort ();
3738
3739 outrel.r_offset = (htab->elf.sgot->output_section->vma
3740 + htab->elf.sgot->output_offset + off);
3741
3742 bfd_put_64 (output_bfd, 0,
3743 htab->elf.sgot->contents + off);
3744 bfd_put_64 (output_bfd, 0,
3745 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3746 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3747 outrel.r_addend = 0;
3748 elf_append_rela (output_bfd, htab->elf.srelgot,
3749 &outrel);
3750 htab->tls_ld_or_ldm_got.offset |= 1;
3751 }
3752 relocation = htab->elf.sgot->output_section->vma
3753 + htab->elf.sgot->output_offset + off;
3754 unresolved_reloc = FALSE;
3755 break;
3756
3757 case R_X86_64_DTPOFF32:
3758 if (!bfd_link_executable (info)
3759 || (input_section->flags & SEC_CODE) == 0)
3760 relocation -= _bfd_x86_elf_dtpoff_base (info);
3761 else
3762 relocation = elf_x86_64_tpoff (info, relocation);
3763 break;
3764
3765 case R_X86_64_TPOFF32:
3766 case R_X86_64_TPOFF64:
3767 BFD_ASSERT (bfd_link_executable (info));
3768 relocation = elf_x86_64_tpoff (info, relocation);
3769 break;
3770
3771 case R_X86_64_DTPOFF64:
3772 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3773 relocation -= _bfd_x86_elf_dtpoff_base (info);
3774 break;
3775
3776 default:
3777 break;
3778 }
3779
3780 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3781 because such sections are not SEC_ALLOC and thus ld.so will
3782 not process them. */
3783 if (unresolved_reloc
3784 && !((input_section->flags & SEC_DEBUGGING) != 0
3785 && h->def_dynamic)
3786 && _bfd_elf_section_offset (output_bfd, info, input_section,
3787 rel->r_offset) != (bfd_vma) -1)
3788 {
3789 switch (r_type)
3790 {
3791 case R_X86_64_32S:
3792 sec = h->root.u.def.section;
3793 if ((info->nocopyreloc
3794 || (eh->def_protected
3795 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3796 && !(h->root.u.def.section->flags & SEC_CODE))
3797 return elf_x86_64_need_pic (info, input_bfd, input_section,
3798 h, NULL, NULL, howto);
3799 /* Fall through. */
3800
3801 default:
3802 _bfd_error_handler
3803 /* xgettext:c-format */
3804 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3805 input_bfd,
3806 input_section,
3807 rel->r_offset,
3808 howto->name,
3809 h->root.root.string);
3810 return FALSE;
3811 }
3812 }
3813
3814 do_relocation:
3815 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3816 contents, rel->r_offset,
3817 relocation, rel->r_addend);
3818
3819 check_relocation_error:
3820 if (r != bfd_reloc_ok)
3821 {
3822 const char *name;
3823
3824 if (h != NULL)
3825 name = h->root.root.string;
3826 else
3827 {
3828 name = bfd_elf_string_from_elf_section (input_bfd,
3829 symtab_hdr->sh_link,
3830 sym->st_name);
3831 if (name == NULL)
3832 return FALSE;
3833 if (*name == '\0')
3834 name = bfd_section_name (input_bfd, sec);
3835 }
3836
3837 if (r == bfd_reloc_overflow)
3838 {
3839 if (converted_reloc)
3840 {
3841 info->callbacks->einfo
3842 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3843 return FALSE;
3844 }
3845 (*info->callbacks->reloc_overflow)
3846 (info, (h ? &h->root : NULL), name, howto->name,
3847 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3848 }
3849 else
3850 {
3851 _bfd_error_handler
3852 /* xgettext:c-format */
3853 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3854 input_bfd, input_section,
3855 rel->r_offset, name, (int) r);
3856 return FALSE;
3857 }
3858 }
3859
3860 if (wrel != rel)
3861 *wrel = *rel;
3862 }
3863
3864 if (wrel != rel)
3865 {
3866 Elf_Internal_Shdr *rel_hdr;
3867 size_t deleted = rel - wrel;
3868
3869 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3870 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3871 if (rel_hdr->sh_size == 0)
3872 {
3873 /* It is too late to remove an empty reloc section. Leave
3874 one NONE reloc.
3875 ??? What is wrong with an empty section??? */
3876 rel_hdr->sh_size = rel_hdr->sh_entsize;
3877 deleted -= 1;
3878 }
3879 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3880 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3881 input_section->reloc_count -= deleted;
3882 }
3883
3884 return TRUE;
3885 }
3886
3887 /* Finish up dynamic symbol handling. We set the contents of various
3888 dynamic sections here. */
3889
3890 static bfd_boolean
3891 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3892 struct bfd_link_info *info,
3893 struct elf_link_hash_entry *h,
3894 Elf_Internal_Sym *sym)
3895 {
3896 struct elf_x86_link_hash_table *htab;
3897 bfd_boolean use_plt_second;
3898 struct elf_x86_link_hash_entry *eh;
3899 bfd_boolean local_undefweak;
3900
3901 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3902 if (htab == NULL)
3903 return FALSE;
3904
3905 /* Use the second PLT section only if there is a .plt section. */
3906 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3907
3908 eh = (struct elf_x86_link_hash_entry *) h;
3909 if (eh->no_finish_dynamic_symbol)
3910 abort ();
3911
3912 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3913 resolved undefined weak symbols in the executable so that their
3914 references have value 0 at run-time. */
3915 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3916
3917 if (h->plt.offset != (bfd_vma) -1)
3918 {
3919 bfd_vma plt_index;
3920 bfd_vma got_offset, plt_offset;
3921 Elf_Internal_Rela rela;
3922 bfd_byte *loc;
3923 asection *plt, *gotplt, *relplt, *resolved_plt;
3924 const struct elf_backend_data *bed;
3925 bfd_vma plt_got_pcrel_offset;
3926
3927 /* When building a static executable, use .iplt, .igot.plt and
3928 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3929 if (htab->elf.splt != NULL)
3930 {
3931 plt = htab->elf.splt;
3932 gotplt = htab->elf.sgotplt;
3933 relplt = htab->elf.srelplt;
3934 }
3935 else
3936 {
3937 plt = htab->elf.iplt;
3938 gotplt = htab->elf.igotplt;
3939 relplt = htab->elf.irelplt;
3940 }
3941
3942 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3943
3944 /* Get the index in the procedure linkage table which
3945 corresponds to this symbol. This is the index of this symbol
3946 in all the symbols for which we are making plt entries. The
3947 first entry in the procedure linkage table is reserved.
3948
3949 Get the offset into the .got table of the entry that
3950 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3951 bytes. The first three are reserved for the dynamic linker.
3952
3953 For static executables, we don't reserve anything. */
3954
3955 if (plt == htab->elf.splt)
3956 {
3957 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3958 - htab->plt.has_plt0);
3959 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3960 }
3961 else
3962 {
3963 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3964 got_offset = got_offset * GOT_ENTRY_SIZE;
3965 }
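/* For example, assuming the usual 16-byte lazy PLT entries and
8-byte GOT entries with PLT0 present, the entry at plt.offset 0x20
is PLT slot 1 and gets .got.plt offset (1 + 3) * 8 = 32. */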
3966
3967 /* Fill in the entry in the procedure linkage table. */
3968 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3969 htab->plt.plt_entry_size);
3970 if (use_plt_second)
3971 {
3972 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3973 htab->non_lazy_plt->plt_entry,
3974 htab->non_lazy_plt->plt_entry_size);
3975
3976 resolved_plt = htab->plt_second;
3977 plt_offset = eh->plt_second.offset;
3978 }
3979 else
3980 {
3981 resolved_plt = plt;
3982 plt_offset = h->plt.offset;
3983 }
3984
3985 /* Fill in the relocated fields of the PLT entry. */
3986
3987 /* Put in the PC-relative offset of the GOT entry, measured from the
3988 end of the instruction in the PLT entry that refers to it. */
3989 plt_got_pcrel_offset = (gotplt->output_section->vma
3990 + gotplt->output_offset
3991 + got_offset
3992 - resolved_plt->output_section->vma
3993 - resolved_plt->output_offset
3994 - plt_offset
3995 - htab->plt.plt_got_insn_size);
3996
3997 /* Check PC-relative offset overflow in PLT entry. */
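/* Adding 0x80000000 maps the valid signed 32-bit range onto
0..0xffffffff, so any larger result cannot be encoded as a rel32
displacement. */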
3998 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3999 /* xgettext:c-format */
4000 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4001 output_bfd, h->root.root.string);
4002
4003 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4004 (resolved_plt->contents + plt_offset
4005 + htab->plt.plt_got_offset));
4006
4007 /* Fill in the entry in the global offset table; initially this
4008 points to the second part of the PLT entry. Leave the entry
4009 as zero for an undefined weak symbol in a PIE, since no PLT
4010 relocation is emitted against such a symbol. */
4011 if (!local_undefweak)
4012 {
4013 if (htab->plt.has_plt0)
4014 bfd_put_64 (output_bfd, (plt->output_section->vma
4015 + plt->output_offset
4016 + h->plt.offset
4017 + htab->lazy_plt->plt_lazy_offset),
4018 gotplt->contents + got_offset);
4019
4020 /* Fill in the entry in the .rela.plt section. */
4021 rela.r_offset = (gotplt->output_section->vma
4022 + gotplt->output_offset
4023 + got_offset);
4024 if (PLT_LOCAL_IFUNC_P (info, h))
4025 {
4026 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4027 h->root.root.string,
4028 h->root.u.def.section->owner);
4029
4030 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4031 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4032 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4033 rela.r_addend = (h->root.u.def.value
4034 + h->root.u.def.section->output_section->vma
4035 + h->root.u.def.section->output_offset);
4036 /* R_X86_64_IRELATIVE comes last. */
4037 plt_index = htab->next_irelative_index--;
4038 }
4039 else
4040 {
4041 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4042 rela.r_addend = 0;
4043 plt_index = htab->next_jump_slot_index++;
4044 }
4045
4046 /* Don't fill in the second and third slots of the PLT entry
4047 for static executables, nor when PLT0 is not used. */
4048 if (plt == htab->elf.splt && htab->plt.has_plt0)
4049 {
4050 bfd_vma plt0_offset
4051 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4052
4053 /* Put relocation index. */
4054 bfd_put_32 (output_bfd, plt_index,
4055 (plt->contents + h->plt.offset
4056 + htab->lazy_plt->plt_reloc_offset));
4057
4058 /* Put offset for jmp .PLT0 and check for overflow. We don't
4059 check relocation index for overflow since branch displacement
4060 will overflow first. */
4061 if (plt0_offset > 0x80000000)
4062 /* xgettext:c-format */
4063 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4064 output_bfd, h->root.root.string);
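/* PLT0 sits at the very start of .plt, so the jmp displacement is
simply the negated offset of the end of the jmp instruction. */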
4065 bfd_put_32 (output_bfd, - plt0_offset,
4066 (plt->contents + h->plt.offset
4067 + htab->lazy_plt->plt_plt_offset));
4068 }
4069
4070 bed = get_elf_backend_data (output_bfd);
4071 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4072 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4073 }
4074 }
4075 else if (eh->plt_got.offset != (bfd_vma) -1)
4076 {
4077 bfd_vma got_offset, plt_offset;
4078 asection *plt, *got;
4079 bfd_boolean got_after_plt;
4080 int32_t got_pcrel_offset;
4081
4082 /* Set the entry in the GOT procedure linkage table. */
4083 plt = htab->plt_got;
4084 got = htab->elf.sgot;
4085 got_offset = h->got.offset;
4086
4087 if (got_offset == (bfd_vma) -1
4088 || (h->type == STT_GNU_IFUNC && h->def_regular)
4089 || plt == NULL
4090 || got == NULL)
4091 abort ();
4092
4093 /* Use the non-lazy PLT entry template for the GOT PLT since they
4094 are identical. */
4095 /* Fill in the entry in the GOT procedure linkage table. */
4096 plt_offset = eh->plt_got.offset;
4097 memcpy (plt->contents + plt_offset,
4098 htab->non_lazy_plt->plt_entry,
4099 htab->non_lazy_plt->plt_entry_size);
4100
4101 /* Put in the PC-relative offset of the GOT entry, measured from
4102 the end of the instruction that refers to it. */
4103 got_pcrel_offset = (got->output_section->vma
4104 + got->output_offset
4105 + got_offset
4106 - plt->output_section->vma
4107 - plt->output_offset
4108 - plt_offset
4109 - htab->non_lazy_plt->plt_got_insn_size);
4110
4111 /* Check PC-relative offset overflow in GOT PLT entry. */
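/* got_pcrel_offset is a signed 32-bit value, so an overflow shows up
as a sign that contradicts the relative placement of the GOT and the
.plt.got section. */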
4112 got_after_plt = got->output_section->vma > plt->output_section->vma;
4113 if ((got_after_plt && got_pcrel_offset < 0)
4114 || (!got_after_plt && got_pcrel_offset > 0))
4115 /* xgettext:c-format */
4116 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4117 output_bfd, h->root.root.string);
4118
4119 bfd_put_32 (output_bfd, got_pcrel_offset,
4120 (plt->contents + plt_offset
4121 + htab->non_lazy_plt->plt_got_offset));
4122 }
4123
4124 if (!local_undefweak
4125 && !h->def_regular
4126 && (h->plt.offset != (bfd_vma) -1
4127 || eh->plt_got.offset != (bfd_vma) -1))
4128 {
4129 /* Mark the symbol as undefined, rather than as defined in
4130 the .plt section. Leave the value if there were any
4131 relocations where pointer equality matters (this is a clue
4132 for the dynamic linker, to make function pointer
4133 comparisons work between an application and shared
4134 library), otherwise set it to zero. If a function is only
4135 called from a binary, there is no need to slow down
4136 shared libraries because of that. */
4137 sym->st_shndx = SHN_UNDEF;
4138 if (!h->pointer_equality_needed)
4139 sym->st_value = 0;
4140 }
4141
4142 /* Don't generate a dynamic GOT relocation against an undefined
4143 weak symbol in an executable. */
4144 if (h->got.offset != (bfd_vma) -1
4145 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4146 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4147 && !local_undefweak)
4148 {
4149 Elf_Internal_Rela rela;
4150 asection *relgot = htab->elf.srelgot;
4151
4152 /* This symbol has an entry in the global offset table. Set it
4153 up. */
4154 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4155 abort ();
4156
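/* Bit 0 of h->got.offset is used as an "entry initialized" marker,
so mask it off to get the real offset into .got. */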
4157 rela.r_offset = (htab->elf.sgot->output_section->vma
4158 + htab->elf.sgot->output_offset
4159 + (h->got.offset &~ (bfd_vma) 1));
4160
4161 /* If this is a static link, or it is a -Bsymbolic link and the
4162 symbol is defined locally or was forced to be local because
4163 of a version file, we just want to emit a RELATIVE reloc.
4164 The entry in the global offset table will already have been
4165 initialized in the relocate_section function. */
4166 if (h->def_regular
4167 && h->type == STT_GNU_IFUNC)
4168 {
4169 if (h->plt.offset == (bfd_vma) -1)
4170 {
4171 /* STT_GNU_IFUNC is referenced without PLT. */
4172 if (htab->elf.splt == NULL)
4173 {
4174 /* Use the .rel[a].iplt section to store .got relocations
4175 in a static executable. */
4176 relgot = htab->elf.irelplt;
4177 }
4178 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4179 {
4180 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4181 h->root.root.string,
4182 h->root.u.def.section->owner);
4183
4184 rela.r_info = htab->r_info (0,
4185 R_X86_64_IRELATIVE);
4186 rela.r_addend = (h->root.u.def.value
4187 + h->root.u.def.section->output_section->vma
4188 + h->root.u.def.section->output_offset);
4189 }
4190 else
4191 goto do_glob_dat;
4192 }
4193 else if (bfd_link_pic (info))
4194 {
4195 /* Generate R_X86_64_GLOB_DAT. */
4196 goto do_glob_dat;
4197 }
4198 else
4199 {
4200 asection *plt;
4201 bfd_vma plt_offset;
4202
4203 if (!h->pointer_equality_needed)
4204 abort ();
4205
4206 /* For a non-shared object, we can't use .got.plt, which
4207 contains the real function address, if we need pointer
4208 equality. We load the GOT entry with the PLT entry instead. */
4209 if (htab->plt_second != NULL)
4210 {
4211 plt = htab->plt_second;
4212 plt_offset = eh->plt_second.offset;
4213 }
4214 else
4215 {
4216 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4217 plt_offset = h->plt.offset;
4218 }
4219 bfd_put_64 (output_bfd, (plt->output_section->vma
4220 + plt->output_offset
4221 + plt_offset),
4222 htab->elf.sgot->contents + h->got.offset);
4223 return TRUE;
4224 }
4225 }
4226 else if (bfd_link_pic (info)
4227 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4228 {
4229 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4230 return FALSE;
4231 BFD_ASSERT((h->got.offset & 1) != 0);
4232 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4233 rela.r_addend = (h->root.u.def.value
4234 + h->root.u.def.section->output_section->vma
4235 + h->root.u.def.section->output_offset);
4236 }
4237 else
4238 {
4239 BFD_ASSERT((h->got.offset & 1) == 0);
4240 do_glob_dat:
4241 bfd_put_64 (output_bfd, (bfd_vma) 0,
4242 htab->elf.sgot->contents + h->got.offset);
4243 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4244 rela.r_addend = 0;
4245 }
4246
4247 elf_append_rela (output_bfd, relgot, &rela);
4248 }
4249
4250 if (h->needs_copy)
4251 {
4252 Elf_Internal_Rela rela;
4253 asection *s;
4254
4255 /* This symbol needs a copy reloc. Set it up. */
4256 VERIFY_COPY_RELOC (h, htab)
4257
4258 rela.r_offset = (h->root.u.def.value
4259 + h->root.u.def.section->output_section->vma
4260 + h->root.u.def.section->output_offset);
4261 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4262 rela.r_addend = 0;
4263 if (h->root.u.def.section == htab->elf.sdynrelro)
4264 s = htab->elf.sreldynrelro;
4265 else
4266 s = htab->elf.srelbss;
4267 elf_append_rela (output_bfd, s, &rela);
4268 }
4269
4270 return TRUE;
4271 }
4272
4273 /* Finish up local dynamic symbol handling. We set the contents of
4274 various dynamic sections here. */
4275
4276 static bfd_boolean
4277 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4278 {
4279 struct elf_link_hash_entry *h
4280 = (struct elf_link_hash_entry *) *slot;
4281 struct bfd_link_info *info
4282 = (struct bfd_link_info *) inf;
4283
4284 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4285 info, h, NULL);
4286 }
4287
4288 /* Finish up undefined weak symbol handling in a PIE. Fill in its PLT
4289 entry here, since an undefined weak symbol may not be dynamic and
4290 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4291
4292 static bfd_boolean
4293 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4294 void *inf)
4295 {
4296 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4297 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4298
4299 if (h->root.type != bfd_link_hash_undefweak
4300 || h->dynindx != -1)
4301 return TRUE;
4302
4303 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4304 info, h, NULL);
4305 }
4306
4307 /* Used to decide how to sort relocs in an optimal manner for the
4308 dynamic linker, before writing them out. */
4309
4310 static enum elf_reloc_type_class
4311 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4312 const asection *rel_sec ATTRIBUTE_UNUSED,
4313 const Elf_Internal_Rela *rela)
4314 {
4315 bfd *abfd = info->output_bfd;
4316 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4317 struct elf_x86_link_hash_table *htab
4318 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4319
4320 if (htab->elf.dynsym != NULL
4321 && htab->elf.dynsym->contents != NULL)
4322 {
4323 /* Check relocation against STT_GNU_IFUNC symbol if there are
4324 dynamic symbols. */
4325 unsigned long r_symndx = htab->r_sym (rela->r_info);
4326 if (r_symndx != STN_UNDEF)
4327 {
4328 Elf_Internal_Sym sym;
4329 if (!bed->s->swap_symbol_in (abfd,
4330 (htab->elf.dynsym->contents
4331 + r_symndx * bed->s->sizeof_sym),
4332 0, &sym))
4333 abort ();
4334
4335 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4336 return reloc_class_ifunc;
4337 }
4338 }
4339
4340 switch ((int) ELF32_R_TYPE (rela->r_info))
4341 {
4342 case R_X86_64_IRELATIVE:
4343 return reloc_class_ifunc;
4344 case R_X86_64_RELATIVE:
4345 case R_X86_64_RELATIVE64:
4346 return reloc_class_relative;
4347 case R_X86_64_JUMP_SLOT:
4348 return reloc_class_plt;
4349 case R_X86_64_COPY:
4350 return reloc_class_copy;
4351 default:
4352 return reloc_class_normal;
4353 }
4354 }
4355
4356 /* Finish up the dynamic sections. */
4357
4358 static bfd_boolean
4359 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4360 struct bfd_link_info *info)
4361 {
4362 struct elf_x86_link_hash_table *htab;
4363 bfd *dynobj;
4364 asection *sdyn;
4365
4366 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4367 if (htab == NULL)
4368 return FALSE;
4369
4370 dynobj = htab->elf.dynobj;
4371 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4372
4373 if (htab->elf.dynamic_sections_created)
4374 {
4375 bfd_byte *dyncon, *dynconend;
4376 const struct elf_backend_data *bed;
4377 bfd_size_type sizeof_dyn;
4378
4379 if (sdyn == NULL || htab->elf.sgot == NULL)
4380 abort ();
4381
4382 bed = get_elf_backend_data (dynobj);
4383 sizeof_dyn = bed->s->sizeof_dyn;
4384 dyncon = sdyn->contents;
4385 dynconend = sdyn->contents + sdyn->size;
4386 for (; dyncon < dynconend; dyncon += sizeof_dyn)
4387 {
4388 Elf_Internal_Dyn dyn;
4389 asection *s;
4390
4391 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
4392
4393 switch (dyn.d_tag)
4394 {
4395 default:
4396 continue;
4397
4398 case DT_PLTGOT:
4399 s = htab->elf.sgotplt;
4400 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4401 break;
4402
4403 case DT_JMPREL:
4404 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
4405 break;
4406
4407 case DT_PLTRELSZ:
4408 s = htab->elf.srelplt->output_section;
4409 dyn.d_un.d_val = s->size;
4410 break;
4411
4412 case DT_TLSDESC_PLT:
4413 s = htab->elf.splt;
4414 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4415 + htab->tlsdesc_plt;
4416 break;
4417
4418 case DT_TLSDESC_GOT:
4419 s = htab->elf.sgot;
4420 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4421 + htab->tlsdesc_got;
4422 break;
4423 }
4424
4425 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
4426 }
4427
4428 if (htab->elf.splt && htab->elf.splt->size > 0)
4429 {
4430 elf_section_data (htab->elf.splt->output_section)
4431 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4432
4433 if (htab->plt.has_plt0)
4434 {
4435 /* Fill in the special first entry in the procedure linkage
4436 table. */
4437 memcpy (htab->elf.splt->contents,
4438 htab->lazy_plt->plt0_entry,
4439 htab->lazy_plt->plt0_entry_size);
4440 /* Add the offset for pushq GOT+8(%rip); since the instruction
4441 uses 6 bytes, subtract this value. */
4442 bfd_put_32 (output_bfd,
4443 (htab->elf.sgotplt->output_section->vma
4444 + htab->elf.sgotplt->output_offset
4445 + 8
4446 - htab->elf.splt->output_section->vma
4447 - htab->elf.splt->output_offset
4448 - 6),
4449 (htab->elf.splt->contents
4450 + htab->lazy_plt->plt0_got1_offset));
4451 /* Add offset for the PC-relative instruction accessing
4452 GOT+16, subtracting the offset to the end of that
4453 instruction. */
4454 bfd_put_32 (output_bfd,
4455 (htab->elf.sgotplt->output_section->vma
4456 + htab->elf.sgotplt->output_offset
4457 + 16
4458 - htab->elf.splt->output_section->vma
4459 - htab->elf.splt->output_offset
4460 - htab->lazy_plt->plt0_got2_insn_end),
4461 (htab->elf.splt->contents
4462 + htab->lazy_plt->plt0_got2_offset));
4463
4464 if (htab->tlsdesc_plt)
4465 {
4466 bfd_put_64 (output_bfd, (bfd_vma) 0,
4467 htab->elf.sgot->contents + htab->tlsdesc_got);
4468
4469 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4470 htab->lazy_plt->plt0_entry,
4471 htab->lazy_plt->plt0_entry_size);
4472
4473 /* Add the offset for pushq GOT+8(%rip); since the
4474 instruction uses 6 bytes, subtract this value. */
4475 bfd_put_32 (output_bfd,
4476 (htab->elf.sgotplt->output_section->vma
4477 + htab->elf.sgotplt->output_offset
4478 + 8
4479 - htab->elf.splt->output_section->vma
4480 - htab->elf.splt->output_offset
4481 - htab->tlsdesc_plt
4482 - 6),
4483 (htab->elf.splt->contents
4484 + htab->tlsdesc_plt
4485 + htab->lazy_plt->plt0_got1_offset));
4486 /* Add offset for the PC-relative instruction accessing
4487 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4488 subtracting the offset to the end of that
4489 instruction. */
4490 bfd_put_32 (output_bfd,
4491 (htab->elf.sgot->output_section->vma
4492 + htab->elf.sgot->output_offset
4493 + htab->tlsdesc_got
4494 - htab->elf.splt->output_section->vma
4495 - htab->elf.splt->output_offset
4496 - htab->tlsdesc_plt
4497 - htab->lazy_plt->plt0_got2_insn_end),
4498 (htab->elf.splt->contents
4499 + htab->tlsdesc_plt
4500 + htab->lazy_plt->plt0_got2_offset));
4501 }
4502 }
4503 }
4504
4505 if (htab->plt_got != NULL && htab->plt_got->size > 0)
4506 elf_section_data (htab->plt_got->output_section)
4507 ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
4508
4509 if (htab->plt_second != NULL && htab->plt_second->size > 0)
4510 elf_section_data (htab->plt_second->output_section)
4511 ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
4512 }
4513
4514 /* The GOT is always created in setup_gnu_properties, but it may not
4515 be needed. */
4516 if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0)
4517 {
4518 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
4519 {
4520 _bfd_error_handler
4521 (_("discarded output section: `%A'"), htab->elf.sgotplt);
4522 return FALSE;
4523 }
4524
4525 /* Set the first entry in the global offset table to the address of
4526 the dynamic section. */
4527 if (sdyn == NULL)
4528 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
4529 else
4530 bfd_put_64 (output_bfd,
4531 sdyn->output_section->vma + sdyn->output_offset,
4532 htab->elf.sgotplt->contents);
4533 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
4534 bfd_put_64 (output_bfd, (bfd_vma) 0,
4535 htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
4536 bfd_put_64 (output_bfd, (bfd_vma) 0,
4537 htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
4538
4539 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize
4540 = GOT_ENTRY_SIZE;
4541 }
4542
4543 /* Adjust .eh_frame for .plt section. */
4544 if (htab->plt_eh_frame != NULL
4545 && htab->plt_eh_frame->contents != NULL)
4546 {
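/* The FDE's initial-location field (at PLT_FDE_START_OFFSET) uses the
DW_EH_PE_pcrel | DW_EH_PE_sdata4 encoding, so it holds the displacement
from the field itself to the start of .plt. */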
4547 if (htab->elf.splt != NULL
4548 && htab->elf.splt->size != 0
4549 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
4550 && htab->elf.splt->output_section != NULL
4551 && htab->plt_eh_frame->output_section != NULL)
4552 {
4553 bfd_vma plt_start = htab->elf.splt->output_section->vma;
4554 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
4555 + htab->plt_eh_frame->output_offset
4556 + PLT_FDE_START_OFFSET;
4557 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4558 htab->plt_eh_frame->contents
4559 + PLT_FDE_START_OFFSET);
4560 }
4561 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
4562 {
4563 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4564 htab->plt_eh_frame,
4565 htab->plt_eh_frame->contents))
4566 return FALSE;
4567 }
4568 }
4569
4570 /* Adjust .eh_frame for .plt.got section. */
4571 if (htab->plt_got_eh_frame != NULL
4572 && htab->plt_got_eh_frame->contents != NULL)
4573 {
4574 if (htab->plt_got != NULL
4575 && htab->plt_got->size != 0
4576 && (htab->plt_got->flags & SEC_EXCLUDE) == 0
4577 && htab->plt_got->output_section != NULL
4578 && htab->plt_got_eh_frame->output_section != NULL)
4579 {
4580 bfd_vma plt_start = htab->plt_got->output_section->vma;
4581 bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma
4582 + htab->plt_got_eh_frame->output_offset
4583 + PLT_FDE_START_OFFSET;
4584 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4585 htab->plt_got_eh_frame->contents
4586 + PLT_FDE_START_OFFSET);
4587 }
4588 if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
4589 {
4590 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4591 htab->plt_got_eh_frame,
4592 htab->plt_got_eh_frame->contents))
4593 return FALSE;
4594 }
4595 }
4596
4597 /* Adjust .eh_frame for the second PLT section. */
4598 if (htab->plt_second_eh_frame != NULL
4599 && htab->plt_second_eh_frame->contents != NULL)
4600 {
4601 if (htab->plt_second != NULL
4602 && htab->plt_second->size != 0
4603 && (htab->plt_second->flags & SEC_EXCLUDE) == 0
4604 && htab->plt_second->output_section != NULL
4605 && htab->plt_second_eh_frame->output_section != NULL)
4606 {
4607 bfd_vma plt_start = htab->plt_second->output_section->vma;
4608 bfd_vma eh_frame_start
4609 = (htab->plt_second_eh_frame->output_section->vma
4610 + htab->plt_second_eh_frame->output_offset
4611 + PLT_FDE_START_OFFSET);
4612 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4613 htab->plt_second_eh_frame->contents
4614 + PLT_FDE_START_OFFSET);
4615 }
4616 if (htab->plt_second_eh_frame->sec_info_type
4617 == SEC_INFO_TYPE_EH_FRAME)
4618 {
4619 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4620 htab->plt_second_eh_frame,
4621 htab->plt_second_eh_frame->contents))
4622 return FALSE;
4623 }
4624 }
4625
4626 if (htab->elf.sgot && htab->elf.sgot->size > 0)
4627 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
4628 = GOT_ENTRY_SIZE;
4629
4630 /* Fill PLT entries for undefined weak symbols in PIE. */
4631 if (bfd_link_pie (info))
4632 bfd_hash_traverse (&info->hash->table,
4633 elf_x86_64_pie_finish_undefweak_symbol,
4634 info);
4635
4636 return TRUE;
4637 }
4638
4639 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4640 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4641 It has to be done before elf_link_sort_relocs is called so that
4642 dynamic relocations are properly sorted. */
4643
4644 static bfd_boolean
4645 elf_x86_64_output_arch_local_syms
4646 (bfd *output_bfd ATTRIBUTE_UNUSED,
4647 struct bfd_link_info *info,
4648 void *flaginfo ATTRIBUTE_UNUSED,
4649 int (*func) (void *, const char *,
4650 Elf_Internal_Sym *,
4651 asection *,
4652 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4653 {
4654 struct elf_x86_link_hash_table *htab
4655 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4656 if (htab == NULL)
4657 return FALSE;
4658
4659 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4660 htab_traverse (htab->loc_hash_table,
4661 elf_x86_64_finish_local_dynamic_symbol,
4662 info);
4663
4664 return TRUE;
4665 }
4666
4667 /* Forward declaration. */
4668 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4669
4670 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4671 dynamic relocations. */
4672
4673 static long
4674 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4675 long symcount ATTRIBUTE_UNUSED,
4676 asymbol **syms ATTRIBUTE_UNUSED,
4677 long dynsymcount,
4678 asymbol **dynsyms,
4679 asymbol **ret)
4680 {
4681 long count, i, n;
4682 int j;
4683 bfd_byte *plt_contents;
4684 long relsize;
4685 const struct elf_x86_lazy_plt_layout *lazy_plt;
4686 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4687 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4688 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4689 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4690 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4691 asection *plt;
4692 enum elf_x86_plt_type plt_type;
4693 struct elf_x86_plt plts[] =
4694 {
4695 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4696 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4697 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4698 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4699 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4700 };
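/* Candidate PLT sections. Entries left as plt_unknown are classified
below by matching their contents against the known lazy, BND and IBT
PLT templates. */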
4701
4702 *ret = NULL;
4703
4704 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4705 return 0;
4706
4707 if (dynsymcount <= 0)
4708 return 0;
4709
4710 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4711 if (relsize <= 0)
4712 return -1;
4713
4714 if (get_elf_x86_64_backend_data (abfd)->os == is_normal)
4715 {
4716 lazy_plt = &elf_x86_64_lazy_plt;
4717 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4718 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4719 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4720 if (ABI_64_P (abfd))
4721 {
4722 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4723 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4724 }
4725 else
4726 {
4727 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4728 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4729 }
4730 }
4731 else
4732 {
4733 lazy_plt = &elf_x86_64_nacl_plt;
4734 non_lazy_plt = NULL;
4735 lazy_bnd_plt = NULL;
4736 non_lazy_bnd_plt = NULL;
4737 lazy_ibt_plt = NULL;
4738 non_lazy_ibt_plt = NULL;
4739 }
4740
4741 count = 0;
4742 for (j = 0; plts[j].name != NULL; j++)
4743 {
4744 plt = bfd_get_section_by_name (abfd, plts[j].name);
4745 if (plt == NULL || plt->size == 0)
4746 continue;
4747
4748 /* Get the PLT section contents. */
4749 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4750 if (plt_contents == NULL)
4751 break;
4752 if (!bfd_get_section_contents (abfd, (asection *) plt,
4753 plt_contents, 0, plt->size))
4754 {
4755 free (plt_contents);
4756 break;
4757 }
4758
4759 /* Check what kind of PLT it is. */
4760 plt_type = plt_unknown;
4761 if (plts[j].type == plt_unknown
4762 && (plt->size >= (lazy_plt->plt_entry_size
4763 + lazy_plt->plt_entry_size)))
4764 {
4765 /* Match lazy PLT first. Need to check the first two
4766 instructions. */
4767 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4768 lazy_plt->plt0_got1_offset) == 0)
4769 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4770 2) == 0))
4771 plt_type = plt_lazy;
4772 else if (lazy_bnd_plt != NULL
4773 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4774 lazy_bnd_plt->plt0_got1_offset) == 0)
4775 && (memcmp (plt_contents + 6,
4776 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4777 {
4778 plt_type = plt_lazy | plt_second;
4779 /* The first entry in the lazy IBT PLT is the same as in the
4780 lazy BND PLT. */
4781 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4782 lazy_ibt_plt->plt_entry,
4783 lazy_ibt_plt->plt_got_offset) == 0))
4784 lazy_plt = lazy_ibt_plt;
4785 else
4786 lazy_plt = lazy_bnd_plt;
4787 }
4788 }
4789
4790 if (non_lazy_plt != NULL
4791 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4792 && plt->size >= non_lazy_plt->plt_entry_size)
4793 {
4794 /* Match non-lazy PLT. */
4795 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4796 non_lazy_plt->plt_got_offset) == 0)
4797 plt_type = plt_non_lazy;
4798 }
4799
4800 if (plt_type == plt_unknown || plt_type == plt_second)
4801 {
4802 if (non_lazy_bnd_plt != NULL
4803 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4804 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4805 non_lazy_bnd_plt->plt_got_offset) == 0))
4806 {
4807 /* Match BND PLT. */
4808 plt_type = plt_second;
4809 non_lazy_plt = non_lazy_bnd_plt;
4810 }
4811 else if (non_lazy_ibt_plt != NULL
4812 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4813 && (memcmp (plt_contents,
4814 non_lazy_ibt_plt->plt_entry,
4815 non_lazy_ibt_plt->plt_got_offset) == 0))
4816 {
4817 /* Match IBT PLT. */
4818 plt_type = plt_second;
4819 non_lazy_plt = non_lazy_ibt_plt;
4820 }
4821 }
4822
4823 if (plt_type == plt_unknown)
4824 {
4825 free (plt_contents);
4826 continue;
4827 }
4828
4829 plts[j].sec = plt;
4830 plts[j].type = plt_type;
4831
4832 if ((plt_type & plt_lazy))
4833 {
4834 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4835 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4836 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4837 /* Skip PLT0 in lazy PLT. */
4838 i = 1;
4839 }
4840 else
4841 {
4842 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4843 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4844 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4845 i = 0;
4846 }
4847
4848 /* Skip lazy PLT when the second PLT is used. */
4849 if (plt_type == (plt_lazy | plt_second))
4850 plts[j].count = 0;
4851 else
4852 {
4853 n = plt->size / plts[j].plt_entry_size;
4854 plts[j].count = n;
4855 count += n - i;
4856 }
4857
4858 plts[j].contents = plt_contents;
4859 }
4860
4861 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4862 (bfd_vma) 0, plts, dynsyms,
4863 ret);
4864 }
4865
4866 /* Handle an x86-64 specific section when reading an object file. This
4867 is called when elfcode.h finds a section with an unknown type. */
4868
4869 static bfd_boolean
4870 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4871 const char *name, int shindex)
4872 {
4873 if (hdr->sh_type != SHT_X86_64_UNWIND)
4874 return FALSE;
4875
4876 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4877 return FALSE;
4878
4879 return TRUE;
4880 }
4881
4882 /* Hook called by the linker routine which adds symbols from an object
4883 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4884 of .bss. */
4885
4886 static bfd_boolean
4887 elf_x86_64_add_symbol_hook (bfd *abfd,
4888 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4889 Elf_Internal_Sym *sym,
4890 const char **namep ATTRIBUTE_UNUSED,
4891 flagword *flagsp ATTRIBUTE_UNUSED,
4892 asection **secp,
4893 bfd_vma *valp)
4894 {
4895 asection *lcomm;
4896
4897 switch (sym->st_shndx)
4898 {
4899 case SHN_X86_64_LCOMMON:
4900 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4901 if (lcomm == NULL)
4902 {
4903 lcomm = bfd_make_section_with_flags (abfd,
4904 "LARGE_COMMON",
4905 (SEC_ALLOC
4906 | SEC_IS_COMMON
4907 | SEC_LINKER_CREATED));
4908 if (lcomm == NULL)
4909 return FALSE;
4910 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4911 }
4912 *secp = lcomm;
4913 *valp = sym->st_size;
4914 return TRUE;
4915 }
4916
4917 return TRUE;
4918 }
4919
4920
4921 /* Given a BFD section, try to locate the corresponding ELF section
4922 index. */
4923
4924 static bfd_boolean
4925 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4926 asection *sec, int *index_return)
4927 {
4928 if (sec == &_bfd_elf_large_com_section)
4929 {
4930 *index_return = SHN_X86_64_LCOMMON;
4931 return TRUE;
4932 }
4933 return FALSE;
4934 }
4935
4936 /* Process a symbol. */
4937
4938 static void
4939 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4940 asymbol *asym)
4941 {
4942 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4943
4944 switch (elfsym->internal_elf_sym.st_shndx)
4945 {
4946 case SHN_X86_64_LCOMMON:
4947 asym->section = &_bfd_elf_large_com_section;
4948 asym->value = elfsym->internal_elf_sym.st_size;
4949 /* Common symbol doesn't set BSF_GLOBAL. */
4950 asym->flags &= ~BSF_GLOBAL;
4951 break;
4952 }
4953 }
4954
4955 static bfd_boolean
4956 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4957 {
4958 return (sym->st_shndx == SHN_COMMON
4959 || sym->st_shndx == SHN_X86_64_LCOMMON);
4960 }
4961
4962 static unsigned int
4963 elf_x86_64_common_section_index (asection *sec)
4964 {
4965 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4966 return SHN_COMMON;
4967 else
4968 return SHN_X86_64_LCOMMON;
4969 }
4970
4971 static asection *
4972 elf_x86_64_common_section (asection *sec)
4973 {
4974 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4975 return bfd_com_section_ptr;
4976 else
4977 return &_bfd_elf_large_com_section;
4978 }
4979
4980 static bfd_boolean
4981 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4982 const Elf_Internal_Sym *sym,
4983 asection **psec,
4984 bfd_boolean newdef,
4985 bfd_boolean olddef,
4986 bfd *oldbfd,
4987 const asection *oldsec)
4988 {
4989 /* A normal common symbol and a large common symbol result in a
4990 normal common symbol. We turn the large common symbol into a
4991 normal one. */
4992 if (!olddef
4993 && h->root.type == bfd_link_hash_common
4994 && !newdef
4995 && bfd_is_com_section (*psec)
4996 && oldsec != *psec)
4997 {
4998 if (sym->st_shndx == SHN_COMMON
4999 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5000 {
5001 h->root.u.c.p->section
5002 = bfd_make_section_old_way (oldbfd, "COMMON");
5003 h->root.u.c.p->section->flags = SEC_ALLOC;
5004 }
5005 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5006 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5007 *psec = bfd_com_section_ptr;
5008 }
5009
5010 return TRUE;
5011 }
5012
5013 static int
5014 elf_x86_64_additional_program_headers (bfd *abfd,
5015 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5016 {
5017 asection *s;
5018 int count = 0;
5019
5020 /* Check to see if we need a large readonly segment. */
5021 s = bfd_get_section_by_name (abfd, ".lrodata");
5022 if (s && (s->flags & SEC_LOAD))
5023 count++;
5024
5025 /* Check to see if we need a large data segment. Since the .lbss section
5026 is placed right after the .bss section, there should be no need for
5027 a large data segment just because of .lbss. */
5028 s = bfd_get_section_by_name (abfd, ".ldata");
5029 if (s && (s->flags & SEC_LOAD))
5030 count++;
5031
5032 return count;
5033 }
5034
5035 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5036
5037 static bfd_boolean
5038 elf_x86_64_relocs_compatible (const bfd_target *input,
5039 const bfd_target *output)
5040 {
5041 return ((xvec_get_elf_backend_data (input)->s->elfclass
5042 == xvec_get_elf_backend_data (output)->s->elfclass)
5043 && _bfd_elf_relocs_compatible (input, output));
5044 }
5045
5046 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5047 with GNU properties if found. Otherwise, return NULL. */
5048
5049 static bfd *
5050 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5051 {
5052 struct elf_x86_init_table init_table;
5053
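/* Sanity-check R_X86_64_converted_reloc_bit, the internal marker for
converted GOTPCREL relocations: it must lie above the standard
relocation numbers, below R_X86_64_max, and must already be set in the
vtable relocation values so that OR-ing it in leaves them unchanged. */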
5054 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5055 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5056 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5057 != (int) R_X86_64_GNU_VTINHERIT)
5058 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5059 != (int) R_X86_64_GNU_VTENTRY))
5060 abort ();
5061
5062 init_table.is_vxworks = FALSE;
5063 if (get_elf_x86_64_backend_data (info->output_bfd)->os == is_normal)
5064 {
5065 if (info->bndplt)
5066 {
5067 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5068 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5069 }
5070 else
5071 {
5072 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5073 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5074 }
5075
5076 if (ABI_64_P (info->output_bfd))
5077 {
5078 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5079 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5080 }
5081 else
5082 {
5083 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5084 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5085 }
5086 init_table.normal_target = TRUE;
5087 }
5088 else
5089 {
5090 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5091 init_table.non_lazy_plt = NULL;
5092 init_table.lazy_ibt_plt = NULL;
5093 init_table.non_lazy_ibt_plt = NULL;
5094 init_table.normal_target = FALSE;
5095 }
5096
5097 if (ABI_64_P (info->output_bfd))
5098 {
5099 init_table.r_info = elf64_r_info;
5100 init_table.r_sym = elf64_r_sym;
5101 }
5102 else
5103 {
5104 init_table.r_info = elf32_r_info;
5105 init_table.r_sym = elf32_r_sym;
5106 }
5107
5108 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5109 }
5110
5111 static const struct bfd_elf_special_section
5112 elf_x86_64_special_sections[]=
5113 {
5114 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5115 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5116 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5117 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5118 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5119 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5120 { NULL, 0, 0, 0, 0 }
5121 };
5122
5123 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5124 #define TARGET_LITTLE_NAME "elf64-x86-64"
5125 #define ELF_ARCH bfd_arch_i386
5126 #define ELF_TARGET_ID X86_64_ELF_DATA
5127 #define ELF_MACHINE_CODE EM_X86_64
5128 #define ELF_MAXPAGESIZE 0x200000
5129 #define ELF_MINPAGESIZE 0x1000
5130 #define ELF_COMMONPAGESIZE 0x1000
5131
5132 #define elf_backend_can_gc_sections 1
5133 #define elf_backend_can_refcount 1
5134 #define elf_backend_want_got_plt 1
5135 #define elf_backend_plt_readonly 1
5136 #define elf_backend_want_plt_sym 0
5137 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5138 #define elf_backend_rela_normal 1
5139 #define elf_backend_plt_alignment 4
5140 #define elf_backend_extern_protected_data 1
5141 #define elf_backend_caches_rawsize 1
5142 #define elf_backend_dtrel_excludes_plt 1
5143 #define elf_backend_want_dynrelro 1
5144
5145 #define elf_info_to_howto elf_x86_64_info_to_howto
5146
5147 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5148 #define bfd_elf64_bfd_reloc_name_lookup \
5149 elf_x86_64_reloc_name_lookup
5150
5151 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5152 #define elf_backend_check_relocs elf_x86_64_check_relocs
5153 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5154 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5155 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5156 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5157 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5158 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5159 #ifdef CORE_HEADER
5160 #define elf_backend_write_core_note elf_x86_64_write_core_note
5161 #endif
5162 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5163 #define elf_backend_relocate_section elf_x86_64_relocate_section
5164 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5165 #define elf_backend_object_p elf64_x86_64_elf_object_p
5166 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5167
5168 #define elf_backend_section_from_shdr \
5169 elf_x86_64_section_from_shdr
5170
5171 #define elf_backend_section_from_bfd_section \
5172 elf_x86_64_elf_section_from_bfd_section
5173 #define elf_backend_add_symbol_hook \
5174 elf_x86_64_add_symbol_hook
5175 #define elf_backend_symbol_processing \
5176 elf_x86_64_symbol_processing
5177 #define elf_backend_common_section_index \
5178 elf_x86_64_common_section_index
5179 #define elf_backend_common_section \
5180 elf_x86_64_common_section
5181 #define elf_backend_common_definition \
5182 elf_x86_64_common_definition
5183 #define elf_backend_merge_symbol \
5184 elf_x86_64_merge_symbol
5185 #define elf_backend_special_sections \
5186 elf_x86_64_special_sections
5187 #define elf_backend_additional_program_headers \
5188 elf_x86_64_additional_program_headers
5189 #define elf_backend_setup_gnu_properties \
5190 elf_x86_64_link_setup_gnu_properties
5191
5192 #include "elf64-target.h"
5193
5194 /* CloudABI support. */
5195
5196 #undef TARGET_LITTLE_SYM
5197 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5198 #undef TARGET_LITTLE_NAME
5199 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5200
5201 #undef ELF_OSABI
5202 #define ELF_OSABI ELFOSABI_CLOUDABI
5203
5204 #undef elf64_bed
5205 #define elf64_bed elf64_x86_64_cloudabi_bed
5206
5207 #include "elf64-target.h"
5208
5209 /* FreeBSD support. */
5210
5211 #undef TARGET_LITTLE_SYM
5212 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5213 #undef TARGET_LITTLE_NAME
5214 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5215
5216 #undef ELF_OSABI
5217 #define ELF_OSABI ELFOSABI_FREEBSD
5218
5219 #undef elf64_bed
5220 #define elf64_bed elf64_x86_64_fbsd_bed
5221
5222 #include "elf64-target.h"
5223
5224 /* Solaris 2 support. */
5225
5226 #undef TARGET_LITTLE_SYM
5227 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5228 #undef TARGET_LITTLE_NAME
5229 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5230
5231 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5232 objects won't be recognized. */
5233 #undef ELF_OSABI
5234
5235 #undef elf64_bed
5236 #define elf64_bed elf64_x86_64_sol2_bed
5237
5238 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5239 boundary. */
5240 #undef elf_backend_static_tls_alignment
5241 #define elf_backend_static_tls_alignment 16
5242
5243 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5244
5245 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5246 File, p.63. */
5247 #undef elf_backend_want_plt_sym
5248 #define elf_backend_want_plt_sym 1
5249
5250 #undef elf_backend_strtab_flags
5251 #define elf_backend_strtab_flags SHF_STRINGS
5252
5253 static bfd_boolean
5254 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5255 bfd *obfd ATTRIBUTE_UNUSED,
5256 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5257 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5258 {
5259 /* PR 19938: FIXME: Need to add code for setting the sh_info
5260 and sh_link fields of Solaris specific section types. */
5261 return FALSE;
5262 }
5263
5264 #undef elf_backend_copy_special_section_fields
5265 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5266
5267 #include "elf64-target.h"
5268
5269 /* Native Client support. */
5270
5271 static bfd_boolean
5272 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5273 {
5274 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5275 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5276 return TRUE;
5277 }
5278
5279 #undef TARGET_LITTLE_SYM
5280 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5281 #undef TARGET_LITTLE_NAME
5282 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5283 #undef elf64_bed
5284 #define elf64_bed elf64_x86_64_nacl_bed
5285
5286 #undef ELF_MAXPAGESIZE
5287 #undef ELF_MINPAGESIZE
5288 #undef ELF_COMMONPAGESIZE
5289 #define ELF_MAXPAGESIZE 0x10000
5290 #define ELF_MINPAGESIZE 0x10000
5291 #define ELF_COMMONPAGESIZE 0x10000
5292
5293 /* Restore defaults. */
5294 #undef ELF_OSABI
5295 #undef elf_backend_static_tls_alignment
5296 #undef elf_backend_want_plt_sym
5297 #define elf_backend_want_plt_sym 0
5298 #undef elf_backend_strtab_flags
5299 #undef elf_backend_copy_special_section_fields
5300
5301 /* NaCl uses substantially different PLT entries for the same effects. */
5302
5303 #undef elf_backend_plt_alignment
5304 #define elf_backend_plt_alignment 5
5305 #define NACL_PLT_ENTRY_SIZE 64
5306 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5307
5308 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5309 {
5310 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5311 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5312 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5313 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5314 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5315
5316 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5317 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5318
5319 /* 32 bytes of nop to pad out to the standard size. */
5320 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5321 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5322 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5323 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5324 0x66, /* excess data16 prefix */
5325 0x90 /* nop */
5326 };
5327
5328 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5329 {
5330 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5331 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5332 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5333 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5334
5335 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5336 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5337 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5338
5339 /* Lazy GOT entries point here (32-byte aligned). */
5340 0x68, /* pushq immediate */
5341 0, 0, 0, 0, /* replaced with index into relocation table. */
5342 0xe9, /* jmp relative */
5343 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5344
5345 /* 22 bytes of nop to pad out to the standard size. */
5346 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5347 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5348 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5349 };
5350
5351 /* .eh_frame covering the .plt section. */
5352
5353 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5354 {
5355 #if (PLT_CIE_LENGTH != 20 \
5356 || PLT_FDE_LENGTH != 36 \
5357 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5358 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5359 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
5360 #endif
5361 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5362 0, 0, 0, 0, /* CIE ID */
5363 1, /* CIE version */
5364 'z', 'R', 0, /* Augmentation string */
5365 1, /* Code alignment factor */
5366 0x78, /* Data alignment factor */
5367 16, /* Return address column */
5368 1, /* Augmentation size */
5369 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5370 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5371 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5372 DW_CFA_nop, DW_CFA_nop,
5373
5374 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5375 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5376 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5377 0, 0, 0, 0, /* .plt size goes here */
5378 0, /* Augmentation size */
5379 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5380 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5381 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5382 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5383 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5384 13, /* Block length */
5385 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5386 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5387 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5388 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5389 DW_CFA_nop, DW_CFA_nop
5390 };
5391
5392 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5393 {
5394 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5395 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5396 elf_x86_64_nacl_plt_entry, /* plt_entry */
5397 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5398 2, /* plt0_got1_offset */
5399 9, /* plt0_got2_offset */
5400 13, /* plt0_got2_insn_end */
5401 3, /* plt_got_offset */
5402 33, /* plt_reloc_offset */
5403 38, /* plt_plt_offset */
5404 7, /* plt_got_insn_size */
5405 42, /* plt_plt_insn_end */
5406 32, /* plt_lazy_offset */
5407 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5408 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5409 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5410 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5411 };
5412
5413 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
5414 {
5415 is_nacl /* os */
5416 };
5417
5418 #undef elf_backend_arch_data
5419 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5420
5421 #undef elf_backend_object_p
5422 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5423 #undef elf_backend_modify_segment_map
5424 #define elf_backend_modify_segment_map nacl_modify_segment_map
5425 #undef elf_backend_modify_program_headers
5426 #define elf_backend_modify_program_headers nacl_modify_program_headers
5427 #undef elf_backend_final_write_processing
5428 #define elf_backend_final_write_processing nacl_final_write_processing
5429
5430 #include "elf64-target.h"
5431
5432 /* Native Client x32 support. */
5433
5434 static bfd_boolean
5435 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5436 {
5437 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5438 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5439 return TRUE;
5440 }
5441
5442 #undef TARGET_LITTLE_SYM
5443 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5444 #undef TARGET_LITTLE_NAME
5445 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5446 #undef elf32_bed
5447 #define elf32_bed elf32_x86_64_nacl_bed
5448
5449 #define bfd_elf32_bfd_reloc_type_lookup \
5450 elf_x86_64_reloc_type_lookup
5451 #define bfd_elf32_bfd_reloc_name_lookup \
5452 elf_x86_64_reloc_name_lookup
5453 #define bfd_elf32_get_synthetic_symtab \
5454 elf_x86_64_get_synthetic_symtab
5455
5456 #undef elf_backend_object_p
5457 #define elf_backend_object_p \
5458 elf32_x86_64_nacl_elf_object_p
5459
5460 #undef elf_backend_bfd_from_remote_memory
5461 #define elf_backend_bfd_from_remote_memory \
5462 _bfd_elf32_bfd_from_remote_memory
5463
5464 #undef elf_backend_size_info
5465 #define elf_backend_size_info \
5466 _bfd_elf32_size_info
5467
5468 #include "elf32-target.h"
5469
5470 /* Restore defaults. */
5471 #undef elf_backend_object_p
5472 #define elf_backend_object_p elf64_x86_64_elf_object_p
5473 #undef elf_backend_bfd_from_remote_memory
5474 #undef elf_backend_size_info
5475 #undef elf_backend_modify_segment_map
5476 #undef elf_backend_modify_program_headers
5477 #undef elf_backend_final_write_processing
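/* The #undefs above drop the x32 size_info and remote-memory overrides
   and the NaCl segment and program-header hooks, so the L1OM and K1OM
   vectors defined next start again from the plain x86-64 backend.  */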
5478
5479 /* Intel L1OM support. */
5480
5481 static bfd_boolean
5482 elf64_l1om_elf_object_p (bfd *abfd)
5483 {
5484 /* Set the right machine number for an L1OM ELF64 file. */
5485 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5486 return TRUE;
5487 }
5488
5489 #undef TARGET_LITTLE_SYM
5490 #define TARGET_LITTLE_SYM l1om_elf64_vec
5491 #undef TARGET_LITTLE_NAME
5492 #define TARGET_LITTLE_NAME "elf64-l1om"
5493 #undef ELF_ARCH
5494 #define ELF_ARCH bfd_arch_l1om
5495
5496 #undef ELF_MACHINE_CODE
5497 #define ELF_MACHINE_CODE EM_L1OM
5498
5499 #undef ELF_OSABI
5500
5501 #undef elf64_bed
5502 #define elf64_bed elf64_l1om_bed
5503
5504 #undef elf_backend_object_p
5505 #define elf_backend_object_p elf64_l1om_elf_object_p
5506
5507 /* Restore defaults. */
5508 #undef ELF_MAXPAGESIZE
5509 #undef ELF_MINPAGESIZE
5510 #undef ELF_COMMONPAGESIZE
5511 #define ELF_MAXPAGESIZE 0x200000
5512 #define ELF_MINPAGESIZE 0x1000
5513 #define ELF_COMMONPAGESIZE 0x1000
5514 #undef elf_backend_plt_alignment
5515 #define elf_backend_plt_alignment 4
5516 #undef elf_backend_arch_data
5517 #define elf_backend_arch_data &elf_x86_64_arch_bed
5518
5519 #include "elf64-target.h"
5520
5521 /* FreeBSD L1OM support. */
5522
5523 #undef TARGET_LITTLE_SYM
5524 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5525 #undef TARGET_LITTLE_NAME
5526 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5527
5528 #undef ELF_OSABI
5529 #define ELF_OSABI ELFOSABI_FREEBSD
5530
5531 #undef elf64_bed
5532 #define elf64_bed elf64_l1om_fbsd_bed
5533
5534 #include "elf64-target.h"
5535
5536 /* Intel K1OM support. */
5537
5538 static bfd_boolean
5539 elf64_k1om_elf_object_p (bfd *abfd)
5540 {
5541 /* Set the right machine number for a K1OM ELF64 file. */
5542 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5543 return TRUE;
5544 }
5545
5546 #undef TARGET_LITTLE_SYM
5547 #define TARGET_LITTLE_SYM k1om_elf64_vec
5548 #undef TARGET_LITTLE_NAME
5549 #define TARGET_LITTLE_NAME "elf64-k1om"
5550 #undef ELF_ARCH
5551 #define ELF_ARCH bfd_arch_k1om
5552
5553 #undef ELF_MACHINE_CODE
5554 #define ELF_MACHINE_CODE EM_K1OM
5555
5556 #undef ELF_OSABI
5557
5558 #undef elf64_bed
5559 #define elf64_bed elf64_k1om_bed
5560
5561 #undef elf_backend_object_p
5562 #define elf_backend_object_p elf64_k1om_elf_object_p
5563
5564 #undef elf_backend_static_tls_alignment
5565
5566 #undef elf_backend_want_plt_sym
5567 #define elf_backend_want_plt_sym 0
5568
5569 #include "elf64-target.h"
5570
5571 /* FreeBSD K1OM support. */
5572
5573 #undef TARGET_LITTLE_SYM
5574 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5575 #undef TARGET_LITTLE_NAME
5576 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5577
5578 #undef ELF_OSABI
5579 #define ELF_OSABI ELFOSABI_FREEBSD
5580
5581 #undef elf64_bed
5582 #define elf64_bed elf64_k1om_fbsd_bed
5583
5584 #include "elf64-target.h"
5585
5586 /* 32-bit x86-64 support. */
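/* x32 objects are ILP32 x86-64: 32-bit ELF containers that keep
   EM_X86_64 as the machine code.  ELF_ARCH stays within bfd_arch_i386
   (the x64-32 machines live under the i386 architecture), and the
   32-bit size_info and bfd_from_remote_memory helpers are pulled in
   below before including elf32-target.h, while the relocation handling
   above is shared with the 64-bit target.  */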
5587
5588 #undef TARGET_LITTLE_SYM
5589 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5590 #undef TARGET_LITTLE_NAME
5591 #define TARGET_LITTLE_NAME "elf32-x86-64"
5592 #undef elf32_bed
5593
5594 #undef ELF_ARCH
5595 #define ELF_ARCH bfd_arch_i386
5596
5597 #undef ELF_MACHINE_CODE
5598 #define ELF_MACHINE_CODE EM_X86_64
5599
5600 #undef ELF_OSABI
5601
5602 #undef elf_backend_object_p
5603 #define elf_backend_object_p \
5604 elf32_x86_64_elf_object_p
5605
5606 #undef elf_backend_bfd_from_remote_memory
5607 #define elf_backend_bfd_from_remote_memory \
5608 _bfd_elf32_bfd_from_remote_memory
5609
5610 #undef elf_backend_size_info
5611 #define elf_backend_size_info \
5612 _bfd_elf32_size_info
5613
5614 #include "elf32-target.h"