1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
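/* (For reference, assuming the usual gABI macro definitions:
   ELF64_R_TYPE (i) is ((i) & 0xffffffff) and ELF32_R_TYPE (i) keeps only
   the low 8 bits.  Since every R_X86_64_* value fits in 8 bits, both
   macros recover the same type from r_info for either ABI.)  */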
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
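/* No R_X86_64_* relocation number reaches 0x80, so bit 7 of the 8-bit
   type field is otherwise unused.  elf_x86_64_convert_load_reloc ORs this
   bit into the stored type when it rewrites a GOTPCREL load in place;
   later passes strip it again with ~R_X86_64_converted_reloc_bit.  */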
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
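/* For reference: in both layouts pr_reg is 216 bytes, which matches the
   27 eight-byte members of the x86-64 user_regs_struct (an assumption
   about the kernel layout, not something the code relies on); the
   differing offsets come from the x32 vs. LP64 padding of
   struct elf_prstatus.  */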
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some implementations (at least one,
428 anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
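/* A non-lazy PLT entry is just the 6-byte "jmpq *name@GOTPCREL(%rip)"
   padded to 8 bytes (see elf_x86_64_non_lazy_plt_entry below), while a
   lazy entry needs 16 bytes for the extra pushq/jmp used by the
   first-call resolution path.  */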
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* .eh_frame covering the lazy .plt section. */
664
665 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
666 {
667 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
668 0, 0, 0, 0, /* CIE ID */
669 1, /* CIE version */
670 'z', 'R', 0, /* Augmentation string */
671 1, /* Code alignment factor */
672 0x78, /* Data alignment factor */
673 16, /* Return address column */
674 1, /* Augmentation size */
675 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
676 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
677 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
678 DW_CFA_nop, DW_CFA_nop,
679
680 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
681 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
682 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
683 0, 0, 0, 0, /* .plt size goes here */
684 0, /* Augmentation size */
685 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
686 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
687 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
688 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
689 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
690 11, /* Block length */
691 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
692 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
693 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
694 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
695 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
696 };
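/* The DW_CFA_def_cfa_expression above evaluates to %rsp + 8, plus
   another 8 when (%rip & 15) >= 11, i.e. once the pushq at offset 6 of a
   lazy PLT entry has executed and an extra slot is on the stack.  The
   BND and IBT variants below use the same expression with the threshold
   constant adjusted for their entry layout.  */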
697
698 /* .eh_frame covering the lazy BND .plt section. */
699
700 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
701 {
702 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
703 0, 0, 0, 0, /* CIE ID */
704 1, /* CIE version */
705 'z', 'R', 0, /* Augmentation string */
706 1, /* Code alignment factor */
707 0x78, /* Data alignment factor */
708 16, /* Return address column */
709 1, /* Augmentation size */
710 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
711 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
712 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
713 DW_CFA_nop, DW_CFA_nop,
714
715 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
716 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
717 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
718 0, 0, 0, 0, /* .plt size goes here */
719 0, /* Augmentation size */
720 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
721 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
722 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
723 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
724 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
725 11, /* Block length */
726 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
727 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
728 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
729 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
730 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
731 };
732
733 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
734
735 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
736 {
737 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
738 0, 0, 0, 0, /* CIE ID */
739 1, /* CIE version */
740 'z', 'R', 0, /* Augmentation string */
741 1, /* Code alignment factor */
742 0x78, /* Data alignment factor */
743 16, /* Return address column */
744 1, /* Augmentation size */
745 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
746 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
747 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
748 DW_CFA_nop, DW_CFA_nop,
749
750 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
751 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
752 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
753 0, 0, 0, 0, /* .plt size goes here */
754 0, /* Augmentation size */
755 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
756 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
757 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
758 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
759 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
760 11, /* Block length */
761 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
762 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
763 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
764 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
765 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
766 };
767
768 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
769
770 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
771 {
772 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
773 0, 0, 0, 0, /* CIE ID */
774 1, /* CIE version */
775 'z', 'R', 0, /* Augmentation string */
776 1, /* Code alignment factor */
777 0x78, /* Data alignment factor */
778 16, /* Return address column */
779 1, /* Augmentation size */
780 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
781 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
782 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
783 DW_CFA_nop, DW_CFA_nop,
784
785 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
786 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
787 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
788 0, 0, 0, 0, /* .plt size goes here */
789 0, /* Augmentation size */
790 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
791 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
792 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
793 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
794 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
795 11, /* Block length */
796 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
797 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
798 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
799 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
800 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
801 };
802
803 /* .eh_frame covering the non-lazy .plt section. */
804
805 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
806 {
807 #define PLT_GOT_FDE_LENGTH 20
808 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
809 0, 0, 0, 0, /* CIE ID */
810 1, /* CIE version */
811 'z', 'R', 0, /* Augmentation string */
812 1, /* Code alignment factor */
813 0x78, /* Data alignment factor */
814 16, /* Return address column */
815 1, /* Augmentation size */
816 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
817 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
818 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
819 DW_CFA_nop, DW_CFA_nop,
820
821 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
822 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
823 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
824 0, 0, 0, 0, /* non-lazy .plt size goes here */
825 0, /* Augmentation size */
826 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
827 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
828 };
829
830 /* These are the standard parameters. */
831 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
832 {
833 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
834 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
835 elf_x86_64_lazy_plt_entry, /* plt_entry */
836 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
837 2, /* plt0_got1_offset */
838 8, /* plt0_got2_offset */
839 12, /* plt0_got2_insn_end */
840 2, /* plt_got_offset */
841 7, /* plt_reloc_offset */
842 12, /* plt_plt_offset */
843 6, /* plt_got_insn_size */
844 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
845 6, /* plt_lazy_offset */
846 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
847 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
848 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
849 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
850 };
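/* The offsets above index into the byte arrays defined earlier: e.g.
   plt0_got1_offset 2 and plt0_got2_offset 8 are where the disp32 fields
   of "pushq GOT+8(%rip)" and "jmpq *GOT+16(%rip)" start in
   elf_x86_64_lazy_plt0_entry, while plt_reloc_offset 7 and
   plt_plt_offset 12 locate the pushq immediate and the jmp rel32 in
   elf_x86_64_lazy_plt_entry.  */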
851
852 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
853 {
854 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
855 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
856 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
857 2, /* plt_got_offset */
858 6, /* plt_got_insn_size */
859 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
860 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
861 };
862
863 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
864 {
865 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
866 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
867 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
868 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
869 2, /* plt0_got1_offset */
870 1+8, /* plt0_got2_offset */
871 1+12, /* plt0_got2_insn_end */
872 1+2, /* plt_got_offset */
873 1, /* plt_reloc_offset */
874 7, /* plt_plt_offset */
875 1+6, /* plt_got_insn_size */
876 11, /* plt_plt_insn_end */
877 0, /* plt_lazy_offset */
878 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
879 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
880 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
881 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
882 };
883
884 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
885 {
886 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
887 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
888 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
889 1+2, /* plt_got_offset */
890 1+6, /* plt_got_insn_size */
891 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
892 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
893 };
894
895 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
896 {
897 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
898 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
899 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
900 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
901 2, /* plt0_got1_offset */
902 1+8, /* plt0_got2_offset */
903 1+12, /* plt0_got2_insn_end */
904 4+1+2, /* plt_got_offset */
905 4+1, /* plt_reloc_offset */
906 4+1+6, /* plt_plt_offset */
907 4+1+6, /* plt_got_insn_size */
908 4+1+5+5, /* plt_plt_insn_end */
909 0, /* plt_lazy_offset */
910 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
911 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
912 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
913 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
914 };
915
916 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
917 {
918 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
919 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
920 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
921 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
922 2, /* plt0_got1_offset */
923 8, /* plt0_got2_offset */
924 12, /* plt0_got2_insn_end */
925 4+2, /* plt_got_offset */
926 4+1, /* plt_reloc_offset */
927 4+6, /* plt_plt_offset */
928 4+6, /* plt_got_insn_size */
929 4+5+5, /* plt_plt_insn_end */
930 0, /* plt_lazy_offset */
931 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
932 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
933 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
934 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
935 };
936
937 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
938 {
939 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
940 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
941 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
942 4+1+2, /* plt_got_offset */
943 4+1+6, /* plt_got_insn_size */
944 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
945 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
946 };
947
948 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
949 {
950 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
951 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
952 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
953 4+2, /* plt_got_offset */
954 4+6, /* plt_got_insn_size */
955 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
956 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
957 };
958
959 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
960 {
961 is_normal /* os */
962 };
963
964 #define elf_backend_arch_data &elf_x86_64_arch_bed
965
966 static bfd_boolean
967 elf64_x86_64_elf_object_p (bfd *abfd)
968 {
969 /* Set the right machine number for an x86-64 elf64 file. */
970 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
971 return TRUE;
972 }
973
974 static bfd_boolean
975 elf32_x86_64_elf_object_p (bfd *abfd)
976 {
977 /* Set the right machine number for an x86-64 elf32 file. */
978 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
979 return TRUE;
980 }
981
982 /* Return TRUE if the TLS access code sequence supports the transition
983 from R_TYPE. */
984
985 static bfd_boolean
986 elf_x86_64_check_tls_transition (bfd *abfd,
987 struct bfd_link_info *info,
988 asection *sec,
989 bfd_byte *contents,
990 Elf_Internal_Shdr *symtab_hdr,
991 struct elf_link_hash_entry **sym_hashes,
992 unsigned int r_type,
993 const Elf_Internal_Rela *rel,
994 const Elf_Internal_Rela *relend)
995 {
996 unsigned int val;
997 unsigned long r_symndx;
998 bfd_boolean largepic = FALSE;
999 struct elf_link_hash_entry *h;
1000 bfd_vma offset;
1001 struct elf_x86_link_hash_table *htab;
1002 bfd_byte *call;
1003 bfd_boolean indirect_call;
1004
1005 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1006 offset = rel->r_offset;
1007 switch (r_type)
1008 {
1009 case R_X86_64_TLSGD:
1010 case R_X86_64_TLSLD:
1011 if ((rel + 1) >= relend)
1012 return FALSE;
1013
1014 if (r_type == R_X86_64_TLSGD)
1015 {
1016 /* Check transition from GD access model. For 64bit, only
1017 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1018 .word 0x6666; rex64; call __tls_get_addr@PLT
1019 or
1020 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1021 .byte 0x66; rex64
1022 call *__tls_get_addr@GOTPCREL(%rip)
1023 which may be converted to
1024 addr32 call __tls_get_addr
1025 can transition to a different access model. For 32bit, only
1026 leaq foo@tlsgd(%rip), %rdi
1027 .word 0x6666; rex64; call __tls_get_addr@PLT
1028 or
1029 leaq foo@tlsgd(%rip), %rdi
1030 .byte 0x66; rex64
1031 call *__tls_get_addr@GOTPCREL(%rip)
1032 which may be converted to
1033 addr32 call __tls_get_addr
1034 can transition to a different access model. For largepic,
1035 we also support:
1036 leaq foo@tlsgd(%rip), %rdi
1037 movabsq $__tls_get_addr@pltoff, %rax
1038 addq $r15, %rax
1039 call *%rax
1040 or
1041 leaq foo@tlsgd(%rip), %rdi
1042 movabsq $__tls_get_addr@pltoff, %rax
1043 addq $rbx, %rax
1044 call *%rax */
1045
1046 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1047
1048 if ((offset + 12) > sec->size)
1049 return FALSE;
1050
1051 call = contents + offset + 4;
1052 if (call[0] != 0x66
1053 || !((call[1] == 0x48
1054 && call[2] == 0xff
1055 && call[3] == 0x15)
1056 || (call[1] == 0x48
1057 && call[2] == 0x67
1058 && call[3] == 0xe8)
1059 || (call[1] == 0x66
1060 && call[2] == 0x48
1061 && call[3] == 0xe8)))
1062 {
1063 if (!ABI_64_P (abfd)
1064 || (offset + 19) > sec->size
1065 || offset < 3
1066 || memcmp (call - 7, leaq + 1, 3) != 0
1067 || memcmp (call, "\x48\xb8", 2) != 0
1068 || call[11] != 0x01
1069 || call[13] != 0xff
1070 || call[14] != 0xd0
1071 || !((call[10] == 0x48 && call[12] == 0xd8)
1072 || (call[10] == 0x4c && call[12] == 0xf8)))
1073 return FALSE;
1074 largepic = TRUE;
1075 }
1076 else if (ABI_64_P (abfd))
1077 {
1078 if (offset < 4
1079 || memcmp (contents + offset - 4, leaq, 4) != 0)
1080 return FALSE;
1081 }
1082 else
1083 {
1084 if (offset < 3
1085 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1086 return FALSE;
1087 }
1088 indirect_call = call[2] == 0xff;
1089 }
1090 else
1091 {
1092 /* Check transition from LD access model. Only
1093 leaq foo@tlsld(%rip), %rdi;
1094 call __tls_get_addr@PLT
1095 or
1096 leaq foo@tlsld(%rip), %rdi;
1097 call *__tls_get_addr@GOTPCREL(%rip)
1098 which may be converted to
1099 addr32 call __tls_get_addr
1100 can transition to a different access model. For largepic
1101 we also support:
1102 leaq foo@tlsld(%rip), %rdi
1103 movabsq $__tls_get_addr@pltoff, %rax
1104 addq $r15, %rax
1105 call *%rax
1106 or
1107 leaq foo@tlsld(%rip), %rdi
1108 movabsq $__tls_get_addr@pltoff, %rax
1109 addq $rbx, %rax
1110 call *%rax */
1111
1112 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1113
1114 if (offset < 3 || (offset + 9) > sec->size)
1115 return FALSE;
1116
1117 if (memcmp (contents + offset - 3, lea, 3) != 0)
1118 return FALSE;
1119
1120 call = contents + offset + 4;
1121 if (!(call[0] == 0xe8
1122 || (call[0] == 0xff && call[1] == 0x15)
1123 || (call[0] == 0x67 && call[1] == 0xe8)))
1124 {
1125 if (!ABI_64_P (abfd)
1126 || (offset + 19) > sec->size
1127 || memcmp (call, "\x48\xb8", 2) != 0
1128 || call[11] != 0x01
1129 || call[13] != 0xff
1130 || call[14] != 0xd0
1131 || !((call[10] == 0x48 && call[12] == 0xd8)
1132 || (call[10] == 0x4c && call[12] == 0xf8)))
1133 return FALSE;
1134 largepic = TRUE;
1135 }
1136 indirect_call = call[0] == 0xff;
1137 }
1138
1139 r_symndx = htab->r_sym (rel[1].r_info);
1140 if (r_symndx < symtab_hdr->sh_info)
1141 return FALSE;
1142
1143 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1144 if (h == NULL
1145 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1146 return FALSE;
1147 else
1148 {
1149 r_type = (ELF32_R_TYPE (rel[1].r_info)
1150 & ~R_X86_64_converted_reloc_bit);
1151 if (largepic)
1152 return r_type == R_X86_64_PLTOFF64;
1153 else if (indirect_call)
1154 return r_type == R_X86_64_GOTPCRELX;
1155 else
1156 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1157 }
1158
1159 case R_X86_64_GOTTPOFF:
1160 /* Check transition from IE access model:
1161 mov foo@gottpoff(%rip), %reg
1162 add foo@gottpoff(%rip), %reg
1163 */
1164
1165 /* Check REX prefix first. */
1166 if (offset >= 3 && (offset + 4) <= sec->size)
1167 {
1168 val = bfd_get_8 (abfd, contents + offset - 3);
1169 if (val != 0x48 && val != 0x4c)
1170 {
1171 /* X32 may have 0x44 REX prefix or no REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 }
1175 }
1176 else
1177 {
1178 /* X32 may not have any REX prefix. */
1179 if (ABI_64_P (abfd))
1180 return FALSE;
1181 if (offset < 2 || (offset + 3) > sec->size)
1182 return FALSE;
1183 }
1184
1185 val = bfd_get_8 (abfd, contents + offset - 2);
1186 if (val != 0x8b && val != 0x03)
1187 return FALSE;
1188
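/* 0x8b and 0x03 are the mov and add opcodes from the comment above.
   Masking the ModRM byte with 0xc7 keeps its mod and r/m fields; the
   value 5 (mod=00, r/m=101) is the RIP-relative disp32 form, with any
   destination register allowed in the reg field.  */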
1189 val = bfd_get_8 (abfd, contents + offset - 1);
1190 return (val & 0xc7) == 5;
1191
1192 case R_X86_64_GOTPC32_TLSDESC:
1193 /* Check transition from GDesc access model:
1194 leaq x@tlsdesc(%rip), %rax
1195
1196 Make sure it's a leaq adding rip to a 32-bit offset
1197 into any register, although it's probably almost always
1198 going to be rax. */
1199
1200 if (offset < 3 || (offset + 4) > sec->size)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 3);
1204 if ((val & 0xfb) != 0x48)
1205 return FALSE;
1206
1207 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1208 return FALSE;
1209
1210 val = bfd_get_8 (abfd, contents + offset - 1);
1211 return (val & 0xc7) == 0x05;
1212
1213 case R_X86_64_TLSDESC_CALL:
1214 /* Check transition from GDesc access model:
1215 call *x@tlsdesc(%rax)
1216 */
1217 if (offset + 2 <= sec->size)
1218 {
1219 /* Make sure that it's a call *x@tlsdesc(%rax). */
1220 call = contents + offset;
1221 return call[0] == 0xff && call[1] == 0x10;
1222 }
1223
1224 return FALSE;
1225
1226 default:
1227 abort ();
1228 }
1229 }
1230
1231 /* Return TRUE if the TLS access transition is OK or no transition
1232 will be performed. Update R_TYPE if there is a transition. */
1233
1234 static bfd_boolean
1235 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1236 asection *sec, bfd_byte *contents,
1237 Elf_Internal_Shdr *symtab_hdr,
1238 struct elf_link_hash_entry **sym_hashes,
1239 unsigned int *r_type, int tls_type,
1240 const Elf_Internal_Rela *rel,
1241 const Elf_Internal_Rela *relend,
1242 struct elf_link_hash_entry *h,
1243 unsigned long r_symndx,
1244 bfd_boolean from_relocate_section)
1245 {
1246 unsigned int from_type = *r_type;
1247 unsigned int to_type = from_type;
1248 bfd_boolean check = TRUE;
1249
1250 /* Skip TLS transition for functions. */
1251 if (h != NULL
1252 && (h->type == STT_FUNC
1253 || h->type == STT_GNU_IFUNC))
1254 return TRUE;
1255
1256 switch (from_type)
1257 {
1258 case R_X86_64_TLSGD:
1259 case R_X86_64_GOTPC32_TLSDESC:
1260 case R_X86_64_TLSDESC_CALL:
1261 case R_X86_64_GOTTPOFF:
1262 if (bfd_link_executable (info))
1263 {
1264 if (h == NULL)
1265 to_type = R_X86_64_TPOFF32;
1266 else
1267 to_type = R_X86_64_GOTTPOFF;
1268 }
1269
1270 /* When we are called from elf_x86_64_relocate_section, there may
1271 be additional transitions based on TLS_TYPE. */
1272 if (from_relocate_section)
1273 {
1274 unsigned int new_to_type = to_type;
1275
1276 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1277 new_to_type = R_X86_64_TPOFF32;
1278
1279 if (to_type == R_X86_64_TLSGD
1280 || to_type == R_X86_64_GOTPC32_TLSDESC
1281 || to_type == R_X86_64_TLSDESC_CALL)
1282 {
1283 if (tls_type == GOT_TLS_IE)
1284 new_to_type = R_X86_64_GOTTPOFF;
1285 }
1286
1287 /* We checked the transition before when we were called from
1288 elf_x86_64_check_relocs. We only want to check the new
1289 transition which hasn't been checked before. */
1290 check = new_to_type != to_type && from_type == to_type;
1291 to_type = new_to_type;
1292 }
1293
1294 break;
1295
1296 case R_X86_64_TLSLD:
1297 if (bfd_link_executable (info))
1298 to_type = R_X86_64_TPOFF32;
1299 break;
1300
1301 default:
1302 return TRUE;
1303 }
1304
1305 /* Return TRUE if there is no transition. */
1306 if (from_type == to_type)
1307 return TRUE;
1308
1309 /* Check if the transition can be performed. */
1310 if (check
1311 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1312 symtab_hdr, sym_hashes,
1313 from_type, rel, relend))
1314 {
1315 reloc_howto_type *from, *to;
1316 const char *name;
1317
1318 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1319 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1320
1321 if (from == NULL || to == NULL)
1322 return FALSE;
1323
1324 if (h)
1325 name = h->root.root.string;
1326 else
1327 {
1328 struct elf_x86_link_hash_table *htab;
1329
1330 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1331 if (htab == NULL)
1332 name = "*unknown*";
1333 else
1334 {
1335 Elf_Internal_Sym *isym;
1336
1337 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1338 abfd, r_symndx);
1339 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1340 }
1341 }
1342
1343 _bfd_error_handler
1344 /* xgettext:c-format */
1345 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1346 " in section `%pA' failed"),
1347 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1348 bfd_set_error (bfd_error_bad_value);
1349 return FALSE;
1350 }
1351
1352 *r_type = to_type;
1353 return TRUE;
1354 }
1355
1356 /* Rename some of the generic section flags to better document how they
1357 are used here. */
1358 #define check_relocs_failed sec_flg0
1359
1360 static bfd_boolean
1361 elf_x86_64_need_pic (struct bfd_link_info *info,
1362 bfd *input_bfd, asection *sec,
1363 struct elf_link_hash_entry *h,
1364 Elf_Internal_Shdr *symtab_hdr,
1365 Elf_Internal_Sym *isym,
1366 reloc_howto_type *howto)
1367 {
1368 const char *v = "";
1369 const char *und = "";
1370 const char *pic = "";
1371 const char *object;
1372
1373 const char *name;
1374 if (h)
1375 {
1376 name = h->root.root.string;
1377 switch (ELF_ST_VISIBILITY (h->other))
1378 {
1379 case STV_HIDDEN:
1380 v = _("hidden symbol ");
1381 break;
1382 case STV_INTERNAL:
1383 v = _("internal symbol ");
1384 break;
1385 case STV_PROTECTED:
1386 v = _("protected symbol ");
1387 break;
1388 default:
1389 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1390 v = _("protected symbol ");
1391 else
1392 v = _("symbol ");
1393 pic = _("; recompile with -fPIC");
1394 break;
1395 }
1396
1397 if (!h->def_regular && !h->def_dynamic)
1398 und = _("undefined ");
1399 }
1400 else
1401 {
1402 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1403 pic = _("; recompile with -fPIC");
1404 }
1405
1406 if (bfd_link_dll (info))
1407 object = _("a shared object");
1408 else if (bfd_link_pie (info))
1409 object = _("a PIE object");
1410 else
1411 object = _("a PDE object");
1412
1413 /* xgettext:c-format */
1414 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1415 "not be used when making %s%s"),
1416 input_bfd, howto->name, und, v, name,
1417 object, pic);
1418 bfd_set_error (bfd_error_bad_value);
1419 sec->check_relocs_failed = 1;
1420 return FALSE;
1421 }
1422
1423 /* With the local symbol, foo, we convert
1424 mov foo@GOTPCREL(%rip), %reg
1425 to
1426 lea foo(%rip), %reg
1427 and convert
1428 call/jmp *foo@GOTPCREL(%rip)
1429 to
1430 nop call foo/jmp foo nop
1431 When PIC is false, convert
1432 test %reg, foo@GOTPCREL(%rip)
1433 to
1434 test $foo, %reg
1435 and convert
1436 binop foo@GOTPCREL(%rip), %reg
1437 to
1438 binop $foo, %reg
1439 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1440 instructions. */
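/* As a concrete illustration of the rewrites performed below:
     48 8b 05 <disp32>   mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <disp32>   lea foo(%rip), %rax
   and
     ff 25 <disp32>      jmp *foo@GOTPCREL(%rip)
   becomes
     e9 <rel32> 90       jmp foo; nop
   while an indirect call "ff 15 <disp32>" keeps its displacement in
   place and gains a one-byte prefix, becoming "<prefix> e8 <rel32>".  */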
1441
1442 static bfd_boolean
1443 elf_x86_64_convert_load_reloc (bfd *abfd,
1444 bfd_byte *contents,
1445 unsigned int *r_type_p,
1446 Elf_Internal_Rela *irel,
1447 struct elf_link_hash_entry *h,
1448 bfd_boolean *converted,
1449 struct bfd_link_info *link_info)
1450 {
1451 struct elf_x86_link_hash_table *htab;
1452 bfd_boolean is_pic;
1453 bfd_boolean no_overflow;
1454 bfd_boolean relocx;
1455 bfd_boolean to_reloc_pc32;
1456 asection *tsec;
1457 bfd_signed_vma raddend;
1458 unsigned int opcode;
1459 unsigned int modrm;
1460 unsigned int r_type = *r_type_p;
1461 unsigned int r_symndx;
1462 bfd_vma roff = irel->r_offset;
1463
1464 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1465 return TRUE;
1466
1467 raddend = irel->r_addend;
1468 /* Addend for 32-bit PC-relative relocation must be -4. */
1469 if (raddend != -4)
1470 return TRUE;
1471
1472 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1473 is_pic = bfd_link_pic (link_info);
1474
1475 relocx = (r_type == R_X86_64_GOTPCRELX
1476 || r_type == R_X86_64_REX_GOTPCRELX);
1477
1478 /* TRUE if --no-relax is used. */
1479 no_overflow = link_info->disable_target_specific_optimizations > 1;
1480
1481 r_symndx = htab->r_sym (irel->r_info);
1482
1483 opcode = bfd_get_8 (abfd, contents + roff - 2);
1484
1485 /* The mov -> lea conversion has been supported for a while, so allow it even for plain R_X86_64_GOTPCREL. */
1486 if (opcode != 0x8b)
1487 {
1488 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1489 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1490 test, xor instructions. */
1491 if (!relocx)
1492 return TRUE;
1493 }
1494
1495 /* We convert only to R_X86_64_PC32:
1496 1. Branch.
1497 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1498 3. no_overflow is true.
1499 4. PIC.
1500 */
1501 to_reloc_pc32 = (opcode == 0xff
1502 || !relocx
1503 || no_overflow
1504 || is_pic);
1505
1506 /* Get the symbol referred to by the reloc. */
1507 if (h == NULL)
1508 {
1509 Elf_Internal_Sym *isym
1510 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1511
1512 /* Skip relocation against undefined symbols. */
1513 if (isym->st_shndx == SHN_UNDEF)
1514 return TRUE;
1515
1516 if (isym->st_shndx == SHN_ABS)
1517 tsec = bfd_abs_section_ptr;
1518 else if (isym->st_shndx == SHN_COMMON)
1519 tsec = bfd_com_section_ptr;
1520 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1521 tsec = &_bfd_elf_large_com_section;
1522 else
1523 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1524 }
1525 else
1526 {
1527 /* An undefined weak symbol is only bound locally in an executable
1528 and its reference is resolved as 0 without relocation
1529 overflow. We can only perform this optimization for
1530 GOTPCRELX relocations since we need to modify the REX byte.
1531 It is OK to convert mov with R_X86_64_GOTPCREL to
1532 R_X86_64_PC32. */
1533 bfd_boolean local_ref;
1534 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1535
1536 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1537 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1538 if ((relocx || opcode == 0x8b)
1539 && (h->root.type == bfd_link_hash_undefweak
1540 && !eh->linker_def
1541 && local_ref))
1542 {
1543 if (opcode == 0xff)
1544 {
1545 /* Skip for branch instructions since R_X86_64_PC32
1546 may overflow. */
1547 if (no_overflow)
1548 return TRUE;
1549 }
1550 else if (relocx)
1551 {
1552 /* For non-branch instructions, we can convert to
1553 R_X86_64_32/R_X86_64_32S since we know if there
1554 is a REX byte. */
1555 to_reloc_pc32 = FALSE;
1556 }
1557
1558 /* Since we don't know the current PC when PIC is true,
1559 we can't convert to R_X86_64_PC32. */
1560 if (to_reloc_pc32 && is_pic)
1561 return TRUE;
1562
1563 goto convert;
1564 }
1565 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1566 ld.so may use its link-time address. */
1567 else if (h->start_stop
1568 || eh->linker_def
1569 || ((h->def_regular
1570 || h->root.type == bfd_link_hash_defined
1571 || h->root.type == bfd_link_hash_defweak)
1572 && h != htab->elf.hdynamic
1573 && local_ref))
1574 {
1575 /* bfd_link_hash_new or bfd_link_hash_undefined is
1576 set by an assignment in a linker script in
1577 bfd_elf_record_link_assignment. start_stop is set
1578 on __start_SECNAME/__stop_SECNAME which mark section
1579 SECNAME. */
1580 if (h->start_stop
1581 || eh->linker_def
1582 || (h->def_regular
1583 && (h->root.type == bfd_link_hash_new
1584 || h->root.type == bfd_link_hash_undefined
1585 || ((h->root.type == bfd_link_hash_defined
1586 || h->root.type == bfd_link_hash_defweak)
1587 && h->root.u.def.section == bfd_und_section_ptr))))
1588 {
1589 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1590 if (no_overflow)
1591 return TRUE;
1592 goto convert;
1593 }
1594 tsec = h->root.u.def.section;
1595 }
1596 else
1597 return TRUE;
1598 }
1599
1600 /* Don't convert a GOTPCREL relocation against a large section. */
1601 if (elf_section_data (tsec) != NULL
1602 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1603 return TRUE;
1604
1605 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1606 if (no_overflow)
1607 return TRUE;
1608
1609 convert:
1610 if (opcode == 0xff)
1611 {
1612 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1613 unsigned int nop;
1614 unsigned int disp;
1615 bfd_vma nop_offset;
1616
1617 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1618 R_X86_64_PC32. */
1619 modrm = bfd_get_8 (abfd, contents + roff - 1);
1620 if (modrm == 0x25)
1621 {
1622 /* Convert to "jmp foo nop". */
1623 modrm = 0xe9;
1624 nop = NOP_OPCODE;
1625 nop_offset = irel->r_offset + 3;
1626 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1627 irel->r_offset -= 1;
1628 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1629 }
1630 else
1631 {
1632 struct elf_x86_link_hash_entry *eh
1633 = (struct elf_x86_link_hash_entry *) h;
1634
1635 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1636 is a nop prefix. */
1637 modrm = 0xe8;
1638 /* To support TLS optimization, always use addr32 prefix for
1639 "call *__tls_get_addr@GOTPCREL(%rip)". */
1640 if (eh && eh->tls_get_addr)
1641 {
1642 nop = 0x67;
1643 nop_offset = irel->r_offset - 2;
1644 }
1645 else
1646 {
1647 nop = link_info->call_nop_byte;
1648 if (link_info->call_nop_as_suffix)
1649 {
1650 nop_offset = irel->r_offset + 3;
1651 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1652 irel->r_offset -= 1;
1653 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1654 }
1655 else
1656 nop_offset = irel->r_offset - 2;
1657 }
1658 }
1659 bfd_put_8 (abfd, nop, contents + nop_offset);
1660 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1661 r_type = R_X86_64_PC32;
1662 }
1663 else
1664 {
1665 unsigned int rex;
1666 unsigned int rex_mask = REX_R;
1667
1668 if (r_type == R_X86_64_REX_GOTPCRELX)
1669 rex = bfd_get_8 (abfd, contents + roff - 3);
1670 else
1671 rex = 0;
1672
1673 if (opcode == 0x8b)
1674 {
1675 if (to_reloc_pc32)
1676 {
1677 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1678 "lea foo(%rip), %reg". */
1679 opcode = 0x8d;
1680 r_type = R_X86_64_PC32;
1681 }
1682 else
1683 {
1684 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1685 "mov $foo, %reg". */
1686 opcode = 0xc7;
1687 modrm = bfd_get_8 (abfd, contents + roff - 1);
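                      /* The destination register sits in the ModRM reg field
                         of the original mov; the register-direct
                         "mov $imm32, %reg" form wants it in the r/m field
                         with mod = 11 and reg = 0 (the /0 opcode extension),
                         hence the shift below.  */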
1688 modrm = 0xc0 | (modrm & 0x38) >> 3;
1689 if ((rex & REX_W) != 0
1690 && ABI_64_P (link_info->output_bfd))
1691 {
1692 /* Keep the REX_W bit in REX byte for LP64. */
1693 r_type = R_X86_64_32S;
1694 goto rewrite_modrm_rex;
1695 }
1696 else
1697 {
1698 /* If the REX_W bit in REX byte isn't needed,
1699 use R_X86_64_32 and clear the W bit to avoid
1700 sign-extending imm32 to imm64.
1701 r_type = R_X86_64_32;
1702 /* Clear the W bit in REX byte. */
1703 rex_mask |= REX_W;
1704 goto rewrite_modrm_rex;
1705 }
1706 }
1707 }
1708 else
1709 {
1710 /* R_X86_64_PC32 isn't supported. */
1711 if (to_reloc_pc32)
1712 return TRUE;
1713
1714 modrm = bfd_get_8 (abfd, contents + roff - 1);
1715 if (opcode == 0x85)
1716 {
1717 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1718 "test $foo, %reg". */
1719 modrm = 0xc0 | (modrm & 0x38) >> 3;
1720 opcode = 0xf7;
1721 }
1722 else
1723 {
1724 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1725 "binop $foo, %reg". */
1726 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1727 opcode = 0x81;
1728 }
1729
1733 1730 /* Use R_X86_64_32S when REX.W is present, since the imm32
1734 1731 will be sign-extended to imm64, and R_X86_64_32 otherwise. */
1732 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1733
1734 rewrite_modrm_rex:
1735 bfd_put_8 (abfd, modrm, contents + roff - 1);
1736
1737 if (rex)
1738 {
1739 /* Move the R bit to the B bit in REX byte. */
1740 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1741 bfd_put_8 (abfd, rex, contents + roff - 3);
1742 }
1743
1744 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1745 irel->r_addend = 0;
1746 }
1747
1748 bfd_put_8 (abfd, opcode, contents + roff - 2);
1749 }
1750
1751 *r_type_p = r_type;
1752 irel->r_info = htab->r_info (r_symndx,
1753 r_type | R_X86_64_converted_reloc_bit);
1754
1755 *converted = TRUE;
1756
1757 return TRUE;
1758 }
1759
1760 /* Look through the relocs for a section during the first phase, and
1761 calculate needed space in the global offset table, procedure
1762 linkage table, and dynamic reloc sections. */
1763
1764 static bfd_boolean
1765 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1766 asection *sec,
1767 const Elf_Internal_Rela *relocs)
1768 {
1769 struct elf_x86_link_hash_table *htab;
1770 Elf_Internal_Shdr *symtab_hdr;
1771 struct elf_link_hash_entry **sym_hashes;
1772 const Elf_Internal_Rela *rel;
1773 const Elf_Internal_Rela *rel_end;
1774 asection *sreloc;
1775 bfd_byte *contents;
1776 bfd_boolean converted;
1777
1778 if (bfd_link_relocatable (info))
1779 return TRUE;
1780
1781 /* Don't do anything special with non-loaded, non-alloced sections.
1782 In particular, any relocs in such sections should not affect GOT
1783 and PLT reference counting (ie. we don't allow them to create GOT
1784 or PLT entries), there's no possibility or desire to optimize TLS
1785 relocs, and there's not much point in propagating relocs to shared
1786 libs that the dynamic linker won't relocate. */
1787 if ((sec->flags & SEC_ALLOC) == 0)
1788 return TRUE;
1789
1790 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1791 if (htab == NULL)
1792 {
1793 sec->check_relocs_failed = 1;
1794 return FALSE;
1795 }
1796
1797 BFD_ASSERT (is_x86_elf (abfd, htab));
1798
1799 /* Get the section contents. */
1800 if (elf_section_data (sec)->this_hdr.contents != NULL)
1801 contents = elf_section_data (sec)->this_hdr.contents;
1802 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1803 {
1804 sec->check_relocs_failed = 1;
1805 return FALSE;
1806 }
1807
1808 symtab_hdr = &elf_symtab_hdr (abfd);
1809 sym_hashes = elf_sym_hashes (abfd);
1810
1811 converted = FALSE;
1812
1813 sreloc = NULL;
1814
1815 rel_end = relocs + sec->reloc_count;
1816 for (rel = relocs; rel < rel_end; rel++)
1817 {
1818 unsigned int r_type;
1819 unsigned int r_symndx;
1820 struct elf_link_hash_entry *h;
1821 struct elf_x86_link_hash_entry *eh;
1822 Elf_Internal_Sym *isym;
1823 const char *name;
1824 bfd_boolean size_reloc;
1825 bfd_boolean converted_reloc;
1826
1827 r_symndx = htab->r_sym (rel->r_info);
1828 r_type = ELF32_R_TYPE (rel->r_info);
1829
1830 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1831 {
1832 /* xgettext:c-format */
1833 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1834 abfd, r_symndx);
1835 goto error_return;
1836 }
1837
1838 if (r_symndx < symtab_hdr->sh_info)
1839 {
1840 /* A local symbol. */
1841 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1842 abfd, r_symndx);
1843 if (isym == NULL)
1844 goto error_return;
1845
1846 /* Check relocation against local STT_GNU_IFUNC symbol. */
1847 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1848 {
1849 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1850 TRUE);
1851 if (h == NULL)
1852 goto error_return;
1853
1854 /* Fake a STT_GNU_IFUNC symbol. */
1855 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1856 isym, NULL);
1857 h->type = STT_GNU_IFUNC;
1858 h->def_regular = 1;
1859 h->ref_regular = 1;
1860 h->forced_local = 1;
1861 h->root.type = bfd_link_hash_defined;
1862 }
1863 else
1864 h = NULL;
1865 }
1866 else
1867 {
1868 isym = NULL;
1869 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1870 while (h->root.type == bfd_link_hash_indirect
1871 || h->root.type == bfd_link_hash_warning)
1872 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1873 }
1874
1875 /* Check invalid x32 relocations. */
1876 if (!ABI_64_P (abfd))
1877 switch (r_type)
1878 {
1879 default:
1880 break;
1881
1882 case R_X86_64_DTPOFF64:
1883 case R_X86_64_TPOFF64:
1884 case R_X86_64_PC64:
1885 case R_X86_64_GOTOFF64:
1886 case R_X86_64_GOT64:
1887 case R_X86_64_GOTPCREL64:
1888 case R_X86_64_GOTPC64:
1889 case R_X86_64_GOTPLT64:
1890 case R_X86_64_PLTOFF64:
1891 {
1892 if (h)
1893 name = h->root.root.string;
1894 else
1895 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1896 NULL);
1897 _bfd_error_handler
1898 /* xgettext:c-format */
1899 (_("%pB: relocation %s against symbol `%s' isn't "
1900 "supported in x32 mode"), abfd,
1901 x86_64_elf_howto_table[r_type].name, name);
1902 bfd_set_error (bfd_error_bad_value);
1903 goto error_return;
1904 }
1905 break;
1906 }
1907
1908 if (h != NULL)
1909 {
1910 /* It is referenced by a non-shared object. */
1911 h->ref_regular = 1;
1912
1913 if (h->type == STT_GNU_IFUNC)
1914 elf_tdata (info->output_bfd)->has_gnu_symbols
1915 |= elf_gnu_symbol_ifunc;
1916 }
1917
1918 converted_reloc = FALSE;
1919 if ((r_type == R_X86_64_GOTPCREL
1920 || r_type == R_X86_64_GOTPCRELX
1921 || r_type == R_X86_64_REX_GOTPCRELX)
1922 && (h == NULL || h->type != STT_GNU_IFUNC))
1923 {
1924 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1925 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1926 irel, h, &converted_reloc,
1927 info))
1928 goto error_return;
1929
1930 if (converted_reloc)
1931 converted = TRUE;
1932 }
1933
1934 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1935 symtab_hdr, sym_hashes,
1936 &r_type, GOT_UNKNOWN,
1937 rel, rel_end, h, r_symndx, FALSE))
1938 goto error_return;
1939
1940 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1941 if (h == htab->elf.hgot)
1942 htab->got_referenced = TRUE;
1943
1944 eh = (struct elf_x86_link_hash_entry *) h;
1945 switch (r_type)
1946 {
1947 case R_X86_64_TLSLD:
1948 htab->tls_ld_or_ldm_got.refcount = 1;
1949 goto create_got;
1950
1951 case R_X86_64_TPOFF32:
1952 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1953 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1954 &x86_64_elf_howto_table[r_type]);
1955 if (eh != NULL)
1956 eh->zero_undefweak &= 0x2;
1957 break;
1958
1959 case R_X86_64_GOTTPOFF:
1960 if (!bfd_link_executable (info))
1961 info->flags |= DF_STATIC_TLS;
1962 /* Fall through */
1963
1964 case R_X86_64_GOT32:
1965 case R_X86_64_GOTPCREL:
1966 case R_X86_64_GOTPCRELX:
1967 case R_X86_64_REX_GOTPCRELX:
1968 case R_X86_64_TLSGD:
1969 case R_X86_64_GOT64:
1970 case R_X86_64_GOTPCREL64:
1971 case R_X86_64_GOTPLT64:
1972 case R_X86_64_GOTPC32_TLSDESC:
1973 case R_X86_64_TLSDESC_CALL:
1974 /* This symbol requires a global offset table entry. */
1975 {
1976 int tls_type, old_tls_type;
1977
1978 switch (r_type)
1979 {
1980 default: tls_type = GOT_NORMAL; break;
1981 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1982 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1983 case R_X86_64_GOTPC32_TLSDESC:
1984 case R_X86_64_TLSDESC_CALL:
1985 tls_type = GOT_TLS_GDESC; break;
1986 }
1987
1988 if (h != NULL)
1989 {
1990 h->got.refcount = 1;
1991 old_tls_type = eh->tls_type;
1992 }
1993 else
1994 {
1995 bfd_signed_vma *local_got_refcounts;
1996
1997 /* This is a global offset table entry for a local symbol. */
1998 local_got_refcounts = elf_local_got_refcounts (abfd);
1999 if (local_got_refcounts == NULL)
2000 {
2001 bfd_size_type size;
2002
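                  /* A single allocation holds three arrays indexed by local
                     symbol number: GOT reference counts (bfd_signed_vma),
                     TLSDESC GOT offsets (bfd_vma) and GOT TLS types (char),
                     laid out one after another.  */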
2003 size = symtab_hdr->sh_info;
2004 size *= sizeof (bfd_signed_vma)
2005 + sizeof (bfd_vma) + sizeof (char);
2006 local_got_refcounts = ((bfd_signed_vma *)
2007 bfd_zalloc (abfd, size));
2008 if (local_got_refcounts == NULL)
2009 goto error_return;
2010 elf_local_got_refcounts (abfd) = local_got_refcounts;
2011 elf_x86_local_tlsdesc_gotent (abfd)
2012 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2013 elf_x86_local_got_tls_type (abfd)
2014 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2015 }
2016 local_got_refcounts[r_symndx] = 1;
2017 old_tls_type
2018 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2019 }
2020
2021 /* If a TLS symbol is accessed using IE at least once,
2022 there is no point in using a dynamic model for it. */
2023 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2024 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2025 || tls_type != GOT_TLS_IE))
2026 {
2027 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2028 tls_type = old_tls_type;
2029 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2030 && GOT_TLS_GD_ANY_P (tls_type))
2031 tls_type |= old_tls_type;
2032 else
2033 {
2034 if (h)
2035 name = h->root.root.string;
2036 else
2037 name = bfd_elf_sym_name (abfd, symtab_hdr,
2038 isym, NULL);
2039 _bfd_error_handler
2040 /* xgettext:c-format */
2041 (_("%pB: '%s' accessed both as normal and"
2042 " thread local symbol"),
2043 abfd, name);
2044 bfd_set_error (bfd_error_bad_value);
2045 goto error_return;
2046 }
2047 }
2048
2049 if (old_tls_type != tls_type)
2050 {
2051 if (eh != NULL)
2052 eh->tls_type = tls_type;
2053 else
2054 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2055 }
2056 }
2057 /* Fall through */
2058
2059 case R_X86_64_GOTOFF64:
2060 case R_X86_64_GOTPC32:
2061 case R_X86_64_GOTPC64:
2062 create_got:
2063 if (eh != NULL)
2064 eh->zero_undefweak &= 0x2;
2065 break;
2066
2067 case R_X86_64_PLT32:
2068 case R_X86_64_PLT32_BND:
2069 /* This symbol requires a procedure linkage table entry. We
2070 actually build the entry in adjust_dynamic_symbol,
2071 because this might be a case of linking PIC code which is
2072 never referenced by a dynamic object, in which case we
2073 don't need to generate a procedure linkage table entry
2074 after all. */
2075
2076 /* If this is a local symbol, we resolve it directly without
2077 creating a procedure linkage table entry. */
2078 if (h == NULL)
2079 continue;
2080
2081 eh->zero_undefweak &= 0x2;
2082 h->needs_plt = 1;
2083 h->plt.refcount = 1;
2084 break;
2085
2086 case R_X86_64_PLTOFF64:
2087 /* This tries to form the 'address' of a function relative
2088 to GOT. For global symbols we need a PLT entry. */
2089 if (h != NULL)
2090 {
2091 h->needs_plt = 1;
2092 h->plt.refcount = 1;
2093 }
2094 goto create_got;
2095
2096 case R_X86_64_SIZE32:
2097 case R_X86_64_SIZE64:
2098 size_reloc = TRUE;
2099 goto do_size;
2100
2101 case R_X86_64_32:
2102 if (!ABI_64_P (abfd))
2103 goto pointer;
2104 /* Fall through. */
2105 case R_X86_64_8:
2106 case R_X86_64_16:
2107 case R_X86_64_32S:
2108 /* Check relocation overflow as these relocs may lead to
2109 run-time relocation overflow. Don't error out for
2110 sections we don't care about, such as debug sections or
2111 when relocation overflow check is disabled. */
2112 if (!info->no_reloc_overflow_check
2113 && !converted_reloc
2114 && (bfd_link_pic (info)
2115 || (bfd_link_executable (info)
2116 && h != NULL
2117 && !h->def_regular
2118 && h->def_dynamic
2119 && (sec->flags & SEC_READONLY) == 0)))
2120 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2121 &x86_64_elf_howto_table[r_type]);
2122 /* Fall through. */
2123
2124 case R_X86_64_PC8:
2125 case R_X86_64_PC16:
2126 case R_X86_64_PC32:
2127 case R_X86_64_PC32_BND:
2128 case R_X86_64_PC64:
2129 case R_X86_64_64:
2130 pointer:
2131 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2132 eh->zero_undefweak |= 0x2;
2133 /* We are called after all symbols have been resolved. Only
2134 relocations against STT_GNU_IFUNC symbols must go through
2135 the PLT. */
2136 if (h != NULL
2137 && (bfd_link_executable (info)
2138 || h->type == STT_GNU_IFUNC))
2139 {
2140 bfd_boolean func_pointer_ref = FALSE;
2141
2142 if (r_type == R_X86_64_PC32)
2143 {
2144 /* Since something like ".long foo - ." may be used
2145 as a pointer, make sure that the PLT is used if foo is
2146 a function defined in a shared library. */
2147 if ((sec->flags & SEC_CODE) == 0)
2148 {
2149 h->pointer_equality_needed = 1;
2150 if (bfd_link_pie (info)
2151 && h->type == STT_FUNC
2152 && !h->def_regular
2153 && h->def_dynamic)
2154 {
2155 h->needs_plt = 1;
2156 h->plt.refcount = 1;
2157 }
2158 }
2159 }
2160 else if (r_type != R_X86_64_PC32_BND
2161 && r_type != R_X86_64_PC64)
2162 {
2163 h->pointer_equality_needed = 1;
2164 /* At run-time, R_X86_64_64 can be resolved for both
2165 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2166 can only be resolved for x32. */
2167 if ((sec->flags & SEC_READONLY) == 0
2168 && (r_type == R_X86_64_64
2169 || (!ABI_64_P (abfd)
2170 && (r_type == R_X86_64_32
2171 || r_type == R_X86_64_32S))))
2172 func_pointer_ref = TRUE;
2173 }
2174
2175 if (!func_pointer_ref)
2176 {
2177 /* If this reloc is in a read-only section, we might
2178 need a copy reloc. We can't check reliably at this
2179 stage whether the section is read-only, as input
2180 sections have not yet been mapped to output sections.
2181 Tentatively set the flag for now, and correct in
2182 adjust_dynamic_symbol. */
2183 h->non_got_ref = 1;
2184
2185 /* We may need a .plt entry if the symbol is a function
2186 defined in a shared lib or is a function referenced
2187 from the code or read-only section. */
2188 if (!h->def_regular
2189 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2190 h->plt.refcount = 1;
2191 }
2192 }
2193
2194 size_reloc = FALSE;
2195 do_size:
2196 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2197 htab->pointer_r_type))
2198 {
2199 struct elf_dyn_relocs *p;
2200 struct elf_dyn_relocs **head;
2201
2202 /* We must copy these reloc types into the output file.
2203 Create a reloc section in dynobj and make room for
2204 this reloc. */
2205 if (sreloc == NULL)
2206 {
2207 sreloc = _bfd_elf_make_dynamic_reloc_section
2208 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2209 abfd, /*rela?*/ TRUE);
2210
2211 if (sreloc == NULL)
2212 goto error_return;
2213 }
2214
2215 /* If this is a global symbol, we count the number of
2216 relocations we need for this symbol. */
2217 if (h != NULL)
2218 head = &eh->dyn_relocs;
2219 else
2220 {
2221 /* Track dynamic relocs needed for local syms too.
2222 We really need local syms available to do this
2223 easily. Oh well. */
2224 asection *s;
2225 void **vpp;
2226
2227 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2228 abfd, r_symndx);
2229 if (isym == NULL)
2230 goto error_return;
2231
2232 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2233 if (s == NULL)
2234 s = sec;
2235
2236 /* Beware of type punned pointers vs strict aliasing
2237 rules. */
2238 vpp = &(elf_section_data (s)->local_dynrel);
2239 head = (struct elf_dyn_relocs **)vpp;
2240 }
2241
2242 p = *head;
2243 if (p == NULL || p->sec != sec)
2244 {
2245 bfd_size_type amt = sizeof *p;
2246
2247 p = ((struct elf_dyn_relocs *)
2248 bfd_alloc (htab->elf.dynobj, amt));
2249 if (p == NULL)
2250 goto error_return;
2251 p->next = *head;
2252 *head = p;
2253 p->sec = sec;
2254 p->count = 0;
2255 p->pc_count = 0;
2256 }
2257
2258 p->count += 1;
2259 /* Count size relocation as PC-relative relocation. */
2260 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2261 p->pc_count += 1;
2262 }
2263 break;
2264
2265 /* This relocation describes the C++ object vtable hierarchy.
2266 Reconstruct it for later use during GC. */
2267 case R_X86_64_GNU_VTINHERIT:
2268 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2269 goto error_return;
2270 break;
2271
2272 /* This relocation describes which C++ vtable entries are actually
2273 used. Record for later use during GC. */
2274 case R_X86_64_GNU_VTENTRY:
2275 BFD_ASSERT (h != NULL);
2276 if (h != NULL
2277 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2278 goto error_return;
2279 break;
2280
2281 default:
2282 break;
2283 }
2284 }
2285
2286 if (elf_section_data (sec)->this_hdr.contents != contents)
2287 {
2288 if (!converted && !info->keep_memory)
2289 free (contents);
2290 else
2291 {
2292 /* Cache the section contents for elf_link_input_bfd if any
2293 load is converted or --no-keep-memory isn't used. */
2294 elf_section_data (sec)->this_hdr.contents = contents;
2295 }
2296 }
2297
2298 /* Cache relocations if any load is converted. */
2299 if (elf_section_data (sec)->relocs != relocs && converted)
2300 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2301
2302 return TRUE;
2303
2304 error_return:
2305 if (elf_section_data (sec)->this_hdr.contents != contents)
2306 free (contents);
2307 sec->check_relocs_failed = 1;
2308 return FALSE;
2309 }
2310
2311 /* Return the relocation value for @tpoff relocation
2312 if STT_TLS virtual address is ADDRESS. */
2313
2314 static bfd_vma
2315 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2316 {
2317 struct elf_link_hash_table *htab = elf_hash_table (info);
2318 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2319 bfd_vma static_tls_size;
2320
2321 /* If tls_segment is NULL, we should have signalled an error already. */
2322 if (htab->tls_sec == NULL)
2323 return 0;
2324
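 /* x86-64 uses TLS variant II: the thread pointer (%fs:0) points at the end
    of the static TLS block, so @tpoff values are negative offsets from it.  */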
2325 /* Consider special static TLS alignment requirements. */
2326 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2327 return address - static_tls_size - htab->tls_sec->vma;
2328 }
2329
2330 /* Relocate an x86_64 ELF section. */
2331
2332 static bfd_boolean
2333 elf_x86_64_relocate_section (bfd *output_bfd,
2334 struct bfd_link_info *info,
2335 bfd *input_bfd,
2336 asection *input_section,
2337 bfd_byte *contents,
2338 Elf_Internal_Rela *relocs,
2339 Elf_Internal_Sym *local_syms,
2340 asection **local_sections)
2341 {
2342 struct elf_x86_link_hash_table *htab;
2343 Elf_Internal_Shdr *symtab_hdr;
2344 struct elf_link_hash_entry **sym_hashes;
2345 bfd_vma *local_got_offsets;
2346 bfd_vma *local_tlsdesc_gotents;
2347 Elf_Internal_Rela *rel;
2348 Elf_Internal_Rela *wrel;
2349 Elf_Internal_Rela *relend;
2350 unsigned int plt_entry_size;
2351
2352 /* Skip if check_relocs failed. */
2353 if (input_section->check_relocs_failed)
2354 return FALSE;
2355
2356 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2357 if (htab == NULL)
2358 return FALSE;
2359
2360 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2361
2362 plt_entry_size = htab->plt.plt_entry_size;
2363 symtab_hdr = &elf_symtab_hdr (input_bfd);
2364 sym_hashes = elf_sym_hashes (input_bfd);
2365 local_got_offsets = elf_local_got_offsets (input_bfd);
2366 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2367
2368 _bfd_x86_elf_set_tls_module_base (info);
2369
2370 rel = wrel = relocs;
2371 relend = relocs + input_section->reloc_count;
2372 for (; rel < relend; wrel++, rel++)
2373 {
2374 unsigned int r_type, r_type_tls;
2375 reloc_howto_type *howto;
2376 unsigned long r_symndx;
2377 struct elf_link_hash_entry *h;
2378 struct elf_x86_link_hash_entry *eh;
2379 Elf_Internal_Sym *sym;
2380 asection *sec;
2381 bfd_vma off, offplt, plt_offset;
2382 bfd_vma relocation;
2383 bfd_boolean unresolved_reloc;
2384 bfd_reloc_status_type r;
2385 int tls_type;
2386 asection *base_got, *resolved_plt;
2387 bfd_vma st_size;
2388 bfd_boolean resolved_to_zero;
2389 bfd_boolean relative_reloc;
2390 bfd_boolean converted_reloc;
2391 bfd_boolean need_copy_reloc_in_pie;
2392
2393 r_type = ELF32_R_TYPE (rel->r_info);
2394 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2395 || r_type == (int) R_X86_64_GNU_VTENTRY)
2396 {
2397 if (wrel != rel)
2398 *wrel = *rel;
2399 continue;
2400 }
2401
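       /* elf_x86_64_convert_load_reloc ORs R_X86_64_converted_reloc_bit into
          the r_info of GOTPCREL relocations it has rewritten, so that they
          can be recognized here; strip the marker before using the
          relocation type.  */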
2402 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2403 r_type &= ~R_X86_64_converted_reloc_bit;
2404
2405 if (r_type >= (int) R_X86_64_standard)
2406 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2407
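       /* R_X86_64_32 on x32 is looked up through the extra howto entry at
          the end of the table, which checks overflow more leniently because
          the 32-bit value may be either zero- or sign-extended there.  */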
2408 if (r_type != (int) R_X86_64_32
2409 || ABI_64_P (output_bfd))
2410 howto = x86_64_elf_howto_table + r_type;
2411 else
2412 howto = (x86_64_elf_howto_table
2413 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2414 r_symndx = htab->r_sym (rel->r_info);
2415 h = NULL;
2416 sym = NULL;
2417 sec = NULL;
2418 unresolved_reloc = FALSE;
2419 if (r_symndx < symtab_hdr->sh_info)
2420 {
2421 sym = local_syms + r_symndx;
2422 sec = local_sections[r_symndx];
2423
2424 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2425 &sec, rel);
2426 st_size = sym->st_size;
2427
2428 /* Relocate against local STT_GNU_IFUNC symbol. */
2429 if (!bfd_link_relocatable (info)
2430 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2431 {
2432 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2433 rel, FALSE);
2434 if (h == NULL)
2435 abort ();
2436
2437 /* Set STT_GNU_IFUNC symbol value. */
2438 h->root.u.def.value = sym->st_value;
2439 h->root.u.def.section = sec;
2440 }
2441 }
2442 else
2443 {
2444 bfd_boolean warned ATTRIBUTE_UNUSED;
2445 bfd_boolean ignored ATTRIBUTE_UNUSED;
2446
2447 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2448 r_symndx, symtab_hdr, sym_hashes,
2449 h, sec, relocation,
2450 unresolved_reloc, warned, ignored);
2451 st_size = h->size;
2452 }
2453
2454 if (sec != NULL && discarded_section (sec))
2455 {
2456 _bfd_clear_contents (howto, input_bfd, input_section,
2457 contents + rel->r_offset);
2458 wrel->r_offset = rel->r_offset;
2459 wrel->r_info = 0;
2460 wrel->r_addend = 0;
2461
2462 /* For ld -r, remove relocations in debug sections against
2463 sections defined in discarded sections. This is not done for
2464 eh_frame, whose editing code expects the relocations to be present. */
2465 if (bfd_link_relocatable (info)
2466 && (input_section->flags & SEC_DEBUGGING))
2467 wrel--;
2468
2469 continue;
2470 }
2471
2472 if (bfd_link_relocatable (info))
2473 {
2474 if (wrel != rel)
2475 *wrel = *rel;
2476 continue;
2477 }
2478
2479 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2480 {
2481 if (r_type == R_X86_64_64)
2482 {
2483 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2484 zero-extend it to 64bit if addend is zero. */
2485 r_type = R_X86_64_32;
2486 memset (contents + rel->r_offset + 4, 0, 4);
2487 }
2488 else if (r_type == R_X86_64_SIZE64)
2489 {
2490 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2491 zero-extend it to 64bit if addend is zero. */
2492 r_type = R_X86_64_SIZE32;
2493 memset (contents + rel->r_offset + 4, 0, 4);
2494 }
2495 }
2496
2497 eh = (struct elf_x86_link_hash_entry *) h;
2498
2499 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2500 it here if it is defined in a non-shared object. */
2501 if (h != NULL
2502 && h->type == STT_GNU_IFUNC
2503 && h->def_regular)
2504 {
2505 bfd_vma plt_index;
2506 const char *name;
2507
2508 if ((input_section->flags & SEC_ALLOC) == 0)
2509 {
2510 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2511 STT_GNU_IFUNC symbol as STT_FUNC. */
2512 if (elf_section_type (input_section) == SHT_NOTE)
2513 goto skip_ifunc;
2514 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2515 sections because such sections are not SEC_ALLOC and
2516 thus ld.so will not process them. */
2517 if ((input_section->flags & SEC_DEBUGGING) != 0)
2518 continue;
2519 abort ();
2520 }
2521
2522 switch (r_type)
2523 {
2524 default:
2525 break;
2526
2527 case R_X86_64_GOTPCREL:
2528 case R_X86_64_GOTPCRELX:
2529 case R_X86_64_REX_GOTPCRELX:
2530 case R_X86_64_GOTPCREL64:
2531 base_got = htab->elf.sgot;
2532 off = h->got.offset;
2533
2534 if (base_got == NULL)
2535 abort ();
2536
2537 if (off == (bfd_vma) -1)
2538 {
2539 /* We can't use h->got.offset here to save state, or
2540 even just remember the offset, as finish_dynamic_symbol
2541 would use that as offset into .got. */
2542
2543 if (h->plt.offset == (bfd_vma) -1)
2544 abort ();
2545
2546 if (htab->elf.splt != NULL)
2547 {
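                      /* The first three .got.plt slots are reserved for the
                         dynamic linker (_DYNAMIC, the link map and the lazy
                         resolver), so PLT entry N owns slot N + 3.  */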
2548 plt_index = (h->plt.offset / plt_entry_size
2549 - htab->plt.has_plt0);
2550 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2551 base_got = htab->elf.sgotplt;
2552 }
2553 else
2554 {
2555 plt_index = h->plt.offset / plt_entry_size;
2556 off = plt_index * GOT_ENTRY_SIZE;
2557 base_got = htab->elf.igotplt;
2558 }
2559
2560 if (h->dynindx == -1
2561 || h->forced_local
2562 || info->symbolic)
2563 {
2564 /* This references the local definition. We must
2565 initialize this entry in the global offset table.
2566 Since the offset must always be a multiple of 8,
2567 we use the least significant bit to record
2568 whether we have initialized it already.
2569
2570 When doing a dynamic link, we create a .rela.got
2571 relocation entry to initialize the value. This
2572 is done in the finish_dynamic_symbol routine. */
2573 if ((off & 1) != 0)
2574 off &= ~1;
2575 else
2576 {
2577 bfd_put_64 (output_bfd, relocation,
2578 base_got->contents + off);
2579 /* Note that this is harmless for the GOTPLT64
2580 case, as -1 | 1 still is -1. */
2581 h->got.offset |= 1;
2582 }
2583 }
2584 }
2585
2586 relocation = (base_got->output_section->vma
2587 + base_got->output_offset + off);
2588
2589 goto do_relocation;
2590 }
2591
2592 if (h->plt.offset == (bfd_vma) -1)
2593 {
2594 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2595 if (r_type == htab->pointer_r_type
2596 && (input_section->flags & SEC_CODE) == 0)
2597 goto do_ifunc_pointer;
2598 goto bad_ifunc_reloc;
2599 }
2600
2601 /* STT_GNU_IFUNC symbol must go through PLT. */
2602 if (htab->elf.splt != NULL)
2603 {
2604 if (htab->plt_second != NULL)
2605 {
2606 resolved_plt = htab->plt_second;
2607 plt_offset = eh->plt_second.offset;
2608 }
2609 else
2610 {
2611 resolved_plt = htab->elf.splt;
2612 plt_offset = h->plt.offset;
2613 }
2614 }
2615 else
2616 {
2617 resolved_plt = htab->elf.iplt;
2618 plt_offset = h->plt.offset;
2619 }
2620
2621 relocation = (resolved_plt->output_section->vma
2622 + resolved_plt->output_offset + plt_offset);
2623
2624 switch (r_type)
2625 {
2626 default:
2627 bad_ifunc_reloc:
2628 if (h->root.root.string)
2629 name = h->root.root.string;
2630 else
2631 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2632 NULL);
2633 _bfd_error_handler
2634 /* xgettext:c-format */
2635 (_("%pB: relocation %s against STT_GNU_IFUNC "
2636 "symbol `%s' isn't supported"), input_bfd,
2637 howto->name, name);
2638 bfd_set_error (bfd_error_bad_value);
2639 return FALSE;
2640
2641 case R_X86_64_32S:
2642 if (bfd_link_pic (info))
2643 abort ();
2644 goto do_relocation;
2645
2646 case R_X86_64_32:
2647 if (ABI_64_P (output_bfd))
2648 goto do_relocation;
2649 /* FALLTHROUGH */
2650 case R_X86_64_64:
2651 do_ifunc_pointer:
2652 if (rel->r_addend != 0)
2653 {
2654 if (h->root.root.string)
2655 name = h->root.root.string;
2656 else
2657 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2658 sym, NULL);
2659 _bfd_error_handler
2660 /* xgettext:c-format */
2661 (_("%pB: relocation %s against STT_GNU_IFUNC "
2662 "symbol `%s' has non-zero addend: %" PRId64),
2663 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2664 bfd_set_error (bfd_error_bad_value);
2665 return FALSE;
2666 }
2667
2668 /* Generate a dynamic relocation only when there is a
2669 non-GOT reference in a shared object or there is no
2670 PLT. */
2671 if ((bfd_link_pic (info) && h->non_got_ref)
2672 || h->plt.offset == (bfd_vma) -1)
2673 {
2674 Elf_Internal_Rela outrel;
2675 asection *sreloc;
2676
2677 /* Need a dynamic relocation to get the real function
2678 address. */
2679 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2680 info,
2681 input_section,
2682 rel->r_offset);
2683 if (outrel.r_offset == (bfd_vma) -1
2684 || outrel.r_offset == (bfd_vma) -2)
2685 abort ();
2686
2687 outrel.r_offset += (input_section->output_section->vma
2688 + input_section->output_offset);
2689
2690 if (POINTER_LOCAL_IFUNC_P (info, h))
2691 {
2692 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2693 h->root.root.string,
2694 h->root.u.def.section->owner);
2695
2696 /* This symbol is resolved locally. */
2697 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2698 outrel.r_addend = (h->root.u.def.value
2699 + h->root.u.def.section->output_section->vma
2700 + h->root.u.def.section->output_offset);
2701 }
2702 else
2703 {
2704 outrel.r_info = htab->r_info (h->dynindx, r_type);
2705 outrel.r_addend = 0;
2706 }
2707
2708 /* Dynamic relocations are stored in
2709 1. .rela.ifunc section in PIC object.
2710 2. .rela.got section in dynamic executable.
2711 3. .rela.iplt section in static executable. */
2712 if (bfd_link_pic (info))
2713 sreloc = htab->elf.irelifunc;
2714 else if (htab->elf.splt != NULL)
2715 sreloc = htab->elf.srelgot;
2716 else
2717 sreloc = htab->elf.irelplt;
2718 elf_append_rela (output_bfd, sreloc, &outrel);
2719
2720 /* If this reloc is against an external symbol, we
2721 do not want to fiddle with the addend. Otherwise,
2722 we need to include the symbol value so that it
2723 becomes an addend for the dynamic reloc. For an
2724 internal symbol, we have updated addend. */
2725 continue;
2726 }
2727 /* FALLTHROUGH */
2728 case R_X86_64_PC32:
2729 case R_X86_64_PC32_BND:
2730 case R_X86_64_PC64:
2731 case R_X86_64_PLT32:
2732 case R_X86_64_PLT32_BND:
2733 goto do_relocation;
2734 }
2735 }
2736
2737 skip_ifunc:
2738 resolved_to_zero = (eh != NULL
2739 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2740
2741 /* When generating a shared object, the relocations handled here are
2742 copied into the output file to be resolved at run time. */
2743 switch (r_type)
2744 {
2745 case R_X86_64_GOT32:
2746 case R_X86_64_GOT64:
2747 /* Relocation is to the entry for this symbol in the global
2748 offset table. */
2749 case R_X86_64_GOTPCREL:
2750 case R_X86_64_GOTPCRELX:
2751 case R_X86_64_REX_GOTPCRELX:
2752 case R_X86_64_GOTPCREL64:
2753 /* Use global offset table entry as symbol value. */
2754 case R_X86_64_GOTPLT64:
2755 /* This is obsolete and treated the same as GOT64. */
2756 base_got = htab->elf.sgot;
2757
2758 if (htab->elf.sgot == NULL)
2759 abort ();
2760
2761 relative_reloc = FALSE;
2762 if (h != NULL)
2763 {
2764 off = h->got.offset;
2765 if (h->needs_plt
2766 && h->plt.offset != (bfd_vma)-1
2767 && off == (bfd_vma)-1)
2768 {
2769 /* We can't use h->got.offset here to save
2770 state, or even just remember the offset, as
2771 finish_dynamic_symbol would use that as offset into
2772 .got. */
2773 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2774 - htab->plt.has_plt0);
2775 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2776 base_got = htab->elf.sgotplt;
2777 }
2778
2779 if (RESOLVED_LOCALLY_P (info, h, htab))
2780 {
2781 /* We must initialize this entry in the global offset
2782 table. Since the offset must always be a multiple
2783 of 8, we use the least significant bit to record
2784 whether we have initialized it already.
2785
2786 When doing a dynamic link, we create a .rela.got
2787 relocation entry to initialize the value. This is
2788 done in the finish_dynamic_symbol routine. */
2789 if ((off & 1) != 0)
2790 off &= ~1;
2791 else
2792 {
2793 bfd_put_64 (output_bfd, relocation,
2794 base_got->contents + off);
2795 /* Note that this is harmless for the GOTPLT64 case,
2796 as -1 | 1 still is -1. */
2797 h->got.offset |= 1;
2798
2799 if (GENERATE_RELATIVE_RELOC_P (info, h))
2800 {
2801 /* If this symbol isn't dynamic in PIC,
2802 generate R_X86_64_RELATIVE here. */
2803 eh->no_finish_dynamic_symbol = 1;
2804 relative_reloc = TRUE;
2805 }
2806 }
2807 }
2808 else
2809 unresolved_reloc = FALSE;
2810 }
2811 else
2812 {
2813 if (local_got_offsets == NULL)
2814 abort ();
2815
2816 off = local_got_offsets[r_symndx];
2817
2818 /* The offset must always be a multiple of 8. We use
2819 the least significant bit to record whether we have
2820 already generated the necessary reloc. */
2821 if ((off & 1) != 0)
2822 off &= ~1;
2823 else
2824 {
2825 bfd_put_64 (output_bfd, relocation,
2826 base_got->contents + off);
2827 local_got_offsets[r_symndx] |= 1;
2828
2829 if (bfd_link_pic (info))
2830 relative_reloc = TRUE;
2831 }
2832 }
2833
2834 if (relative_reloc)
2835 {
2836 asection *s;
2837 Elf_Internal_Rela outrel;
2838
2839 /* We need to generate a R_X86_64_RELATIVE reloc
2840 for the dynamic linker. */
2841 s = htab->elf.srelgot;
2842 if (s == NULL)
2843 abort ();
2844
2845 outrel.r_offset = (base_got->output_section->vma
2846 + base_got->output_offset
2847 + off);
2848 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2849 outrel.r_addend = relocation;
2850 elf_append_rela (output_bfd, s, &outrel);
2851 }
2852
2853 if (off >= (bfd_vma) -2)
2854 abort ();
2855
2856 relocation = base_got->output_section->vma
2857 + base_got->output_offset + off;
2858 if (r_type != R_X86_64_GOTPCREL
2859 && r_type != R_X86_64_GOTPCRELX
2860 && r_type != R_X86_64_REX_GOTPCRELX
2861 && r_type != R_X86_64_GOTPCREL64)
2862 relocation -= htab->elf.sgotplt->output_section->vma
2863 - htab->elf.sgotplt->output_offset;
2864
2865 break;
2866
2867 case R_X86_64_GOTOFF64:
2868 /* Relocation is relative to the start of the global offset
2869 table. */
2870
2871 /* Check to make sure it isn't a protected function or data
2872 symbol for a shared library, since it may not be local when
2873 used as a function address or with a copy relocation. We also
2874 need to make sure that the symbol is referenced locally. */
2875 if (bfd_link_pic (info) && h)
2876 {
2877 if (!h->def_regular)
2878 {
2879 const char *v;
2880
2881 switch (ELF_ST_VISIBILITY (h->other))
2882 {
2883 case STV_HIDDEN:
2884 v = _("hidden symbol");
2885 break;
2886 case STV_INTERNAL:
2887 v = _("internal symbol");
2888 break;
2889 case STV_PROTECTED:
2890 v = _("protected symbol");
2891 break;
2892 default:
2893 v = _("symbol");
2894 break;
2895 }
2896
2897 _bfd_error_handler
2898 /* xgettext:c-format */
2899 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2900 " `%s' can not be used when making a shared object"),
2901 input_bfd, v, h->root.root.string);
2902 bfd_set_error (bfd_error_bad_value);
2903 return FALSE;
2904 }
2905 else if (!bfd_link_executable (info)
2906 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2907 && (h->type == STT_FUNC
2908 || h->type == STT_OBJECT)
2909 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2910 {
2911 _bfd_error_handler
2912 /* xgettext:c-format */
2913 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2914 " `%s' can not be used when making a shared object"),
2915 input_bfd,
2916 h->type == STT_FUNC ? "function" : "data",
2917 h->root.root.string);
2918 bfd_set_error (bfd_error_bad_value);
2919 return FALSE;
2920 }
2921 }
2922
2923 /* Note that sgot is not involved in this
2924 calculation. We always want the start of .got.plt. If we
2925 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2926 permitted by the ABI, we might have to change this
2927 calculation. */
2928 relocation -= htab->elf.sgotplt->output_section->vma
2929 + htab->elf.sgotplt->output_offset;
2930 break;
2931
2932 case R_X86_64_GOTPC32:
2933 case R_X86_64_GOTPC64:
2934 /* Use global offset table as symbol value. */
2935 relocation = htab->elf.sgotplt->output_section->vma
2936 + htab->elf.sgotplt->output_offset;
2937 unresolved_reloc = FALSE;
2938 break;
2939
2940 case R_X86_64_PLTOFF64:
2941 /* Relocation is PLT entry relative to GOT. For local
2942 symbols it's the symbol itself relative to GOT. */
2943 if (h != NULL
2944 /* See PLT32 handling. */
2945 && (h->plt.offset != (bfd_vma) -1
2946 || eh->plt_got.offset != (bfd_vma) -1)
2947 && htab->elf.splt != NULL)
2948 {
2949 if (eh->plt_got.offset != (bfd_vma) -1)
2950 {
2951 /* Use the GOT PLT. */
2952 resolved_plt = htab->plt_got;
2953 plt_offset = eh->plt_got.offset;
2954 }
2955 else if (htab->plt_second != NULL)
2956 {
2957 resolved_plt = htab->plt_second;
2958 plt_offset = eh->plt_second.offset;
2959 }
2960 else
2961 {
2962 resolved_plt = htab->elf.splt;
2963 plt_offset = h->plt.offset;
2964 }
2965
2966 relocation = (resolved_plt->output_section->vma
2967 + resolved_plt->output_offset
2968 + plt_offset);
2969 unresolved_reloc = FALSE;
2970 }
2971
2972 relocation -= htab->elf.sgotplt->output_section->vma
2973 + htab->elf.sgotplt->output_offset;
2974 break;
2975
2976 case R_X86_64_PLT32:
2977 case R_X86_64_PLT32_BND:
2978 /* Relocation is to the entry for this symbol in the
2979 procedure linkage table. */
2980
2981 /* Resolve a PLT32 reloc against a local symbol directly,
2982 without using the procedure linkage table. */
2983 if (h == NULL)
2984 break;
2985
2986 if ((h->plt.offset == (bfd_vma) -1
2987 && eh->plt_got.offset == (bfd_vma) -1)
2988 || htab->elf.splt == NULL)
2989 {
2990 /* We didn't make a PLT entry for this symbol. This
2991 happens when statically linking PIC code, or when
2992 using -Bsymbolic. */
2993 break;
2994 }
2995
2996 use_plt:
2997 if (h->plt.offset != (bfd_vma) -1)
2998 {
2999 if (htab->plt_second != NULL)
3000 {
3001 resolved_plt = htab->plt_second;
3002 plt_offset = eh->plt_second.offset;
3003 }
3004 else
3005 {
3006 resolved_plt = htab->elf.splt;
3007 plt_offset = h->plt.offset;
3008 }
3009 }
3010 else
3011 {
3012 /* Use the GOT PLT. */
3013 resolved_plt = htab->plt_got;
3014 plt_offset = eh->plt_got.offset;
3015 }
3016
3017 relocation = (resolved_plt->output_section->vma
3018 + resolved_plt->output_offset
3019 + plt_offset);
3020 unresolved_reloc = FALSE;
3021 break;
3022
3023 case R_X86_64_SIZE32:
3024 case R_X86_64_SIZE64:
3025 /* Set to symbol size. */
3026 relocation = st_size;
3027 goto direct;
3028
3029 case R_X86_64_PC8:
3030 case R_X86_64_PC16:
3031 case R_X86_64_PC32:
3032 case R_X86_64_PC32_BND:
3033 /* Don't complain about -fPIC if the symbol is undefined when
3034 building an executable, unless it is an unresolved weak symbol,
3035 references a dynamic definition in a PIE, or -z nocopyreloc
3036 is used. */
3037 if ((input_section->flags & SEC_ALLOC) != 0
3038 && (input_section->flags & SEC_READONLY) != 0
3039 && h != NULL
3040 && ((bfd_link_executable (info)
3041 && ((h->root.type == bfd_link_hash_undefweak
3042 && !resolved_to_zero)
3043 || (bfd_link_pie (info)
3044 && !h->def_regular
3045 && h->def_dynamic)
3046 || ((info->nocopyreloc
3047 || (eh->def_protected
3048 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3049 && h->def_dynamic
3050 && !(h->root.u.def.section->flags & SEC_CODE))))
3051 || bfd_link_dll (info)))
3052 {
3053 bfd_boolean fail = FALSE;
3054 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3055 {
3056 /* Symbol is referenced locally. Make sure it is
3057 defined locally. */
3058 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3059 }
3060 else if (!(bfd_link_pie (info)
3061 && (h->needs_copy || eh->needs_copy)))
3062 {
3063 /* Symbol doesn't need copy reloc and isn't referenced
3064 locally. Address of protected function may not be
3065 reachable at run-time. */
3066 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3067 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3068 && h->type == STT_FUNC));
3069 }
3070
3071 if (fail)
3072 return elf_x86_64_need_pic (info, input_bfd, input_section,
3073 h, NULL, NULL, howto);
3074 }
3075 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3076 as function address. */
3077 else if (h != NULL
3078 && (input_section->flags & SEC_CODE) == 0
3079 && bfd_link_pie (info)
3080 && h->type == STT_FUNC
3081 && !h->def_regular
3082 && h->def_dynamic)
3083 goto use_plt;
3084 /* Fall through. */
3085
3086 case R_X86_64_8:
3087 case R_X86_64_16:
3088 case R_X86_64_32:
3089 case R_X86_64_PC64:
3090 case R_X86_64_64:
3091 /* FIXME: The ABI says the linker should make sure the value is
3092 the same when it's zero-extended to 64 bits. */
3093
3094 direct:
3095 if ((input_section->flags & SEC_ALLOC) == 0)
3096 break;
3097
3098 need_copy_reloc_in_pie = (bfd_link_pie (info)
3099 && h != NULL
3100 && (h->needs_copy
3101 || eh->needs_copy
3102 || (h->root.type
3103 == bfd_link_hash_undefined))
3104 && (X86_PCREL_TYPE_P (r_type)
3105 || X86_SIZE_TYPE_P (r_type)));
3106
3107 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3108 need_copy_reloc_in_pie,
3109 resolved_to_zero, FALSE))
3110 {
3111 Elf_Internal_Rela outrel;
3112 bfd_boolean skip, relocate;
3113 asection *sreloc;
3114
3115 /* When generating a shared object, these relocations
3116 are copied into the output file to be resolved at run
3117 time. */
3118 skip = FALSE;
3119 relocate = FALSE;
3120
3121 outrel.r_offset =
3122 _bfd_elf_section_offset (output_bfd, info, input_section,
3123 rel->r_offset);
3124 if (outrel.r_offset == (bfd_vma) -1)
3125 skip = TRUE;
3126 else if (outrel.r_offset == (bfd_vma) -2)
3127 skip = TRUE, relocate = TRUE;
3128
3129 outrel.r_offset += (input_section->output_section->vma
3130 + input_section->output_offset);
3131
3132 if (skip)
3133 memset (&outrel, 0, sizeof outrel);
3134
3135 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3136 {
3137 outrel.r_info = htab->r_info (h->dynindx, r_type);
3138 outrel.r_addend = rel->r_addend;
3139 }
3140 else
3141 {
3142 /* This symbol is local, or marked to become local.
3143 When relocation overflow check is disabled, we
3144 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3145 if (r_type == htab->pointer_r_type
3146 || (r_type == R_X86_64_32
3147 && info->no_reloc_overflow_check))
3148 {
3149 relocate = TRUE;
3150 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3151 outrel.r_addend = relocation + rel->r_addend;
3152 }
3153 else if (r_type == R_X86_64_64
3154 && !ABI_64_P (output_bfd))
3155 {
3156 relocate = TRUE;
3157 outrel.r_info = htab->r_info (0,
3158 R_X86_64_RELATIVE64);
3159 outrel.r_addend = relocation + rel->r_addend;
3160 /* Check addend overflow. */
3161 if ((outrel.r_addend & 0x80000000)
3162 != (rel->r_addend & 0x80000000))
3163 {
3164 const char *name;
3165 int addend = rel->r_addend;
3166 if (h && h->root.root.string)
3167 name = h->root.root.string;
3168 else
3169 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3170 sym, NULL);
3171 _bfd_error_handler
3172 /* xgettext:c-format */
3173 (_("%pB: addend %s%#x in relocation %s against "
3174 "symbol `%s' at %#" PRIx64
3175 " in section `%pA' is out of range"),
3176 input_bfd, addend < 0 ? "-" : "", addend,
3177 howto->name, name, (uint64_t) rel->r_offset,
3178 input_section);
3179 bfd_set_error (bfd_error_bad_value);
3180 return FALSE;
3181 }
3182 }
3183 else
3184 {
3185 long sindx;
3186
3187 if (bfd_is_abs_section (sec))
3188 sindx = 0;
3189 else if (sec == NULL || sec->owner == NULL)
3190 {
3191 bfd_set_error (bfd_error_bad_value);
3192 return FALSE;
3193 }
3194 else
3195 {
3196 asection *osec;
3197
3198 /* We are turning this relocation into one
3199 against a section symbol. It would be
3200 proper to subtract the symbol's value,
3201 osec->vma, from the emitted reloc addend,
3202 but ld.so expects buggy relocs. */
3203 osec = sec->output_section;
3204 sindx = elf_section_data (osec)->dynindx;
3205 if (sindx == 0)
3206 {
3207 asection *oi = htab->elf.text_index_section;
3208 sindx = elf_section_data (oi)->dynindx;
3209 }
3210 BFD_ASSERT (sindx != 0);
3211 }
3212
3213 outrel.r_info = htab->r_info (sindx, r_type);
3214 outrel.r_addend = relocation + rel->r_addend;
3215 }
3216 }
3217
3218 sreloc = elf_section_data (input_section)->sreloc;
3219
3220 if (sreloc == NULL || sreloc->contents == NULL)
3221 {
3222 r = bfd_reloc_notsupported;
3223 goto check_relocation_error;
3224 }
3225
3226 elf_append_rela (output_bfd, sreloc, &outrel);
3227
3228 /* If this reloc is against an external symbol, we do
3229 not want to fiddle with the addend. Otherwise, we
3230 need to include the symbol value so that it becomes
3231 an addend for the dynamic reloc. */
3232 if (! relocate)
3233 continue;
3234 }
3235
3236 break;
3237
3238 case R_X86_64_TLSGD:
3239 case R_X86_64_GOTPC32_TLSDESC:
3240 case R_X86_64_TLSDESC_CALL:
3241 case R_X86_64_GOTTPOFF:
3242 tls_type = GOT_UNKNOWN;
3243 if (h == NULL && local_got_offsets)
3244 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3245 else if (h != NULL)
3246 tls_type = elf_x86_hash_entry (h)->tls_type;
3247
3248 r_type_tls = r_type;
3249 if (! elf_x86_64_tls_transition (info, input_bfd,
3250 input_section, contents,
3251 symtab_hdr, sym_hashes,
3252 &r_type_tls, tls_type, rel,
3253 relend, h, r_symndx, TRUE))
3254 return FALSE;
3255
3256 if (r_type_tls == R_X86_64_TPOFF32)
3257 {
3258 bfd_vma roff = rel->r_offset;
3259
3260 BFD_ASSERT (! unresolved_reloc);
3261
3262 if (r_type == R_X86_64_TLSGD)
3263 {
3264 /* GD->LE transition. For 64bit, change
3265 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3266 .word 0x6666; rex64; call __tls_get_addr@PLT
3267 or
3268 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3269 .byte 0x66; rex64
3270 call *__tls_get_addr@GOTPCREL(%rip)
3271 which may be converted to
3272 addr32 call __tls_get_addr
3273 into:
3274 movq %fs:0, %rax
3275 leaq foo@tpoff(%rax), %rax
3276 For 32bit, change
3277 leaq foo@tlsgd(%rip), %rdi
3278 .word 0x6666; rex64; call __tls_get_addr@PLT
3279 or
3280 leaq foo@tlsgd(%rip), %rdi
3281 .byte 0x66; rex64
3282 call *__tls_get_addr@GOTPCREL(%rip)
3283 which may be converted to
3284 addr32 call __tls_get_addr
3285 into:
3286 movl %fs:0, %eax
3287 leaq foo@tpoff(%rax), %rax
3288 For largepic, change:
3289 leaq foo@tlsgd(%rip), %rdi
3290 movabsq $__tls_get_addr@pltoff, %rax
3291 addq %r15, %rax
3292 call *%rax
3293 into:
3294 movq %fs:0, %rax
3295 leaq foo@tpoff(%rax), %rax
3296 nopw 0x0(%rax,%rax,1) */
3297 int largepic = 0;
3298 if (ABI_64_P (output_bfd))
3299 {
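                      /* A movabsq (48 b8 <imm64>) immediately after the leaq
                         identifies the largepic call sequence.  */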
3300 if (contents[roff + 5] == 0xb8)
3301 {
3302 memcpy (contents + roff - 3,
3303 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3304 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3305 largepic = 1;
3306 }
3307 else
3308 memcpy (contents + roff - 4,
3309 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3310 16);
3311 }
3312 else
3313 memcpy (contents + roff - 3,
3314 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3315 15);
3316 bfd_put_32 (output_bfd,
3317 elf_x86_64_tpoff (info, relocation),
3318 contents + roff + 8 + largepic);
3319 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3320 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3321 rel++;
3322 wrel++;
3323 continue;
3324 }
3325 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3326 {
3327 /* GDesc -> LE transition.
3328 It's originally something like:
3329 leaq x@tlsdesc(%rip), %rax
3330
3331 Change it to:
3332 movl $x@tpoff, %rax. */
3333
3334 unsigned int val, type;
3335
3336 type = bfd_get_8 (input_bfd, contents + roff - 3);
3337 val = bfd_get_8 (input_bfd, contents + roff - 1);
3338 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3339 contents + roff - 3);
3340 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3341 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3342 contents + roff - 1);
3343 bfd_put_32 (output_bfd,
3344 elf_x86_64_tpoff (info, relocation),
3345 contents + roff);
3346 continue;
3347 }
3348 else if (r_type == R_X86_64_TLSDESC_CALL)
3349 {
3350 /* GDesc -> LE transition.
3351 It's originally:
3352 call *(%rax)
3353 Turn it into:
3354 xchg %ax,%ax. */
3355 bfd_put_8 (output_bfd, 0x66, contents + roff);
3356 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3357 continue;
3358 }
3359 else if (r_type == R_X86_64_GOTTPOFF)
3360 {
3361 /* IE->LE transition:
3362 For 64bit, originally it can be one of:
3363 movq foo@gottpoff(%rip), %reg
3364 addq foo@gottpoff(%rip), %reg
3365 We change it into:
3366 movq $foo, %reg
3367 leaq foo(%reg), %reg
3368 addq $foo, %reg.
3369 For 32bit, originally it can be one of:
3370 movq foo@gottpoff(%rip), %reg
3371 addl foo@gottpoff(%rip), %reg
3372 We change it into:
3373 movq $foo, %reg
3374 leal foo(%reg), %reg
3375 addl $foo, %reg. */
3376
3377 unsigned int val, type, reg;
3378
3379 if (roff >= 3)
3380 val = bfd_get_8 (input_bfd, contents + roff - 3);
3381 else
3382 val = 0;
3383 type = bfd_get_8 (input_bfd, contents + roff - 2);
3384 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3385 reg >>= 3;
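                      /* The target register is in the ModRM reg field of the
                         original instruction, extended by REX.R.  The
                         rewritten forms address it through ModRM r/m (and,
                         for the lea form, through both fields), so the REX
                         prefix is adjusted below to supply the matching
                         REX.B bit.  */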
3386 if (type == 0x8b)
3387 {
3388 /* movq */
3389 if (val == 0x4c)
3390 bfd_put_8 (output_bfd, 0x49,
3391 contents + roff - 3);
3392 else if (!ABI_64_P (output_bfd) && val == 0x44)
3393 bfd_put_8 (output_bfd, 0x41,
3394 contents + roff - 3);
3395 bfd_put_8 (output_bfd, 0xc7,
3396 contents + roff - 2);
3397 bfd_put_8 (output_bfd, 0xc0 | reg,
3398 contents + roff - 1);
3399 }
3400 else if (reg == 4)
3401 {
3402 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3403 is special */
3404 if (val == 0x4c)
3405 bfd_put_8 (output_bfd, 0x49,
3406 contents + roff - 3);
3407 else if (!ABI_64_P (output_bfd) && val == 0x44)
3408 bfd_put_8 (output_bfd, 0x41,
3409 contents + roff - 3);
3410 bfd_put_8 (output_bfd, 0x81,
3411 contents + roff - 2);
3412 bfd_put_8 (output_bfd, 0xc0 | reg,
3413 contents + roff - 1);
3414 }
3415 else
3416 {
3417 /* addq/addl -> leaq/leal */
3418 if (val == 0x4c)
3419 bfd_put_8 (output_bfd, 0x4d,
3420 contents + roff - 3);
3421 else if (!ABI_64_P (output_bfd) && val == 0x44)
3422 bfd_put_8 (output_bfd, 0x45,
3423 contents + roff - 3);
3424 bfd_put_8 (output_bfd, 0x8d,
3425 contents + roff - 2);
3426 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3427 contents + roff - 1);
3428 }
3429 bfd_put_32 (output_bfd,
3430 elf_x86_64_tpoff (info, relocation),
3431 contents + roff);
3432 continue;
3433 }
3434 else
3435 BFD_ASSERT (FALSE);
3436 }
3437
3438 if (htab->elf.sgot == NULL)
3439 abort ();
3440
3441 if (h != NULL)
3442 {
3443 off = h->got.offset;
3444 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3445 }
3446 else
3447 {
3448 if (local_got_offsets == NULL)
3449 abort ();
3450
3451 off = local_got_offsets[r_symndx];
3452 offplt = local_tlsdesc_gotents[r_symndx];
3453 }
3454
3455 if ((off & 1) != 0)
3456 off &= ~1;
3457 else
3458 {
3459 Elf_Internal_Rela outrel;
3460 int dr_type, indx;
3461 asection *sreloc;
3462
3463 if (htab->elf.srelgot == NULL)
3464 abort ();
3465
3466 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3467
3468 if (GOT_TLS_GDESC_P (tls_type))
3469 {
3470 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3471 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3472 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3473 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3474 + htab->elf.sgotplt->output_offset
3475 + offplt
3476 + htab->sgotplt_jump_table_size);
3477 sreloc = htab->elf.srelplt;
3478 if (indx == 0)
3479 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3480 else
3481 outrel.r_addend = 0;
3482 elf_append_rela (output_bfd, sreloc, &outrel);
3483 }
3484
3485 sreloc = htab->elf.srelgot;
3486
3487 outrel.r_offset = (htab->elf.sgot->output_section->vma
3488 + htab->elf.sgot->output_offset + off);
3489
3490 if (GOT_TLS_GD_P (tls_type))
3491 dr_type = R_X86_64_DTPMOD64;
3492 else if (GOT_TLS_GDESC_P (tls_type))
3493 goto dr_done;
3494 else
3495 dr_type = R_X86_64_TPOFF64;
3496
3497 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3498 outrel.r_addend = 0;
3499 if ((dr_type == R_X86_64_TPOFF64
3500 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3501 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3502 outrel.r_info = htab->r_info (indx, dr_type);
3503
3504 elf_append_rela (output_bfd, sreloc, &outrel);
3505
3506 if (GOT_TLS_GD_P (tls_type))
3507 {
3508 if (indx == 0)
3509 {
3510 BFD_ASSERT (! unresolved_reloc);
3511 bfd_put_64 (output_bfd,
3512 relocation - _bfd_x86_elf_dtpoff_base (info),
3513 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3514 }
3515 else
3516 {
3517 bfd_put_64 (output_bfd, 0,
3518 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3519 outrel.r_info = htab->r_info (indx,
3520 R_X86_64_DTPOFF64);
3521 outrel.r_offset += GOT_ENTRY_SIZE;
3522 elf_append_rela (output_bfd, sreloc,
3523 &outrel);
3524 }
3525 }
3526
3527 dr_done:
3528 if (h != NULL)
3529 h->got.offset |= 1;
3530 else
3531 local_got_offsets[r_symndx] |= 1;
3532 }
3533
3534 if (off >= (bfd_vma) -2
3535 && ! GOT_TLS_GDESC_P (tls_type))
3536 abort ();
3537 if (r_type_tls == r_type)
3538 {
3539 if (r_type == R_X86_64_GOTPC32_TLSDESC
3540 || r_type == R_X86_64_TLSDESC_CALL)
3541 relocation = htab->elf.sgotplt->output_section->vma
3542 + htab->elf.sgotplt->output_offset
3543 + offplt + htab->sgotplt_jump_table_size;
3544 else
3545 relocation = htab->elf.sgot->output_section->vma
3546 + htab->elf.sgot->output_offset + off;
3547 unresolved_reloc = FALSE;
3548 }
3549 else
3550 {
3551 bfd_vma roff = rel->r_offset;
3552
3553 if (r_type == R_X86_64_TLSGD)
3554 {
3555 /* GD->IE transition. For 64bit, change
3556 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3557 .word 0x6666; rex64; call __tls_get_addr@PLT
3558 or
3559 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3560 .byte 0x66; rex64
3561 call *__tls_get_addr@GOTPCREL(%rip)
3562 which may be converted to
3563 addr32 call __tls_get_addr
3564 into:
3565 movq %fs:0, %rax
3566 addq foo@gottpoff(%rip), %rax
3567 For 32bit, change
3568 leaq foo@tlsgd(%rip), %rdi
3569 .word 0x6666; rex64; call __tls_get_addr@PLT
3570 or
3571 leaq foo@tlsgd(%rip), %rdi
3572 .byte 0x66; rex64;
3573 call *__tls_get_addr@GOTPCREL(%rip)
3574 which may be converted to
3575 addr32 call __tls_get_addr
3576 into:
3577 movl %fs:0, %eax
3578 addq foo@gottpoff(%rip), %rax
3579 For largepic, change:
3580 leaq foo@tlsgd(%rip), %rdi
3581 movabsq $__tls_get_addr@pltoff, %rax
3582 addq %r15, %rax
3583 call *%rax
3584 into:
3585 movq %fs:0, %rax
3586 addq foo@gottpoff(%rip), %rax
3587 nopw 0x0(%rax,%rax,1) */
3588 int largepic = 0;
3589 if (ABI_64_P (output_bfd))
3590 {
3591 if (contents[roff + 5] == 0xb8)
3592 {
3593 memcpy (contents + roff - 3,
3594 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3595 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3596 largepic = 1;
3597 }
3598 else
3599 memcpy (contents + roff - 4,
3600 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3601 16);
3602 }
3603 else
3604 memcpy (contents + roff - 3,
3605 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3606 15);
3607
3608 relocation = (htab->elf.sgot->output_section->vma
3609 + htab->elf.sgot->output_offset + off
3610 - roff
3611 - largepic
3612 - input_section->output_section->vma
3613 - input_section->output_offset
3614 - 12);
3615 bfd_put_32 (output_bfd, relocation,
3616 contents + roff + 8 + largepic);
3617 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3618 rel++;
3619 wrel++;
3620 continue;
3621 }
3622 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3623 {
3624 /* GDesc -> IE transition.
3625 It's originally something like:
3626 leaq x@tlsdesc(%rip), %rax
3627
3628 Change it to:
3629 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3630
3631 /* Now modify the instruction as appropriate. To
3632 turn a leaq into a movq in the form we use it, it
3633 suffices to change the second byte from 0x8d to
3634 0x8b. */
3635 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3636
3637 bfd_put_32 (output_bfd,
3638 htab->elf.sgot->output_section->vma
3639 + htab->elf.sgot->output_offset + off
3640 - rel->r_offset
3641 - input_section->output_section->vma
3642 - input_section->output_offset
3643 - 4,
3644 contents + roff);
3645 continue;
3646 }
3647 else if (r_type == R_X86_64_TLSDESC_CALL)
3648 {
3649 /* GDesc -> IE transition.
3650 It's originally:
3651 call *(%rax)
3652
3653 Change it to:
3654 xchg %ax, %ax. */
3655
3656 bfd_put_8 (output_bfd, 0x66, contents + roff);
3657 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3658 continue;
3659 }
3660 else
3661 BFD_ASSERT (FALSE);
3662 }
3663 break;
3664
3665 case R_X86_64_TLSLD:
3666 if (! elf_x86_64_tls_transition (info, input_bfd,
3667 input_section, contents,
3668 symtab_hdr, sym_hashes,
3669 &r_type, GOT_UNKNOWN, rel,
3670 relend, h, r_symndx, TRUE))
3671 return FALSE;
3672
3673 if (r_type != R_X86_64_TLSLD)
3674 {
3675 /* LD->LE transition:
3676 leaq foo@tlsld(%rip), %rdi
3677 call __tls_get_addr@PLT
3678 For 64bit, we change it into:
3679 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3680 For 32bit, we change it into:
3681 nopl 0x0(%rax); movl %fs:0, %eax
3682 Or
3683 leaq foo@tlsld(%rip), %rdi;
3684 call *__tls_get_addr@GOTPCREL(%rip)
3685 which may be converted to
3686 addr32 call __tls_get_addr
3687 For 64bit, we change it into:
3688 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3689 For 32bit, we change it into:
3690 nopw 0x0(%rax); movl %fs:0, %eax
3691 For largepic, change:
3692 leaq foo@tlsgd(%rip), %rdi
3693 movabsq $__tls_get_addr@pltoff, %rax
3694 addq %rbx, %rax
3695 call *%rax
3696 into
3697 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3698 movq %fs:0, %rax */
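/* Size check for the common 64-bit case, as a quick sanity sketch:
     leaq foo@tlsld(%rip), %rdi   48 8d 3d <disp32>   7 bytes
     call __tls_get_addr@PLT      e8 <rel32>          5 bytes
   is 12 bytes in total, and the replacement
     66 66 66 64 48 8b 04 25 <imm32>
   (three data16 prefixes plus movq %fs:0x0, %rax) is also 12 bytes,
   so the surrounding code does not move.  The 13- and 22-byte
   variants below match the indirect-call and largepic sequences in
   the same way.  */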
3699
3700 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3701 if (ABI_64_P (output_bfd))
3702 {
3703 if (contents[rel->r_offset + 5] == 0xb8)
3704 memcpy (contents + rel->r_offset - 3,
3705 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3706 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3707 else if (contents[rel->r_offset + 4] == 0xff
3708 || contents[rel->r_offset + 4] == 0x67)
3709 memcpy (contents + rel->r_offset - 3,
3710 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3711 13);
3712 else
3713 memcpy (contents + rel->r_offset - 3,
3714 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3715 }
3716 else
3717 {
3718 if (contents[rel->r_offset + 4] == 0xff)
3719 memcpy (contents + rel->r_offset - 3,
3720 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3721 13);
3722 else
3723 memcpy (contents + rel->r_offset - 3,
3724 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3725 }
3726 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3727 and R_X86_64_PLTOFF64. */
3728 rel++;
3729 wrel++;
3730 continue;
3731 }
3732
3733 if (htab->elf.sgot == NULL)
3734 abort ();
3735
3736 off = htab->tls_ld_or_ldm_got.offset;
3737 if (off & 1)
3738 off &= ~1;
3739 else
3740 {
3741 Elf_Internal_Rela outrel;
3742
3743 if (htab->elf.srelgot == NULL)
3744 abort ();
3745
3746 outrel.r_offset = (htab->elf.sgot->output_section->vma
3747 + htab->elf.sgot->output_offset + off);
3748
3749 bfd_put_64 (output_bfd, 0,
3750 htab->elf.sgot->contents + off);
3751 bfd_put_64 (output_bfd, 0,
3752 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3753 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3754 outrel.r_addend = 0;
3755 elf_append_rela (output_bfd, htab->elf.srelgot,
3756 &outrel);
3757 htab->tls_ld_or_ldm_got.offset |= 1;
3758 }
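/* The low bit of tls_ld_or_ldm_got.offset acts as an "initialized"
   flag for the shared LD/LM GOT slot: once the R_X86_64_DTPMOD64
   relocation has been appended the bit is set, so later uses only
   strip it with off &= ~1 (e.g. a stored value of 0x19 stands for
   slot offset 0x18, already set up).  */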
3759 relocation = htab->elf.sgot->output_section->vma
3760 + htab->elf.sgot->output_offset + off;
3761 unresolved_reloc = FALSE;
3762 break;
3763
3764 case R_X86_64_DTPOFF32:
3765 if (!bfd_link_executable (info)
3766 || (input_section->flags & SEC_CODE) == 0)
3767 relocation -= _bfd_x86_elf_dtpoff_base (info);
3768 else
3769 relocation = elf_x86_64_tpoff (info, relocation);
3770 break;
3771
3772 case R_X86_64_TPOFF32:
3773 case R_X86_64_TPOFF64:
3774 BFD_ASSERT (bfd_link_executable (info));
3775 relocation = elf_x86_64_tpoff (info, relocation);
3776 break;
3777
3778 case R_X86_64_DTPOFF64:
3779 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3780 relocation -= _bfd_x86_elf_dtpoff_base (info);
3781 break;
3782
3783 default:
3784 break;
3785 }
3786
3787 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3788 because such sections are not SEC_ALLOC and thus ld.so will
3789 not process them. */
3790 if (unresolved_reloc
3791 && !((input_section->flags & SEC_DEBUGGING) != 0
3792 && h->def_dynamic)
3793 && _bfd_elf_section_offset (output_bfd, info, input_section,
3794 rel->r_offset) != (bfd_vma) -1)
3795 {
3796 switch (r_type)
3797 {
3798 case R_X86_64_32S:
3799 sec = h->root.u.def.section;
3800 if ((info->nocopyreloc
3801 || (eh->def_protected
3802 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3803 && !(h->root.u.def.section->flags & SEC_CODE))
3804 return elf_x86_64_need_pic (info, input_bfd, input_section,
3805 h, NULL, NULL, howto);
3806 /* Fall through. */
3807
3808 default:
3809 _bfd_error_handler
3810 /* xgettext:c-format */
3811 (_("%pB(%pA+%#" PRIx64 "): "
3812 "unresolvable %s relocation against symbol `%s'"),
3813 input_bfd,
3814 input_section,
3815 (uint64_t) rel->r_offset,
3816 howto->name,
3817 h->root.root.string);
3818 return FALSE;
3819 }
3820 }
3821
3822 do_relocation:
3823 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3824 contents, rel->r_offset,
3825 relocation, rel->r_addend);
3826
3827 check_relocation_error:
3828 if (r != bfd_reloc_ok)
3829 {
3830 const char *name;
3831
3832 if (h != NULL)
3833 name = h->root.root.string;
3834 else
3835 {
3836 name = bfd_elf_string_from_elf_section (input_bfd,
3837 symtab_hdr->sh_link,
3838 sym->st_name);
3839 if (name == NULL)
3840 return FALSE;
3841 if (*name == '\0')
3842 name = bfd_section_name (input_bfd, sec);
3843 }
3844
3845 if (r == bfd_reloc_overflow)
3846 {
3847 if (converted_reloc)
3848 {
3849 info->callbacks->einfo
3850 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3851 return FALSE;
3852 }
3853 (*info->callbacks->reloc_overflow)
3854 (info, (h ? &h->root : NULL), name, howto->name,
3855 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3856 }
3857 else
3858 {
3859 _bfd_error_handler
3860 /* xgettext:c-format */
3861 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3862 input_bfd, input_section,
3863 (uint64_t) rel->r_offset, name, (int) r);
3864 return FALSE;
3865 }
3866 }
3867
3868 if (wrel != rel)
3869 *wrel = *rel;
3870 }
3871
3872 if (wrel != rel)
3873 {
3874 Elf_Internal_Shdr *rel_hdr;
3875 size_t deleted = rel - wrel;
3876
3877 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3878 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3879 if (rel_hdr->sh_size == 0)
3880 {
3881 /* It is too late to remove an empty reloc section. Leave
3882 one NONE reloc.
3883 ??? What is wrong with an empty section??? */
3884 rel_hdr->sh_size = rel_hdr->sh_entsize;
3885 deleted -= 1;
3886 }
3887 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3888 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3889 input_section->reloc_count -= deleted;
3890 }
3891
3892 return TRUE;
3893 }
3894
3895 /* Finish up dynamic symbol handling. We set the contents of various
3896 dynamic sections here. */
3897
3898 static bfd_boolean
3899 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3900 struct bfd_link_info *info,
3901 struct elf_link_hash_entry *h,
3902 Elf_Internal_Sym *sym)
3903 {
3904 struct elf_x86_link_hash_table *htab;
3905 bfd_boolean use_plt_second;
3906 struct elf_x86_link_hash_entry *eh;
3907 bfd_boolean local_undefweak;
3908
3909 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3910 if (htab == NULL)
3911 return FALSE;
3912
3913 /* Use the second PLT section only if there is a .plt section. */
3914 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3915
3916 eh = (struct elf_x86_link_hash_entry *) h;
3917 if (eh->no_finish_dynamic_symbol)
3918 abort ();
3919
3920 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3921 resolved undefined weak symbols in an executable so that their
3922 references have value 0 at run-time. */
3923 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3924
3925 if (h->plt.offset != (bfd_vma) -1)
3926 {
3927 bfd_vma plt_index;
3928 bfd_vma got_offset, plt_offset;
3929 Elf_Internal_Rela rela;
3930 bfd_byte *loc;
3931 asection *plt, *gotplt, *relplt, *resolved_plt;
3932 const struct elf_backend_data *bed;
3933 bfd_vma plt_got_pcrel_offset;
3934
3935 /* When building a static executable, use .iplt, .igot.plt and
3936 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3937 if (htab->elf.splt != NULL)
3938 {
3939 plt = htab->elf.splt;
3940 gotplt = htab->elf.sgotplt;
3941 relplt = htab->elf.srelplt;
3942 }
3943 else
3944 {
3945 plt = htab->elf.iplt;
3946 gotplt = htab->elf.igotplt;
3947 relplt = htab->elf.irelplt;
3948 }
3949
3950 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3951
3952 /* Get the index in the procedure linkage table which
3953 corresponds to this symbol. This is the index of this symbol
3954 in all the symbols for which we are making plt entries. The
3955 first entry in the procedure linkage table is reserved.
3956
3957 Get the offset into the .got table of the entry that
3958 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3959 bytes. The first three are reserved for the dynamic linker.
3960
3961 For static executables, we don't reserve anything. */
3962
3963 if (plt == htab->elf.splt)
3964 {
3965 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3966 - htab->plt.has_plt0);
3967 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3968 }
3969 else
3970 {
3971 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3972 got_offset = got_offset * GOT_ENTRY_SIZE;
3973 }
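/* A worked example, assuming the usual 16-byte lazy PLT entry with
   PLT0 present: a symbol at h->plt.offset == 0x30 has PLT index
   0x30 / 16 - 1 = 2, and its .got.plt slot is (2 + 3) * 8 = 0x28,
   i.e. the slot after the three reserved entries.  In the .iplt
   case there is no PLT0 and nothing reserved, so index 2 would map
   directly to slot 2 * 8 = 0x10.  */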
3974
3975 /* Fill in the entry in the procedure linkage table. */
3976 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3977 htab->plt.plt_entry_size);
3978 if (use_plt_second)
3979 {
3980 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3981 htab->non_lazy_plt->plt_entry,
3982 htab->non_lazy_plt->plt_entry_size);
3983
3984 resolved_plt = htab->plt_second;
3985 plt_offset = eh->plt_second.offset;
3986 }
3987 else
3988 {
3989 resolved_plt = plt;
3990 plt_offset = h->plt.offset;
3991 }
3992
3993 /* Insert the relocation positions of the plt section. */
3994
3995 /* Put in the PC-relative offset to the GOT entry; the displacement
3996 is relative to the end of the instruction that references it. */
3997 plt_got_pcrel_offset = (gotplt->output_section->vma
3998 + gotplt->output_offset
3999 + got_offset
4000 - resolved_plt->output_section->vma
4001 - resolved_plt->output_offset
4002 - plt_offset
4003 - htab->plt.plt_got_insn_size);
4004
4005 /* Check PC-relative offset overflow in PLT entry. */
4006 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4007 /* xgettext:c-format */
4008 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4009 output_bfd, h->root.root.string);
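/* The unsigned test above is the usual idiom for "does not fit in a
   signed 32-bit displacement": adding 0x80000000 maps the valid range
   [-0x80000000, 0x7fffffff] onto [0, 0xffffffff], so for example an
   offset of 0x80000000 (just out of range) becomes 0x100000000 and is
   reported, while -0x80000000 wraps to 0 and is accepted.  */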
4010
4011 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4012 (resolved_plt->contents + plt_offset
4013 + htab->plt.plt_got_offset));
4014
4015 /* Fill in the entry in the global offset table, initially this
4016 points to the second part of the PLT entry. Leave the entry
4017 as zero for an undefined weak symbol in PIE; no PLT relocation is
4018 emitted against an undefined weak symbol in PIE. */
4019 if (!local_undefweak)
4020 {
4021 if (htab->plt.has_plt0)
4022 bfd_put_64 (output_bfd, (plt->output_section->vma
4023 + plt->output_offset
4024 + h->plt.offset
4025 + htab->lazy_plt->plt_lazy_offset),
4026 gotplt->contents + got_offset);
4027
4028 /* Fill in the entry in the .rela.plt section. */
4029 rela.r_offset = (gotplt->output_section->vma
4030 + gotplt->output_offset
4031 + got_offset);
4032 if (PLT_LOCAL_IFUNC_P (info, h))
4033 {
4034 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4035 h->root.root.string,
4036 h->root.u.def.section->owner);
4037
4038 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4039 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4040 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4041 rela.r_addend = (h->root.u.def.value
4042 + h->root.u.def.section->output_section->vma
4043 + h->root.u.def.section->output_offset);
4044 /* R_X86_64_IRELATIVE comes last. */
4045 plt_index = htab->next_irelative_index--;
4046 }
4047 else
4048 {
4049 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4050 rela.r_addend = 0;
4051 plt_index = htab->next_jump_slot_index++;
4052 }
4053
4054 /* Don't fill the second and third slots of the PLT entry for
4055 static executables or when PLT0 is not used. */
4056 if (plt == htab->elf.splt && htab->plt.has_plt0)
4057 {
4058 bfd_vma plt0_offset
4059 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4060
4061 /* Put relocation index. */
4062 bfd_put_32 (output_bfd, plt_index,
4063 (plt->contents + h->plt.offset
4064 + htab->lazy_plt->plt_reloc_offset));
4065
4066 /* Put in the offset for jmp .PLT0 and check for overflow. We don't
4067 check the relocation index for overflow since the branch displacement
4068 will overflow first. */
4069 if (plt0_offset > 0x80000000)
4070 /* xgettext:c-format */
4071 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4072 output_bfd, h->root.root.string);
4073 bfd_put_32 (output_bfd, - plt0_offset,
4074 (plt->contents + h->plt.offset
4075 + htab->lazy_plt->plt_plt_offset));
4076 }
4077
4078 bed = get_elf_backend_data (output_bfd);
4079 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4080 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4081 }
4082 }
4083 else if (eh->plt_got.offset != (bfd_vma) -1)
4084 {
4085 bfd_vma got_offset, plt_offset;
4086 asection *plt, *got;
4087 bfd_boolean got_after_plt;
4088 int32_t got_pcrel_offset;
4089
4090 /* Set the entry in the GOT procedure linkage table. */
4091 plt = htab->plt_got;
4092 got = htab->elf.sgot;
4093 got_offset = h->got.offset;
4094
4095 if (got_offset == (bfd_vma) -1
4096 || (h->type == STT_GNU_IFUNC && h->def_regular)
4097 || plt == NULL
4098 || got == NULL)
4099 abort ();
4100
4101 /* Use the non-lazy PLT entry template for the GOT PLT since they
4102 are identical. */
4103 /* Fill in the entry in the GOT procedure linkage table. */
4104 plt_offset = eh->plt_got.offset;
4105 memcpy (plt->contents + plt_offset,
4106 htab->non_lazy_plt->plt_entry,
4107 htab->non_lazy_plt->plt_entry_size);
4108
4109 /* Put in the PC-relative offset to the GOT entry; the displacement
4110 is relative to the end of the instruction that references it. */
4111 got_pcrel_offset = (got->output_section->vma
4112 + got->output_offset
4113 + got_offset
4114 - plt->output_section->vma
4115 - plt->output_offset
4116 - plt_offset
4117 - htab->non_lazy_plt->plt_got_insn_size);
4118
4119 /* Check PC-relative offset overflow in GOT PLT entry. */
4120 got_after_plt = got->output_section->vma > plt->output_section->vma;
4121 if ((got_after_plt && got_pcrel_offset < 0)
4122 || (!got_after_plt && got_pcrel_offset > 0))
4123 /* xgettext:c-format */
4124 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4125 output_bfd, h->root.root.string);
4126
4127 bfd_put_32 (output_bfd, got_pcrel_offset,
4128 (plt->contents + plt_offset
4129 + htab->non_lazy_plt->plt_got_offset));
4130 }
4131
4132 if (!local_undefweak
4133 && !h->def_regular
4134 && (h->plt.offset != (bfd_vma) -1
4135 || eh->plt_got.offset != (bfd_vma) -1))
4136 {
4137 /* Mark the symbol as undefined, rather than as defined in
4138 the .plt section. Leave the value if there were any
4139 relocations where pointer equality matters (this is a clue
4140 for the dynamic linker, to make function pointer
4141 comparisons work between an application and shared
4142 library), otherwise set it to zero. If a function is only
4143 called from a binary, there is no need to slow down
4144 shared libraries because of that. */
4145 sym->st_shndx = SHN_UNDEF;
4146 if (!h->pointer_equality_needed)
4147 sym->st_value = 0;
4148 }
4149
4150 /* Don't generate a dynamic GOT relocation against an undefined weak
4151 symbol in an executable. */
4152 if (h->got.offset != (bfd_vma) -1
4153 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4154 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4155 && !local_undefweak)
4156 {
4157 Elf_Internal_Rela rela;
4158 asection *relgot = htab->elf.srelgot;
4159
4160 /* This symbol has an entry in the global offset table. Set it
4161 up. */
4162 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4163 abort ();
4164
4165 rela.r_offset = (htab->elf.sgot->output_section->vma
4166 + htab->elf.sgot->output_offset
4167 + (h->got.offset &~ (bfd_vma) 1));
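/* The low bit of h->got.offset is a marker meaning the GOT entry has
   already been filled in during relocation, so it is masked off here
   to recover the real slot offset (e.g. 0x21 stands for slot 0x20).  */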
4168
4169 /* If this is a static link, or it is a -Bsymbolic link and the
4170 symbol is defined locally or was forced to be local because
4171 of a version file, we just want to emit a RELATIVE reloc.
4172 The entry in the global offset table will already have been
4173 initialized in the relocate_section function. */
4174 if (h->def_regular
4175 && h->type == STT_GNU_IFUNC)
4176 {
4177 if (h->plt.offset == (bfd_vma) -1)
4178 {
4179 /* STT_GNU_IFUNC is referenced without PLT. */
4180 if (htab->elf.splt == NULL)
4181 {
4182 /* Use the .rel[a].iplt section to store .got relocations
4183 in a static executable. */
4184 relgot = htab->elf.irelplt;
4185 }
4186 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4187 {
4188 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4189 h->root.root.string,
4190 h->root.u.def.section->owner);
4191
4192 rela.r_info = htab->r_info (0,
4193 R_X86_64_IRELATIVE);
4194 rela.r_addend = (h->root.u.def.value
4195 + h->root.u.def.section->output_section->vma
4196 + h->root.u.def.section->output_offset);
4197 }
4198 else
4199 goto do_glob_dat;
4200 }
4201 else if (bfd_link_pic (info))
4202 {
4203 /* Generate R_X86_64_GLOB_DAT. */
4204 goto do_glob_dat;
4205 }
4206 else
4207 {
4208 asection *plt;
4209 bfd_vma plt_offset;
4210
4211 if (!h->pointer_equality_needed)
4212 abort ();
4213
4214 /* For a non-shared object, we can't use .got.plt, which
4215 contains the real function address, if we need pointer
4216 equality. We load the GOT entry with the PLT entry address instead. */
4217 if (htab->plt_second != NULL)
4218 {
4219 plt = htab->plt_second;
4220 plt_offset = eh->plt_second.offset;
4221 }
4222 else
4223 {
4224 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4225 plt_offset = h->plt.offset;
4226 }
4227 bfd_put_64 (output_bfd, (plt->output_section->vma
4228 + plt->output_offset
4229 + plt_offset),
4230 htab->elf.sgot->contents + h->got.offset);
4231 return TRUE;
4232 }
4233 }
4234 else if (bfd_link_pic (info)
4235 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4236 {
4237 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4238 return FALSE;
4239 BFD_ASSERT((h->got.offset & 1) != 0);
4240 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4241 rela.r_addend = (h->root.u.def.value
4242 + h->root.u.def.section->output_section->vma
4243 + h->root.u.def.section->output_offset);
4244 }
4245 else
4246 {
4247 BFD_ASSERT((h->got.offset & 1) == 0);
4248 do_glob_dat:
4249 bfd_put_64 (output_bfd, (bfd_vma) 0,
4250 htab->elf.sgot->contents + h->got.offset);
4251 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4252 rela.r_addend = 0;
4253 }
4254
4255 elf_append_rela (output_bfd, relgot, &rela);
4256 }
4257
4258 if (h->needs_copy)
4259 {
4260 Elf_Internal_Rela rela;
4261 asection *s;
4262
4263 /* This symbol needs a copy reloc. Set it up. */
4264 VERIFY_COPY_RELOC (h, htab)
4265
4266 rela.r_offset = (h->root.u.def.value
4267 + h->root.u.def.section->output_section->vma
4268 + h->root.u.def.section->output_offset);
4269 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4270 rela.r_addend = 0;
4271 if (h->root.u.def.section == htab->elf.sdynrelro)
4272 s = htab->elf.sreldynrelro;
4273 else
4274 s = htab->elf.srelbss;
4275 elf_append_rela (output_bfd, s, &rela);
4276 }
4277
4278 return TRUE;
4279 }
4280
4281 /* Finish up local dynamic symbol handling. We set the contents of
4282 various dynamic sections here. */
4283
4284 static bfd_boolean
4285 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4286 {
4287 struct elf_link_hash_entry *h
4288 = (struct elf_link_hash_entry *) *slot;
4289 struct bfd_link_info *info
4290 = (struct bfd_link_info *) inf;
4291
4292 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4293 info, h, NULL);
4294 }
4295
4296 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4297 here since an undefined weak symbol may not be dynamic, in which case
4298 elf_x86_64_finish_dynamic_symbol is not called for it. */
4299
4300 static bfd_boolean
4301 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4302 void *inf)
4303 {
4304 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4305 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4306
4307 if (h->root.type != bfd_link_hash_undefweak
4308 || h->dynindx != -1)
4309 return TRUE;
4310
4311 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4312 info, h, NULL);
4313 }
4314
4315 /* Used to decide how to sort relocs in an optimal manner for the
4316 dynamic linker, before writing them out. */
4317
4318 static enum elf_reloc_type_class
4319 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4320 const asection *rel_sec ATTRIBUTE_UNUSED,
4321 const Elf_Internal_Rela *rela)
4322 {
4323 bfd *abfd = info->output_bfd;
4324 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4325 struct elf_x86_link_hash_table *htab
4326 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4327
4328 if (htab->elf.dynsym != NULL
4329 && htab->elf.dynsym->contents != NULL)
4330 {
4331 /* Check relocation against STT_GNU_IFUNC symbol if there are
4332 dynamic symbols. */
4333 unsigned long r_symndx = htab->r_sym (rela->r_info);
4334 if (r_symndx != STN_UNDEF)
4335 {
4336 Elf_Internal_Sym sym;
4337 if (!bed->s->swap_symbol_in (abfd,
4338 (htab->elf.dynsym->contents
4339 + r_symndx * bed->s->sizeof_sym),
4340 0, &sym))
4341 abort ();
4342
4343 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4344 return reloc_class_ifunc;
4345 }
4346 }
4347
4348 switch ((int) ELF32_R_TYPE (rela->r_info))
4349 {
4350 case R_X86_64_IRELATIVE:
4351 return reloc_class_ifunc;
4352 case R_X86_64_RELATIVE:
4353 case R_X86_64_RELATIVE64:
4354 return reloc_class_relative;
4355 case R_X86_64_JUMP_SLOT:
4356 return reloc_class_plt;
4357 case R_X86_64_COPY:
4358 return reloc_class_copy;
4359 default:
4360 return reloc_class_normal;
4361 }
4362 }
4363
4364 /* Finish up the dynamic sections. */
4365
4366 static bfd_boolean
4367 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4368 struct bfd_link_info *info)
4369 {
4370 struct elf_x86_link_hash_table *htab;
4371
4372 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4373 if (htab == NULL)
4374 return FALSE;
4375
4376 if (! htab->elf.dynamic_sections_created)
4377 return TRUE;
4378
4379 if (htab->elf.splt && htab->elf.splt->size > 0)
4380 {
4381 elf_section_data (htab->elf.splt->output_section)
4382 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4383
4384 if (htab->plt.has_plt0)
4385 {
4386 /* Fill in the special first entry in the procedure linkage
4387 table. */
4388 memcpy (htab->elf.splt->contents,
4389 htab->lazy_plt->plt0_entry,
4390 htab->lazy_plt->plt0_entry_size);
4391 /* Add the offset for pushq GOT+8(%rip); since the instruction
4392 uses 6 bytes, subtract this value. */
4393 bfd_put_32 (output_bfd,
4394 (htab->elf.sgotplt->output_section->vma
4395 + htab->elf.sgotplt->output_offset
4396 + 8
4397 - htab->elf.splt->output_section->vma
4398 - htab->elf.splt->output_offset
4399 - 6),
4400 (htab->elf.splt->contents
4401 + htab->lazy_plt->plt0_got1_offset));
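/* Illustrative arithmetic, with made-up addresses: pushq GOT+8(%rip)
   is encoded as ff 35 <disp32> (6 bytes) and the displacement is
   relative to the end of the instruction, so with .got.plt at
   0x601000 and .plt at 0x400400 the stored value would be
   0x601008 - (0x400400 + 6) = 0x200c02.  */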
4402 /* Add offset for the PC-relative instruction accessing
4403 GOT+16, subtracting the offset to the end of that
4404 instruction. */
4405 bfd_put_32 (output_bfd,
4406 (htab->elf.sgotplt->output_section->vma
4407 + htab->elf.sgotplt->output_offset
4408 + 16
4409 - htab->elf.splt->output_section->vma
4410 - htab->elf.splt->output_offset
4411 - htab->lazy_plt->plt0_got2_insn_end),
4412 (htab->elf.splt->contents
4413 + htab->lazy_plt->plt0_got2_offset));
4414 }
4415
4416 if (htab->tlsdesc_plt)
4417 {
4418 /* The TLSDESC entry in a lazy procedure linkage table. */
4419 static const bfd_byte tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
4420 {
4421 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
4422 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
4423 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
4424 };
4425
4426 bfd_put_64 (output_bfd, (bfd_vma) 0,
4427 htab->elf.sgot->contents + htab->tlsdesc_got);
4428
4429 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4430 tlsdesc_plt_entry, LAZY_PLT_ENTRY_SIZE);
4431
4432 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4433 bytes and the instruction uses 6 bytes, subtract these
4434 values. */
4435 bfd_put_32 (output_bfd,
4436 (htab->elf.sgotplt->output_section->vma
4437 + htab->elf.sgotplt->output_offset
4438 + 8
4439 - htab->elf.splt->output_section->vma
4440 - htab->elf.splt->output_offset
4441 - htab->tlsdesc_plt
4442 - 4 - 6),
4443 (htab->elf.splt->contents
4444 + htab->tlsdesc_plt
4445 + 4 + 2));
4446 /* Add offset for indirect branch via GOT+TDG, where TDG
4447 stands for htab->tlsdesc_got, subtracting the offset
4448 to the end of that instruction. */
4449 bfd_put_32 (output_bfd,
4450 (htab->elf.sgot->output_section->vma
4451 + htab->elf.sgot->output_offset
4452 + htab->tlsdesc_got
4453 - htab->elf.splt->output_section->vma
4454 - htab->elf.splt->output_offset
4455 - htab->tlsdesc_plt
4456 - 4 - 6 - 6),
4457 (htab->elf.splt->contents
4458 + htab->tlsdesc_plt + 4 + 6 + 2));
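/* The offsets above follow from tlsdesc_plt_entry: endbr64 occupies
   4 bytes and the pushq opcode bytes ff 35 another 2, so its disp32
   lives at entry + 4 + 2 and the instruction ends at entry + 4 + 6;
   the following jmpq opcode ff 25 puts its disp32 at
   entry + 4 + 6 + 2 and ends at entry + 4 + 6 + 6, which is why
   "- 4 - 6" and "- 4 - 6 - 6" are subtracted from the respective
   targets.  */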
4459 }
4460 }
4461
4462 /* Fill PLT entries for undefined weak symbols in PIE. */
4463 if (bfd_link_pie (info))
4464 bfd_hash_traverse (&info->hash->table,
4465 elf_x86_64_pie_finish_undefweak_symbol,
4466 info);
4467
4468 return TRUE;
4469 }
4470
4471 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4472 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4473 It has to be done before elf_link_sort_relocs is called so that
4474 dynamic relocations are properly sorted. */
4475
4476 static bfd_boolean
4477 elf_x86_64_output_arch_local_syms
4478 (bfd *output_bfd ATTRIBUTE_UNUSED,
4479 struct bfd_link_info *info,
4480 void *flaginfo ATTRIBUTE_UNUSED,
4481 int (*func) (void *, const char *,
4482 Elf_Internal_Sym *,
4483 asection *,
4484 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4485 {
4486 struct elf_x86_link_hash_table *htab
4487 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4488 if (htab == NULL)
4489 return FALSE;
4490
4491 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4492 htab_traverse (htab->loc_hash_table,
4493 elf_x86_64_finish_local_dynamic_symbol,
4494 info);
4495
4496 return TRUE;
4497 }
4498
4499 /* Forward declaration. */
4500 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4501
4502 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4503 dynamic relocations. */
4504
4505 static long
4506 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4507 long symcount ATTRIBUTE_UNUSED,
4508 asymbol **syms ATTRIBUTE_UNUSED,
4509 long dynsymcount,
4510 asymbol **dynsyms,
4511 asymbol **ret)
4512 {
4513 long count, i, n;
4514 int j;
4515 bfd_byte *plt_contents;
4516 long relsize;
4517 const struct elf_x86_lazy_plt_layout *lazy_plt;
4518 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4519 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4520 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4521 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4522 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4523 asection *plt;
4524 enum elf_x86_plt_type plt_type;
4525 struct elf_x86_plt plts[] =
4526 {
4527 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4528 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4529 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4530 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4531 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4532 };
4533
4534 *ret = NULL;
4535
4536 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4537 return 0;
4538
4539 if (dynsymcount <= 0)
4540 return 0;
4541
4542 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4543 if (relsize <= 0)
4544 return -1;
4545
4546 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4547 {
4548 lazy_plt = &elf_x86_64_lazy_plt;
4549 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4550 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4551 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4552 if (ABI_64_P (abfd))
4553 {
4554 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4555 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4556 }
4557 else
4558 {
4559 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4560 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4561 }
4562 }
4563 else
4564 {
4565 lazy_plt = &elf_x86_64_nacl_plt;
4566 non_lazy_plt = NULL;
4567 lazy_bnd_plt = NULL;
4568 non_lazy_bnd_plt = NULL;
4569 lazy_ibt_plt = NULL;
4570 non_lazy_ibt_plt = NULL;
4571 }
4572
4573 count = 0;
4574 for (j = 0; plts[j].name != NULL; j++)
4575 {
4576 plt = bfd_get_section_by_name (abfd, plts[j].name);
4577 if (plt == NULL || plt->size == 0)
4578 continue;
4579
4580 /* Get the PLT section contents. */
4581 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4582 if (plt_contents == NULL)
4583 break;
4584 if (!bfd_get_section_contents (abfd, (asection *) plt,
4585 plt_contents, 0, plt->size))
4586 {
4587 free (plt_contents);
4588 break;
4589 }
4590
4591 /* Check what kind of PLT it is. */
4592 plt_type = plt_unknown;
4593 if (plts[j].type == plt_unknown
4594 && (plt->size >= (lazy_plt->plt_entry_size
4595 + lazy_plt->plt_entry_size)))
4596 {
4597 /* Match lazy PLT first. Need to check the first two
4598 instructions. */
4599 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4600 lazy_plt->plt0_got1_offset) == 0)
4601 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4602 2) == 0))
4603 plt_type = plt_lazy;
4604 else if (lazy_bnd_plt != NULL
4605 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4606 lazy_bnd_plt->plt0_got1_offset) == 0)
4607 && (memcmp (plt_contents + 6,
4608 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4609 {
4610 plt_type = plt_lazy | plt_second;
4611 /* The first entry in the lazy IBT PLT is the same as in the
4612 lazy BND PLT. */
4613 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4614 lazy_ibt_plt->plt_entry,
4615 lazy_ibt_plt->plt_got_offset) == 0))
4616 lazy_plt = lazy_ibt_plt;
4617 else
4618 lazy_plt = lazy_bnd_plt;
4619 }
4620 }
4621
4622 if (non_lazy_plt != NULL
4623 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4624 && plt->size >= non_lazy_plt->plt_entry_size)
4625 {
4626 /* Match non-lazy PLT. */
4627 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4628 non_lazy_plt->plt_got_offset) == 0)
4629 plt_type = plt_non_lazy;
4630 }
4631
4632 if (plt_type == plt_unknown || plt_type == plt_second)
4633 {
4634 if (non_lazy_bnd_plt != NULL
4635 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4636 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4637 non_lazy_bnd_plt->plt_got_offset) == 0))
4638 {
4639 /* Match BND PLT. */
4640 plt_type = plt_second;
4641 non_lazy_plt = non_lazy_bnd_plt;
4642 }
4643 else if (non_lazy_ibt_plt != NULL
4644 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4645 && (memcmp (plt_contents,
4646 non_lazy_ibt_plt->plt_entry,
4647 non_lazy_ibt_plt->plt_got_offset) == 0))
4648 {
4649 /* Match IBT PLT. */
4650 plt_type = plt_second;
4651 non_lazy_plt = non_lazy_ibt_plt;
4652 }
4653 }
4654
4655 if (plt_type == plt_unknown)
4656 {
4657 free (plt_contents);
4658 continue;
4659 }
4660
4661 plts[j].sec = plt;
4662 plts[j].type = plt_type;
4663
4664 if ((plt_type & plt_lazy))
4665 {
4666 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4667 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4668 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4669 /* Skip PLT0 in lazy PLT. */
4670 i = 1;
4671 }
4672 else
4673 {
4674 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4675 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4676 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4677 i = 0;
4678 }
4679
4680 /* Skip lazy PLT when the second PLT is used. */
4681 if (plt_type == (plt_lazy | plt_second))
4682 plts[j].count = 0;
4683 else
4684 {
4685 n = plt->size / plts[j].plt_entry_size;
4686 plts[j].count = n;
4687 count += n - i;
4688 }
4689
4690 plts[j].contents = plt_contents;
4691 }
4692
4693 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4694 (bfd_vma) 0, plts, dynsyms,
4695 ret);
4696 }
4697
4698 /* Handle an x86-64 specific section when reading an object file. This
4699 is called when elfcode.h finds a section with an unknown type. */
4700
4701 static bfd_boolean
4702 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4703 const char *name, int shindex)
4704 {
4705 if (hdr->sh_type != SHT_X86_64_UNWIND)
4706 return FALSE;
4707
4708 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4709 return FALSE;
4710
4711 return TRUE;
4712 }
4713
4714 /* Hook called by the linker routine which adds symbols from an object
4715 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4716 of .bss. */
4717
4718 static bfd_boolean
4719 elf_x86_64_add_symbol_hook (bfd *abfd,
4720 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4721 Elf_Internal_Sym *sym,
4722 const char **namep ATTRIBUTE_UNUSED,
4723 flagword *flagsp ATTRIBUTE_UNUSED,
4724 asection **secp,
4725 bfd_vma *valp)
4726 {
4727 asection *lcomm;
4728
4729 switch (sym->st_shndx)
4730 {
4731 case SHN_X86_64_LCOMMON:
4732 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4733 if (lcomm == NULL)
4734 {
4735 lcomm = bfd_make_section_with_flags (abfd,
4736 "LARGE_COMMON",
4737 (SEC_ALLOC
4738 | SEC_IS_COMMON
4739 | SEC_LINKER_CREATED));
4740 if (lcomm == NULL)
4741 return FALSE;
4742 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4743 }
4744 *secp = lcomm;
4745 *valp = sym->st_size;
4746 return TRUE;
4747 }
4748
4749 return TRUE;
4750 }
4751
4752
4753 /* Given a BFD section, try to locate the corresponding ELF section
4754 index. */
4755
4756 static bfd_boolean
4757 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4758 asection *sec, int *index_return)
4759 {
4760 if (sec == &_bfd_elf_large_com_section)
4761 {
4762 *index_return = SHN_X86_64_LCOMMON;
4763 return TRUE;
4764 }
4765 return FALSE;
4766 }
4767
4768 /* Process a symbol. */
4769
4770 static void
4771 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4772 asymbol *asym)
4773 {
4774 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4775
4776 switch (elfsym->internal_elf_sym.st_shndx)
4777 {
4778 case SHN_X86_64_LCOMMON:
4779 asym->section = &_bfd_elf_large_com_section;
4780 asym->value = elfsym->internal_elf_sym.st_size;
4781 /* Common symbol doesn't set BSF_GLOBAL. */
4782 asym->flags &= ~BSF_GLOBAL;
4783 break;
4784 }
4785 }
4786
4787 static bfd_boolean
4788 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4789 {
4790 return (sym->st_shndx == SHN_COMMON
4791 || sym->st_shndx == SHN_X86_64_LCOMMON);
4792 }
4793
4794 static unsigned int
4795 elf_x86_64_common_section_index (asection *sec)
4796 {
4797 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4798 return SHN_COMMON;
4799 else
4800 return SHN_X86_64_LCOMMON;
4801 }
4802
4803 static asection *
4804 elf_x86_64_common_section (asection *sec)
4805 {
4806 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4807 return bfd_com_section_ptr;
4808 else
4809 return &_bfd_elf_large_com_section;
4810 }
4811
4812 static bfd_boolean
4813 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4814 const Elf_Internal_Sym *sym,
4815 asection **psec,
4816 bfd_boolean newdef,
4817 bfd_boolean olddef,
4818 bfd *oldbfd,
4819 const asection *oldsec)
4820 {
4821 /* A normal common symbol and a large common symbol result in a
4822 normal common symbol. We turn the large common symbol into a
4823 normal one. */
4824 if (!olddef
4825 && h->root.type == bfd_link_hash_common
4826 && !newdef
4827 && bfd_is_com_section (*psec)
4828 && oldsec != *psec)
4829 {
4830 if (sym->st_shndx == SHN_COMMON
4831 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4832 {
4833 h->root.u.c.p->section
4834 = bfd_make_section_old_way (oldbfd, "COMMON");
4835 h->root.u.c.p->section->flags = SEC_ALLOC;
4836 }
4837 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4838 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4839 *psec = bfd_com_section_ptr;
4840 }
4841
4842 return TRUE;
4843 }
4844
4845 static int
4846 elf_x86_64_additional_program_headers (bfd *abfd,
4847 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4848 {
4849 asection *s;
4850 int count = 0;
4851
4852 /* Check to see if we need a large readonly segment. */
4853 s = bfd_get_section_by_name (abfd, ".lrodata");
4854 if (s && (s->flags & SEC_LOAD))
4855 count++;
4856
4857 /* Check to see if we need a large data segment. Since the .lbss section
4858 is placed right after the .bss section, there should be no need for
4859 a large data segment just because of .lbss. */
4860 s = bfd_get_section_by_name (abfd, ".ldata");
4861 if (s && (s->flags & SEC_LOAD))
4862 count++;
4863
4864 return count;
4865 }
4866
4867 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4868
4869 static bfd_boolean
4870 elf_x86_64_relocs_compatible (const bfd_target *input,
4871 const bfd_target *output)
4872 {
4873 return ((xvec_get_elf_backend_data (input)->s->elfclass
4874 == xvec_get_elf_backend_data (output)->s->elfclass)
4875 && _bfd_elf_relocs_compatible (input, output));
4876 }
4877
4878 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4879 with GNU properties if found. Otherwise, return NULL. */
4880
4881 static bfd *
4882 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4883 {
4884 struct elf_x86_init_table init_table;
4885
4886 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4887 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4888 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4889 != (int) R_X86_64_GNU_VTINHERIT)
4890 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4891 != (int) R_X86_64_GNU_VTENTRY))
4892 abort ();
4893
4894 /* This is unused for x86-64. */
4895 init_table.plt0_pad_byte = 0x90;
4896
4897 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4898 {
4899 if (info->bndplt)
4900 {
4901 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4902 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4903 }
4904 else
4905 {
4906 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4907 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4908 }
4909
4910 if (ABI_64_P (info->output_bfd))
4911 {
4912 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4913 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4914 }
4915 else
4916 {
4917 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4918 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4919 }
4920 }
4921 else
4922 {
4923 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4924 init_table.non_lazy_plt = NULL;
4925 init_table.lazy_ibt_plt = NULL;
4926 init_table.non_lazy_ibt_plt = NULL;
4927 }
4928
4929 if (ABI_64_P (info->output_bfd))
4930 {
4931 init_table.r_info = elf64_r_info;
4932 init_table.r_sym = elf64_r_sym;
4933 }
4934 else
4935 {
4936 init_table.r_info = elf32_r_info;
4937 init_table.r_sym = elf32_r_sym;
4938 }
4939
4940 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4941 }
4942
4943 static const struct bfd_elf_special_section
4944 elf_x86_64_special_sections[]=
4945 {
4946 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4947 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4948 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4949 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4950 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4951 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4952 { NULL, 0, 0, 0, 0 }
4953 };
4954
4955 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4956 #define TARGET_LITTLE_NAME "elf64-x86-64"
4957 #define ELF_ARCH bfd_arch_i386
4958 #define ELF_TARGET_ID X86_64_ELF_DATA
4959 #define ELF_MACHINE_CODE EM_X86_64
4960 #if DEFAULT_LD_Z_SEPARATE_CODE
4961 # define ELF_MAXPAGESIZE 0x1000
4962 #else
4963 # define ELF_MAXPAGESIZE 0x200000
4964 #endif
4965 #define ELF_MINPAGESIZE 0x1000
4966 #define ELF_COMMONPAGESIZE 0x1000
4967
4968 #define elf_backend_can_gc_sections 1
4969 #define elf_backend_can_refcount 1
4970 #define elf_backend_want_got_plt 1
4971 #define elf_backend_plt_readonly 1
4972 #define elf_backend_want_plt_sym 0
4973 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
4974 #define elf_backend_rela_normal 1
4975 #define elf_backend_plt_alignment 4
4976 #define elf_backend_extern_protected_data 1
4977 #define elf_backend_caches_rawsize 1
4978 #define elf_backend_dtrel_excludes_plt 1
4979 #define elf_backend_want_dynrelro 1
4980
4981 #define elf_info_to_howto elf_x86_64_info_to_howto
4982
4983 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4984 #define bfd_elf64_bfd_reloc_name_lookup \
4985 elf_x86_64_reloc_name_lookup
4986
4987 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4988 #define elf_backend_check_relocs elf_x86_64_check_relocs
4989 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4990 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4991 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4992 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4993 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4994 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4995 #ifdef CORE_HEADER
4996 #define elf_backend_write_core_note elf_x86_64_write_core_note
4997 #endif
4998 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4999 #define elf_backend_relocate_section elf_x86_64_relocate_section
5000 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5001 #define elf_backend_object_p elf64_x86_64_elf_object_p
5002 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5003
5004 #define elf_backend_section_from_shdr \
5005 elf_x86_64_section_from_shdr
5006
5007 #define elf_backend_section_from_bfd_section \
5008 elf_x86_64_elf_section_from_bfd_section
5009 #define elf_backend_add_symbol_hook \
5010 elf_x86_64_add_symbol_hook
5011 #define elf_backend_symbol_processing \
5012 elf_x86_64_symbol_processing
5013 #define elf_backend_common_section_index \
5014 elf_x86_64_common_section_index
5015 #define elf_backend_common_section \
5016 elf_x86_64_common_section
5017 #define elf_backend_common_definition \
5018 elf_x86_64_common_definition
5019 #define elf_backend_merge_symbol \
5020 elf_x86_64_merge_symbol
5021 #define elf_backend_special_sections \
5022 elf_x86_64_special_sections
5023 #define elf_backend_additional_program_headers \
5024 elf_x86_64_additional_program_headers
5025 #define elf_backend_setup_gnu_properties \
5026 elf_x86_64_link_setup_gnu_properties
5027 #define elf_backend_hide_symbol \
5028 _bfd_x86_elf_hide_symbol
5029
5030 #undef elf64_bed
5031 #define elf64_bed elf64_x86_64_bed
5032
5033 #include "elf64-target.h"
5034
5035 /* CloudABI support. */
5036
5037 #undef TARGET_LITTLE_SYM
5038 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5039 #undef TARGET_LITTLE_NAME
5040 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5041
5042 #undef ELF_OSABI
5043 #define ELF_OSABI ELFOSABI_CLOUDABI
5044
5045 #undef elf64_bed
5046 #define elf64_bed elf64_x86_64_cloudabi_bed
5047
5048 #include "elf64-target.h"
5049
5050 /* FreeBSD support. */
5051
5052 #undef TARGET_LITTLE_SYM
5053 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5054 #undef TARGET_LITTLE_NAME
5055 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5056
5057 #undef ELF_OSABI
5058 #define ELF_OSABI ELFOSABI_FREEBSD
5059
5060 #undef elf64_bed
5061 #define elf64_bed elf64_x86_64_fbsd_bed
5062
5063 #include "elf64-target.h"
5064
5065 /* Solaris 2 support. */
5066
5067 #undef TARGET_LITTLE_SYM
5068 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5069 #undef TARGET_LITTLE_NAME
5070 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5071
5072 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5073 {
5074 is_solaris /* os */
5075 };
5076
5077 #undef elf_backend_arch_data
5078 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5079
5080 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5081 objects won't be recognized. */
5082 #undef ELF_OSABI
5083
5084 #undef elf64_bed
5085 #define elf64_bed elf64_x86_64_sol2_bed
5086
5087 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5088 boundary. */
5089 #undef elf_backend_static_tls_alignment
5090 #define elf_backend_static_tls_alignment 16
5091
5092 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5093
5094 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5095 File, p.63. */
5096 #undef elf_backend_want_plt_sym
5097 #define elf_backend_want_plt_sym 1
5098
5099 #undef elf_backend_strtab_flags
5100 #define elf_backend_strtab_flags SHF_STRINGS
5101
5102 static bfd_boolean
5103 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5104 bfd *obfd ATTRIBUTE_UNUSED,
5105 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5106 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5107 {
5108 /* PR 19938: FIXME: Need to add code for setting the sh_info
5109 and sh_link fields of Solaris specific section types. */
5110 return FALSE;
5111 }
5112
5113 #undef elf_backend_copy_special_section_fields
5114 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5115
5116 #include "elf64-target.h"
5117
5118 /* Native Client support. */
5119
5120 static bfd_boolean
5121 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5122 {
5123 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5124 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5125 return TRUE;
5126 }
5127
5128 #undef TARGET_LITTLE_SYM
5129 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5130 #undef TARGET_LITTLE_NAME
5131 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5132 #undef elf64_bed
5133 #define elf64_bed elf64_x86_64_nacl_bed
5134
5135 #undef ELF_MAXPAGESIZE
5136 #undef ELF_MINPAGESIZE
5137 #undef ELF_COMMONPAGESIZE
5138 #define ELF_MAXPAGESIZE 0x10000
5139 #define ELF_MINPAGESIZE 0x10000
5140 #define ELF_COMMONPAGESIZE 0x10000
5141
5142 /* Restore defaults. */
5143 #undef ELF_OSABI
5144 #undef elf_backend_static_tls_alignment
5145 #undef elf_backend_want_plt_sym
5146 #define elf_backend_want_plt_sym 0
5147 #undef elf_backend_strtab_flags
5148 #undef elf_backend_copy_special_section_fields
5149
5150 /* NaCl uses substantially different PLT entries for the same effects. */
5151
5152 #undef elf_backend_plt_alignment
5153 #define elf_backend_plt_alignment 5
5154 #define NACL_PLT_ENTRY_SIZE 64
5155 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
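/* A short decode of the masking sequence used below, e.g.
     41 83 e3 e0          and $0xffffffe0, %r11d
   The imm8 0xe0 is sign-extended to 0xffffffe0, clearing the low five
   bits of the target so that every computed branch lands on a 32-byte
   NaCl bundle boundary (0x1005f & ~0x1f == 0x10040, for instance).  */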
5156
5157 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5158 {
5159 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5160 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5161 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5162 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5163 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5164
5165 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5166 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5167
5168 /* 32 bytes of nop to pad out to the standard size. */
5169 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5170 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5171 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5172 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5173 0x66, /* excess data16 prefix */
5174 0x90 /* nop */
5175 };
5176
5177 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5178 {
5179 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5180 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5181 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5182 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5183
5184 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5185 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5186 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5187
5188 /* Lazy GOT entries point here (32-byte aligned). */
5189 0x68, /* pushq immediate */
5190 0, 0, 0, 0, /* replaced with index into relocation table. */
5191 0xe9, /* jmp relative */
5192 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5193
5194 /* 22 bytes of nop to pad out to the standard size. */
5195 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5196 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5197 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5198 };
5199
5200 /* .eh_frame covering the .plt section. */
5201
5202 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5203 {
5204 #if (PLT_CIE_LENGTH != 20 \
5205 || PLT_FDE_LENGTH != 36 \
5206 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5207 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5208 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5209 #endif
5210 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5211 0, 0, 0, 0, /* CIE ID */
5212 1, /* CIE version */
5213 'z', 'R', 0, /* Augmentation string */
5214 1, /* Code alignment factor */
5215 0x78, /* Data alignment factor */
5216 16, /* Return address column */
5217 1, /* Augmentation size */
5218 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5219 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5220 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5221 DW_CFA_nop, DW_CFA_nop,
5222
5223 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5224 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5225 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5226 0, 0, 0, 0, /* .plt size goes here */
5227 0, /* Augmentation size */
5228 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5229 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5230 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5231 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5232 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5233 13, /* Block length */
5234 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5235 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5236 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5237 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5238 DW_CFA_nop, DW_CFA_nop
5239 };
5240
5241 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5242 {
5243 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5244 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5245 elf_x86_64_nacl_plt_entry, /* plt_entry */
5246 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5247 2, /* plt0_got1_offset */
5248 9, /* plt0_got2_offset */
5249 13, /* plt0_got2_insn_end */
5250 3, /* plt_got_offset */
5251 33, /* plt_reloc_offset */
5252 38, /* plt_plt_offset */
5253 7, /* plt_got_insn_size */
5254 42, /* plt_plt_insn_end */
5255 32, /* plt_lazy_offset */
5256 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5257 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5258 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5259 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5260 };
5261
5262 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5263 {
5264 is_nacl /* os */
5265 };
5266
5267 #undef elf_backend_arch_data
5268 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5269
5270 #undef elf_backend_object_p
5271 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5272 #undef elf_backend_modify_segment_map
5273 #define elf_backend_modify_segment_map nacl_modify_segment_map
5274 #undef elf_backend_modify_program_headers
5275 #define elf_backend_modify_program_headers nacl_modify_program_headers
5276 #undef elf_backend_final_write_processing
5277 #define elf_backend_final_write_processing nacl_final_write_processing
5278
5279 #include "elf64-target.h"
5280
5281 /* Native Client x32 support. */
5282
5283 static bfd_boolean
5284 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5285 {
5286 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5287 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5288 return TRUE;
5289 }
5290
5291 #undef TARGET_LITTLE_SYM
5292 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5293 #undef TARGET_LITTLE_NAME
5294 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5295 #undef elf32_bed
5296 #define elf32_bed elf32_x86_64_nacl_bed
5297
5298 #define bfd_elf32_bfd_reloc_type_lookup \
5299 elf_x86_64_reloc_type_lookup
5300 #define bfd_elf32_bfd_reloc_name_lookup \
5301 elf_x86_64_reloc_name_lookup
5302 #define bfd_elf32_get_synthetic_symtab \
5303 elf_x86_64_get_synthetic_symtab
5304
5305 #undef elf_backend_object_p
5306 #define elf_backend_object_p \
5307 elf32_x86_64_nacl_elf_object_p
5308
5309 #undef elf_backend_bfd_from_remote_memory
5310 #define elf_backend_bfd_from_remote_memory \
5311 _bfd_elf32_bfd_from_remote_memory
5312
5313 #undef elf_backend_size_info
5314 #define elf_backend_size_info \
5315 _bfd_elf32_size_info
5316
5317 #undef elf32_bed
5318 #define elf32_bed elf32_x86_64_bed
5319
5320 #include "elf32-target.h"
5321
5322 /* Restore defaults. */
5323 #undef elf_backend_object_p
5324 #define elf_backend_object_p elf64_x86_64_elf_object_p
5325 #undef elf_backend_bfd_from_remote_memory
5326 #undef elf_backend_size_info
5327 #undef elf_backend_modify_segment_map
5328 #undef elf_backend_modify_program_headers
5329 #undef elf_backend_final_write_processing
5330
5331 /* Intel L1OM support. */
5332
5333 static bfd_boolean
5334 elf64_l1om_elf_object_p (bfd *abfd)
5335 {
5336 /* Set the right machine number for an L1OM elf64 file. */
5337 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5338 return TRUE;
5339 }
5340
5341 #undef TARGET_LITTLE_SYM
5342 #define TARGET_LITTLE_SYM l1om_elf64_vec
5343 #undef TARGET_LITTLE_NAME
5344 #define TARGET_LITTLE_NAME "elf64-l1om"
5345 #undef ELF_ARCH
5346 #define ELF_ARCH bfd_arch_l1om
5347
5348 #undef ELF_MACHINE_CODE
5349 #define ELF_MACHINE_CODE EM_L1OM
5350
5351 #undef ELF_OSABI
5352
5353 #undef elf64_bed
5354 #define elf64_bed elf64_l1om_bed
5355
5356 #undef elf_backend_object_p
5357 #define elf_backend_object_p elf64_l1om_elf_object_p
5358
5359 /* Restore defaults. */
5360 #undef ELF_MAXPAGESIZE
5361 #undef ELF_MINPAGESIZE
5362 #undef ELF_COMMONPAGESIZE
5363 #if DEFAULT_LD_Z_SEPARATE_CODE
5364 # define ELF_MAXPAGESIZE 0x1000
5365 #else
5366 # define ELF_MAXPAGESIZE 0x200000
5367 #endif
5368 #define ELF_MINPAGESIZE 0x1000
5369 #define ELF_COMMONPAGESIZE 0x1000
5370 #undef elf_backend_plt_alignment
5371 #define elf_backend_plt_alignment 4
5372 #undef elf_backend_arch_data
5373 #define elf_backend_arch_data &elf_x86_64_arch_bed
5374
5375 #include "elf64-target.h"
5376
5377 /* FreeBSD L1OM support. */
5378
5379 #undef TARGET_LITTLE_SYM
5380 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5381 #undef TARGET_LITTLE_NAME
5382 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5383
5384 #undef ELF_OSABI
5385 #define ELF_OSABI ELFOSABI_FREEBSD
5386
5387 #undef elf64_bed
5388 #define elf64_bed elf64_l1om_fbsd_bed
5389
5390 #include "elf64-target.h"
5391
5392 /* Intel K1OM support. */
5393
5394 static bfd_boolean
5395 elf64_k1om_elf_object_p (bfd *abfd)
5396 {
5397 /* Set the right machine number for a K1OM elf64 file. */
5398 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5399 return TRUE;
5400 }
5401
5402 #undef TARGET_LITTLE_SYM
5403 #define TARGET_LITTLE_SYM k1om_elf64_vec
5404 #undef TARGET_LITTLE_NAME
5405 #define TARGET_LITTLE_NAME "elf64-k1om"
5406 #undef ELF_ARCH
5407 #define ELF_ARCH bfd_arch_k1om
5408
5409 #undef ELF_MACHINE_CODE
5410 #define ELF_MACHINE_CODE EM_K1OM
5411
5412 #undef ELF_OSABI
5413
5414 #undef elf64_bed
5415 #define elf64_bed elf64_k1om_bed
5416
5417 #undef elf_backend_object_p
5418 #define elf_backend_object_p elf64_k1om_elf_object_p
5419
5420 #undef elf_backend_static_tls_alignment
5421
5422 #undef elf_backend_want_plt_sym
5423 #define elf_backend_want_plt_sym 0
5424
5425 #include "elf64-target.h"
5426
5427 /* FreeBSD K1OM support. */
5428
5429 #undef TARGET_LITTLE_SYM
5430 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5431 #undef TARGET_LITTLE_NAME
5432 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5433
5434 #undef ELF_OSABI
5435 #define ELF_OSABI ELFOSABI_FREEBSD
5436
5437 #undef elf64_bed
5438 #define elf64_bed elf64_k1om_fbsd_bed
5439
5440 #include "elf64-target.h"
5441
5442 /* 32bit x86-64 support. */
5443
5444 #undef TARGET_LITTLE_SYM
5445 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5446 #undef TARGET_LITTLE_NAME
5447 #define TARGET_LITTLE_NAME "elf32-x86-64"
5448 #undef elf32_bed
5449
5450 #undef ELF_ARCH
5451 #define ELF_ARCH bfd_arch_i386
5452
5453 #undef ELF_MACHINE_CODE
5454 #define ELF_MACHINE_CODE EM_X86_64
5455
5456 #undef ELF_OSABI
5457
5458 #undef elf_backend_object_p
5459 #define elf_backend_object_p \
5460 elf32_x86_64_elf_object_p
5461
5462 #undef elf_backend_bfd_from_remote_memory
5463 #define elf_backend_bfd_from_remote_memory \
5464 _bfd_elf32_bfd_from_remote_memory
5465
5466 #undef elf_backend_size_info
5467 #define elf_backend_size_info \
5468 _bfd_elf32_size_info
5469
5470 #include "elf32-target.h"