1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in the
39    same manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
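/* For example, elf_x86_64_convert_load_reloc below records a converted
   reloc with
     irel->r_info = htab->r_info (r_symndx,
                                  r_type | R_X86_64_converted_reloc_bit);
   and elf_x86_64_check_tls_transition strips it again with
     r_type = (ELF32_R_TYPE (rel[1].r_info)
               & ~R_X86_64_converted_reloc_bit);
   before comparing the type against the plain relocation numbers.  */
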
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
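
/* For illustration: with the layout above, a standard relocation type is
   its own index into x86_64_elf_howto_table, the x32 flavour of
   R_X86_64_32 lives in the extra slot at the end of the table, and the
   two GNU_VT* types map to the slots just after R_X86_64_REX_GOTPCRELX,
   since R_X86_64_GNU_VTINHERIT - R_X86_64_vt_offset == R_X86_64_standard.  */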
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366       case 296:         /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379       case 336:         /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426   /* Note that for some reason, a spurious space is tacked
427      onto the end of the args by some implementations (at least
428      one, anyway), so strip it off if it exists.  */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
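
/* (The dynamic linker fills GOT+8 with an identifier for this object and
   GOT+16 with the address of its lazy resolver, so PLT0 pushes the former
   and jumps through the latter; each subsequent entry pushes its
   relocation index and branches back to PLT0.)  */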
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575    looks like this.  */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595    the same as the lazy PLT with BND prefix so that bound registers are
596    preserved when control is passed to the dynamic linker.  Subsequent
597    entries for an IBT-enabled lazy procedure linkage table look like
598    this.  */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this.  */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT enabled.  */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT enabled.  */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
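
/* As a reading aid: the offsets above index into the byte arrays defined
   earlier.  In elf_x86_64_lazy_plt_entry, for instance, the 32-bit GOT
   displacement of "jmpq *name@GOTPC(%rip)" starts at byte 2
   (plt_got_offset), the relocation index pushed by "pushq" starts at
   byte 7 (plt_reloc_offset), and the displacement of the branch back to
   .plt0 starts at byte 12 (plt_plt_offset).  */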
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057          can transit to a different access model.  For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066          can transit to a different access model.  For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq $r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq $rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132          can transit to a different access model.  For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq $r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq $rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
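      /* A ModRM byte with mod == 00 and r/m == 101 (i.e. 5 after masking
         with 0xc7) selects RIP-relative addressing with a 32-bit
         displacement, which is where foo@gottpoff is encoded.  */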
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
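/* In outline, the transitions computed below are: when linking an
   executable, R_X86_64_TLSGD, R_X86_64_GOTPC32_TLSDESC,
   R_X86_64_TLSDESC_CALL and R_X86_64_GOTTPOFF become R_X86_64_TPOFF32
   (LE) for a local symbol and R_X86_64_GOTTPOFF (IE) otherwise, and
   R_X86_64_TLSLD becomes R_X86_64_TPOFF32; everything else is left
   unchanged.  */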
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = _("; recompile with -fPIC");
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = _("; recompile with -fPIC");
1436 }
1437
1438 if (bfd_link_dll (info))
1439 object = _("a shared object");
1440 else if (bfd_link_pie (info))
1441 object = _("a PIE object");
1442 else
1443 object = _("a PDE object");
1444
1445 /* xgettext:c-format */
1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1447 "not be used when making %s%s"),
1448 input_bfd, howto->name, und, v, name,
1449 object, pic);
1450 bfd_set_error (bfd_error_bad_value);
1451 sec->check_relocs_failed = 1;
1452 return FALSE;
1453 }
1454
1455 /* With the local symbol, foo, we convert
1456 mov foo@GOTPCREL(%rip), %reg
1457 to
1458 lea foo(%rip), %reg
1459 and convert
1460 call/jmp *foo@GOTPCREL(%rip)
1461 to
1462 nop call foo/jmp foo nop
1463 When PIC is false, convert
1464 test %reg, foo@GOTPCREL(%rip)
1465 to
1466 test $foo, %reg
1467 and convert
1468 binop foo@GOTPCREL(%rip), %reg
1469 to
1470 binop $foo, %reg
1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1472 instructions. */
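
/* At the byte level the mov -> lea rewrite described above only touches
   the opcode, e.g. (illustrative encoding with %rax as destination):
     48 8b 05 xx xx xx xx   mov  foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 xx xx xx xx   lea  foo(%rip), %rax
   with the GOTPCREL-style relocation on the disp32 replaced by
   R_X86_64_PC32 against foo itself.  */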
1473
1474 static bfd_boolean
1475 elf_x86_64_convert_load_reloc (bfd *abfd,
1476 bfd_byte *contents,
1477 unsigned int *r_type_p,
1478 Elf_Internal_Rela *irel,
1479 struct elf_link_hash_entry *h,
1480 bfd_boolean *converted,
1481 struct bfd_link_info *link_info)
1482 {
1483 struct elf_x86_link_hash_table *htab;
1484 bfd_boolean is_pic;
1485 bfd_boolean no_overflow;
1486 bfd_boolean relocx;
1487 bfd_boolean to_reloc_pc32;
1488 asection *tsec;
1489 bfd_signed_vma raddend;
1490 unsigned int opcode;
1491 unsigned int modrm;
1492 unsigned int r_type = *r_type_p;
1493 unsigned int r_symndx;
1494 bfd_vma roff = irel->r_offset;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return TRUE;
1498
1499 raddend = irel->r_addend;
1500   /* Addend for a 32-bit PC-relative relocation must be -4, since the reloc field is the final 4 bytes of the instruction.  */
1501 if (raddend != -4)
1502 return TRUE;
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517   /* Convert mov to lea even for plain R_X86_64_GOTPCREL, since this conversion has been done for a while.  */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return TRUE;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 /* Get the symbol referred to by the reloc. */
1539 if (h == NULL)
1540 {
1541 Elf_Internal_Sym *isym
1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1543
1544 /* Skip relocation against undefined symbols. */
1545 if (isym->st_shndx == SHN_UNDEF)
1546 return TRUE;
1547
1548 if (isym->st_shndx == SHN_ABS)
1549 tsec = bfd_abs_section_ptr;
1550 else if (isym->st_shndx == SHN_COMMON)
1551 tsec = bfd_com_section_ptr;
1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1553 tsec = &_bfd_elf_large_com_section;
1554 else
1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1556 }
1557 else
1558 {
1559       /* An undefined weak symbol is only bound locally in an executable
1560          and its reference is resolved as 0 without relocation
1561          overflow.  We can only perform this optimization for
1562          GOTPCRELX relocations since we need to modify the REX byte.
1563          It is OK to convert mov with R_X86_64_GOTPCREL to
1564          R_X86_64_PC32.  */
1565 bfd_boolean local_ref;
1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1567
1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1570 if ((relocx || opcode == 0x8b)
1571 && (h->root.type == bfd_link_hash_undefweak
1572 && !eh->linker_def
1573 && local_ref))
1574 {
1575 if (opcode == 0xff)
1576 {
1577 /* Skip for branch instructions since R_X86_64_PC32
1578 may overflow. */
1579 if (no_overflow)
1580 return TRUE;
1581 }
1582 else if (relocx)
1583 {
1584 /* For non-branch instructions, we can convert to
1585 R_X86_64_32/R_X86_64_32S since we know if there
1586 is a REX byte. */
1587 to_reloc_pc32 = FALSE;
1588 }
1589
1590 /* Since we don't know the current PC when PIC is true,
1591 we can't convert to R_X86_64_PC32. */
1592 if (to_reloc_pc32 && is_pic)
1593 return TRUE;
1594
1595 goto convert;
1596 }
1597       /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1598 ld.so may use its link-time address. */
1599 else if (h->start_stop
1600 || eh->linker_def
1601 || ((h->def_regular
1602 || h->root.type == bfd_link_hash_defined
1603 || h->root.type == bfd_link_hash_defweak)
1604 && h != htab->elf.hdynamic
1605 && local_ref))
1606 {
1607 /* bfd_link_hash_new or bfd_link_hash_undefined is
1608 set by an assignment in a linker script in
1609 bfd_elf_record_link_assignment. start_stop is set
1610 on __start_SECNAME/__stop_SECNAME which mark section
1611 SECNAME. */
1612 if (h->start_stop
1613 || eh->linker_def
1614 || (h->def_regular
1615 && (h->root.type == bfd_link_hash_new
1616 || h->root.type == bfd_link_hash_undefined
1617 || ((h->root.type == bfd_link_hash_defined
1618 || h->root.type == bfd_link_hash_defweak)
1619 && h->root.u.def.section == bfd_und_section_ptr))))
1620 {
1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1622 if (no_overflow)
1623 return TRUE;
1624 goto convert;
1625 }
1626 tsec = h->root.u.def.section;
1627 }
1628 else
1629 return TRUE;
1630 }
1631
1632 /* Don't convert GOTPCREL relocation against large section. */
1633 if (elf_section_data (tsec) != NULL
1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1635 return TRUE;
1636
1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1638 if (no_overflow)
1639 return TRUE;
1640
1641 convert:
1642 if (opcode == 0xff)
1643 {
1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1645 unsigned int nop;
1646 unsigned int disp;
1647 bfd_vma nop_offset;
1648
1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1650 R_X86_64_PC32. */
1651 modrm = bfd_get_8 (abfd, contents + roff - 1);
1652 if (modrm == 0x25)
1653 {
1654 /* Convert to "jmp foo nop". */
1655 modrm = 0xe9;
1656 nop = NOP_OPCODE;
1657 nop_offset = irel->r_offset + 3;
1658 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1659 irel->r_offset -= 1;
1660 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1661 }
1662 else
1663 {
1664 struct elf_x86_link_hash_entry *eh
1665 = (struct elf_x86_link_hash_entry *) h;
1666
1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1668 is a nop prefix. */
1669 modrm = 0xe8;
1670 /* To support TLS optimization, always use addr32 prefix for
1671 "call *__tls_get_addr@GOTPCREL(%rip)". */
1672 if (eh && eh->tls_get_addr)
1673 {
1674 nop = 0x67;
1675 nop_offset = irel->r_offset - 2;
1676 }
1677 else
1678 {
1679 nop = htab->params->call_nop_byte;
1680 if (htab->params->call_nop_as_suffix)
1681 {
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 nop_offset = irel->r_offset - 2;
1689 }
1690 }
1691 bfd_put_8 (abfd, nop, contents + nop_offset);
1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1693 r_type = R_X86_64_PC32;
1694 }
1695 else
1696 {
1697 unsigned int rex;
1698 unsigned int rex_mask = REX_R;
1699
1700 if (r_type == R_X86_64_REX_GOTPCRELX)
1701 rex = bfd_get_8 (abfd, contents + roff - 3);
1702 else
1703 rex = 0;
1704
1705 if (opcode == 0x8b)
1706 {
1707 if (to_reloc_pc32)
1708 {
1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1710 "lea foo(%rip), %reg". */
1711 opcode = 0x8d;
1712 r_type = R_X86_64_PC32;
1713 }
1714 else
1715 {
1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1717 "mov $foo, %reg". */
1718 opcode = 0xc7;
1719 modrm = bfd_get_8 (abfd, contents + roff - 1);
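/* The load encoded its destination in ModRM.reg (bits 5:3); the
   immediate form 0xc7 /0 encodes its destination in ModRM.r/m
   (bits 2:0), so move the reg field down and set mod to 0b11
   (register-direct). */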
1720 modrm = 0xc0 | (modrm & 0x38) >> 3;
1721 if ((rex & REX_W) != 0
1722 && ABI_64_P (link_info->output_bfd))
1723 {
1724 /* Keep the REX_W bit in REX byte for LP64. */
1725 r_type = R_X86_64_32S;
1726 goto rewrite_modrm_rex;
1727 }
1728 else
1729 {
1730 /* If the REX_W bit in the REX byte isn't needed,
1731 use R_X86_64_32 and clear the W bit to avoid
1732 sign-extending imm32 to imm64. */
1733 r_type = R_X86_64_32;
1734 /* Clear the W bit in REX byte. */
1735 rex_mask |= REX_W;
1736 goto rewrite_modrm_rex;
1737 }
1738 }
1739 }
1740 else
1741 {
1742 /* R_X86_64_PC32 isn't supported. */
1743 if (to_reloc_pc32)
1744 return TRUE;
1745
1746 modrm = bfd_get_8 (abfd, contents + roff - 1);
1747 if (opcode == 0x85)
1748 {
1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1750 "test $foo, %reg". */
1751 modrm = 0xc0 | (modrm & 0x38) >> 3;
1752 opcode = 0xf7;
1753 }
1754 else
1755 {
1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1757 "binop $foo, %reg". */
1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1759 opcode = 0x81;
1760 }
1761
1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1763 overflow when sign-extending imm32 to imm64. */
1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1765
1766 rewrite_modrm_rex:
1767 bfd_put_8 (abfd, modrm, contents + roff - 1);
1768
1769 if (rex)
1770 {
1771 /* Move the R bit to the B bit in REX byte. */
1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1773 bfd_put_8 (abfd, rex, contents + roff - 3);
1774 }
1775
1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1777 irel->r_addend = 0;
1778 }
1779
1780 bfd_put_8 (abfd, opcode, contents + roff - 2);
1781 }
1782
1783 *r_type_p = r_type;
1784 irel->r_info = htab->r_info (r_symndx,
1785 r_type | R_X86_64_converted_reloc_bit);
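/* The converted type is stored with R_X86_64_converted_reloc_bit
   set so that relocate_section can recognize a reloc whose
   instruction bytes were already rewritten here and strip the bit
   before applying it. */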
1786
1787 *converted = TRUE;
1788
1789 return TRUE;
1790 }
1791
1792 /* Look through the relocs for a section during the first phase, and
1793 calculate needed space in the global offset table, procedure
1794 linkage table, and dynamic reloc sections. */
1795
1796 static bfd_boolean
1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1798 asection *sec,
1799 const Elf_Internal_Rela *relocs)
1800 {
1801 struct elf_x86_link_hash_table *htab;
1802 Elf_Internal_Shdr *symtab_hdr;
1803 struct elf_link_hash_entry **sym_hashes;
1804 const Elf_Internal_Rela *rel;
1805 const Elf_Internal_Rela *rel_end;
1806 asection *sreloc;
1807 bfd_byte *contents;
1808 bfd_boolean converted;
1809
1810 if (bfd_link_relocatable (info))
1811 return TRUE;
1812
1813 /* Don't do anything special with non-loaded, non-alloced sections.
1814 In particular, any relocs in such sections should not affect GOT
1815 and PLT reference counting (ie. we don't allow them to create GOT
1816 or PLT entries), there's no possibility or desire to optimize TLS
1817 relocs, and there's not much point in propagating relocs to shared
1818 libs that the dynamic linker won't relocate. */
1819 if ((sec->flags & SEC_ALLOC) == 0)
1820 return TRUE;
1821
1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1823 if (htab == NULL)
1824 {
1825 sec->check_relocs_failed = 1;
1826 return FALSE;
1827 }
1828
1829 BFD_ASSERT (is_x86_elf (abfd, htab));
1830
1831 /* Get the section contents. */
1832 if (elf_section_data (sec)->this_hdr.contents != NULL)
1833 contents = elf_section_data (sec)->this_hdr.contents;
1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1835 {
1836 sec->check_relocs_failed = 1;
1837 return FALSE;
1838 }
1839
1840 symtab_hdr = &elf_symtab_hdr (abfd);
1841 sym_hashes = elf_sym_hashes (abfd);
1842
1843 converted = FALSE;
1844
1845 sreloc = NULL;
1846
1847 rel_end = relocs + sec->reloc_count;
1848 for (rel = relocs; rel < rel_end; rel++)
1849 {
1850 unsigned int r_type;
1851 unsigned int r_symndx;
1852 struct elf_link_hash_entry *h;
1853 struct elf_x86_link_hash_entry *eh;
1854 Elf_Internal_Sym *isym;
1855 const char *name;
1856 bfd_boolean size_reloc;
1857 bfd_boolean converted_reloc;
1858 bfd_boolean do_check_pic;
1859
1860 r_symndx = htab->r_sym (rel->r_info);
1861 r_type = ELF32_R_TYPE (rel->r_info);
1862
1863 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1864 {
1865 /* xgettext:c-format */
1866 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1867 abfd, r_symndx);
1868 goto error_return;
1869 }
1870
1871 if (r_symndx < symtab_hdr->sh_info)
1872 {
1873 /* A local symbol. */
1874 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1875 abfd, r_symndx);
1876 if (isym == NULL)
1877 goto error_return;
1878
1879 /* Check relocation against local STT_GNU_IFUNC symbol. */
1880 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1881 {
1882 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1883 TRUE);
1884 if (h == NULL)
1885 goto error_return;
1886
1887 /* Fake a STT_GNU_IFUNC symbol. */
1888 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1889 isym, NULL);
1890 h->type = STT_GNU_IFUNC;
1891 h->def_regular = 1;
1892 h->ref_regular = 1;
1893 h->forced_local = 1;
1894 h->root.type = bfd_link_hash_defined;
1895 }
1896 else
1897 h = NULL;
1898 }
1899 else
1900 {
1901 isym = NULL;
1902 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1903 while (h->root.type == bfd_link_hash_indirect
1904 || h->root.type == bfd_link_hash_warning)
1905 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1906 }
1907
1908 /* Check invalid x32 relocations. */
1909 if (!ABI_64_P (abfd))
1910 switch (r_type)
1911 {
1912 default:
1913 break;
1914
1915 case R_X86_64_DTPOFF64:
1916 case R_X86_64_TPOFF64:
1917 case R_X86_64_PC64:
1918 case R_X86_64_GOTOFF64:
1919 case R_X86_64_GOT64:
1920 case R_X86_64_GOTPCREL64:
1921 case R_X86_64_GOTPC64:
1922 case R_X86_64_GOTPLT64:
1923 case R_X86_64_PLTOFF64:
1924 {
1925 if (h)
1926 name = h->root.root.string;
1927 else
1928 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1929 NULL);
1930 _bfd_error_handler
1931 /* xgettext:c-format */
1932 (_("%pB: relocation %s against symbol `%s' isn't "
1933 "supported in x32 mode"), abfd,
1934 x86_64_elf_howto_table[r_type].name, name);
1935 bfd_set_error (bfd_error_bad_value);
1936 goto error_return;
1937 }
1938 break;
1939 }
1940
1941 if (h != NULL)
1942 {
1943 /* It is referenced by a non-shared object. */
1944 h->ref_regular = 1;
1945
1946 if (h->type == STT_GNU_IFUNC)
1947 elf_tdata (info->output_bfd)->has_gnu_symbols
1948 |= elf_gnu_symbol_ifunc;
1949 }
1950
1951 converted_reloc = FALSE;
1952 if ((r_type == R_X86_64_GOTPCREL
1953 || r_type == R_X86_64_GOTPCRELX
1954 || r_type == R_X86_64_REX_GOTPCRELX)
1955 && (h == NULL || h->type != STT_GNU_IFUNC))
1956 {
1957 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1958 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1959 irel, h, &converted_reloc,
1960 info))
1961 goto error_return;
1962
1963 if (converted_reloc)
1964 converted = TRUE;
1965 }
1966
1967 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1968 symtab_hdr, sym_hashes,
1969 &r_type, GOT_UNKNOWN,
1970 rel, rel_end, h, r_symndx, FALSE))
1971 goto error_return;
1972
1973 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1974 if (h == htab->elf.hgot)
1975 htab->got_referenced = TRUE;
1976
1977 eh = (struct elf_x86_link_hash_entry *) h;
1978 switch (r_type)
1979 {
1980 case R_X86_64_TLSLD:
1981 htab->tls_ld_or_ldm_got.refcount = 1;
1982 goto create_got;
1983
1984 case R_X86_64_TPOFF32:
1985 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1986 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1987 &x86_64_elf_howto_table[r_type]);
1988 if (eh != NULL)
1989 eh->zero_undefweak &= 0x2;
1990 break;
1991
1992 case R_X86_64_GOTTPOFF:
1993 if (!bfd_link_executable (info))
1994 info->flags |= DF_STATIC_TLS;
1995 /* Fall through */
1996
1997 case R_X86_64_GOT32:
1998 case R_X86_64_GOTPCREL:
1999 case R_X86_64_GOTPCRELX:
2000 case R_X86_64_REX_GOTPCRELX:
2001 case R_X86_64_TLSGD:
2002 case R_X86_64_GOT64:
2003 case R_X86_64_GOTPCREL64:
2004 case R_X86_64_GOTPLT64:
2005 case R_X86_64_GOTPC32_TLSDESC:
2006 case R_X86_64_TLSDESC_CALL:
2007 /* This symbol requires a global offset table entry. */
2008 {
2009 int tls_type, old_tls_type;
2010
2011 switch (r_type)
2012 {
2013 default: tls_type = GOT_NORMAL; break;
2014 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2015 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2016 case R_X86_64_GOTPC32_TLSDESC:
2017 case R_X86_64_TLSDESC_CALL:
2018 tls_type = GOT_TLS_GDESC; break;
2019 }
2020
2021 if (h != NULL)
2022 {
2023 h->got.refcount = 1;
2024 old_tls_type = eh->tls_type;
2025 }
2026 else
2027 {
2028 bfd_signed_vma *local_got_refcounts;
2029
2030 /* This is a global offset table entry for a local symbol. */
2031 local_got_refcounts = elf_local_got_refcounts (abfd);
2032 if (local_got_refcounts == NULL)
2033 {
2034 bfd_size_type size;
2035
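/* A single allocation holds three parallel per-local-symbol
   arrays: the signed GOT refcounts, the TLSDESC GOT offsets and
   the TLS type bytes; the assignments below carve it up. */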
2036 size = symtab_hdr->sh_info;
2037 size *= sizeof (bfd_signed_vma)
2038 + sizeof (bfd_vma) + sizeof (char);
2039 local_got_refcounts = ((bfd_signed_vma *)
2040 bfd_zalloc (abfd, size));
2041 if (local_got_refcounts == NULL)
2042 goto error_return;
2043 elf_local_got_refcounts (abfd) = local_got_refcounts;
2044 elf_x86_local_tlsdesc_gotent (abfd)
2045 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2046 elf_x86_local_got_tls_type (abfd)
2047 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2048 }
2049 local_got_refcounts[r_symndx] = 1;
2050 old_tls_type
2051 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2052 }
2053
2054 /* If a TLS symbol is accessed using IE at least once,
2055 there is no point in using a dynamic model for it. */
2056 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2057 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2058 || tls_type != GOT_TLS_IE))
2059 {
2060 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2061 tls_type = old_tls_type;
2062 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2063 && GOT_TLS_GD_ANY_P (tls_type))
2064 tls_type |= old_tls_type;
2065 else
2066 {
2067 if (h)
2068 name = h->root.root.string;
2069 else
2070 name = bfd_elf_sym_name (abfd, symtab_hdr,
2071 isym, NULL);
2072 _bfd_error_handler
2073 /* xgettext:c-format */
2074 (_("%pB: '%s' accessed both as normal and"
2075 " thread local symbol"),
2076 abfd, name);
2077 bfd_set_error (bfd_error_bad_value);
2078 goto error_return;
2079 }
2080 }
2081
2082 if (old_tls_type != tls_type)
2083 {
2084 if (eh != NULL)
2085 eh->tls_type = tls_type;
2086 else
2087 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2088 }
2089 }
2090 /* Fall through */
2091
2092 case R_X86_64_GOTOFF64:
2093 case R_X86_64_GOTPC32:
2094 case R_X86_64_GOTPC64:
2095 create_got:
2096 if (eh != NULL)
2097 eh->zero_undefweak &= 0x2;
2098 break;
2099
2100 case R_X86_64_PLT32:
2101 case R_X86_64_PLT32_BND:
2102 /* This symbol requires a procedure linkage table entry. We
2103 actually build the entry in adjust_dynamic_symbol,
2104 because this might be a case of linking PIC code which is
2105 never referenced by a dynamic object, in which case we
2106 don't need to generate a procedure linkage table entry
2107 after all. */
2108
2109 /* If this is a local symbol, we resolve it directly without
2110 creating a procedure linkage table entry. */
2111 if (h == NULL)
2112 continue;
2113
2114 eh->zero_undefweak &= 0x2;
2115 h->needs_plt = 1;
2116 h->plt.refcount = 1;
2117 break;
2118
2119 case R_X86_64_PLTOFF64:
2120 /* This tries to form the 'address' of a function relative
2121 to GOT. For global symbols we need a PLT entry. */
2122 if (h != NULL)
2123 {
2124 h->needs_plt = 1;
2125 h->plt.refcount = 1;
2126 }
2127 goto create_got;
2128
2129 case R_X86_64_SIZE32:
2130 case R_X86_64_SIZE64:
2131 size_reloc = TRUE;
2132 goto do_size;
2133
2134 case R_X86_64_PC8:
2135 case R_X86_64_PC16:
2136 case R_X86_64_PC32:
2137 case R_X86_64_PC32_BND:
2138 do_check_pic = TRUE;
2139 goto check_pic;
2140
2141 case R_X86_64_32:
2142 if (!ABI_64_P (abfd))
2143 goto pointer;
2144 /* Fall through. */
2145 case R_X86_64_8:
2146 case R_X86_64_16:
2147 case R_X86_64_32S:
2148 /* Check relocation overflow as these relocs may lead to
2149 run-time relocation overflow. Don't error out for
2150 sections we don't care about, such as debug sections or
2151 when relocation overflow check is disabled. */
2152 if (!htab->params->no_reloc_overflow_check
2153 && !converted_reloc
2154 && (bfd_link_pic (info)
2155 || (bfd_link_executable (info)
2156 && h != NULL
2157 && !h->def_regular
2158 && h->def_dynamic
2159 && (sec->flags & SEC_READONLY) == 0)))
2160 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2161 &x86_64_elf_howto_table[r_type]);
2162 /* Fall through. */
2163
2164 case R_X86_64_PC64:
2165 case R_X86_64_64:
2166 pointer:
2167 do_check_pic = FALSE;
2168 check_pic:
2169 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2170 eh->zero_undefweak |= 0x2;
2171 /* We are called after all symbols have been resolved. Only
2172 relocations against STT_GNU_IFUNC symbols must go through
2173 the PLT. */
2174 if (h != NULL
2175 && (bfd_link_executable (info)
2176 || h->type == STT_GNU_IFUNC))
2177 {
2178 bfd_boolean func_pointer_ref = FALSE;
2179
2180 if (r_type == R_X86_64_PC32)
2181 {
2182 /* Since something like ".long foo - ." may be used
2183 as a pointer, make sure that the PLT is used if foo is
2184 a function defined in a shared library. */
2185 if ((sec->flags & SEC_CODE) == 0)
2186 {
2187 h->pointer_equality_needed = 1;
2188 if (bfd_link_pie (info)
2189 && h->type == STT_FUNC
2190 && !h->def_regular
2191 && h->def_dynamic)
2192 {
2193 h->needs_plt = 1;
2194 h->plt.refcount = 1;
2195 }
2196 }
2197 }
2198 else if (r_type != R_X86_64_PC32_BND
2199 && r_type != R_X86_64_PC64)
2200 {
2201 h->pointer_equality_needed = 1;
2202 /* At run-time, R_X86_64_64 can be resolved for both
2203 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2204 can only be resolved for x32. */
2205 if ((sec->flags & SEC_READONLY) == 0
2206 && (r_type == R_X86_64_64
2207 || (!ABI_64_P (abfd)
2208 && (r_type == R_X86_64_32
2209 || r_type == R_X86_64_32S))))
2210 func_pointer_ref = TRUE;
2211 }
2212
2213 if (!func_pointer_ref)
2214 {
2215 /* If this reloc is in a read-only section, we might
2216 need a copy reloc. We can't check reliably at this
2217 stage whether the section is read-only, as input
2218 sections have not yet been mapped to output sections.
2219 Tentatively set the flag for now, and correct in
2220 adjust_dynamic_symbol. */
2221 h->non_got_ref = 1;
2222
2223 /* We may need a .plt entry if the symbol is a function
2224 defined in a shared lib or is a function referenced
2225 from the code or read-only section. */
2226 if (!h->def_regular
2227 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2228 h->plt.refcount = 1;
2229 }
2230 }
2231
2232 if (do_check_pic)
2233 {
2234 /* Don't complain about -fPIC if the symbol is undefined
2235 when building an executable, unless it is an unresolved weak
2236 symbol, references a dynamic definition in a PIE, or
2237 -z nocopyreloc is used. */
2238 bfd_boolean no_copyreloc_p
2239 = (info->nocopyreloc
2240 || (h != NULL
2241 && !h->root.linker_def
2242 && !h->root.ldscript_def
2243 && eh->def_protected
2244 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
2245 if ((sec->flags & SEC_ALLOC) != 0
2246 && (sec->flags & SEC_READONLY) != 0
2247 && h != NULL
2248 && ((bfd_link_executable (info)
2249 && ((h->root.type == bfd_link_hash_undefweak
2250 && (eh == NULL
2251 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2252 eh)))
2253 || (bfd_link_pie (info)
2254 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2255 && h->def_dynamic)
2256 || (no_copyreloc_p
2257 && h->def_dynamic
2258 && !(h->root.u.def.section->flags & SEC_CODE))))
2259 || bfd_link_dll (info)))
2260 {
2261 bfd_boolean fail = FALSE;
2262 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
2263 {
2264 /* Symbol is referenced locally. Make sure it is
2265 defined locally. */
2266 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
2267 }
2268 else if (bfd_link_pie (info))
2269 {
2270 /* We can only use PC-relative relocations in PIE
2271 from non-code sections. */
2272 if (h->type == STT_FUNC
2273 && (sec->flags & SEC_CODE) != 0)
2274 fail = TRUE;
2275 }
2276 else if (no_copyreloc_p || bfd_link_dll (info))
2277 {
2278 /* Symbol doesn't need copy reloc and isn't
2279 referenced locally. Don't allow PC-relative
2280 relocations against default and protected
2281 symbols since address of protected function
2282 and location of protected data may not be in
2283 the shared object. */
2284 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2285 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
2286 }
2287
2288 if (fail)
2289 return elf_x86_64_need_pic (info, abfd, sec, h,
2290 symtab_hdr, isym,
2291 &x86_64_elf_howto_table[r_type]);
2292 }
2293 }
2294
2295 size_reloc = FALSE;
2296 do_size:
2297 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2298 htab->pointer_r_type))
2299 {
2300 struct elf_dyn_relocs *p;
2301 struct elf_dyn_relocs **head;
2302
2303 /* We must copy these reloc types into the output file.
2304 Create a reloc section in dynobj and make room for
2305 this reloc. */
2306 if (sreloc == NULL)
2307 {
2308 sreloc = _bfd_elf_make_dynamic_reloc_section
2309 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2310 abfd, /*rela?*/ TRUE);
2311
2312 if (sreloc == NULL)
2313 goto error_return;
2314 }
2315
2316 /* If this is a global symbol, we count the number of
2317 relocations we need for this symbol. */
2318 if (h != NULL)
2319 head = &eh->dyn_relocs;
2320 else
2321 {
2322 /* Track dynamic relocs needed for local syms too.
2323 We really need local syms available to do this
2324 easily. Oh well. */
2325 asection *s;
2326 void **vpp;
2327
2328 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2329 abfd, r_symndx);
2330 if (isym == NULL)
2331 goto error_return;
2332
2333 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2334 if (s == NULL)
2335 s = sec;
2336
2337 /* Beware of type punned pointers vs strict aliasing
2338 rules. */
2339 vpp = &(elf_section_data (s)->local_dynrel);
2340 head = (struct elf_dyn_relocs **)vpp;
2341 }
2342
2343 p = *head;
2344 if (p == NULL || p->sec != sec)
2345 {
2346 bfd_size_type amt = sizeof *p;
2347
2348 p = ((struct elf_dyn_relocs *)
2349 bfd_alloc (htab->elf.dynobj, amt));
2350 if (p == NULL)
2351 goto error_return;
2352 p->next = *head;
2353 *head = p;
2354 p->sec = sec;
2355 p->count = 0;
2356 p->pc_count = 0;
2357 }
2358
2359 p->count += 1;
2360 /* Count size relocation as PC-relative relocation. */
2361 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2362 p->pc_count += 1;
2363 }
2364 break;
2365
2366 /* This relocation describes the C++ object vtable hierarchy.
2367 Reconstruct it for later use during GC. */
2368 case R_X86_64_GNU_VTINHERIT:
2369 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2370 goto error_return;
2371 break;
2372
2373 /* This relocation describes which C++ vtable entries are actually
2374 used. Record for later use during GC. */
2375 case R_X86_64_GNU_VTENTRY:
2376 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2377 goto error_return;
2378 break;
2379
2380 default:
2381 break;
2382 }
2383 }
2384
2385 if (elf_section_data (sec)->this_hdr.contents != contents)
2386 {
2387 if (!converted && !info->keep_memory)
2388 free (contents);
2389 else
2390 {
2391 /* Cache the section contents for elf_link_input_bfd if any
2392 load is converted or --no-keep-memory isn't used. */
2393 elf_section_data (sec)->this_hdr.contents = contents;
2394 }
2395 }
2396
2397 /* Cache relocations if any load is converted. */
2398 if (elf_section_data (sec)->relocs != relocs && converted)
2399 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2400
2401 return TRUE;
2402
2403 error_return:
2404 if (elf_section_data (sec)->this_hdr.contents != contents)
2405 free (contents);
2406 sec->check_relocs_failed = 1;
2407 return FALSE;
2408 }
2409
2410 /* Return the relocation value for @tpoff relocation
2411 if STT_TLS virtual address is ADDRESS. */
2412
2413 static bfd_vma
2414 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2415 {
2416 struct elf_link_hash_table *htab = elf_hash_table (info);
2417 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2418 bfd_vma static_tls_size;
2419
2420 /* If tls_sec is NULL, we should have signalled an error already. */
2421 if (htab->tls_sec == NULL)
2422 return 0;
2423
2424 /* Consider special static TLS alignment requirements. */
2425 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
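/* x86-64 uses TLS variant II: the static TLS block sits
   immediately below the thread pointer, so @tpoff values are
   negative offsets from %fs:0. */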
2426 return address - static_tls_size - htab->tls_sec->vma;
2427 }
2428
2429 /* Relocate an x86_64 ELF section. */
2430
2431 static bfd_boolean
2432 elf_x86_64_relocate_section (bfd *output_bfd,
2433 struct bfd_link_info *info,
2434 bfd *input_bfd,
2435 asection *input_section,
2436 bfd_byte *contents,
2437 Elf_Internal_Rela *relocs,
2438 Elf_Internal_Sym *local_syms,
2439 asection **local_sections)
2440 {
2441 struct elf_x86_link_hash_table *htab;
2442 Elf_Internal_Shdr *symtab_hdr;
2443 struct elf_link_hash_entry **sym_hashes;
2444 bfd_vma *local_got_offsets;
2445 bfd_vma *local_tlsdesc_gotents;
2446 Elf_Internal_Rela *rel;
2447 Elf_Internal_Rela *wrel;
2448 Elf_Internal_Rela *relend;
2449 unsigned int plt_entry_size;
2450
2451 /* Skip if check_relocs failed. */
2452 if (input_section->check_relocs_failed)
2453 return FALSE;
2454
2455 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2456 if (htab == NULL)
2457 return FALSE;
2458
2459 if (!is_x86_elf (input_bfd, htab))
2460 {
2461 bfd_set_error (bfd_error_wrong_format);
2462 return FALSE;
2463 }
2464
2465 plt_entry_size = htab->plt.plt_entry_size;
2466 symtab_hdr = &elf_symtab_hdr (input_bfd);
2467 sym_hashes = elf_sym_hashes (input_bfd);
2468 local_got_offsets = elf_local_got_offsets (input_bfd);
2469 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2470
2471 _bfd_x86_elf_set_tls_module_base (info);
2472
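/* WREL trails REL as the write position: relocations dropped
   below (for instance those against discarded sections under
   ld -r) are squeezed out of the array in place. */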
2473 rel = wrel = relocs;
2474 relend = relocs + input_section->reloc_count;
2475 for (; rel < relend; wrel++, rel++)
2476 {
2477 unsigned int r_type, r_type_tls;
2478 reloc_howto_type *howto;
2479 unsigned long r_symndx;
2480 struct elf_link_hash_entry *h;
2481 struct elf_x86_link_hash_entry *eh;
2482 Elf_Internal_Sym *sym;
2483 asection *sec;
2484 bfd_vma off, offplt, plt_offset;
2485 bfd_vma relocation;
2486 bfd_boolean unresolved_reloc;
2487 bfd_reloc_status_type r;
2488 int tls_type;
2489 asection *base_got, *resolved_plt;
2490 bfd_vma st_size;
2491 bfd_boolean resolved_to_zero;
2492 bfd_boolean relative_reloc;
2493 bfd_boolean converted_reloc;
2494 bfd_boolean need_copy_reloc_in_pie;
2495
2496 r_type = ELF32_R_TYPE (rel->r_info);
2497 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2498 || r_type == (int) R_X86_64_GNU_VTENTRY)
2499 {
2500 if (wrel != rel)
2501 *wrel = *rel;
2502 continue;
2503 }
2504
2505 r_symndx = htab->r_sym (rel->r_info);
2506 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2507 if (converted_reloc)
2508 {
2509 r_type &= ~R_X86_64_converted_reloc_bit;
2510 rel->r_info = htab->r_info (r_symndx, r_type);
2511 }
2512
2513 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2514 if (howto == NULL)
2515 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2516
2517 h = NULL;
2518 sym = NULL;
2519 sec = NULL;
2520 unresolved_reloc = FALSE;
2521 if (r_symndx < symtab_hdr->sh_info)
2522 {
2523 sym = local_syms + r_symndx;
2524 sec = local_sections[r_symndx];
2525
2526 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2527 &sec, rel);
2528 st_size = sym->st_size;
2529
2530 /* Relocate against local STT_GNU_IFUNC symbol. */
2531 if (!bfd_link_relocatable (info)
2532 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2533 {
2534 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2535 rel, FALSE);
2536 if (h == NULL)
2537 abort ();
2538
2539 /* Set STT_GNU_IFUNC symbol value. */
2540 h->root.u.def.value = sym->st_value;
2541 h->root.u.def.section = sec;
2542 }
2543 }
2544 else
2545 {
2546 bfd_boolean warned ATTRIBUTE_UNUSED;
2547 bfd_boolean ignored ATTRIBUTE_UNUSED;
2548
2549 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2550 r_symndx, symtab_hdr, sym_hashes,
2551 h, sec, relocation,
2552 unresolved_reloc, warned, ignored);
2553 st_size = h->size;
2554 }
2555
2556 if (sec != NULL && discarded_section (sec))
2557 {
2558 _bfd_clear_contents (howto, input_bfd, input_section,
2559 contents, rel->r_offset);
2560 wrel->r_offset = rel->r_offset;
2561 wrel->r_info = 0;
2562 wrel->r_addend = 0;
2563
2564 /* For ld -r, remove relocations in debug sections against
2565 sections defined in discarded sections. Not done for
2566 eh_frame, whose editing code expects the relocs to be present. */
2567 if (bfd_link_relocatable (info)
2568 && (input_section->flags & SEC_DEBUGGING))
2569 wrel--;
2570
2571 continue;
2572 }
2573
2574 if (bfd_link_relocatable (info))
2575 {
2576 if (wrel != rel)
2577 *wrel = *rel;
2578 continue;
2579 }
2580
2581 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2582 {
2583 if (r_type == R_X86_64_64)
2584 {
2585 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2586 zero-extend it to 64bit if addend is zero. */
2587 r_type = R_X86_64_32;
2588 memset (contents + rel->r_offset + 4, 0, 4);
2589 }
2590 else if (r_type == R_X86_64_SIZE64)
2591 {
2592 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2593 zero-extend it to 64bit if addend is zero. */
2594 r_type = R_X86_64_SIZE32;
2595 memset (contents + rel->r_offset + 4, 0, 4);
2596 }
2597 }
2598
2599 eh = (struct elf_x86_link_hash_entry *) h;
2600
2601 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
2602 it here if it is defined in a non-shared object. */
2603 if (h != NULL
2604 && h->type == STT_GNU_IFUNC
2605 && h->def_regular)
2606 {
2607 bfd_vma plt_index;
2608 const char *name;
2609
2610 if ((input_section->flags & SEC_ALLOC) == 0)
2611 {
2612 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2613 STT_GNU_IFUNC symbol as STT_FUNC. */
2614 if (elf_section_type (input_section) == SHT_NOTE)
2615 goto skip_ifunc;
2616 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2617 sections because such sections are not SEC_ALLOC and
2618 thus ld.so will not process them. */
2619 if ((input_section->flags & SEC_DEBUGGING) != 0)
2620 continue;
2621 abort ();
2622 }
2623
2624 switch (r_type)
2625 {
2626 default:
2627 break;
2628
2629 case R_X86_64_GOTPCREL:
2630 case R_X86_64_GOTPCRELX:
2631 case R_X86_64_REX_GOTPCRELX:
2632 case R_X86_64_GOTPCREL64:
2633 base_got = htab->elf.sgot;
2634 off = h->got.offset;
2635
2636 if (base_got == NULL)
2637 abort ();
2638
2639 if (off == (bfd_vma) -1)
2640 {
2641 /* We can't use h->got.offset here to save state, or
2642 even just remember the offset, as finish_dynamic_symbol
2643 would use that as offset into .got. */
2644
2645 if (h->plt.offset == (bfd_vma) -1)
2646 abort ();
2647
2648 if (htab->elf.splt != NULL)
2649 {
2650 plt_index = (h->plt.offset / plt_entry_size
2651 - htab->plt.has_plt0);
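/* The first three .got.plt entries are reserved for the address
   of .dynamic, the link map and _dl_runtime_resolve, so PLT
   slot N owns .got.plt entry N + 3. */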
2652 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2653 base_got = htab->elf.sgotplt;
2654 }
2655 else
2656 {
2657 plt_index = h->plt.offset / plt_entry_size;
2658 off = plt_index * GOT_ENTRY_SIZE;
2659 base_got = htab->elf.igotplt;
2660 }
2661
2662 if (h->dynindx == -1
2663 || h->forced_local
2664 || info->symbolic)
2665 {
2666 /* This references the local definition. We must
2667 initialize this entry in the global offset table.
2668 Since the offset must always be a multiple of 8,
2669 we use the least significant bit to record
2670 whether we have initialized it already.
2671
2672 When doing a dynamic link, we create a .rela.got
2673 relocation entry to initialize the value. This
2674 is done in the finish_dynamic_symbol routine. */
2675 if ((off & 1) != 0)
2676 off &= ~1;
2677 else
2678 {
2679 bfd_put_64 (output_bfd, relocation,
2680 base_got->contents + off);
2681 /* Note that this is harmless for the GOTPLT64
2682 case, as -1 | 1 still is -1. */
2683 h->got.offset |= 1;
2684 }
2685 }
2686 }
2687
2688 relocation = (base_got->output_section->vma
2689 + base_got->output_offset + off);
2690
2691 goto do_relocation;
2692 }
2693
2694 if (h->plt.offset == (bfd_vma) -1)
2695 {
2696 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2697 if (r_type == htab->pointer_r_type
2698 && (input_section->flags & SEC_CODE) == 0)
2699 goto do_ifunc_pointer;
2700 goto bad_ifunc_reloc;
2701 }
2702
2703 /* STT_GNU_IFUNC symbol must go through PLT. */
2704 if (htab->elf.splt != NULL)
2705 {
2706 if (htab->plt_second != NULL)
2707 {
2708 resolved_plt = htab->plt_second;
2709 plt_offset = eh->plt_second.offset;
2710 }
2711 else
2712 {
2713 resolved_plt = htab->elf.splt;
2714 plt_offset = h->plt.offset;
2715 }
2716 }
2717 else
2718 {
2719 resolved_plt = htab->elf.iplt;
2720 plt_offset = h->plt.offset;
2721 }
2722
2723 relocation = (resolved_plt->output_section->vma
2724 + resolved_plt->output_offset + plt_offset);
2725
2726 switch (r_type)
2727 {
2728 default:
2729 bad_ifunc_reloc:
2730 if (h->root.root.string)
2731 name = h->root.root.string;
2732 else
2733 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2734 NULL);
2735 _bfd_error_handler
2736 /* xgettext:c-format */
2737 (_("%pB: relocation %s against STT_GNU_IFUNC "
2738 "symbol `%s' isn't supported"), input_bfd,
2739 howto->name, name);
2740 bfd_set_error (bfd_error_bad_value);
2741 return FALSE;
2742
2743 case R_X86_64_32S:
2744 if (bfd_link_pic (info))
2745 abort ();
2746 goto do_relocation;
2747
2748 case R_X86_64_32:
2749 if (ABI_64_P (output_bfd))
2750 goto do_relocation;
2751 /* FALLTHROUGH */
2752 case R_X86_64_64:
2753 do_ifunc_pointer:
2754 if (rel->r_addend != 0)
2755 {
2756 if (h->root.root.string)
2757 name = h->root.root.string;
2758 else
2759 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2760 sym, NULL);
2761 _bfd_error_handler
2762 /* xgettext:c-format */
2763 (_("%pB: relocation %s against STT_GNU_IFUNC "
2764 "symbol `%s' has non-zero addend: %" PRId64),
2765 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2766 bfd_set_error (bfd_error_bad_value);
2767 return FALSE;
2768 }
2769
2770 /* Generate a dynamic relocation only when there is a
2771 non-GOT reference in a shared object or there is no
2772 PLT. */
2773 if ((bfd_link_pic (info) && h->non_got_ref)
2774 || h->plt.offset == (bfd_vma) -1)
2775 {
2776 Elf_Internal_Rela outrel;
2777 asection *sreloc;
2778
2779 /* Need a dynamic relocation to get the real function
2780 address. */
2781 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2782 info,
2783 input_section,
2784 rel->r_offset);
2785 if (outrel.r_offset == (bfd_vma) -1
2786 || outrel.r_offset == (bfd_vma) -2)
2787 abort ();
2788
2789 outrel.r_offset += (input_section->output_section->vma
2790 + input_section->output_offset);
2791
2792 if (POINTER_LOCAL_IFUNC_P (info, h))
2793 {
2794 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2795 h->root.root.string,
2796 h->root.u.def.section->owner);
2797
2798 /* This symbol is resolved locally. */
2799 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2800 outrel.r_addend = (h->root.u.def.value
2801 + h->root.u.def.section->output_section->vma
2802 + h->root.u.def.section->output_offset);
2803 }
2804 else
2805 {
2806 outrel.r_info = htab->r_info (h->dynindx, r_type);
2807 outrel.r_addend = 0;
2808 }
2809
2810 /* Dynamic relocations are stored in
2811 1. .rela.ifunc section in PIC object.
2812 2. .rela.got section in dynamic executable.
2813 3. .rela.iplt section in static executable. */
2814 if (bfd_link_pic (info))
2815 sreloc = htab->elf.irelifunc;
2816 else if (htab->elf.splt != NULL)
2817 sreloc = htab->elf.srelgot;
2818 else
2819 sreloc = htab->elf.irelplt;
2820 elf_append_rela (output_bfd, sreloc, &outrel);
2821
2822 /* If this reloc is against an external symbol, we
2823 do not want to fiddle with the addend. Otherwise,
2824 we need to include the symbol value so that it
2825 becomes an addend for the dynamic reloc. For an
2826 internal symbol, we have updated the addend. */
2827 continue;
2828 }
2829 /* FALLTHROUGH */
2830 case R_X86_64_PC32:
2831 case R_X86_64_PC32_BND:
2832 case R_X86_64_PC64:
2833 case R_X86_64_PLT32:
2834 case R_X86_64_PLT32_BND:
2835 goto do_relocation;
2836 }
2837 }
2838
2839 skip_ifunc:
2840 resolved_to_zero = (eh != NULL
2841 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2842
2843 /* When generating a shared object, the relocations handled here are
2844 copied into the output file to be resolved at run time. */
2845 switch (r_type)
2846 {
2847 case R_X86_64_GOT32:
2848 case R_X86_64_GOT64:
2849 /* Relocation is to the entry for this symbol in the global
2850 offset table. */
2851 case R_X86_64_GOTPCREL:
2852 case R_X86_64_GOTPCRELX:
2853 case R_X86_64_REX_GOTPCRELX:
2854 case R_X86_64_GOTPCREL64:
2855 /* Use global offset table entry as symbol value. */
2856 case R_X86_64_GOTPLT64:
2857 /* This is obsolete and treated the same as GOT64. */
2858 base_got = htab->elf.sgot;
2859
2860 if (htab->elf.sgot == NULL)
2861 abort ();
2862
2863 relative_reloc = FALSE;
2864 if (h != NULL)
2865 {
2866 off = h->got.offset;
2867 if (h->needs_plt
2868 && h->plt.offset != (bfd_vma)-1
2869 && off == (bfd_vma)-1)
2870 {
2871 /* We can't use h->got.offset here to save
2872 state, or even just remember the offset, as
2873 finish_dynamic_symbol would use that as offset into
2874 .got. */
2875 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2876 - htab->plt.has_plt0);
2877 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2878 base_got = htab->elf.sgotplt;
2879 }
2880
2881 if (RESOLVED_LOCALLY_P (info, h, htab))
2882 {
2883 /* We must initialize this entry in the global offset
2884 table. Since the offset must always be a multiple
2885 of 8, we use the least significant bit to record
2886 whether we have initialized it already.
2887
2888 When doing a dynamic link, we create a .rela.got
2889 relocation entry to initialize the value. This is
2890 done in the finish_dynamic_symbol routine. */
2891 if ((off & 1) != 0)
2892 off &= ~1;
2893 else
2894 {
2895 bfd_put_64 (output_bfd, relocation,
2896 base_got->contents + off);
2897 /* Note that this is harmless for the GOTPLT64 case,
2898 as -1 | 1 still is -1. */
2899 h->got.offset |= 1;
2900
2901 if (GENERATE_RELATIVE_RELOC_P (info, h))
2902 {
2903 /* If this symbol isn't dynamic in PIC,
2904 generate R_X86_64_RELATIVE here. */
2905 eh->no_finish_dynamic_symbol = 1;
2906 relative_reloc = TRUE;
2907 }
2908 }
2909 }
2910 else
2911 unresolved_reloc = FALSE;
2912 }
2913 else
2914 {
2915 if (local_got_offsets == NULL)
2916 abort ();
2917
2918 off = local_got_offsets[r_symndx];
2919
2920 /* The offset must always be a multiple of 8. We use
2921 the least significant bit to record whether we have
2922 already generated the necessary reloc. */
2923 if ((off & 1) != 0)
2924 off &= ~1;
2925 else
2926 {
2927 bfd_put_64 (output_bfd, relocation,
2928 base_got->contents + off);
2929 local_got_offsets[r_symndx] |= 1;
2930
2931 if (bfd_link_pic (info))
2932 relative_reloc = TRUE;
2933 }
2934 }
2935
2936 if (relative_reloc)
2937 {
2938 asection *s;
2939 Elf_Internal_Rela outrel;
2940
2941 /* We need to generate a R_X86_64_RELATIVE reloc
2942 for the dynamic linker. */
2943 s = htab->elf.srelgot;
2944 if (s == NULL)
2945 abort ();
2946
2947 outrel.r_offset = (base_got->output_section->vma
2948 + base_got->output_offset
2949 + off);
2950 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2951 outrel.r_addend = relocation;
2952 elf_append_rela (output_bfd, s, &outrel);
2953 }
2954
2955 if (off >= (bfd_vma) -2)
2956 abort ();
2957
2958 relocation = base_got->output_section->vma
2959 + base_got->output_offset + off;
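/* R_X86_64_GOT32/GOT64/GOTPLT64 want the offset of the GOT entry
   from _GLOBAL_OFFSET_TABLE_ (the start of .got.plt); the GOTPCREL
   flavours want the absolute address of the entry and let the
   PC-relative howto do the rest. */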
2960 if (r_type != R_X86_64_GOTPCREL
2961 && r_type != R_X86_64_GOTPCRELX
2962 && r_type != R_X86_64_REX_GOTPCRELX
2963 && r_type != R_X86_64_GOTPCREL64)
2964 relocation -= htab->elf.sgotplt->output_section->vma
2965 - htab->elf.sgotplt->output_offset;
2966
2967 break;
2968
2969 case R_X86_64_GOTOFF64:
2970 /* Relocation is relative to the start of the global offset
2971 table. */
2972
2973 /* Check to make sure it isn't a protected function or data
2974 symbol for a shared library, since it may not be local when
2975 used as a function address or with a copy relocation. We also
2976 need to make sure that the symbol is referenced locally. */
2977 if (bfd_link_pic (info) && h)
2978 {
2979 if (!h->def_regular)
2980 {
2981 const char *v;
2982
2983 switch (ELF_ST_VISIBILITY (h->other))
2984 {
2985 case STV_HIDDEN:
2986 v = _("hidden symbol");
2987 break;
2988 case STV_INTERNAL:
2989 v = _("internal symbol");
2990 break;
2991 case STV_PROTECTED:
2992 v = _("protected symbol");
2993 break;
2994 default:
2995 v = _("symbol");
2996 break;
2997 }
2998
2999 _bfd_error_handler
3000 /* xgettext:c-format */
3001 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3002 " `%s' can not be used when making a shared object"),
3003 input_bfd, v, h->root.root.string);
3004 bfd_set_error (bfd_error_bad_value);
3005 return FALSE;
3006 }
3007 else if (!bfd_link_executable (info)
3008 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3009 && (h->type == STT_FUNC
3010 || h->type == STT_OBJECT)
3011 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3012 {
3013 _bfd_error_handler
3014 /* xgettext:c-format */
3015 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3016 " `%s' can not be used when making a shared object"),
3017 input_bfd,
3018 h->type == STT_FUNC ? "function" : "data",
3019 h->root.root.string);
3020 bfd_set_error (bfd_error_bad_value);
3021 return FALSE;
3022 }
3023 }
3024
3025 /* Note that sgot is not involved in this
3026 calculation. We always want the start of .got.plt. If we
3027 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3028 permitted by the ABI, we might have to change this
3029 calculation. */
3030 relocation -= htab->elf.sgotplt->output_section->vma
3031 + htab->elf.sgotplt->output_offset;
3032 break;
3033
3034 case R_X86_64_GOTPC32:
3035 case R_X86_64_GOTPC64:
3036 /* Use global offset table as symbol value. */
3037 relocation = htab->elf.sgotplt->output_section->vma
3038 + htab->elf.sgotplt->output_offset;
3039 unresolved_reloc = FALSE;
3040 break;
3041
3042 case R_X86_64_PLTOFF64:
3043 /* Relocation is PLT entry relative to GOT. For local
3044 symbols it's the symbol itself relative to GOT. */
3045 if (h != NULL
3046 /* See PLT32 handling. */
3047 && (h->plt.offset != (bfd_vma) -1
3048 || eh->plt_got.offset != (bfd_vma) -1)
3049 && htab->elf.splt != NULL)
3050 {
3051 if (eh->plt_got.offset != (bfd_vma) -1)
3052 {
3053 /* Use the GOT PLT. */
3054 resolved_plt = htab->plt_got;
3055 plt_offset = eh->plt_got.offset;
3056 }
3057 else if (htab->plt_second != NULL)
3058 {
3059 resolved_plt = htab->plt_second;
3060 plt_offset = eh->plt_second.offset;
3061 }
3062 else
3063 {
3064 resolved_plt = htab->elf.splt;
3065 plt_offset = h->plt.offset;
3066 }
3067
3068 relocation = (resolved_plt->output_section->vma
3069 + resolved_plt->output_offset
3070 + plt_offset);
3071 unresolved_reloc = FALSE;
3072 }
3073
3074 relocation -= htab->elf.sgotplt->output_section->vma
3075 + htab->elf.sgotplt->output_offset;
3076 break;
3077
3078 case R_X86_64_PLT32:
3079 case R_X86_64_PLT32_BND:
3080 /* Relocation is to the entry for this symbol in the
3081 procedure linkage table. */
3082
3083 /* Resolve a PLT32 reloc against a local symbol directly,
3084 without using the procedure linkage table. */
3085 if (h == NULL)
3086 break;
3087
3088 if ((h->plt.offset == (bfd_vma) -1
3089 && eh->plt_got.offset == (bfd_vma) -1)
3090 || htab->elf.splt == NULL)
3091 {
3092 /* We didn't make a PLT entry for this symbol. This
3093 happens when statically linking PIC code, or when
3094 using -Bsymbolic. */
3095 break;
3096 }
3097
3098 use_plt:
3099 if (h->plt.offset != (bfd_vma) -1)
3100 {
3101 if (htab->plt_second != NULL)
3102 {
3103 resolved_plt = htab->plt_second;
3104 plt_offset = eh->plt_second.offset;
3105 }
3106 else
3107 {
3108 resolved_plt = htab->elf.splt;
3109 plt_offset = h->plt.offset;
3110 }
3111 }
3112 else
3113 {
3114 /* Use the GOT PLT. */
3115 resolved_plt = htab->plt_got;
3116 plt_offset = eh->plt_got.offset;
3117 }
3118
3119 relocation = (resolved_plt->output_section->vma
3120 + resolved_plt->output_offset
3121 + plt_offset);
3122 unresolved_reloc = FALSE;
3123 break;
3124
3125 case R_X86_64_SIZE32:
3126 case R_X86_64_SIZE64:
3127 /* Set to symbol size. */
3128 relocation = st_size;
3129 goto direct;
3130
3131 case R_X86_64_PC8:
3132 case R_X86_64_PC16:
3133 case R_X86_64_PC32:
3134 case R_X86_64_PC32_BND:
3135 /* Since x86-64 has a PC-relative PLT, we can use a PLT entry in a PIE
3136 as the function address. */
3137 if (h != NULL
3138 && (input_section->flags & SEC_CODE) == 0
3139 && bfd_link_pie (info)
3140 && h->type == STT_FUNC
3141 && !h->def_regular
3142 && h->def_dynamic)
3143 goto use_plt;
3144 /* Fall through. */
3145
3146 case R_X86_64_8:
3147 case R_X86_64_16:
3148 case R_X86_64_32:
3149 case R_X86_64_PC64:
3150 case R_X86_64_64:
3151 /* FIXME: The ABI says the linker should make sure the value is
3152 the same when it's zero-extended to 64 bits. */
3153
3154 direct:
3155 if ((input_section->flags & SEC_ALLOC) == 0)
3156 break;
3157
3158 need_copy_reloc_in_pie = (bfd_link_pie (info)
3159 && h != NULL
3160 && (h->needs_copy
3161 || eh->needs_copy
3162 || (h->root.type
3163 == bfd_link_hash_undefined))
3164 && (X86_PCREL_TYPE_P (r_type)
3165 || X86_SIZE_TYPE_P (r_type)));
3166
3167 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3168 need_copy_reloc_in_pie,
3169 resolved_to_zero, FALSE))
3170 {
3171 Elf_Internal_Rela outrel;
3172 bfd_boolean skip, relocate;
3173 asection *sreloc;
3174
3175 /* When generating a shared object, these relocations
3176 are copied into the output file to be resolved at run
3177 time. */
3178 skip = FALSE;
3179 relocate = FALSE;
3180
3181 outrel.r_offset =
3182 _bfd_elf_section_offset (output_bfd, info, input_section,
3183 rel->r_offset);
3184 if (outrel.r_offset == (bfd_vma) -1)
3185 skip = TRUE;
3186 else if (outrel.r_offset == (bfd_vma) -2)
3187 skip = TRUE, relocate = TRUE;
3188
3189 outrel.r_offset += (input_section->output_section->vma
3190 + input_section->output_offset);
3191
3192 if (skip)
3193 memset (&outrel, 0, sizeof outrel);
3194
3195 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3196 {
3197 outrel.r_info = htab->r_info (h->dynindx, r_type);
3198 outrel.r_addend = rel->r_addend;
3199 }
3200 else
3201 {
3202 /* This symbol is local, or marked to become local.
3203 When relocation overflow check is disabled, we
3204 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3205 if (r_type == htab->pointer_r_type
3206 || (r_type == R_X86_64_32
3207 && htab->params->no_reloc_overflow_check))
3208 {
3209 relocate = TRUE;
3210 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3211 outrel.r_addend = relocation + rel->r_addend;
3212 }
3213 else if (r_type == R_X86_64_64
3214 && !ABI_64_P (output_bfd))
3215 {
3216 relocate = TRUE;
3217 outrel.r_info = htab->r_info (0,
3218 R_X86_64_RELATIVE64);
3219 outrel.r_addend = relocation + rel->r_addend;
3220 /* Check addend overflow. */
3221 if ((outrel.r_addend & 0x80000000)
3222 != (rel->r_addend & 0x80000000))
3223 {
3224 const char *name;
3225 int addend = rel->r_addend;
3226 if (h && h->root.root.string)
3227 name = h->root.root.string;
3228 else
3229 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3230 sym, NULL);
3231 _bfd_error_handler
3232 /* xgettext:c-format */
3233 (_("%pB: addend %s%#x in relocation %s against "
3234 "symbol `%s' at %#" PRIx64
3235 " in section `%pA' is out of range"),
3236 input_bfd, addend < 0 ? "-" : "", addend,
3237 howto->name, name, (uint64_t) rel->r_offset,
3238 input_section);
3239 bfd_set_error (bfd_error_bad_value);
3240 return FALSE;
3241 }
3242 }
3243 else
3244 {
3245 long sindx;
3246
3247 if (bfd_is_abs_section (sec))
3248 sindx = 0;
3249 else if (sec == NULL || sec->owner == NULL)
3250 {
3251 bfd_set_error (bfd_error_bad_value);
3252 return FALSE;
3253 }
3254 else
3255 {
3256 asection *osec;
3257
3258 /* We are turning this relocation into one
3259 against a section symbol. It would be
3260 proper to subtract the symbol's value,
3261 osec->vma, from the emitted reloc addend,
3262 but ld.so expects buggy relocs. */
3263 osec = sec->output_section;
3264 sindx = elf_section_data (osec)->dynindx;
3265 if (sindx == 0)
3266 {
3267 asection *oi = htab->elf.text_index_section;
3268 sindx = elf_section_data (oi)->dynindx;
3269 }
3270 BFD_ASSERT (sindx != 0);
3271 }
3272
3273 outrel.r_info = htab->r_info (sindx, r_type);
3274 outrel.r_addend = relocation + rel->r_addend;
3275 }
3276 }
3277
3278 sreloc = elf_section_data (input_section)->sreloc;
3279
3280 if (sreloc == NULL || sreloc->contents == NULL)
3281 {
3282 r = bfd_reloc_notsupported;
3283 goto check_relocation_error;
3284 }
3285
3286 elf_append_rela (output_bfd, sreloc, &outrel);
3287
3288 /* If this reloc is against an external symbol, we do
3289 not want to fiddle with the addend. Otherwise, we
3290 need to include the symbol value so that it becomes
3291 an addend for the dynamic reloc. */
3292 if (! relocate)
3293 continue;
3294 }
3295
3296 break;
3297
3298 case R_X86_64_TLSGD:
3299 case R_X86_64_GOTPC32_TLSDESC:
3300 case R_X86_64_TLSDESC_CALL:
3301 case R_X86_64_GOTTPOFF:
3302 tls_type = GOT_UNKNOWN;
3303 if (h == NULL && local_got_offsets)
3304 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3305 else if (h != NULL)
3306 tls_type = elf_x86_hash_entry (h)->tls_type;
3307
3308 r_type_tls = r_type;
3309 if (! elf_x86_64_tls_transition (info, input_bfd,
3310 input_section, contents,
3311 symtab_hdr, sym_hashes,
3312 &r_type_tls, tls_type, rel,
3313 relend, h, r_symndx, TRUE))
3314 return FALSE;
3315
3316 if (r_type_tls == R_X86_64_TPOFF32)
3317 {
3318 bfd_vma roff = rel->r_offset;
3319
3320 BFD_ASSERT (! unresolved_reloc);
3321
3322 if (r_type == R_X86_64_TLSGD)
3323 {
3324 /* GD->LE transition. For 64bit, change
3325 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3326 .word 0x6666; rex64; call __tls_get_addr@PLT
3327 or
3328 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3329 .byte 0x66; rex64
3330 call *__tls_get_addr@GOTPCREL(%rip)
3331 which may be converted to
3332 addr32 call __tls_get_addr
3333 into:
3334 movq %fs:0, %rax
3335 leaq foo@tpoff(%rax), %rax
3336 For 32bit, change
3337 leaq foo@tlsgd(%rip), %rdi
3338 .word 0x6666; rex64; call __tls_get_addr@PLT
3339 or
3340 leaq foo@tlsgd(%rip), %rdi
3341 .byte 0x66; rex64
3342 call *__tls_get_addr@GOTPCREL(%rip)
3343 which may be converted to
3344 addr32 call __tls_get_addr
3345 into:
3346 movl %fs:0, %eax
3347 leaq foo@tpoff(%rax), %rax
3348 For largepic, change:
3349 leaq foo@tlsgd(%rip), %rdi
3350 movabsq $__tls_get_addr@pltoff, %rax
3351 addq %r15, %rax
3352 call *%rax
3353 into:
3354 movq %fs:0, %rax
3355 leaq foo@tpoff(%rax), %rax
3356 nopw 0x0(%rax,%rax,1) */
3357 int largepic = 0;
3358 if (ABI_64_P (output_bfd))
3359 {
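/* The instruction after the leaq starts at roff + 4; the largepic
   sequence continues with "movabsq $__tls_get_addr@pltoff, %rax"
   (0x48 0xb8), so 0xb8 at roff + 5 distinguishes it from the
   normal call forms. */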
3360 if (contents[roff + 5] == 0xb8)
3361 {
3362 if (roff < 3
3363 || (roff - 3 + 22) > input_section->size)
3364 {
3365 corrupt_input:
3366 info->callbacks->einfo
3367 (_("%F%P: corrupt input: %pB\n"),
3368 input_bfd);
3369 return FALSE;
3370 }
3371 memcpy (contents + roff - 3,
3372 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3373 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3374 largepic = 1;
3375 }
3376 else
3377 {
3378 if (roff < 4
3379 || (roff - 4 + 16) > input_section->size)
3380 goto corrupt_input;
3381 memcpy (contents + roff - 4,
3382 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3383 16);
3384 }
3385 }
3386 else
3387 {
3388 if (roff < 3
3389 || (roff - 3 + 15) > input_section->size)
3390 goto corrupt_input;
3391 memcpy (contents + roff - 3,
3392 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3393 15);
3394 }
3395 bfd_put_32 (output_bfd,
3396 elf_x86_64_tpoff (info, relocation),
3397 contents + roff + 8 + largepic);
3398 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3399 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3400 rel++;
3401 wrel++;
3402 continue;
3403 }
3404 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3405 {
3406 /* GDesc -> LE transition.
3407 It's originally something like:
3408 leaq x@tlsdesc(%rip), %rax
3409
3410 Change it to:
3411 movl $x@tpoff, %rax. */
3412
3413 unsigned int val, type;
3414
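/* The leaq encodes its destination in ModRM.reg, extended by
   REX.R; the immediate mov encodes it in ModRM.r/m, extended by
   REX.B, so both fields are shifted down below. */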
3415 if (roff < 3)
3416 goto corrupt_input;
3417 type = bfd_get_8 (input_bfd, contents + roff - 3);
3418 val = bfd_get_8 (input_bfd, contents + roff - 1);
3419 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3420 contents + roff - 3);
3421 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3422 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3423 contents + roff - 1);
3424 bfd_put_32 (output_bfd,
3425 elf_x86_64_tpoff (info, relocation),
3426 contents + roff);
3427 continue;
3428 }
3429 else if (r_type == R_X86_64_TLSDESC_CALL)
3430 {
3431 /* GDesc -> LE transition.
3432 It's originally:
3433 call *(%rax)
3434 Turn it into:
3435 xchg %ax,%ax. */
3436 bfd_put_8 (output_bfd, 0x66, contents + roff);
3437 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3438 continue;
3439 }
3440 else if (r_type == R_X86_64_GOTTPOFF)
3441 {
3442 /* IE->LE transition:
3443 For 64bit, originally it can be one of:
3444 movq foo@gottpoff(%rip), %reg
3445 addq foo@gottpoff(%rip), %reg
3446 We change it into:
3447 movq $foo, %reg
3448 leaq foo(%reg), %reg
3449 addq $foo, %reg.
3450 For 32bit, originally it can be one of:
3451 movq foo@gottpoff(%rip), %reg
3452 addl foo@gottpoff(%rip), %reg
3453 We change it into:
3454 movq $foo, %reg
3455 leal foo(%reg), %reg
3456 addl $foo, %reg. */
3457
3458 unsigned int val, type, reg;
3459
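/* VAL is the byte before the opcode (the REX prefix when one is
   present), TYPE the opcode byte, and REG the ModRM reg field,
   i.e. the destination register of the original instruction. */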
3460 if (roff >= 3)
3461 val = bfd_get_8 (input_bfd, contents + roff - 3);
3462 else
3463 {
3464 if (roff < 2)
3465 goto corrupt_input;
3466 val = 0;
3467 }
3468 type = bfd_get_8 (input_bfd, contents + roff - 2);
3469 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3470 reg >>= 3;
3471 if (type == 0x8b)
3472 {
3473 /* movq */
3474 if (val == 0x4c)
3475 {
3476 if (roff < 3)
3477 goto corrupt_input;
3478 bfd_put_8 (output_bfd, 0x49,
3479 contents + roff - 3);
3480 }
3481 else if (!ABI_64_P (output_bfd) && val == 0x44)
3482 {
3483 if (roff < 3)
3484 goto corrupt_input;
3485 bfd_put_8 (output_bfd, 0x41,
3486 contents + roff - 3);
3487 }
3488 bfd_put_8 (output_bfd, 0xc7,
3489 contents + roff - 2);
3490 bfd_put_8 (output_bfd, 0xc0 | reg,
3491 contents + roff - 1);
3492 }
3493 else if (reg == 4)
3494 {
3495 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3496 is special */
3497 if (val == 0x4c)
3498 {
3499 if (roff < 3)
3500 goto corrupt_input;
3501 bfd_put_8 (output_bfd, 0x49,
3502 contents + roff - 3);
3503 }
3504 else if (!ABI_64_P (output_bfd) && val == 0x44)
3505 {
3506 if (roff < 3)
3507 goto corrupt_input;
3508 bfd_put_8 (output_bfd, 0x41,
3509 contents + roff - 3);
3510 }
3511 bfd_put_8 (output_bfd, 0x81,
3512 contents + roff - 2);
3513 bfd_put_8 (output_bfd, 0xc0 | reg,
3514 contents + roff - 1);
3515 }
3516 else
3517 {
3518 /* addq/addl -> leaq/leal */
3519 if (val == 0x4c)
3520 {
3521 if (roff < 3)
3522 goto corrupt_input;
3523 bfd_put_8 (output_bfd, 0x4d,
3524 contents + roff - 3);
3525 }
3526 else if (!ABI_64_P (output_bfd) && val == 0x44)
3527 {
3528 if (roff < 3)
3529 goto corrupt_input;
3530 bfd_put_8 (output_bfd, 0x45,
3531 contents + roff - 3);
3532 }
3533 bfd_put_8 (output_bfd, 0x8d,
3534 contents + roff - 2);
3535 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3536 contents + roff - 1);
3537 }
3538 bfd_put_32 (output_bfd,
3539 elf_x86_64_tpoff (info, relocation),
3540 contents + roff);
3541 continue;
3542 }
3543 else
3544 BFD_ASSERT (FALSE);
3545 }
3546
3547 if (htab->elf.sgot == NULL)
3548 abort ();
3549
3550 if (h != NULL)
3551 {
3552 off = h->got.offset;
3553 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3554 }
3555 else
3556 {
3557 if (local_got_offsets == NULL)
3558 abort ();
3559
3560 off = local_got_offsets[r_symndx];
3561 offplt = local_tlsdesc_gotents[r_symndx];
3562 }
3563
3564 if ((off & 1) != 0)
3565 off &= ~1;
3566 else
3567 {
3568 Elf_Internal_Rela outrel;
3569 int dr_type, indx;
3570 asection *sreloc;
3571
3572 if (htab->elf.srelgot == NULL)
3573 abort ();
3574
3575 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3576
3577 if (GOT_TLS_GDESC_P (tls_type))
3578 {
3579 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3580 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3581 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3582 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3583 + htab->elf.sgotplt->output_offset
3584 + offplt
3585 + htab->sgotplt_jump_table_size);
3586 sreloc = htab->elf.srelplt;
3587 if (indx == 0)
3588 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3589 else
3590 outrel.r_addend = 0;
3591 elf_append_rela (output_bfd, sreloc, &outrel);
3592 }
3593
3594 sreloc = htab->elf.srelgot;
3595
3596 outrel.r_offset = (htab->elf.sgot->output_section->vma
3597 + htab->elf.sgot->output_offset + off);
3598
3599 if (GOT_TLS_GD_P (tls_type))
3600 dr_type = R_X86_64_DTPMOD64;
3601 else if (GOT_TLS_GDESC_P (tls_type))
3602 goto dr_done;
3603 else
3604 dr_type = R_X86_64_TPOFF64;
3605
3606 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3607 outrel.r_addend = 0;
3608 if ((dr_type == R_X86_64_TPOFF64
3609 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3610 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3611 outrel.r_info = htab->r_info (indx, dr_type);
3612
3613 elf_append_rela (output_bfd, sreloc, &outrel);
3614
3615 if (GOT_TLS_GD_P (tls_type))
3616 {
3617 if (indx == 0)
3618 {
3619 BFD_ASSERT (! unresolved_reloc);
3620 bfd_put_64 (output_bfd,
3621 relocation - _bfd_x86_elf_dtpoff_base (info),
3622 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3623 }
3624 else
3625 {
3626 bfd_put_64 (output_bfd, 0,
3627 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3628 outrel.r_info = htab->r_info (indx,
3629 R_X86_64_DTPOFF64);
3630 outrel.r_offset += GOT_ENTRY_SIZE;
3631 elf_append_rela (output_bfd, sreloc,
3632 &outrel);
3633 }
3634 }
3635
3636 dr_done:
3637 if (h != NULL)
3638 h->got.offset |= 1;
3639 else
3640 local_got_offsets[r_symndx] |= 1;
3641 }
3642
3643 if (off >= (bfd_vma) -2
3644 && ! GOT_TLS_GDESC_P (tls_type))
3645 abort ();
3646 if (r_type_tls == r_type)
3647 {
3648 if (r_type == R_X86_64_GOTPC32_TLSDESC
3649 || r_type == R_X86_64_TLSDESC_CALL)
3650 relocation = htab->elf.sgotplt->output_section->vma
3651 + htab->elf.sgotplt->output_offset
3652 + offplt + htab->sgotplt_jump_table_size;
3653 else
3654 relocation = htab->elf.sgot->output_section->vma
3655 + htab->elf.sgot->output_offset + off;
3656 unresolved_reloc = FALSE;
3657 }
3658 else
3659 {
3660 bfd_vma roff = rel->r_offset;
3661
3662 if (r_type == R_X86_64_TLSGD)
3663 {
3664 /* GD->IE transition. For 64bit, change
3665 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3666 .word 0x6666; rex64; call __tls_get_addr@PLT
3667 or
3668 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3669 .byte 0x66; rex64
3670 call *__tls_get_addr@GOTPCREL(%rip)
3671 which may be converted to
3672 addr32 call __tls_get_addr
3673 into:
3674 movq %fs:0, %rax
3675 addq foo@gottpoff(%rip), %rax
3676 For 32bit, change
3677 leaq foo@tlsgd(%rip), %rdi
3678 .word 0x6666; rex64; call __tls_get_addr@PLT
3679 or
3680 leaq foo@tlsgd(%rip), %rdi
3681 .byte 0x66; rex64;
3682 call *__tls_get_addr@GOTPCREL(%rip)
3683 which may be converted to
3684 addr32 call __tls_get_addr
3685 into:
3686 movl %fs:0, %eax
3687 addq foo@gottpoff(%rip), %rax
3688 For largepic, change:
3689 leaq foo@tlsgd(%rip), %rdi
3690 movabsq $__tls_get_addr@pltoff, %rax
3691 addq %r15, %rax
3692 call *%rax
3693 into:
3694 movq %fs:0, %rax
3695 			 addq foo@gottpoff(%rip), %rax
3696 nopw 0x0(%rax,%rax,1) */
3697 int largepic = 0;
3698 if (ABI_64_P (output_bfd))
3699 {
3700 if (contents[roff + 5] == 0xb8)
3701 {
3702 if (roff < 3
3703 || (roff - 3 + 22) > input_section->size)
3704 goto corrupt_input;
3705 memcpy (contents + roff - 3,
3706 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3707 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3708 largepic = 1;
3709 }
3710 else
3711 {
3712 if (roff < 4
3713 || (roff - 4 + 16) > input_section->size)
3714 goto corrupt_input;
3715 memcpy (contents + roff - 4,
3716 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3717 16);
3718 }
3719 }
3720 else
3721 {
3722 if (roff < 3
3723 || (roff - 3 + 15) > input_section->size)
3724 goto corrupt_input;
3725 memcpy (contents + roff - 3,
3726 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3727 15);
3728 }
3729
3730 relocation = (htab->elf.sgot->output_section->vma
3731 + htab->elf.sgot->output_offset + off
3732 - roff
3733 - largepic
3734 - input_section->output_section->vma
3735 - input_section->output_offset
3736 - 12);
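		      /* A sketch of the arithmetic above: the rewritten
			 sequence ends with "addq foo@gottpoff(%rip), %rax",
			 whose 4-byte displacement starts at ROFF + 8
			 (+ largepic) and is RIP-relative, i.e. measured from
			 the end of that instruction at ROFF + 12 (+ largepic).
			 The value stored below is therefore the GOT entry
			 address minus that end address.  */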
3737 bfd_put_32 (output_bfd, relocation,
3738 contents + roff + 8 + largepic);
3739 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3740 rel++;
3741 wrel++;
3742 continue;
3743 }
3744 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3745 {
3746 /* GDesc -> IE transition.
3747 It's originally something like:
3748 leaq x@tlsdesc(%rip), %rax
3749
3750 Change it to:
3751 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3752
3753 /* Now modify the instruction as appropriate. To
3754 turn a leaq into a movq in the form we use it, it
3755 suffices to change the second byte from 0x8d to
3756 0x8b. */
3757 if (roff < 2)
3758 goto corrupt_input;
3759 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3760
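	      /* The 4-byte displacement written below is RIP-relative: it
		 is the GOT entry address minus the address of the byte just
		 past the displacement, i.e. rel->r_offset + 4 within the
		 output section.  */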
3761 bfd_put_32 (output_bfd,
3762 htab->elf.sgot->output_section->vma
3763 + htab->elf.sgot->output_offset + off
3764 - rel->r_offset
3765 - input_section->output_section->vma
3766 - input_section->output_offset
3767 - 4,
3768 contents + roff);
3769 continue;
3770 }
3771 else if (r_type == R_X86_64_TLSDESC_CALL)
3772 {
3773 /* GDesc -> IE transition.
3774 It's originally:
3775 call *(%rax)
3776
3777 Change it to:
3778 xchg %ax, %ax. */
3779
3780 bfd_put_8 (output_bfd, 0x66, contents + roff);
3781 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3782 continue;
3783 }
3784 else
3785 BFD_ASSERT (FALSE);
3786 }
3787 break;
3788
3789 case R_X86_64_TLSLD:
3790 if (! elf_x86_64_tls_transition (info, input_bfd,
3791 input_section, contents,
3792 symtab_hdr, sym_hashes,
3793 &r_type, GOT_UNKNOWN, rel,
3794 relend, h, r_symndx, TRUE))
3795 return FALSE;
3796
3797 if (r_type != R_X86_64_TLSLD)
3798 {
3799 /* LD->LE transition:
3800 leaq foo@tlsld(%rip), %rdi
3801 call __tls_get_addr@PLT
3802 For 64bit, we change it into:
3803 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3804 For 32bit, we change it into:
3805 nopl 0x0(%rax); movl %fs:0, %eax
3806 Or
3807 leaq foo@tlsld(%rip), %rdi;
3808 call *__tls_get_addr@GOTPCREL(%rip)
3809 which may be converted to
3810 addr32 call __tls_get_addr
3811 For 64bit, we change it into:
3812 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3813 For 32bit, we change it into:
3814 nopw 0x0(%rax); movl %fs:0, %eax
3815 For largepic, change:
3816 		     leaq foo@tlsld(%rip), %rdi
3817 movabsq $__tls_get_addr@pltoff, %rax
3818 addq %rbx, %rax
3819 call *%rax
3820 into
3821 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3822 		     movq %fs:0, %rax */
3823
3824 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3825 if (ABI_64_P (output_bfd))
3826 {
3827 if ((rel->r_offset + 5) >= input_section->size)
3828 goto corrupt_input;
3829 if (contents[rel->r_offset + 5] == 0xb8)
3830 {
3831 if (rel->r_offset < 3
3832 || (rel->r_offset - 3 + 22) > input_section->size)
3833 goto corrupt_input;
3834 memcpy (contents + rel->r_offset - 3,
3835 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3836 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3837 }
3838 else if (contents[rel->r_offset + 4] == 0xff
3839 || contents[rel->r_offset + 4] == 0x67)
3840 {
3841 if (rel->r_offset < 3
3842 || (rel->r_offset - 3 + 13) > input_section->size)
3843 goto corrupt_input;
3844 memcpy (contents + rel->r_offset - 3,
3845 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3846 13);
3847
3848 }
3849 else
3850 {
3851 if (rel->r_offset < 3
3852 || (rel->r_offset - 3 + 12) > input_section->size)
3853 goto corrupt_input;
3854 memcpy (contents + rel->r_offset - 3,
3855 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3856 }
3857 }
3858 else
3859 {
3860 if ((rel->r_offset + 4) >= input_section->size)
3861 goto corrupt_input;
3862 if (contents[rel->r_offset + 4] == 0xff)
3863 {
3864 if (rel->r_offset < 3
3865 || (rel->r_offset - 3 + 13) > input_section->size)
3866 goto corrupt_input;
3867 memcpy (contents + rel->r_offset - 3,
3868 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3869 13);
3870 }
3871 else
3872 {
3873 if (rel->r_offset < 3
3874 || (rel->r_offset - 3 + 12) > input_section->size)
3875 goto corrupt_input;
3876 memcpy (contents + rel->r_offset - 3,
3877 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3878 }
3879 }
3880 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3881 and R_X86_64_PLTOFF64. */
3882 rel++;
3883 wrel++;
3884 continue;
3885 }
3886
3887 if (htab->elf.sgot == NULL)
3888 abort ();
3889
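      /* The low bit of tls_ld_or_ldm_got.offset is used as a flag: it is
	 set once the single GOT entry shared by all TLS LD accesses, and
	 its R_X86_64_DTPMOD64 relocation, have been emitted, so the
	 initialization below runs only once per output file.  */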
3890 off = htab->tls_ld_or_ldm_got.offset;
3891 if (off & 1)
3892 off &= ~1;
3893 else
3894 {
3895 Elf_Internal_Rela outrel;
3896
3897 if (htab->elf.srelgot == NULL)
3898 abort ();
3899
3900 outrel.r_offset = (htab->elf.sgot->output_section->vma
3901 + htab->elf.sgot->output_offset + off);
3902
3903 bfd_put_64 (output_bfd, 0,
3904 htab->elf.sgot->contents + off);
3905 bfd_put_64 (output_bfd, 0,
3906 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3907 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3908 outrel.r_addend = 0;
3909 elf_append_rela (output_bfd, htab->elf.srelgot,
3910 &outrel);
3911 htab->tls_ld_or_ldm_got.offset |= 1;
3912 }
3913 relocation = htab->elf.sgot->output_section->vma
3914 + htab->elf.sgot->output_offset + off;
3915 unresolved_reloc = FALSE;
3916 break;
3917
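	/* In an executable's code sections the LD->LE transition above has
	   replaced the __tls_get_addr call with a direct %fs-relative load,
	   so a DTPOFF32 there must resolve to an offset from the thread
	   pointer; elsewhere (shared objects, non-code sections such as
	   debug info) it stays an offset from the dtv base.  */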
3918 case R_X86_64_DTPOFF32:
3919 if (!bfd_link_executable (info)
3920 || (input_section->flags & SEC_CODE) == 0)
3921 relocation -= _bfd_x86_elf_dtpoff_base (info);
3922 else
3923 relocation = elf_x86_64_tpoff (info, relocation);
3924 break;
3925
3926 case R_X86_64_TPOFF32:
3927 case R_X86_64_TPOFF64:
3928 BFD_ASSERT (bfd_link_executable (info));
3929 relocation = elf_x86_64_tpoff (info, relocation);
3930 break;
3931
3932 case R_X86_64_DTPOFF64:
3933 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3934 relocation -= _bfd_x86_elf_dtpoff_base (info);
3935 break;
3936
3937 default:
3938 break;
3939 }
3940
3941 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3942 because such sections are not SEC_ALLOC and thus ld.so will
3943 not process them. */
3944 if (unresolved_reloc
3945 && !((input_section->flags & SEC_DEBUGGING) != 0
3946 && h->def_dynamic)
3947 && _bfd_elf_section_offset (output_bfd, info, input_section,
3948 rel->r_offset) != (bfd_vma) -1)
3949 {
3950 switch (r_type)
3951 {
3952 case R_X86_64_32S:
3953 sec = h->root.u.def.section;
3954 if ((info->nocopyreloc
3955 || (eh->def_protected
3956 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3957 && !(h->root.u.def.section->flags & SEC_CODE))
3958 return elf_x86_64_need_pic (info, input_bfd, input_section,
3959 h, NULL, NULL, howto);
3960 /* Fall through. */
3961
3962 default:
3963 _bfd_error_handler
3964 /* xgettext:c-format */
3965 (_("%pB(%pA+%#" PRIx64 "): "
3966 "unresolvable %s relocation against symbol `%s'"),
3967 input_bfd,
3968 input_section,
3969 (uint64_t) rel->r_offset,
3970 howto->name,
3971 h->root.root.string);
3972 return FALSE;
3973 }
3974 }
3975
3976 do_relocation:
3977 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3978 contents, rel->r_offset,
3979 relocation, rel->r_addend);
3980
3981 check_relocation_error:
3982 if (r != bfd_reloc_ok)
3983 {
3984 const char *name;
3985
3986 if (h != NULL)
3987 name = h->root.root.string;
3988 else
3989 {
3990 name = bfd_elf_string_from_elf_section (input_bfd,
3991 symtab_hdr->sh_link,
3992 sym->st_name);
3993 if (name == NULL)
3994 return FALSE;
3995 if (*name == '\0')
3996 name = bfd_section_name (input_bfd, sec);
3997 }
3998
3999 if (r == bfd_reloc_overflow)
4000 {
4001 if (converted_reloc)
4002 {
4003 info->callbacks->einfo
4004 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4005 return FALSE;
4006 }
4007 (*info->callbacks->reloc_overflow)
4008 (info, (h ? &h->root : NULL), name, howto->name,
4009 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4010 }
4011 else
4012 {
4013 _bfd_error_handler
4014 /* xgettext:c-format */
4015 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4016 input_bfd, input_section,
4017 (uint64_t) rel->r_offset, name, (int) r);
4018 return FALSE;
4019 }
4020 }
4021
4022 if (wrel != rel)
4023 *wrel = *rel;
4024 }
4025
4026 if (wrel != rel)
4027 {
4028 Elf_Internal_Shdr *rel_hdr;
4029 size_t deleted = rel - wrel;
4030
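      /* Relocations that were skipped above (applied in place and not
	 copied through *wrel) are dropped here by shrinking the reloc
	 section headers of both the output section and this input section
	 by the number of deleted entries.  */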
4031 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4032 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4033 if (rel_hdr->sh_size == 0)
4034 {
4035 /* It is too late to remove an empty reloc section. Leave
4036 one NONE reloc.
4037 ??? What is wrong with an empty section??? */
4038 rel_hdr->sh_size = rel_hdr->sh_entsize;
4039 deleted -= 1;
4040 }
4041 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4042 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4043 input_section->reloc_count -= deleted;
4044 }
4045
4046 return TRUE;
4047 }
4048
4049 /* Finish up dynamic symbol handling. We set the contents of various
4050 dynamic sections here. */
4051
4052 static bfd_boolean
4053 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4054 struct bfd_link_info *info,
4055 struct elf_link_hash_entry *h,
4056 Elf_Internal_Sym *sym)
4057 {
4058 struct elf_x86_link_hash_table *htab;
4059 bfd_boolean use_plt_second;
4060 struct elf_x86_link_hash_entry *eh;
4061 bfd_boolean local_undefweak;
4062
4063 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4064 if (htab == NULL)
4065 return FALSE;
4066
4067   /* Use the second PLT section only if there is a .plt section.  */
4068 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4069
4070 eh = (struct elf_x86_link_hash_entry *) h;
4071 if (eh->no_finish_dynamic_symbol)
4072 abort ();
4073
4074 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4075      resolved undefined weak symbols in an executable so that their
4076 references have value 0 at run-time. */
4077 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4078
4079 if (h->plt.offset != (bfd_vma) -1)
4080 {
4081 bfd_vma plt_index;
4082 bfd_vma got_offset, plt_offset;
4083 Elf_Internal_Rela rela;
4084 bfd_byte *loc;
4085 asection *plt, *gotplt, *relplt, *resolved_plt;
4086 const struct elf_backend_data *bed;
4087 bfd_vma plt_got_pcrel_offset;
4088
4089 /* When building a static executable, use .iplt, .igot.plt and
4090 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4091 if (htab->elf.splt != NULL)
4092 {
4093 plt = htab->elf.splt;
4094 gotplt = htab->elf.sgotplt;
4095 relplt = htab->elf.srelplt;
4096 }
4097 else
4098 {
4099 plt = htab->elf.iplt;
4100 gotplt = htab->elf.igotplt;
4101 relplt = htab->elf.irelplt;
4102 }
4103
4104 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4105
4106 /* Get the index in the procedure linkage table which
4107 corresponds to this symbol. This is the index of this symbol
4108 in all the symbols for which we are making plt entries. The
4109 first entry in the procedure linkage table is reserved.
4110
4111 Get the offset into the .got table of the entry that
4112 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4113 bytes. The first three are reserved for the dynamic linker.
4114
4115 For static executables, we don't reserve anything. */
4116
4117 if (plt == htab->elf.splt)
4118 {
4119 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4120 - htab->plt.has_plt0);
4121 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4122 }
4123 else
4124 {
4125 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4126 got_offset = got_offset * GOT_ENTRY_SIZE;
4127 }
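	  /* Worked example for the lazy .plt branch above, assuming the
	     usual 16-byte PLT entries with PLT0 present: a symbol whose
	     entry starts at h->plt.offset == 32 is PLT slot 32 / 16 - 1 = 1,
	     and its .got.plt offset is (1 + 3) * GOT_ENTRY_SIZE, skipping
	     the three slots reserved for the dynamic linker.  */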
4128
4129 /* Fill in the entry in the procedure linkage table. */
4130 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4131 htab->plt.plt_entry_size);
4132 if (use_plt_second)
4133 {
4134 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4135 htab->non_lazy_plt->plt_entry,
4136 htab->non_lazy_plt->plt_entry_size);
4137
4138 resolved_plt = htab->plt_second;
4139 plt_offset = eh->plt_second.offset;
4140 }
4141 else
4142 {
4143 resolved_plt = plt;
4144 plt_offset = h->plt.offset;
4145 }
4146
4147       /* Now fill in the fields of the PLT entry that need relocating.  */
4148
4149       /* Store the PC-relative offset of the GOT entry in the instruction
4150 	 that references it; the displacement is relative to the end of
4151 	 that instruction, hence its size is subtracted.  */
4151 plt_got_pcrel_offset = (gotplt->output_section->vma
4152 + gotplt->output_offset
4153 + got_offset
4154 - resolved_plt->output_section->vma
4155 - resolved_plt->output_offset
4156 - plt_offset
4157 - htab->plt.plt_got_insn_size);
4158
4159 /* Check PC-relative offset overflow in PLT entry. */
4160 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4161 /* xgettext:c-format */
4162 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4163 output_bfd, h->root.root.string);
4164
4165 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4166 (resolved_plt->contents + plt_offset
4167 + htab->plt.plt_got_offset));
4168
4169       /* Fill in the entry in the global offset table; initially this
4170 	 points to the second part of the PLT entry.  Leave the entry
4171 	 as zero for an undefined weak symbol in PIE, since no PLT
4172 	 relocation is emitted against an undefined weak symbol in PIE.  */
4173 if (!local_undefweak)
4174 {
4175 if (htab->plt.has_plt0)
4176 bfd_put_64 (output_bfd, (plt->output_section->vma
4177 + plt->output_offset
4178 + h->plt.offset
4179 + htab->lazy_plt->plt_lazy_offset),
4180 gotplt->contents + got_offset);
4181
4182 /* Fill in the entry in the .rela.plt section. */
4183 rela.r_offset = (gotplt->output_section->vma
4184 + gotplt->output_offset
4185 + got_offset);
4186 if (PLT_LOCAL_IFUNC_P (info, h))
4187 {
4188 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4189 h->root.root.string,
4190 h->root.u.def.section->owner);
4191
4192 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4193 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4194 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4195 rela.r_addend = (h->root.u.def.value
4196 + h->root.u.def.section->output_section->vma
4197 + h->root.u.def.section->output_offset);
4198 /* R_X86_64_IRELATIVE comes last. */
4199 plt_index = htab->next_irelative_index--;
4200 }
4201 else
4202 {
4203 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4204 rela.r_addend = 0;
4205 plt_index = htab->next_jump_slot_index++;
4206 }
4207
4208 	  /* Don't fill the second and third slots in the PLT entry for
4209 	     static executables or when there is no PLT0.  */
4210 if (plt == htab->elf.splt && htab->plt.has_plt0)
4211 {
4212 bfd_vma plt0_offset
4213 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4214
4215 /* Put relocation index. */
4216 bfd_put_32 (output_bfd, plt_index,
4217 (plt->contents + h->plt.offset
4218 + htab->lazy_plt->plt_reloc_offset));
4219
4220 	      /* Put in the offset for the jmp to .PLT0 and check for
4221 		 overflow.  We don't check the relocation index for overflow
4222 		 since the branch displacement will overflow first.  */
4223 if (plt0_offset > 0x80000000)
4224 /* xgettext:c-format */
4225 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4226 output_bfd, h->root.root.string);
4227 bfd_put_32 (output_bfd, - plt0_offset,
4228 (plt->contents + h->plt.offset
4229 + htab->lazy_plt->plt_plt_offset));
4230 }
4231
4232 bed = get_elf_backend_data (output_bfd);
4233 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4234 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4235 }
4236 }
4237 else if (eh->plt_got.offset != (bfd_vma) -1)
4238 {
4239 bfd_vma got_offset, plt_offset;
4240 asection *plt, *got;
4241 bfd_boolean got_after_plt;
4242 int32_t got_pcrel_offset;
4243
4244 /* Set the entry in the GOT procedure linkage table. */
4245 plt = htab->plt_got;
4246 got = htab->elf.sgot;
4247 got_offset = h->got.offset;
4248
4249 if (got_offset == (bfd_vma) -1
4250 || (h->type == STT_GNU_IFUNC && h->def_regular)
4251 || plt == NULL
4252 || got == NULL)
4253 abort ();
4254
4255 /* Use the non-lazy PLT entry template for the GOT PLT since they
4256 	 are identical.  */
4257 /* Fill in the entry in the GOT procedure linkage table. */
4258 plt_offset = eh->plt_got.offset;
4259 memcpy (plt->contents + plt_offset,
4260 htab->non_lazy_plt->plt_entry,
4261 htab->non_lazy_plt->plt_entry_size);
4262
4263       /* Store the PC-relative offset of the GOT entry in the instruction
4264 	 that references it; the displacement is relative to the end of
	 that instruction, hence its size is subtracted.  */
4265 got_pcrel_offset = (got->output_section->vma
4266 + got->output_offset
4267 + got_offset
4268 - plt->output_section->vma
4269 - plt->output_offset
4270 - plt_offset
4271 - htab->non_lazy_plt->plt_got_insn_size);
4272
4273 /* Check PC-relative offset overflow in GOT PLT entry. */
4274 got_after_plt = got->output_section->vma > plt->output_section->vma;
4275 if ((got_after_plt && got_pcrel_offset < 0)
4276 || (!got_after_plt && got_pcrel_offset > 0))
4277 /* xgettext:c-format */
4278 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4279 output_bfd, h->root.root.string);
4280
4281 bfd_put_32 (output_bfd, got_pcrel_offset,
4282 (plt->contents + plt_offset
4283 + htab->non_lazy_plt->plt_got_offset));
4284 }
4285
4286 if (!local_undefweak
4287 && !h->def_regular
4288 && (h->plt.offset != (bfd_vma) -1
4289 || eh->plt_got.offset != (bfd_vma) -1))
4290 {
4291 /* Mark the symbol as undefined, rather than as defined in
4292 the .plt section. Leave the value if there were any
4293 relocations where pointer equality matters (this is a clue
4294 for the dynamic linker, to make function pointer
4295 comparisons work between an application and shared
4296 library), otherwise set it to zero. If a function is only
4297 called from a binary, there is no need to slow down
4298 shared libraries because of that. */
4299 sym->st_shndx = SHN_UNDEF;
4300 if (!h->pointer_equality_needed)
4301 sym->st_value = 0;
4302 }
4303
4304 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4305
4306   /* Don't generate a dynamic GOT relocation against an undefined
4307      weak symbol in an executable.  */
4308 if (h->got.offset != (bfd_vma) -1
4309 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4310 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4311 && !local_undefweak)
4312 {
4313 Elf_Internal_Rela rela;
4314 asection *relgot = htab->elf.srelgot;
4315
4316 /* This symbol has an entry in the global offset table. Set it
4317 up. */
4318 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4319 abort ();
4320
4321 rela.r_offset = (htab->elf.sgot->output_section->vma
4322 + htab->elf.sgot->output_offset
4323 + (h->got.offset &~ (bfd_vma) 1));
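      /* The low bit of h->got.offset marks a GOT entry that
	 relocate_section has already initialized; it is masked off here to
	 get the real offset, and the assertions below check that the flag
	 is consistent with the kind of dynamic relocation being emitted.  */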
4324
4325 /* If this is a static link, or it is a -Bsymbolic link and the
4326 symbol is defined locally or was forced to be local because
4327 of a version file, we just want to emit a RELATIVE reloc.
4328 The entry in the global offset table will already have been
4329 initialized in the relocate_section function. */
4330 if (h->def_regular
4331 && h->type == STT_GNU_IFUNC)
4332 {
4333 if (h->plt.offset == (bfd_vma) -1)
4334 {
4335 /* STT_GNU_IFUNC is referenced without PLT. */
4336 if (htab->elf.splt == NULL)
4337 {
4338 		  /* Use the .rel[a].iplt section to store .got relocations
4339 		     in a static executable.  */
4340 relgot = htab->elf.irelplt;
4341 }
4342 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4343 {
4344 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4345 h->root.root.string,
4346 h->root.u.def.section->owner);
4347
4348 rela.r_info = htab->r_info (0,
4349 R_X86_64_IRELATIVE);
4350 rela.r_addend = (h->root.u.def.value
4351 + h->root.u.def.section->output_section->vma
4352 + h->root.u.def.section->output_offset);
4353 }
4354 else
4355 goto do_glob_dat;
4356 }
4357 else if (bfd_link_pic (info))
4358 {
4359 /* Generate R_X86_64_GLOB_DAT. */
4360 goto do_glob_dat;
4361 }
4362 else
4363 {
4364 asection *plt;
4365 bfd_vma plt_offset;
4366
4367 if (!h->pointer_equality_needed)
4368 abort ();
4369
4370 	      /* For a non-shared object, we can't use .got.plt, which
4371 		 contains the real function address, if we need pointer
4372 		 equality.  We load the GOT entry with the PLT entry.  */
4373 if (htab->plt_second != NULL)
4374 {
4375 plt = htab->plt_second;
4376 plt_offset = eh->plt_second.offset;
4377 }
4378 else
4379 {
4380 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4381 plt_offset = h->plt.offset;
4382 }
4383 bfd_put_64 (output_bfd, (plt->output_section->vma
4384 + plt->output_offset
4385 + plt_offset),
4386 htab->elf.sgot->contents + h->got.offset);
4387 return TRUE;
4388 }
4389 }
4390 else if (bfd_link_pic (info)
4391 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4392 {
4393 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4394 return FALSE;
4395 BFD_ASSERT((h->got.offset & 1) != 0);
4396 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4397 rela.r_addend = (h->root.u.def.value
4398 + h->root.u.def.section->output_section->vma
4399 + h->root.u.def.section->output_offset);
4400 }
4401 else
4402 {
4403 BFD_ASSERT((h->got.offset & 1) == 0);
4404 do_glob_dat:
4405 bfd_put_64 (output_bfd, (bfd_vma) 0,
4406 htab->elf.sgot->contents + h->got.offset);
4407 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4408 rela.r_addend = 0;
4409 }
4410
4411 elf_append_rela (output_bfd, relgot, &rela);
4412 }
4413
4414 if (h->needs_copy)
4415 {
4416 Elf_Internal_Rela rela;
4417 asection *s;
4418
4419 /* This symbol needs a copy reloc. Set it up. */
4420 VERIFY_COPY_RELOC (h, htab)
4421
4422 rela.r_offset = (h->root.u.def.value
4423 + h->root.u.def.section->output_section->vma
4424 + h->root.u.def.section->output_offset);
4425 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4426 rela.r_addend = 0;
4427 if (h->root.u.def.section == htab->elf.sdynrelro)
4428 s = htab->elf.sreldynrelro;
4429 else
4430 s = htab->elf.srelbss;
4431 elf_append_rela (output_bfd, s, &rela);
4432 }
4433
4434 return TRUE;
4435 }
4436
4437 /* Finish up local dynamic symbol handling. We set the contents of
4438 various dynamic sections here. */
4439
4440 static bfd_boolean
4441 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4442 {
4443 struct elf_link_hash_entry *h
4444 = (struct elf_link_hash_entry *) *slot;
4445 struct bfd_link_info *info
4446 = (struct bfd_link_info *) inf;
4447
4448 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4449 info, h, NULL);
4450 }
4451
4452 /* Finish up undefined weak symbol handling in PIE.  Fill in its PLT
4453    entry here, since an undefined weak symbol may not be dynamic and
4454    elf_x86_64_finish_dynamic_symbol may not be called for it.  */
4455
4456 static bfd_boolean
4457 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4458 void *inf)
4459 {
4460 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4461 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4462
4463 if (h->root.type != bfd_link_hash_undefweak
4464 || h->dynindx != -1)
4465 return TRUE;
4466
4467 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4468 info, h, NULL);
4469 }
4470
4471 /* Used to decide how to sort relocs in an optimal manner for the
4472 dynamic linker, before writing them out. */
4473
4474 static enum elf_reloc_type_class
4475 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4476 const asection *rel_sec ATTRIBUTE_UNUSED,
4477 const Elf_Internal_Rela *rela)
4478 {
4479 bfd *abfd = info->output_bfd;
4480 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4481 struct elf_x86_link_hash_table *htab
4482 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4483
4484 if (htab->elf.dynsym != NULL
4485 && htab->elf.dynsym->contents != NULL)
4486 {
4487 /* Check relocation against STT_GNU_IFUNC symbol if there are
4488 dynamic symbols. */
4489 unsigned long r_symndx = htab->r_sym (rela->r_info);
4490 if (r_symndx != STN_UNDEF)
4491 {
4492 Elf_Internal_Sym sym;
4493 if (!bed->s->swap_symbol_in (abfd,
4494 (htab->elf.dynsym->contents
4495 + r_symndx * bed->s->sizeof_sym),
4496 0, &sym))
4497 abort ();
4498
4499 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4500 return reloc_class_ifunc;
4501 }
4502 }
4503
4504 switch ((int) ELF32_R_TYPE (rela->r_info))
4505 {
4506 case R_X86_64_IRELATIVE:
4507 return reloc_class_ifunc;
4508 case R_X86_64_RELATIVE:
4509 case R_X86_64_RELATIVE64:
4510 return reloc_class_relative;
4511 case R_X86_64_JUMP_SLOT:
4512 return reloc_class_plt;
4513 case R_X86_64_COPY:
4514 return reloc_class_copy;
4515 default:
4516 return reloc_class_normal;
4517 }
4518 }
4519
4520 /* Finish up the dynamic sections. */
4521
4522 static bfd_boolean
4523 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4524 struct bfd_link_info *info)
4525 {
4526 struct elf_x86_link_hash_table *htab;
4527
4528 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4529 if (htab == NULL)
4530 return FALSE;
4531
4532 if (! htab->elf.dynamic_sections_created)
4533 return TRUE;
4534
4535 if (htab->elf.splt && htab->elf.splt->size > 0)
4536 {
4537 elf_section_data (htab->elf.splt->output_section)
4538 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4539
4540 if (htab->plt.has_plt0)
4541 {
4542 /* Fill in the special first entry in the procedure linkage
4543 table. */
4544 memcpy (htab->elf.splt->contents,
4545 htab->lazy_plt->plt0_entry,
4546 htab->lazy_plt->plt0_entry_size);
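	  /* For the standard (non-NaCl) lazy PLT0 this template is roughly:
		pushq GOT+8(%rip)
		jmpq  *GOT+16(%rip)
	     followed by padding; the two bfd_put_32 calls below patch the
	     RIP-relative displacements of those two instructions.  */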
4547 	  /* Add offset for pushq GOT+8(%rip); since the instruction
4548 	     uses 6 bytes, subtract this value.  */
4549 bfd_put_32 (output_bfd,
4550 (htab->elf.sgotplt->output_section->vma
4551 + htab->elf.sgotplt->output_offset
4552 + 8
4553 - htab->elf.splt->output_section->vma
4554 - htab->elf.splt->output_offset
4555 - 6),
4556 (htab->elf.splt->contents
4557 + htab->lazy_plt->plt0_got1_offset));
4558 /* Add offset for the PC-relative instruction accessing
4559 GOT+16, subtracting the offset to the end of that
4560 instruction. */
4561 bfd_put_32 (output_bfd,
4562 (htab->elf.sgotplt->output_section->vma
4563 + htab->elf.sgotplt->output_offset
4564 + 16
4565 - htab->elf.splt->output_section->vma
4566 - htab->elf.splt->output_offset
4567 - htab->lazy_plt->plt0_got2_insn_end),
4568 (htab->elf.splt->contents
4569 + htab->lazy_plt->plt0_got2_offset));
4570 }
4571
4572 if (htab->tlsdesc_plt)
4573 {
4574 bfd_put_64 (output_bfd, (bfd_vma) 0,
4575 htab->elf.sgot->contents + htab->tlsdesc_got);
4576
4577 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4578 htab->lazy_plt->plt_tlsdesc_entry,
4579 htab->lazy_plt->plt_tlsdesc_entry_size);
4580
4581 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4582 bytes and the instruction uses 6 bytes, subtract these
4583 values. */
4584 bfd_put_32 (output_bfd,
4585 (htab->elf.sgotplt->output_section->vma
4586 + htab->elf.sgotplt->output_offset
4587 + 8
4588 - htab->elf.splt->output_section->vma
4589 - htab->elf.splt->output_offset
4590 - htab->tlsdesc_plt
4591 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4592 (htab->elf.splt->contents
4593 + htab->tlsdesc_plt
4594 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4595 /* Add offset for indirect branch via GOT+TDG, where TDG
4596 stands for htab->tlsdesc_got, subtracting the offset
4597 to the end of that instruction. */
4598 bfd_put_32 (output_bfd,
4599 (htab->elf.sgot->output_section->vma
4600 + htab->elf.sgot->output_offset
4601 + htab->tlsdesc_got
4602 - htab->elf.splt->output_section->vma
4603 - htab->elf.splt->output_offset
4604 - htab->tlsdesc_plt
4605 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4606 (htab->elf.splt->contents
4607 + htab->tlsdesc_plt
4608 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4609 }
4610 }
4611
4612 /* Fill PLT entries for undefined weak symbols in PIE. */
4613 if (bfd_link_pie (info))
4614 bfd_hash_traverse (&info->hash->table,
4615 elf_x86_64_pie_finish_undefweak_symbol,
4616 info);
4617
4618 return TRUE;
4619 }
4620
4621 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4622 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4623 It has to be done before elf_link_sort_relocs is called so that
4624 dynamic relocations are properly sorted. */
4625
4626 static bfd_boolean
4627 elf_x86_64_output_arch_local_syms
4628 (bfd *output_bfd ATTRIBUTE_UNUSED,
4629 struct bfd_link_info *info,
4630 void *flaginfo ATTRIBUTE_UNUSED,
4631 int (*func) (void *, const char *,
4632 Elf_Internal_Sym *,
4633 asection *,
4634 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4635 {
4636 struct elf_x86_link_hash_table *htab
4637 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4638 if (htab == NULL)
4639 return FALSE;
4640
4641 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4642 htab_traverse (htab->loc_hash_table,
4643 elf_x86_64_finish_local_dynamic_symbol,
4644 info);
4645
4646 return TRUE;
4647 }
4648
4649 /* Forward declaration. */
4650 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4651
4652 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4653 dynamic relocations. */
4654
4655 static long
4656 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4657 long symcount ATTRIBUTE_UNUSED,
4658 asymbol **syms ATTRIBUTE_UNUSED,
4659 long dynsymcount,
4660 asymbol **dynsyms,
4661 asymbol **ret)
4662 {
4663 long count, i, n;
4664 int j;
4665 bfd_byte *plt_contents;
4666 long relsize;
4667 const struct elf_x86_lazy_plt_layout *lazy_plt;
4668 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4669 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4670 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4671 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4672 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4673 asection *plt;
4674 enum elf_x86_plt_type plt_type;
4675 struct elf_x86_plt plts[] =
4676 {
4677 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4678 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4679 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4680 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4681 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4682 };
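  /* Each element above names one PLT-flavoured section to look for; the
     loop below loads the section contents, identifies which PLT layout
     they match, and records the entry size and GOT-offset location so
     that the generic x86 code can synthesize the "name@plt" symbols.  */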
4683
4684 *ret = NULL;
4685
4686 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4687 return 0;
4688
4689 if (dynsymcount <= 0)
4690 return 0;
4691
4692 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4693 if (relsize <= 0)
4694 return -1;
4695
4696 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4697 {
4698 lazy_plt = &elf_x86_64_lazy_plt;
4699 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4700 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4701 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4702 if (ABI_64_P (abfd))
4703 {
4704 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4705 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4706 }
4707 else
4708 {
4709 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4710 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4711 }
4712 }
4713 else
4714 {
4715 lazy_plt = &elf_x86_64_nacl_plt;
4716 non_lazy_plt = NULL;
4717 lazy_bnd_plt = NULL;
4718 non_lazy_bnd_plt = NULL;
4719 lazy_ibt_plt = NULL;
4720 non_lazy_ibt_plt = NULL;
4721 }
4722
4723 count = 0;
4724 for (j = 0; plts[j].name != NULL; j++)
4725 {
4726 plt = bfd_get_section_by_name (abfd, plts[j].name);
4727 if (plt == NULL || plt->size == 0)
4728 continue;
4729
4730 /* Get the PLT section contents. */
4731 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4732 if (plt_contents == NULL)
4733 break;
4734 if (!bfd_get_section_contents (abfd, (asection *) plt,
4735 plt_contents, 0, plt->size))
4736 {
4737 free (plt_contents);
4738 break;
4739 }
4740
4741 /* Check what kind of PLT it is. */
4742 plt_type = plt_unknown;
4743 if (plts[j].type == plt_unknown
4744 && (plt->size >= (lazy_plt->plt_entry_size
4745 + lazy_plt->plt_entry_size)))
4746 {
4747 /* Match lazy PLT first. Need to check the first two
4748 instructions. */
4749 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4750 lazy_plt->plt0_got1_offset) == 0)
4751 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4752 2) == 0))
4753 plt_type = plt_lazy;
4754 else if (lazy_bnd_plt != NULL
4755 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4756 lazy_bnd_plt->plt0_got1_offset) == 0)
4757 && (memcmp (plt_contents + 6,
4758 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4759 {
4760 plt_type = plt_lazy | plt_second;
4761 	      /* The first entry in the lazy IBT PLT is the same as the
4762 		 lazy BND PLT.  */
4763 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4764 lazy_ibt_plt->plt_entry,
4765 lazy_ibt_plt->plt_got_offset) == 0))
4766 lazy_plt = lazy_ibt_plt;
4767 else
4768 lazy_plt = lazy_bnd_plt;
4769 }
4770 }
4771
4772 if (non_lazy_plt != NULL
4773 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4774 && plt->size >= non_lazy_plt->plt_entry_size)
4775 {
4776 /* Match non-lazy PLT. */
4777 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4778 non_lazy_plt->plt_got_offset) == 0)
4779 plt_type = plt_non_lazy;
4780 }
4781
4782 if (plt_type == plt_unknown || plt_type == plt_second)
4783 {
4784 if (non_lazy_bnd_plt != NULL
4785 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4786 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4787 non_lazy_bnd_plt->plt_got_offset) == 0))
4788 {
4789 /* Match BND PLT. */
4790 plt_type = plt_second;
4791 non_lazy_plt = non_lazy_bnd_plt;
4792 }
4793 else if (non_lazy_ibt_plt != NULL
4794 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4795 && (memcmp (plt_contents,
4796 non_lazy_ibt_plt->plt_entry,
4797 non_lazy_ibt_plt->plt_got_offset) == 0))
4798 {
4799 /* Match IBT PLT. */
4800 plt_type = plt_second;
4801 non_lazy_plt = non_lazy_ibt_plt;
4802 }
4803 }
4804
4805 if (plt_type == plt_unknown)
4806 {
4807 free (plt_contents);
4808 continue;
4809 }
4810
4811 plts[j].sec = plt;
4812 plts[j].type = plt_type;
4813
4814 if ((plt_type & plt_lazy))
4815 {
4816 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4817 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4818 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4819 /* Skip PLT0 in lazy PLT. */
4820 i = 1;
4821 }
4822 else
4823 {
4824 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4825 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4826 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4827 i = 0;
4828 }
4829
4830 /* Skip lazy PLT when the second PLT is used. */
4831 if (plt_type == (plt_lazy | plt_second))
4832 plts[j].count = 0;
4833 else
4834 {
4835 n = plt->size / plts[j].plt_entry_size;
4836 plts[j].count = n;
4837 count += n - i;
4838 }
4839
4840 plts[j].contents = plt_contents;
4841 }
4842
4843 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4844 (bfd_vma) 0, plts, dynsyms,
4845 ret);
4846 }
4847
4848 /* Handle an x86-64 specific section when reading an object file. This
4849 is called when elfcode.h finds a section with an unknown type. */
4850
4851 static bfd_boolean
4852 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4853 const char *name, int shindex)
4854 {
4855 if (hdr->sh_type != SHT_X86_64_UNWIND)
4856 return FALSE;
4857
4858 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4859 return FALSE;
4860
4861 return TRUE;
4862 }
4863
4864 /* Hook called by the linker routine which adds symbols from an object
4865 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4866 of .bss. */
4867
4868 static bfd_boolean
4869 elf_x86_64_add_symbol_hook (bfd *abfd,
4870 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4871 Elf_Internal_Sym *sym,
4872 const char **namep ATTRIBUTE_UNUSED,
4873 flagword *flagsp ATTRIBUTE_UNUSED,
4874 asection **secp,
4875 bfd_vma *valp)
4876 {
4877 asection *lcomm;
4878
4879 switch (sym->st_shndx)
4880 {
4881 case SHN_X86_64_LCOMMON:
4882 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4883 if (lcomm == NULL)
4884 {
4885 lcomm = bfd_make_section_with_flags (abfd,
4886 "LARGE_COMMON",
4887 (SEC_ALLOC
4888 | SEC_IS_COMMON
4889 | SEC_LINKER_CREATED));
4890 if (lcomm == NULL)
4891 return FALSE;
4892 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4893 }
4894 *secp = lcomm;
4895 *valp = sym->st_size;
4896 return TRUE;
4897 }
4898
4899 return TRUE;
4900 }
4901
4902
4903 /* Given a BFD section, try to locate the corresponding ELF section
4904 index. */
4905
4906 static bfd_boolean
4907 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4908 asection *sec, int *index_return)
4909 {
4910 if (sec == &_bfd_elf_large_com_section)
4911 {
4912 *index_return = SHN_X86_64_LCOMMON;
4913 return TRUE;
4914 }
4915 return FALSE;
4916 }
4917
4918 /* Process a symbol. */
4919
4920 static void
4921 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4922 asymbol *asym)
4923 {
4924 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4925
4926 switch (elfsym->internal_elf_sym.st_shndx)
4927 {
4928 case SHN_X86_64_LCOMMON:
4929 asym->section = &_bfd_elf_large_com_section;
4930 asym->value = elfsym->internal_elf_sym.st_size;
4931 /* Common symbol doesn't set BSF_GLOBAL. */
4932 asym->flags &= ~BSF_GLOBAL;
4933 break;
4934 }
4935 }
4936
4937 static bfd_boolean
4938 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4939 {
4940 return (sym->st_shndx == SHN_COMMON
4941 || sym->st_shndx == SHN_X86_64_LCOMMON);
4942 }
4943
4944 static unsigned int
4945 elf_x86_64_common_section_index (asection *sec)
4946 {
4947 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4948 return SHN_COMMON;
4949 else
4950 return SHN_X86_64_LCOMMON;
4951 }
4952
4953 static asection *
4954 elf_x86_64_common_section (asection *sec)
4955 {
4956 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4957 return bfd_com_section_ptr;
4958 else
4959 return &_bfd_elf_large_com_section;
4960 }
4961
4962 static bfd_boolean
4963 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4964 const Elf_Internal_Sym *sym,
4965 asection **psec,
4966 bfd_boolean newdef,
4967 bfd_boolean olddef,
4968 bfd *oldbfd,
4969 const asection *oldsec)
4970 {
4971 /* A normal common symbol and a large common symbol result in a
4972 normal common symbol. We turn the large common symbol into a
4973 normal one. */
4974 if (!olddef
4975 && h->root.type == bfd_link_hash_common
4976 && !newdef
4977 && bfd_is_com_section (*psec)
4978 && oldsec != *psec)
4979 {
4980 if (sym->st_shndx == SHN_COMMON
4981 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4982 {
4983 h->root.u.c.p->section
4984 = bfd_make_section_old_way (oldbfd, "COMMON");
4985 h->root.u.c.p->section->flags = SEC_ALLOC;
4986 }
4987 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4988 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4989 *psec = bfd_com_section_ptr;
4990 }
4991
4992 return TRUE;
4993 }
4994
4995 static int
4996 elf_x86_64_additional_program_headers (bfd *abfd,
4997 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4998 {
4999 asection *s;
5000 int count = 0;
5001
5002 /* Check to see if we need a large readonly segment. */
5003 s = bfd_get_section_by_name (abfd, ".lrodata");
5004 if (s && (s->flags & SEC_LOAD))
5005 count++;
5006
5007   /* Check to see if we need a large data segment.  Since the .lbss
5008      section is placed right after the .bss section, there should be no need for
5009 a large data segment just because of .lbss. */
5010 s = bfd_get_section_by_name (abfd, ".ldata");
5011 if (s && (s->flags & SEC_LOAD))
5012 count++;
5013
5014 return count;
5015 }
5016
5017 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5018
5019 static bfd_boolean
5020 elf_x86_64_relocs_compatible (const bfd_target *input,
5021 const bfd_target *output)
5022 {
5023 return ((xvec_get_elf_backend_data (input)->s->elfclass
5024 == xvec_get_elf_backend_data (output)->s->elfclass)
5025 && _bfd_elf_relocs_compatible (input, output));
5026 }
5027
5028 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5029 with GNU properties if found. Otherwise, return NULL. */
5030
5031 static bfd *
5032 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5033 {
5034 struct elf_x86_init_table init_table;
5035
5036 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5037 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5038 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5039 != (int) R_X86_64_GNU_VTINHERIT)
5040 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5041 != (int) R_X86_64_GNU_VTENTRY))
5042 abort ();
5043
5044 /* This is unused for x86-64. */
5045 init_table.plt0_pad_byte = 0x90;
5046
5047 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5048 {
5049 const struct elf_backend_data *bed
5050 = get_elf_backend_data (info->output_bfd);
5051 struct elf_x86_link_hash_table *htab
5052 = elf_x86_hash_table (info, bed->target_id);
5053 if (!htab)
5054 abort ();
5055 if (htab->params->bndplt)
5056 {
5057 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5058 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5059 }
5060 else
5061 {
5062 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5063 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5064 }
5065
5066 if (ABI_64_P (info->output_bfd))
5067 {
5068 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5069 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5070 }
5071 else
5072 {
5073 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5074 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5075 }
5076 }
5077 else
5078 {
5079 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5080 init_table.non_lazy_plt = NULL;
5081 init_table.lazy_ibt_plt = NULL;
5082 init_table.non_lazy_ibt_plt = NULL;
5083 }
5084
5085 if (ABI_64_P (info->output_bfd))
5086 {
5087 init_table.r_info = elf64_r_info;
5088 init_table.r_sym = elf64_r_sym;
5089 }
5090 else
5091 {
5092 init_table.r_info = elf32_r_info;
5093 init_table.r_sym = elf32_r_sym;
5094 }
5095
5096 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5097 }
5098
5099 static const struct bfd_elf_special_section
5100 elf_x86_64_special_sections[]=
5101 {
5102 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5103 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5104 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5105 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5106 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5107 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5108 { NULL, 0, 0, 0, 0 }
5109 };
5110
5111 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5112 #define TARGET_LITTLE_NAME "elf64-x86-64"
5113 #define ELF_ARCH bfd_arch_i386
5114 #define ELF_TARGET_ID X86_64_ELF_DATA
5115 #define ELF_MACHINE_CODE EM_X86_64
5116 #if DEFAULT_LD_Z_SEPARATE_CODE
5117 # define ELF_MAXPAGESIZE 0x1000
5118 #else
5119 # define ELF_MAXPAGESIZE 0x200000
5120 #endif
5121 #define ELF_MINPAGESIZE 0x1000
5122 #define ELF_COMMONPAGESIZE 0x1000
5123
5124 #define elf_backend_can_gc_sections 1
5125 #define elf_backend_can_refcount 1
5126 #define elf_backend_want_got_plt 1
5127 #define elf_backend_plt_readonly 1
5128 #define elf_backend_want_plt_sym 0
5129 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5130 #define elf_backend_rela_normal 1
5131 #define elf_backend_plt_alignment 4
5132 #define elf_backend_extern_protected_data 1
5133 #define elf_backend_caches_rawsize 1
5134 #define elf_backend_dtrel_excludes_plt 1
5135 #define elf_backend_want_dynrelro 1
5136
5137 #define elf_info_to_howto elf_x86_64_info_to_howto
5138
5139 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5140 #define bfd_elf64_bfd_reloc_name_lookup \
5141 elf_x86_64_reloc_name_lookup
5142
5143 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5144 #define elf_backend_check_relocs elf_x86_64_check_relocs
5145 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5146 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5147 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5148 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5149 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5150 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5151 #ifdef CORE_HEADER
5152 #define elf_backend_write_core_note elf_x86_64_write_core_note
5153 #endif
5154 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5155 #define elf_backend_relocate_section elf_x86_64_relocate_section
5156 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5157 #define elf_backend_object_p elf64_x86_64_elf_object_p
5158 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5159
5160 #define elf_backend_section_from_shdr \
5161 elf_x86_64_section_from_shdr
5162
5163 #define elf_backend_section_from_bfd_section \
5164 elf_x86_64_elf_section_from_bfd_section
5165 #define elf_backend_add_symbol_hook \
5166 elf_x86_64_add_symbol_hook
5167 #define elf_backend_symbol_processing \
5168 elf_x86_64_symbol_processing
5169 #define elf_backend_common_section_index \
5170 elf_x86_64_common_section_index
5171 #define elf_backend_common_section \
5172 elf_x86_64_common_section
5173 #define elf_backend_common_definition \
5174 elf_x86_64_common_definition
5175 #define elf_backend_merge_symbol \
5176 elf_x86_64_merge_symbol
5177 #define elf_backend_special_sections \
5178 elf_x86_64_special_sections
5179 #define elf_backend_additional_program_headers \
5180 elf_x86_64_additional_program_headers
5181 #define elf_backend_setup_gnu_properties \
5182 elf_x86_64_link_setup_gnu_properties
5183 #define elf_backend_hide_symbol \
5184 _bfd_x86_elf_hide_symbol
5185
5186 #undef elf64_bed
5187 #define elf64_bed elf64_x86_64_bed
5188
5189 #include "elf64-target.h"
5190
5191 /* CloudABI support. */
5192
5193 #undef TARGET_LITTLE_SYM
5194 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5195 #undef TARGET_LITTLE_NAME
5196 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5197
5198 #undef ELF_OSABI
5199 #define ELF_OSABI ELFOSABI_CLOUDABI
5200
5201 #undef elf64_bed
5202 #define elf64_bed elf64_x86_64_cloudabi_bed
5203
5204 #include "elf64-target.h"
5205
5206 /* FreeBSD support. */
5207
5208 #undef TARGET_LITTLE_SYM
5209 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5210 #undef TARGET_LITTLE_NAME
5211 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5212
5213 #undef ELF_OSABI
5214 #define ELF_OSABI ELFOSABI_FREEBSD
5215
5216 #undef elf64_bed
5217 #define elf64_bed elf64_x86_64_fbsd_bed
5218
5219 #include "elf64-target.h"
5220
5221 /* Solaris 2 support. */
5222
5223 #undef TARGET_LITTLE_SYM
5224 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5225 #undef TARGET_LITTLE_NAME
5226 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5227
5228 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5229 {
5230 is_solaris /* os */
5231 };
5232
5233 #undef elf_backend_arch_data
5234 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5235
5236 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5237 objects won't be recognized. */
5238 #undef ELF_OSABI
5239
5240 #undef elf64_bed
5241 #define elf64_bed elf64_x86_64_sol2_bed
5242
5243 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5244 boundary. */
5245 #undef elf_backend_static_tls_alignment
5246 #define elf_backend_static_tls_alignment 16
5247
5248 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5249
5250 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5251 File, p.63. */
5252 #undef elf_backend_want_plt_sym
5253 #define elf_backend_want_plt_sym 1
5254
5255 #undef elf_backend_strtab_flags
5256 #define elf_backend_strtab_flags SHF_STRINGS
5257
5258 static bfd_boolean
5259 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5260 bfd *obfd ATTRIBUTE_UNUSED,
5261 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5262 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5263 {
5264 /* PR 19938: FIXME: Need to add code for setting the sh_info
5265 and sh_link fields of Solaris specific section types. */
5266 return FALSE;
5267 }
5268
5269 #undef elf_backend_copy_special_section_fields
5270 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5271
5272 #include "elf64-target.h"
5273
5274 /* Native Client support. */
5275
5276 static bfd_boolean
5277 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5278 {
5279 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5280 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5281 return TRUE;
5282 }
5283
5284 #undef TARGET_LITTLE_SYM
5285 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5286 #undef TARGET_LITTLE_NAME
5287 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5288 #undef elf64_bed
5289 #define elf64_bed elf64_x86_64_nacl_bed
5290
5291 #undef ELF_MAXPAGESIZE
5292 #undef ELF_MINPAGESIZE
5293 #undef ELF_COMMONPAGESIZE
5294 #define ELF_MAXPAGESIZE 0x10000
5295 #define ELF_MINPAGESIZE 0x10000
5296 #define ELF_COMMONPAGESIZE 0x10000
5297
5298 /* Restore defaults. */
5299 #undef ELF_OSABI
5300 #undef elf_backend_static_tls_alignment
5301 #undef elf_backend_want_plt_sym
5302 #define elf_backend_want_plt_sym 0
5303 #undef elf_backend_strtab_flags
5304 #undef elf_backend_copy_special_section_fields
5305
5306 /* NaCl uses substantially different PLT entries for the same effects. */
5307
5308 #undef elf_backend_plt_alignment
5309 #define elf_backend_plt_alignment 5
5310 #define NACL_PLT_ENTRY_SIZE 64
5311 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5312
5313 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5314 {
5315 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5316 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5317 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5318 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5319 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5320
5321 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5322 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5323
5324 /* 32 bytes of nop to pad out to the standard size. */
5325 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5326 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5327 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5328 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5329 0x66, /* excess data16 prefix */
5330 0x90 /* nop */
5331 };
5332
5333 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5334 {
5335 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5336 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5337 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5338 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5339
5340 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5341 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5342 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5343
5344 /* Lazy GOT entries point here (32-byte aligned). */
5345 0x68, /* pushq immediate */
5346 0, 0, 0, 0, /* replaced with index into relocation table. */
5347 0xe9, /* jmp relative */
5348 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5349
5350 /* 22 bytes of nop to pad out to the standard size. */
5351 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5352 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5353 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5354 };
5355
5356 /* .eh_frame covering the .plt section. */
5357
5358 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5359 {
5360 #if (PLT_CIE_LENGTH != 20 \
5361 || PLT_FDE_LENGTH != 36 \
5362 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5363 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5364 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5365 #endif
5366 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5367 0, 0, 0, 0, /* CIE ID */
5368 1, /* CIE version */
5369 'z', 'R', 0, /* Augmentation string */
5370 1, /* Code alignment factor */
5371 0x78, /* Data alignment factor */
5372 16, /* Return address column */
5373 1, /* Augmentation size */
5374 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5375 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5376 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5377 DW_CFA_nop, DW_CFA_nop,
5378
5379 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5380 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5381 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5382 0, 0, 0, 0, /* .plt size goes here */
5383 0, /* Augmentation size */
5384 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5385 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5386 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5387 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5388 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5389 13, /* Block length */
5390 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5391 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5392 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5393 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5394 DW_CFA_nop, DW_CFA_nop
5395 };
5396
5397 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5398 {
5399 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5400 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5401 elf_x86_64_nacl_plt_entry, /* plt_entry */
5402 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5403 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5404 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5405 2, /* plt_tlsdesc_got1_offset */
5406 9, /* plt_tlsdesc_got2_offset */
5407 6, /* plt_tlsdesc_got1_insn_end */
5408 13, /* plt_tlsdesc_got2_insn_end */
5409 2, /* plt0_got1_offset */
5410 9, /* plt0_got2_offset */
5411 13, /* plt0_got2_insn_end */
5412 3, /* plt_got_offset */
5413 33, /* plt_reloc_offset */
5414 38, /* plt_plt_offset */
5415 7, /* plt_got_insn_size */
5416 42, /* plt_plt_insn_end */
5417 32, /* plt_lazy_offset */
5418 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5419 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5420 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5421 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5422 };
5423
5424 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5425 {
5426 is_nacl /* os */
5427 };
5428
5429 #undef elf_backend_arch_data
5430 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5431
5432 #undef elf_backend_object_p
5433 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5434 #undef elf_backend_modify_segment_map
5435 #define elf_backend_modify_segment_map nacl_modify_segment_map
5436 #undef elf_backend_modify_program_headers
5437 #define elf_backend_modify_program_headers nacl_modify_program_headers
5438 #undef elf_backend_final_write_processing
5439 #define elf_backend_final_write_processing nacl_final_write_processing
5440
5441 #include "elf64-target.h"
5442
5443 /* Native Client x32 support. */
5444
5445 static bfd_boolean
5446 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5447 {
5448 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5449 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5450 return TRUE;
5451 }
5452
5453 #undef TARGET_LITTLE_SYM
5454 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5455 #undef TARGET_LITTLE_NAME
5456 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5457 #undef elf32_bed
5458 #define elf32_bed elf32_x86_64_nacl_bed
5459
5460 #define bfd_elf32_bfd_reloc_type_lookup \
5461 elf_x86_64_reloc_type_lookup
5462 #define bfd_elf32_bfd_reloc_name_lookup \
5463 elf_x86_64_reloc_name_lookup
5464 #define bfd_elf32_get_synthetic_symtab \
5465 elf_x86_64_get_synthetic_symtab
5466
5467 #undef elf_backend_object_p
5468 #define elf_backend_object_p \
5469 elf32_x86_64_nacl_elf_object_p
5470
5471 #undef elf_backend_bfd_from_remote_memory
5472 #define elf_backend_bfd_from_remote_memory \
5473 _bfd_elf32_bfd_from_remote_memory
5474
5475 #undef elf_backend_size_info
5476 #define elf_backend_size_info \
5477 _bfd_elf32_size_info
5478
5479 #undef elf32_bed
5480 #define elf32_bed elf32_x86_64_bed
5481
5482 #include "elf32-target.h"
5483
5484 /* Restore defaults. */
5485 #undef elf_backend_object_p
5486 #define elf_backend_object_p elf64_x86_64_elf_object_p
5487 #undef elf_backend_bfd_from_remote_memory
5488 #undef elf_backend_size_info
5489 #undef elf_backend_modify_segment_map
5490 #undef elf_backend_modify_program_headers
5491 #undef elf_backend_final_write_processing
5492
5493 /* Intel L1OM support. */
5494
5495 static bfd_boolean
5496 elf64_l1om_elf_object_p (bfd *abfd)
5497 {
5498 /* Set the right machine number for an L1OM ELF64 file. */
5499 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5500 return TRUE;
5501 }
5502
5503 #undef TARGET_LITTLE_SYM
5504 #define TARGET_LITTLE_SYM l1om_elf64_vec
5505 #undef TARGET_LITTLE_NAME
5506 #define TARGET_LITTLE_NAME "elf64-l1om"
5507 #undef ELF_ARCH
5508 #define ELF_ARCH bfd_arch_l1om
5509
5510 #undef ELF_MACHINE_CODE
5511 #define ELF_MACHINE_CODE EM_L1OM
5512
5513 #undef ELF_OSABI
5514
5515 #undef elf64_bed
5516 #define elf64_bed elf64_l1om_bed
5517
5518 #undef elf_backend_object_p
5519 #define elf_backend_object_p elf64_l1om_elf_object_p
5520
5521 /* Restore defaults. */
5522 #undef ELF_MAXPAGESIZE
5523 #undef ELF_MINPAGESIZE
5524 #undef ELF_COMMONPAGESIZE
5525 #if DEFAULT_LD_Z_SEPARATE_CODE
5526 # define ELF_MAXPAGESIZE 0x1000
5527 #else
5528 # define ELF_MAXPAGESIZE 0x200000
5529 #endif
5530 #define ELF_MINPAGESIZE 0x1000
5531 #define ELF_COMMONPAGESIZE 0x1000
5532 #undef elf_backend_plt_alignment
5533 #define elf_backend_plt_alignment 4
5534 #undef elf_backend_arch_data
5535 #define elf_backend_arch_data &elf_x86_64_arch_bed
5536
5537 #include "elf64-target.h"
5538
5539 /* FreeBSD L1OM support. */
5540
5541 #undef TARGET_LITTLE_SYM
5542 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5543 #undef TARGET_LITTLE_NAME
5544 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5545
5546 #undef ELF_OSABI
5547 #define ELF_OSABI ELFOSABI_FREEBSD
5548
5549 #undef elf64_bed
5550 #define elf64_bed elf64_l1om_fbsd_bed
5551
5552 #include "elf64-target.h"
5553
5554 /* Intel K1OM support. */
5555
5556 static bfd_boolean
5557 elf64_k1om_elf_object_p (bfd *abfd)
5558 {
5559 /* Set the right machine number for a K1OM ELF64 file. */
5560 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5561 return TRUE;
5562 }
5563
5564 #undef TARGET_LITTLE_SYM
5565 #define TARGET_LITTLE_SYM k1om_elf64_vec
5566 #undef TARGET_LITTLE_NAME
5567 #define TARGET_LITTLE_NAME "elf64-k1om"
5568 #undef ELF_ARCH
5569 #define ELF_ARCH bfd_arch_k1om
5570
5571 #undef ELF_MACHINE_CODE
5572 #define ELF_MACHINE_CODE EM_K1OM
5573
5574 #undef ELF_OSABI
5575
5576 #undef elf64_bed
5577 #define elf64_bed elf64_k1om_bed
5578
5579 #undef elf_backend_object_p
5580 #define elf_backend_object_p elf64_k1om_elf_object_p
5581
5582 #undef elf_backend_static_tls_alignment
5583
5584 #undef elf_backend_want_plt_sym
5585 #define elf_backend_want_plt_sym 0
5586
5587 #include "elf64-target.h"
5588
5589 /* FreeBSD K1OM support. */
5590
5591 #undef TARGET_LITTLE_SYM
5592 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5593 #undef TARGET_LITTLE_NAME
5594 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5595
5596 #undef ELF_OSABI
5597 #define ELF_OSABI ELFOSABI_FREEBSD
5598
5599 #undef elf64_bed
5600 #define elf64_bed elf64_k1om_fbsd_bed
5601
5602 #include "elf64-target.h"
5603
5604 /* 32-bit x86-64 support. */
5605
5606 #undef TARGET_LITTLE_SYM
5607 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5608 #undef TARGET_LITTLE_NAME
5609 #define TARGET_LITTLE_NAME "elf32-x86-64"
5610 #undef elf32_bed
5611
5612 #undef ELF_ARCH
5613 #define ELF_ARCH bfd_arch_i386
5614
5615 #undef ELF_MACHINE_CODE
5616 #define ELF_MACHINE_CODE EM_X86_64
5617
5618 #undef ELF_OSABI
5619
5620 #undef elf_backend_object_p
5621 #define elf_backend_object_p \
5622 elf32_x86_64_elf_object_p
5623
5624 #undef elf_backend_bfd_from_remote_memory
5625 #define elf_backend_bfd_from_remote_memory \
5626 _bfd_elf32_bfd_from_remote_memory
5627
5628 #undef elf_backend_size_info
5629 #define elf_backend_size_info \
5630 _bfd_elf32_size_info
5631
5632 #include "elf32-target.h"