1/* ELF support for AArch64.
2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21/* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this differs from non-TLS
63 local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
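   As an illustration only (a layout sketch, not a definitive statement
   of the run-time contents), the pair of GOT slots allocated for a TLS
   symbol foo is used as follows:

     traditional (GD):                    TLS descriptor:
       GOT[n]   module id slot              GOT[n]   resolver entry point
                R_AARCH64_TLS_DTPMOD64               R_AARCH64_TLSDESC
       GOT[n+1] offset slot                 GOT[n+1] argument for the resolver
                R_AARCH64_TLS_DTPREL64
                (globals only)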
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elf64_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
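   In terms of space (a sketch, using the constants defined further down
   in this file): a traditional GD symbol therefore consumes
   GOT_ENTRY_SIZE * 2 bytes of .got plus two RELOC_SIZE entries of
   .rela.got, while a TLS descriptor symbol consumes GOT_ENTRY_SIZE * 2
   bytes plus a single relocation entry.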
115 elf64_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, looking in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elf64_aarch64_relocate_section ()
123
124 Calls elf64_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
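   For example (a pattern sketch only, not the literal code), a consumer
   of the recorded offset follows the shape:

     if ((got_offset & 1) == 0)
       {
         ... emit the GOT relocation(s) for this symbol ...
         got_offset |= 1;                    mark them as emitted
       }
     value = got_offset & ~(bfd_vma) 1;      the real GOT offset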
134 elf64_aarch64_final_link_relocate ()
135
136 Fix up the R_AARCH64_TLSGD_{ADR_PAGE21, ADD_LO12_NC} relocations. */
137
138#include "sysdep.h"
139#include "bfd.h"
140#include "libiberty.h"
141#include "libbfd.h"
142#include "bfd_stdint.h"
143#include "elf-bfd.h"
144#include "bfdlink.h"
145#include "objalloc.h"
146#include "elf/aarch64.h"
147
148static bfd_reloc_status_type
149bfd_elf_aarch64_put_addend (bfd *abfd,
150 bfd_byte *address,
151 reloc_howto_type *howto, bfd_signed_vma addend);
152
153#define IS_AARCH64_TLS_RELOC(R_TYPE) \
154 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
155 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
157 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
158 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
160 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
163 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
168 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
169 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
170 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
171 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
172 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
173
174#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
175 ((R_TYPE) == R_AARCH64_TLSDESC_LD_PREL19 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE21 \
178 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
181 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
182 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
183 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
184 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
185 || (R_TYPE) == R_AARCH64_TLSDESC)
186
187#define ELIMINATE_COPY_RELOCS 0
188
189/* Return the relocation section associated with NAME. HTAB is the
190 bfd's elf64_aarch64_link_hash_table. */
191#define RELOC_SECTION(HTAB, NAME) \
192 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
193
194/* Return the size of a relocation entry. HTAB is the bfd's
195 elf64_aarch64_link_hash_table. */
196#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
197
198/* Return function to swap relocations in. HTAB is the bfd's
199 elf64_aarch64_link_hash_table. */
200#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
201
202/* Return function to swap relocations out. HTAB is the bfd's
203 elf64_aarch64_link_hash_table. */
204#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
205
206/* GOT Entry size - 8 bytes. */
207#define GOT_ENTRY_SIZE (8)
208#define PLT_ENTRY_SIZE (32)
209#define PLT_SMALL_ENTRY_SIZE (16)
210#define PLT_TLSDESC_ENTRY_SIZE (32)
211
212/* Take the PAGE component of an address or offset. */
213#define PG(x) ((x) & ~ 0xfff)
214#define PG_OFFSET(x) ((x) & 0xfff)
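/* Illustrative note (not part of the original source): PG and PG_OFFSET
   split an address into its 4K page and the offset within that page, so
   that an ADRP/ADD (or ADRP/LDR) pair can reconstruct it:

     PG (x) + PG_OFFSET (x) == x

   e.g. for x = 0x400123, PG (x) = 0x400000 and PG_OFFSET (x) = 0x123.  */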
215
216/* Encoding of the nop instruction */
217#define INSN_NOP 0xd503201f
218
219#define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
223/* The first entry in a procedure linkage table looks like this.
224 This form of PLT entry is used if the distance between the PLTGOT
225 and the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-8]. */
228static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229{
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
233 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
234 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237 0x1f, 0x20, 0x03, 0xd5, /* nop */
238};
239
240/* A per-function entry in a procedure linkage table looks like this.
241 This form of PLT entry is used if the distance between the PLTGOT
242 and the PLT is < 4GB. */
243static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
244{
245 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
246 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
247 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
248 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
249};
250
251static const bfd_byte
252elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
253{
254 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
255 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
256 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
257 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
258 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
259 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261 0x1f, 0x20, 0x03, 0xd5, /* nop */
262};
263
264#define elf_info_to_howto elf64_aarch64_info_to_howto
265#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
266
267#define AARCH64_ELF_ABI_VERSION 0
268#define AARCH64_ELF_OS_ABI_VERSION 0
269
270/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
271#define ALL_ONES (~ (bfd_vma) 0)
272
273static reloc_howto_type elf64_aarch64_howto_none =
274 HOWTO (R_AARCH64_NONE, /* type */
275 0, /* rightshift */
276 0, /* size (0 = byte, 1 = short, 2 = long) */
277 0, /* bitsize */
278 FALSE, /* pc_relative */
279 0, /* bitpos */
280 complain_overflow_dont,/* complain_on_overflow */
281 bfd_elf_generic_reloc, /* special_function */
282 "R_AARCH64_NONE", /* name */
283 FALSE, /* partial_inplace */
284 0, /* src_mask */
285 0, /* dst_mask */
286 FALSE); /* pcrel_offset */
287
288static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
289{
290 HOWTO (R_AARCH64_COPY, /* type */
291 0, /* rightshift */
292 2, /* size (0 = byte, 1 = short, 2 = long) */
293 64, /* bitsize */
294 FALSE, /* pc_relative */
295 0, /* bitpos */
296 complain_overflow_bitfield, /* complain_on_overflow */
297 bfd_elf_generic_reloc, /* special_function */
298 "R_AARCH64_COPY", /* name */
299 TRUE, /* partial_inplace */
300 0xffffffff, /* src_mask */
301 0xffffffff, /* dst_mask */
302 FALSE), /* pcrel_offset */
303
304 HOWTO (R_AARCH64_GLOB_DAT, /* type */
305 0, /* rightshift */
306 2, /* size (0 = byte, 1 = short, 2 = long) */
307 64, /* bitsize */
308 FALSE, /* pc_relative */
309 0, /* bitpos */
310 complain_overflow_bitfield, /* complain_on_overflow */
311 bfd_elf_generic_reloc, /* special_function */
312 "R_AARCH64_GLOB_DAT", /* name */
313 TRUE, /* partial_inplace */
314 0xffffffff, /* src_mask */
315 0xffffffff, /* dst_mask */
316 FALSE), /* pcrel_offset */
317
318 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
319 0, /* rightshift */
320 2, /* size (0 = byte, 1 = short, 2 = long) */
321 64, /* bitsize */
322 FALSE, /* pc_relative */
323 0, /* bitpos */
324 complain_overflow_bitfield, /* complain_on_overflow */
325 bfd_elf_generic_reloc, /* special_function */
326 "R_AARCH64_JUMP_SLOT", /* name */
327 TRUE, /* partial_inplace */
328 0xffffffff, /* src_mask */
329 0xffffffff, /* dst_mask */
330 FALSE), /* pcrel_offset */
331
332 HOWTO (R_AARCH64_RELATIVE, /* type */
333 0, /* rightshift */
334 2, /* size (0 = byte, 1 = short, 2 = long) */
335 64, /* bitsize */
336 FALSE, /* pc_relative */
337 0, /* bitpos */
338 complain_overflow_bitfield, /* complain_on_overflow */
339 bfd_elf_generic_reloc, /* special_function */
340 "R_AARCH64_RELATIVE", /* name */
341 TRUE, /* partial_inplace */
342 ALL_ONES, /* src_mask */
343 ALL_ONES, /* dst_mask */
344 FALSE), /* pcrel_offset */
345
346 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
347 0, /* rightshift */
348 2, /* size (0 = byte, 1 = short, 2 = long) */
349 64, /* bitsize */
350 FALSE, /* pc_relative */
351 0, /* bitpos */
352 complain_overflow_dont, /* complain_on_overflow */
353 bfd_elf_generic_reloc, /* special_function */
354 "R_AARCH64_TLS_DTPMOD64", /* name */
355 FALSE, /* partial_inplace */
356 0, /* src_mask */
357 ALL_ONES, /* dst_mask */
358 FALSE), /* pcrel_offset */
359
360 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
361 0, /* rightshift */
362 2, /* size (0 = byte, 1 = short, 2 = long) */
363 64, /* bitsize */
364 FALSE, /* pc_relative */
365 0, /* bitpos */
366 complain_overflow_dont, /* complain_on_overflow */
367 bfd_elf_generic_reloc, /* special_function */
368 "R_AARCH64_TLS_DTPREL64", /* name */
369 FALSE, /* partial_inplace */
370 0, /* src_mask */
371 ALL_ONES, /* dst_mask */
372 FALSE), /* pcrel_offset */
373
374 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
375 0, /* rightshift */
376 2, /* size (0 = byte, 1 = short, 2 = long) */
377 64, /* bitsize */
378 FALSE, /* pc_relative */
379 0, /* bitpos */
380 complain_overflow_dont, /* complain_on_overflow */
381 bfd_elf_generic_reloc, /* special_function */
382 "R_AARCH64_TLS_TPREL64", /* name */
383 FALSE, /* partial_inplace */
384 0, /* src_mask */
385 ALL_ONES, /* dst_mask */
386 FALSE), /* pcrel_offset */
387
388 HOWTO (R_AARCH64_TLSDESC, /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 64, /* bitsize */
392 FALSE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_dont, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 "R_AARCH64_TLSDESC", /* name */
397 FALSE, /* partial_inplace */
398 0, /* src_mask */
399 ALL_ONES, /* dst_mask */
400 FALSE), /* pcrel_offset */
401
402 HOWTO (R_AARCH64_IRELATIVE, /* type */
403 0, /* rightshift */
404 2, /* size (0 = byte, 1 = short, 2 = long) */
405 64, /* bitsize */
406 FALSE, /* pc_relative */
407 0, /* bitpos */
408 complain_overflow_bitfield, /* complain_on_overflow */
409 bfd_elf_generic_reloc, /* special_function */
410 "R_AARCH64_IRELATIVE", /* name */
411 FALSE, /* partial_inplace */
412 0, /* src_mask */
413 ALL_ONES, /* dst_mask */
414 FALSE), /* pcrel_offset */
415
416};
417
418/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
419 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
420 in that slot. */
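/* Concretely (see elf64_aarch64_howto_from_type further down), the HOWTO
   for a static relocation type R is looked up as
   elf64_aarch64_howto_table[R - R_AARCH64_static_min], so the table must
   track the numbering of the relocation codes, with EMPTY_HOWTO fillers
   for any unassigned values.  */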
421
422static reloc_howto_type elf64_aarch64_howto_table[] =
423{
424 /* Basic data relocations. */
425
426 HOWTO (R_AARCH64_NULL, /* type */
427 0, /* rightshift */
428 0, /* size (0 = byte, 1 = short, 2 = long) */
429 0, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_dont, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_NULL", /* name */
435 FALSE, /* partial_inplace */
436 0, /* src_mask */
437 0, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 /* .xword: (S+A) */
441 HOWTO (R_AARCH64_ABS64, /* type */
442 0, /* rightshift */
443 4, /* size (4 = long long) */
444 64, /* bitsize */
445 FALSE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS64", /* name */
450 FALSE, /* partial_inplace */
451 ALL_ONES, /* src_mask */
452 ALL_ONES, /* dst_mask */
453 FALSE), /* pcrel_offset */
454
455 /* .word: (S+A) */
456 HOWTO (R_AARCH64_ABS32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS32", /* name */
465 FALSE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 /* .half: (S+A) */
471 HOWTO (R_AARCH64_ABS16, /* type */
472 0, /* rightshift */
473 1, /* size (0 = byte, 1 = short, 2 = long) */
474 16, /* bitsize */
475 FALSE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_ABS16", /* name */
480 FALSE, /* partial_inplace */
481 0xffff, /* src_mask */
482 0xffff, /* dst_mask */
483 FALSE), /* pcrel_offset */
484
485 /* .xword: (S+A-P) */
486 HOWTO (R_AARCH64_PREL64, /* type */
487 0, /* rightshift */
488 4, /* size (4 = long long) */
489 64, /* bitsize */
490 TRUE, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL64", /* name */
495 FALSE, /* partial_inplace */
496 ALL_ONES, /* src_mask */
497 ALL_ONES, /* dst_mask */
498 TRUE), /* pcrel_offset */
499
500 /* .word: (S+A-P) */
501 HOWTO (R_AARCH64_PREL32, /* type */
502 0, /* rightshift */
503 2, /* size (0 = byte, 1 = short, 2 = long) */
504 32, /* bitsize */
505 TRUE, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL32", /* name */
510 FALSE, /* partial_inplace */
511 0xffffffff, /* src_mask */
512 0xffffffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
514
515 /* .half: (S+A-P) */
516 HOWTO (R_AARCH64_PREL16, /* type */
517 0, /* rightshift */
518 1, /* size (0 = byte, 1 = short, 2 = long) */
519 16, /* bitsize */
520 TRUE, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 "R_AARCH64_PREL16", /* name */
525 FALSE, /* partial_inplace */
526 0xffff, /* src_mask */
527 0xffff, /* dst_mask */
528 TRUE), /* pcrel_offset */
529
530 /* Group relocations to create a 16, 32, 48 or 64 bit
531 unsigned data or abs address inline. */
532
533 /* MOVZ: ((S+A) >> 0) & 0xffff */
534 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_unsigned, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
549 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
550 0, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_dont, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOVZ: ((S+A) >> 16) & 0xffff */
564 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
565 16, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
579 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
580 16, /* rightshift */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
582 16, /* bitsize */
583 FALSE, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 32) & 0xffff */
594 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
595 32, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 16, /* bitsize */
598 FALSE, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
609 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
610 32, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 16, /* bitsize */
613 FALSE, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 48) & 0xffff */
624 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
625 48, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 16, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_AARCH64_MOVW_UABS_G3", /* name */
633 FALSE, /* partial_inplace */
634 0xffff, /* src_mask */
635 0xffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
639 signed data or abs address inline. Will change instruction
640 to MOVN or MOVZ depending on sign of calculated value. */
641
642 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
644 0, /* rightshift */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
646 16, /* bitsize */
647 FALSE, /* pc_relative */
648 0, /* bitpos */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G0", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
656
657 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
659 16, /* rightshift */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
661 16, /* bitsize */
662 FALSE, /* pc_relative */
663 0, /* bitpos */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G1", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
671
672 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
673 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
674 32, /* rightshift */
675 2, /* size (0 = byte, 1 = short, 2 = long) */
676 16, /* bitsize */
677 FALSE, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 "R_AARCH64_MOVW_SABS_G2", /* name */
682 FALSE, /* partial_inplace */
683 0xffff, /* src_mask */
684 0xffff, /* dst_mask */
685 FALSE), /* pcrel_offset */
686
687/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
688 addresses: PG(x) is (x & ~0xfff). */
689
690 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
691 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
692 2, /* rightshift */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
694 19, /* bitsize */
695 TRUE, /* pc_relative */
696 0, /* bitpos */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_LD_PREL_LO19", /* name */
700 FALSE, /* partial_inplace */
701 0x7ffff, /* src_mask */
702 0x7ffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
704
705 /* ADR: (S+A-P) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 21, /* bitsize */
710 TRUE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_LO21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
719
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
722 12, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 21, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
736 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
737 12, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 21, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x1fffff, /* src_mask */
747 0x1fffff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 /* ADD: (S+A) & 0xfff [no overflow check] */
751 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 12, /* bitsize */
755 FALSE, /* pc_relative */
756 10, /* bitpos */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0x3ffc00, /* src_mask */
762 0x3ffc00, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 /* LD/ST8: (S+A) & 0xfff */
766 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
767 0, /* rightshift */
768 2, /* size (0 = byte, 1 = short, 2 = long) */
769 12, /* bitsize */
770 FALSE, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
775 FALSE, /* partial_inplace */
776 0xfff, /* src_mask */
777 0xfff, /* dst_mask */
778 FALSE), /* pcrel_offset */
779
780 /* Relocations for control-flow instructions. */
781
782 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
783 HOWTO (R_AARCH64_TSTBR14, /* type */
784 2, /* rightshift */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
786 14, /* bitsize */
787 TRUE, /* pc_relative */
788 0, /* bitpos */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_TSTBR14", /* name */
792 FALSE, /* partial_inplace */
793 0x3fff, /* src_mask */
794 0x3fff, /* dst_mask */
795 TRUE), /* pcrel_offset */
796
797 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
798 HOWTO (R_AARCH64_CONDBR19, /* type */
799 2, /* rightshift */
800 2, /* size (0 = byte, 1 = short, 2 = long) */
801 19, /* bitsize */
802 TRUE, /* pc_relative */
803 0, /* bitpos */
804 complain_overflow_signed, /* complain_on_overflow */
805 bfd_elf_generic_reloc, /* special_function */
806 "R_AARCH64_CONDBR19", /* name */
807 FALSE, /* partial_inplace */
808 0x7ffff, /* src_mask */
809 0x7ffff, /* dst_mask */
810 TRUE), /* pcrel_offset */
811
812 EMPTY_HOWTO (281),
813
814 /* B: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_JUMP26, /* type */
816 2, /* rightshift */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
818 26, /* bitsize */
819 TRUE, /* pc_relative */
820 0, /* bitpos */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_JUMP26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
828
829 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
830 HOWTO (R_AARCH64_CALL26, /* type */
831 2, /* rightshift */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
833 26, /* bitsize */
834 TRUE, /* pc_relative */
835 0, /* bitpos */
836 complain_overflow_signed, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_CALL26", /* name */
839 FALSE, /* partial_inplace */
840 0x3ffffff, /* src_mask */
841 0x3ffffff, /* dst_mask */
842 TRUE), /* pcrel_offset */
843
844 /* LD/ST16: (S+A) & 0xffe */
845 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
846 1, /* rightshift */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
848 12, /* bitsize */
849 FALSE, /* pc_relative */
850 0, /* bitpos */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffe, /* src_mask */
856 0xffe, /* dst_mask */
857 FALSE), /* pcrel_offset */
858
859 /* LD/ST32: (S+A) & 0xffc */
860 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
861 2, /* rightshift */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
863 12, /* bitsize */
864 FALSE, /* pc_relative */
865 0, /* bitpos */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xffc, /* src_mask */
871 0xffc, /* dst_mask */
872 FALSE), /* pcrel_offset */
873
874 /* LD/ST64: (S+A) & 0xff8 */
875 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
876 3, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 12, /* bitsize */
879 FALSE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont, /* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
884 FALSE, /* partial_inplace */
885 0xff8, /* src_mask */
886 0xff8, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 EMPTY_HOWTO (287),
890 EMPTY_HOWTO (288),
891 EMPTY_HOWTO (289),
892 EMPTY_HOWTO (290),
893 EMPTY_HOWTO (291),
894 EMPTY_HOWTO (292),
895 EMPTY_HOWTO (293),
896 EMPTY_HOWTO (294),
897 EMPTY_HOWTO (295),
898 EMPTY_HOWTO (296),
899 EMPTY_HOWTO (297),
900 EMPTY_HOWTO (298),
901
902 /* LD/ST128: (S+A) & 0xff0 */
903 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
904 4, /* rightshift */
905 2, /* size (0 = byte, 1 = short, 2 = long) */
906 12, /* bitsize */
907 FALSE, /* pc_relative */
908 0, /* bitpos */
909 complain_overflow_dont, /* complain_on_overflow */
910 bfd_elf_generic_reloc, /* special_function */
911 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
912 FALSE, /* partial_inplace */
913 0xff0, /* src_mask */
914 0xff0, /* dst_mask */
915 FALSE), /* pcrel_offset */
916
917 EMPTY_HOWTO (300),
918 EMPTY_HOWTO (301),
919 EMPTY_HOWTO (302),
920 EMPTY_HOWTO (303),
921 EMPTY_HOWTO (304),
922 EMPTY_HOWTO (305),
923 EMPTY_HOWTO (306),
924 EMPTY_HOWTO (307),
925 EMPTY_HOWTO (308),
926
927 /* Set a load-literal immediate field to bits
928 0x1FFFFC of G(S)-P */
929 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
930 2, /* rightshift */
931 2, /* size (0 = byte,1 = short,2 = long) */
932 19, /* bitsize */
933 TRUE, /* pc_relative */
934 0, /* bitpos */
935 complain_overflow_signed, /* complain_on_overflow */
936 bfd_elf_generic_reloc, /* special_function */
937 "R_AARCH64_GOT_LD_PREL19", /* name */
938 FALSE, /* partial_inplace */
939 0xffffe0, /* src_mask */
940 0xffffe0, /* dst_mask */
941 TRUE), /* pcrel_offset */
942
943 EMPTY_HOWTO (310),
944
945 /* Get to the page for the GOT entry for the symbol
946 (G(S) - P) using an ADRP instruction. */
947 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
948 12, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 21, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_ADR_GOT_PAGE", /* name */
956 FALSE, /* partial_inplace */
957 0x1fffff, /* src_mask */
958 0x1fffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 /* LD64: GOT offset G(S) & 0xff8 */
962 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
963 3, /* rightshift */
964 2, /* size (0 = byte, 1 = short, 2 = long) */
965 12, /* bitsize */
966 FALSE, /* pc_relative */
967 0, /* bitpos */
968 complain_overflow_dont, /* complain_on_overflow */
969 bfd_elf_generic_reloc, /* special_function */
970 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
971 FALSE, /* partial_inplace */
972 0xff8, /* src_mask */
973 0xff8, /* dst_mask */
974 FALSE) /* pcrel_offset */
975};
976
977static reloc_howto_type elf64_aarch64_tls_howto_table[] =
978{
979 EMPTY_HOWTO (512),
980
981 /* Get to the page for the GOT entry for the symbol
982 (G(S) - P) using an ADRP instruction. */
983 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
984 12, /* rightshift */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
986 21, /* bitsize */
987 TRUE, /* pc_relative */
988 0, /* bitpos */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
992 FALSE, /* partial_inplace */
993 0x1fffff, /* src_mask */
994 0x1fffff, /* dst_mask */
995 TRUE), /* pcrel_offset */
996
997 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
998 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
999 0, /* rightshift */
1000 2, /* size (0 = byte, 1 = short, 2 = long) */
1001 12, /* bitsize */
1002 FALSE, /* pc_relative */
1003 0, /* bitpos */
1004 complain_overflow_dont, /* complain_on_overflow */
1005 bfd_elf_generic_reloc, /* special_function */
1006 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
1007 FALSE, /* partial_inplace */
1008 0xfff, /* src_mask */
1009 0xfff, /* dst_mask */
1010 FALSE), /* pcrel_offset */
1011
1012 EMPTY_HOWTO (515),
1013 EMPTY_HOWTO (516),
1014 EMPTY_HOWTO (517),
1015 EMPTY_HOWTO (518),
1016 EMPTY_HOWTO (519),
1017 EMPTY_HOWTO (520),
1018 EMPTY_HOWTO (521),
1019 EMPTY_HOWTO (522),
1020 EMPTY_HOWTO (523),
1021 EMPTY_HOWTO (524),
1022 EMPTY_HOWTO (525),
1023 EMPTY_HOWTO (526),
1024 EMPTY_HOWTO (527),
1025 EMPTY_HOWTO (528),
1026 EMPTY_HOWTO (529),
1027 EMPTY_HOWTO (530),
1028 EMPTY_HOWTO (531),
1029 EMPTY_HOWTO (532),
1030 EMPTY_HOWTO (533),
1031 EMPTY_HOWTO (534),
1032 EMPTY_HOWTO (535),
1033 EMPTY_HOWTO (536),
1034 EMPTY_HOWTO (537),
1035 EMPTY_HOWTO (538),
1036
1037 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1038 16, /* rightshift */
1039 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 16, /* bitsize */
1041 FALSE, /* pc_relative */
1042 0, /* bitpos */
1043 complain_overflow_dont, /* complain_on_overflow */
1044 bfd_elf_generic_reloc, /* special_function */
1045 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1046 FALSE, /* partial_inplace */
1047 0xffff, /* src_mask */
1048 0xffff, /* dst_mask */
1049 FALSE), /* pcrel_offset */
1050
1051 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1052 0, /* rightshift */
1053 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 32, /* bitsize */
1055 FALSE, /* pc_relative */
1056 0, /* bitpos */
1057 complain_overflow_dont, /* complain_on_overflow */
1058 bfd_elf_generic_reloc, /* special_function */
1059 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1060 FALSE, /* partial_inplace */
1061 0xffff, /* src_mask */
1062 0xffff, /* dst_mask */
1063 FALSE), /* pcrel_offset */
1064
1065 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1066 12, /* rightshift */
1067 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 21, /* bitsize */
1069 FALSE, /* pc_relative */
1070 0, /* bitpos */
1071 complain_overflow_dont, /* complain_on_overflow */
1072 bfd_elf_generic_reloc, /* special_function */
1073 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1074 FALSE, /* partial_inplace */
1075 0x1fffff, /* src_mask */
1076 0x1fffff, /* dst_mask */
1077 FALSE), /* pcrel_offset */
1078
1079 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1080 3, /* rightshift */
1081 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 12, /* bitsize */
1083 FALSE, /* pc_relative */
1084 0, /* bitpos */
1085 complain_overflow_dont, /* complain_on_overflow */
1086 bfd_elf_generic_reloc, /* special_function */
1087 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1088 FALSE, /* partial_inplace */
1089 0xff8, /* src_mask */
1090 0xff8, /* dst_mask */
1091 FALSE), /* pcrel_offset */
1092
1093 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
1094 2, /* rightshift */
1095 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 21, /* bitsize */
1097 FALSE, /* pc_relative */
1098 0, /* bitpos */
1099 complain_overflow_dont, /* complain_on_overflow */
1100 bfd_elf_generic_reloc, /* special_function */
1101 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1102 FALSE, /* partial_inplace */
1103 0x1ffffc, /* src_mask */
1104 0x1ffffc, /* dst_mask */
1105 FALSE), /* pcrel_offset */
1106
1107 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
1108 32, /* rightshift */
1109 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 12, /* bitsize */
1111 FALSE, /* pc_relative */
1112 0, /* bitpos */
1113 complain_overflow_dont, /* complain_on_overflow */
1114 bfd_elf_generic_reloc, /* special_function */
1115 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1116 FALSE, /* partial_inplace */
1117 0xffff, /* src_mask */
1118 0xffff, /* dst_mask */
1119 FALSE), /* pcrel_offset */
1120
1121 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
1122 16, /* rightshift */
1123 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 12, /* bitsize */
1125 FALSE, /* pc_relative */
1126 0, /* bitpos */
1127 complain_overflow_dont, /* complain_on_overflow */
1128 bfd_elf_generic_reloc, /* special_function */
1129 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1130 FALSE, /* partial_inplace */
1131 0xffff, /* src_mask */
1132 0xffff, /* dst_mask */
1133 FALSE), /* pcrel_offset */
1134
1135 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
1136 16, /* rightshift */
1137 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 12, /* bitsize */
1139 FALSE, /* pc_relative */
1140 0, /* bitpos */
1141 complain_overflow_dont, /* complain_on_overflow */
1142 bfd_elf_generic_reloc, /* special_function */
1143 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1144 FALSE, /* partial_inplace */
1145 0xffff, /* src_mask */
1146 0xffff, /* dst_mask */
1147 FALSE), /* pcrel_offset */
1148
1149 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1150 0, /* rightshift */
1151 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 12, /* bitsize */
1153 FALSE, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_dont, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1158 FALSE, /* partial_inplace */
1159 0xffff, /* src_mask */
1160 0xffff, /* dst_mask */
1161 FALSE), /* pcrel_offset */
1162
1163 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1164 0, /* rightshift */
1165 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 12, /* bitsize */
1167 FALSE, /* pc_relative */
1168 0, /* bitpos */
1169 complain_overflow_dont, /* complain_on_overflow */
1170 bfd_elf_generic_reloc, /* special_function */
1171 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1172 FALSE, /* partial_inplace */
1173 0xffff, /* src_mask */
1174 0xffff, /* dst_mask */
1175 FALSE), /* pcrel_offset */
1176
1177 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
1178 12, /* rightshift */
1179 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 12, /* bitsize */
1181 FALSE, /* pc_relative */
1182 0, /* bitpos */
1183 complain_overflow_dont, /* complain_on_overflow */
1184 bfd_elf_generic_reloc, /* special_function */
1185 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1186 FALSE, /* partial_inplace */
1187 0xfff, /* src_mask */
1188 0xfff, /* dst_mask */
1189 FALSE), /* pcrel_offset */
1190
1191 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1192 0, /* rightshift */
1193 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 12, /* bitsize */
1195 FALSE, /* pc_relative */
1196 0, /* bitpos */
1197 complain_overflow_dont, /* complain_on_overflow */
1198 bfd_elf_generic_reloc, /* special_function */
1199 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1200 FALSE, /* partial_inplace */
1201 0xfff, /* src_mask */
1202 0xfff, /* dst_mask */
1203 FALSE), /* pcrel_offset */
1204
1205 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1206 0, /* rightshift */
1207 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 12, /* bitsize */
1209 FALSE, /* pc_relative */
1210 0, /* bitpos */
1211 complain_overflow_dont, /* complain_on_overflow */
1212 bfd_elf_generic_reloc, /* special_function */
1213 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1214 FALSE, /* partial_inplace */
1215 0xfff, /* src_mask */
1216 0xfff, /* dst_mask */
1217 FALSE), /* pcrel_offset */
1218};
1219
1220static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1221{
1222 HOWTO (R_AARCH64_TLSDESC_LD_PREL19, /* type */
1223 2, /* rightshift */
1224 2, /* size (0 = byte, 1 = short, 2 = long) */
1225 21, /* bitsize */
1226 TRUE, /* pc_relative */
1227 0, /* bitpos */
1228 complain_overflow_dont, /* complain_on_overflow */
1229 bfd_elf_generic_reloc, /* special_function */
1230 "R_AARCH64_TLSDESC_LD_PREL19", /* name */
1231 FALSE, /* partial_inplace */
1232 0x1ffffc, /* src_mask */
1233 0x1ffffc, /* dst_mask */
1234 TRUE), /* pcrel_offset */
1235
1236 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1237 0, /* rightshift */
1238 2, /* size (0 = byte, 1 = short, 2 = long) */
1239 21, /* bitsize */
1240 TRUE, /* pc_relative */
1241 0, /* bitpos */
1242 complain_overflow_dont, /* complain_on_overflow */
1243 bfd_elf_generic_reloc, /* special_function */
1244 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1245 FALSE, /* partial_inplace */
1246 0x1fffff, /* src_mask */
1247 0x1fffff, /* dst_mask */
1248 TRUE), /* pcrel_offset */
1249
1250 /* Get to the page for the GOT entry for the symbol
1251 (G(S) - P) using an ADRP instruction. */
1252 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE21, /* type */
1253 12, /* rightshift */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 21, /* bitsize */
1256 TRUE, /* pc_relative */
1257 0, /* bitpos */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_ADR_PAGE21", /* name */
1261 FALSE, /* partial_inplace */
1262 0x1fffff, /* src_mask */
1263 0x1fffff, /* dst_mask */
1264 TRUE), /* pcrel_offset */
1265
1266 /* LD64: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1268 3, /* rightshift */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1270 12, /* bitsize */
1271 FALSE, /* pc_relative */
1272 0, /* bitpos */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1280
1281 /* ADD: GOT offset G(S) & 0xfff. */
1282 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1283 0, /* rightshift */
1284 2, /* size (0 = byte, 1 = short, 2 = long) */
1285 12, /* bitsize */
1286 FALSE, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1291 FALSE, /* partial_inplace */
1292 0xfff, /* src_mask */
1293 0xfff, /* dst_mask */
1294 FALSE), /* pcrel_offset */
1295
1296 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
1297 16, /* rightshift */
1298 2, /* size (0 = byte, 1 = short, 2 = long) */
1299 12, /* bitsize */
1300 FALSE, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1305 FALSE, /* partial_inplace */
1306 0xffff, /* src_mask */
1307 0xffff, /* dst_mask */
1308 FALSE), /* pcrel_offset */
1309
1310 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1311 0, /* rightshift */
1312 2, /* size (0 = byte, 1 = short, 2 = long) */
1313 12, /* bitsize */
1314 FALSE, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1319 FALSE, /* partial_inplace */
1320 0xffff, /* src_mask */
1321 0xffff, /* dst_mask */
1322 FALSE), /* pcrel_offset */
1323
1324 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1325 0, /* rightshift */
1326 2, /* size (0 = byte, 1 = short, 2 = long) */
1327 12, /* bitsize */
1328 FALSE, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_dont, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 "R_AARCH64_TLSDESC_LDR", /* name */
1333 FALSE, /* partial_inplace */
1334 0x0, /* src_mask */
1335 0x0, /* dst_mask */
1336 FALSE), /* pcrel_offset */
1337
1338 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1339 0, /* rightshift */
1340 2, /* size (0 = byte, 1 = short, 2 = long) */
1341 12, /* bitsize */
1342 FALSE, /* pc_relative */
1343 0, /* bitpos */
1344 complain_overflow_dont, /* complain_on_overflow */
1345 bfd_elf_generic_reloc, /* special_function */
1346 "R_AARCH64_TLSDESC_ADD", /* name */
1347 FALSE, /* partial_inplace */
1348 0x0, /* src_mask */
1349 0x0, /* dst_mask */
1350 FALSE), /* pcrel_offset */
1351
1352 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1353 0, /* rightshift */
1354 2, /* size (0 = byte, 1 = short, 2 = long) */
1355 12, /* bitsize */
1356 FALSE, /* pc_relative */
1357 0, /* bitpos */
1358 complain_overflow_dont, /* complain_on_overflow */
1359 bfd_elf_generic_reloc, /* special_function */
1360 "R_AARCH64_TLSDESC_CALL", /* name */
1361 FALSE, /* partial_inplace */
1362 0x0, /* src_mask */
1363 0x0, /* dst_mask */
1364 FALSE), /* pcrel_offset */
1365};
1366
1367static reloc_howto_type *
1368elf64_aarch64_howto_from_type (unsigned int r_type)
1369{
1370 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1371 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1372
1373 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1374 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1375
1376 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1377 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1378
1379 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1380 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1381
1382 switch (r_type)
1383 {
1384 case R_AARCH64_NONE:
1385 return &elf64_aarch64_howto_none;
1386
1387 }
1388 bfd_set_error (bfd_error_bad_value);
1389 return NULL;
1390}
1391
1392static void
1393elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1394 Elf_Internal_Rela *elf_reloc)
1395{
1396 unsigned int r_type;
1397
1398 r_type = ELF64_R_TYPE (elf_reloc->r_info);
1399 bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
1400}
1401
1402struct elf64_aarch64_reloc_map
1403{
1404 bfd_reloc_code_real_type bfd_reloc_val;
1405 unsigned int elf_reloc_val;
1406};
1407
1408/* All entries in this list must also be present in
1409 elf64_aarch64_howto_table. */
1410static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1411{
1412 {BFD_RELOC_NONE, R_AARCH64_NONE},
1413
1414 /* Basic data relocations. */
1415 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1416 {BFD_RELOC_64, R_AARCH64_ABS64},
1417 {BFD_RELOC_32, R_AARCH64_ABS32},
1418 {BFD_RELOC_16, R_AARCH64_ABS16},
1419 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1420 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1421 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1422
1423 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1424 value inline. */
1425 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1426 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1427 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1428
1429 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1430 signed value inline. */
1431 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1432 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1433 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1434
1435 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1436 unsigned value inline. */
1437 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1438 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1439 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1440 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1441
1442 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1443 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1444 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1445 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1446 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1447 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1448 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1449 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1450 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1451 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1452 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1453
1454 /* Relocations for control-flow instructions. */
1455 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1456 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1457 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1458 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1459
1460 /* Relocations for PIC. */
1461 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
1462 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1463 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1464
1465 /* Relocations for TLS. */
1466 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1467 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1468 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1469 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1470 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1471 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1472 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1473 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1474 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1475 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1476 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1477 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1478 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1479 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1480 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1481 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1482 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1483 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1484 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1485 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1486 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1487 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1488 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1489 {BFD_RELOC_AARCH64_TLSDESC_LD_PREL19, R_AARCH64_TLSDESC_LD_PREL19},
1490 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1491 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21, R_AARCH64_TLSDESC_ADR_PAGE21},
1492 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1493 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1494 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1495 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1496 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1497 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1498 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1499 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1500 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1501 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1502 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1503};
1504
1505static reloc_howto_type *
1506elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1507 bfd_reloc_code_real_type code)
1508{
1509 unsigned int i;
1510
1511 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1512 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1513 return elf64_aarch64_howto_from_type
1514 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1515
1516 bfd_set_error (bfd_error_bad_value);
1517 return NULL;
1518}
1519
1520static reloc_howto_type *
1521elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1522 const char *r_name)
1523{
1524 unsigned int i;
1525
1526 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1527 if (elf64_aarch64_howto_table[i].name != NULL
1528 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1529 return &elf64_aarch64_howto_table[i];
1530
1531 return NULL;
1532}
1533
1534/* Support for core dump NOTE sections. */
1535
1536static bfd_boolean
1537elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1538{
1539 int offset;
1540 size_t size;
1541
1542 switch (note->descsz)
1543 {
1544 default:
1545 return FALSE;
1546
1547 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1548 /* pr_cursig */
1549 elf_tdata (abfd)->core->signal
1550 = bfd_get_16 (abfd, note->descdata + 12);
1551
1552 /* pr_pid */
1553 elf_tdata (abfd)->core->lwpid
1554 = bfd_get_32 (abfd, note->descdata + 32);
1555
1556 /* pr_reg */
1557 offset = 112;
1558 size = 272;
1559
1560 break;
1561 }
1562
1563 /* Make a ".reg/999" section. */
1564 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1565 size, note->descpos + offset);
1566}
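/* Illustrative arithmetic (an assumption based on the Linux arm64 core
   dump layout, not stated in the original comment): the pr_reg area
   corresponds to struct user_pt_regs, i.e. 31 general purpose registers
   plus sp, pc and pstate, each 8 bytes, which gives the 272 bytes used
   above.  */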
1567
1568#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1569#define TARGET_LITTLE_NAME "elf64-littleaarch64"
1570#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1571#define TARGET_BIG_NAME "elf64-bigaarch64"
1572
1573#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1574
1575typedef unsigned long int insn32;
1576
1577/* The linker script knows the section names for placement.
1578 The entry_names are used to do simple name mangling on the stubs.
1579 Given a function name and its type, the stub can be found. The
1580 name can be changed. The only requirement is that the %s be present. */
1581#define STUB_ENTRY_NAME "__%s_veneer"
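/* For example (illustrative): with this pattern, the stub generated for
   calls to a function 'foo' is given the local symbol name
   '__foo_veneer'.  */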
1582
1583/* The name of the dynamic interpreter. This is put in the .interp
1584 section. */
1585#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1586
1587#define AARCH64_MAX_FWD_BRANCH_OFFSET \
1588 (((1 << 25) - 1) << 2)
1589#define AARCH64_MAX_BWD_BRANCH_OFFSET \
1590 (-((1 << 25) << 2))
1591
1592#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1593#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1594
1595static int
1596aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1597{
1598 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1599 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1600}
1601
1602static int
1603aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1604{
1605 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1606 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1607 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1608}
1609
1610static const uint32_t aarch64_adrp_branch_stub [] =
1611{
1612 0x90000010, /* adrp ip0, X */
1613 /* R_AARCH64_ADR_HI21_PCREL(X) */
1614 0x91000210, /* add ip0, ip0, :lo12:X */
1615 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1616 0xd61f0200, /* br ip0 */
1617};
1618
1619static const uint32_t aarch64_long_branch_stub[] =
1620{
1621 0x58000090, /* ldr ip0, 1f */
1622 0x10000011, /* adr ip1, #0 */
1623 0x8b110210, /* add ip0, ip0, ip1 */
1624 0xd61f0200, /* br ip0 */
1625 0x00000000, /* 1: .xword
1626 R_AARCH64_PREL64(X) + 12
1627 */
1628 0x00000000,
1629};
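/* The +12 bias on the PREL64 literal cancels the distance between the
   literal (at offset 16) and the adr at offset 4 which supplies the base,
   so ip0 ends up holding exactly X.  */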
1630
1631/* Section name for stubs is the associated section name plus this
1632 string. */
1633#define STUB_SUFFIX ".stub"
1634
1635enum elf64_aarch64_stub_type
1636{
1637 aarch64_stub_none,
1638 aarch64_stub_adrp_branch,
1639 aarch64_stub_long_branch,
1640};
1641
1642struct elf64_aarch64_stub_hash_entry
1643{
1644 /* Base hash table entry structure. */
1645 struct bfd_hash_entry root;
1646
1647 /* The stub section. */
1648 asection *stub_sec;
1649
1650 /* Offset within stub_sec of the beginning of this stub. */
1651 bfd_vma stub_offset;
1652
1653 /* Given the symbol's value and its section we can determine its final
1654 value when building the stubs (so the stub knows where to jump). */
1655 bfd_vma target_value;
1656 asection *target_section;
1657
1658 enum elf64_aarch64_stub_type stub_type;
1659
1660 /* The symbol table entry, if any, that this was derived from. */
1661 struct elf64_aarch64_link_hash_entry *h;
1662
1663 /* Destination symbol type */
1664 unsigned char st_type;
1665
1666 /* Where this stub is being called from, or, in the case of combined
1667 stub sections, the first input section in the group. */
1668 asection *id_sec;
1669
1670 /* The name for the local symbol at the start of this stub. The
1671 stub name in the hash table has to be unique; this does not, so
1672 it can be friendlier. */
1673 char *output_name;
1674};
1675
1676/* Used to build a map of a section. This is required for mixed-endian
1677 code/data. */
1678
1679typedef struct elf64_elf_section_map
1680{
1681 bfd_vma vma;
1682 char type;
1683}
1684elf64_aarch64_section_map;
1685
1686
1687typedef struct _aarch64_elf_section_data
1688{
1689 struct bfd_elf_section_data elf;
1690 unsigned int mapcount;
1691 unsigned int mapsize;
1692 elf64_aarch64_section_map *map;
1693}
1694_aarch64_elf_section_data;
1695
1696#define elf64_aarch64_section_data(sec) \
1697 ((_aarch64_elf_section_data *) elf_section_data (sec))
1698
1699/* The size of the thread control block. */
1700#define TCB_SIZE 16
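/* AArch64 uses TLS variant I: the thread pointer addresses the thread
   control block and the static TLS area begins TCB_SIZE bytes above it.  */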
1701
1702struct elf_aarch64_local_symbol
1703{
1704 unsigned int got_type;
1705 bfd_signed_vma got_refcount;
1706 bfd_vma got_offset;
1707
1708 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1709 offset is from the end of the jump table and reserved entries
1710 within the PLTGOT.
1711
 1712     The magic value (bfd_vma) -1 indicates that an offset has not been
1713 allocated. */
1714 bfd_vma tlsdesc_got_jump_table_offset;
1715};
1716
1717struct elf_aarch64_obj_tdata
1718{
1719 struct elf_obj_tdata root;
1720
1721 /* local symbol descriptors */
1722 struct elf_aarch64_local_symbol *locals;
1723
1724 /* Zero to warn when linking objects with incompatible enum sizes. */
1725 int no_enum_size_warning;
1726
1727 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1728 int no_wchar_size_warning;
1729};
1730
1731#define elf_aarch64_tdata(bfd) \
1732 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1733
1734#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1735
1736#define is_aarch64_elf(bfd) \
1737 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1738 && elf_tdata (bfd) != NULL \
1739 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1740
1741static bfd_boolean
1742elf64_aarch64_mkobject (bfd *abfd)
1743{
1744 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1745 AARCH64_ELF_DATA);
1746}
1747
1748#define elf64_aarch64_hash_entry(ent) \
1749 ((struct elf64_aarch64_link_hash_entry *)(ent))
1750
1751#define GOT_UNKNOWN 0
1752#define GOT_NORMAL 1
1753#define GOT_TLS_GD 2
1754#define GOT_TLS_IE 4
1755#define GOT_TLSDESC_GD 8
1756
1757#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1758
1759/* AArch64 ELF linker hash entry. */
1760struct elf64_aarch64_link_hash_entry
1761{
1762 struct elf_link_hash_entry root;
1763
1764 /* Track dynamic relocs copied for this symbol. */
1765 struct elf_dyn_relocs *dyn_relocs;
1766
1767 /* Since PLT entries have variable size, we need to record the
1768 index into .got.plt instead of recomputing it from the PLT
1769 offset. */
1770 bfd_signed_vma plt_got_offset;
1771
1772 /* Bit mask representing the type of GOT entry(s) if any required by
1773 this symbol. */
1774 unsigned int got_type;
1775
1776 /* A pointer to the most recently used stub hash entry against this
1777 symbol. */
1778 struct elf64_aarch64_stub_hash_entry *stub_cache;
1779
1780 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1781 is from the end of the jump table and reserved entries within the PLTGOT.
1782
1783 The magic value (bfd_vma) -1 indicates that an offset has not
 1784     been allocated.  */
1785 bfd_vma tlsdesc_got_jump_table_offset;
1786};
1787
1788static unsigned int
1789elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1790 bfd *abfd,
1791 unsigned long r_symndx)
1792{
1793 if (h)
1794 return elf64_aarch64_hash_entry (h)->got_type;
1795
1796 if (! elf64_aarch64_locals (abfd))
1797 return GOT_UNKNOWN;
1798
1799 return elf64_aarch64_locals (abfd)[r_symndx].got_type;
1800}
1801
1802/* Traverse an AArch64 ELF linker hash table. */
1803#define elf64_aarch64_link_hash_traverse(table, func, info) \
1804 (elf_link_hash_traverse \
1805 (&(table)->root, \
1806 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1807 (info)))
1808
1809/* Get the AArch64 elf linker hash table from a link_info structure. */
1810#define elf64_aarch64_hash_table(info) \
1811 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1812
1813#define aarch64_stub_hash_lookup(table, string, create, copy) \
1814 ((struct elf64_aarch64_stub_hash_entry *) \
1815 bfd_hash_lookup ((table), (string), (create), (copy)))
1816
1817/* AArch64 ELF linker hash table. */
1818struct elf64_aarch64_link_hash_table
1819{
1820 /* The main hash table. */
1821 struct elf_link_hash_table root;
1822
1823 /* Nonzero to force PIC branch veneers. */
1824 int pic_veneer;
1825
1826 /* The number of bytes in the initial entry in the PLT. */
1827 bfd_size_type plt_header_size;
1828
 1829   /* The number of bytes in the subsequent PLT entries.  */
1830 bfd_size_type plt_entry_size;
1831
1832 /* Short-cuts to get to dynamic linker sections. */
1833 asection *sdynbss;
1834 asection *srelbss;
1835
1836 /* Small local sym cache. */
1837 struct sym_cache sym_cache;
1838
1839 /* For convenience in allocate_dynrelocs. */
1840 bfd *obfd;
1841
1842 /* The amount of space used by the reserved portion of the sgotplt
1843 section, plus whatever space is used by the jump slots. */
1844 bfd_vma sgotplt_jump_table_size;
1845
1846 /* The stub hash table. */
1847 struct bfd_hash_table stub_hash_table;
1848
1849 /* Linker stub bfd. */
1850 bfd *stub_bfd;
1851
1852 /* Linker call-backs. */
1853 asection *(*add_stub_section) (const char *, asection *);
1854 void (*layout_sections_again) (void);
1855
1856 /* Array to keep track of which stub sections have been created, and
1857 information on stub grouping. */
1858 struct map_stub
1859 {
1860 /* This is the section to which stubs in the group will be
1861 attached. */
1862 asection *link_sec;
1863 /* The stub section. */
1864 asection *stub_sec;
1865 } *stub_group;
1866
1867 /* Assorted information used by elf64_aarch64_size_stubs. */
1868 unsigned int bfd_count;
1869 int top_index;
1870 asection **input_list;
1871
1872 /* The offset into splt of the PLT entry for the TLS descriptor
1873 resolver. Special values are 0, if not necessary (or not found
1874 to be necessary yet), and -1 if needed but not determined
1875 yet. */
1876 bfd_vma tlsdesc_plt;
1877
1878 /* The GOT offset for the lazy trampoline. Communicated to the
1879 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1880 indicates an offset is not allocated. */
1881 bfd_vma dt_tlsdesc_got;
1882
1883 /* Used by local STT_GNU_IFUNC symbols. */
1884 htab_t loc_hash_table;
1885 void * loc_hash_memory;
1886
1887 /* The index of the next R_AARCH64_JUMP_SLOT entry in .rela.plt. */
1888 bfd_vma next_jump_slot_index;
1889 /* The index of the next R_AARCH64_IRELATIVE entry in .rela.plt. */
1890 bfd_vma next_irelative_index;
1891};
1892
1893
 1894/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
 1895   maximum range expressible by an unsigned number with the indicated
 1896   number of BITS; otherwise return bfd_reloc_ok.  */
1897
1898static bfd_reloc_status_type
1899aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1900{
1901 bfd_vma lim;
1902 if (bits >= sizeof (bfd_vma) * 8)
1903 return bfd_reloc_ok;
1904 lim = (bfd_vma) 1 << bits;
1905 if (value >= lim)
1906 return bfd_reloc_overflow;
1907 return bfd_reloc_ok;
1908}
1909
1910
 1911/* Return bfd_reloc_overflow if the indicated VALUE has overflowed the
 1912   maximum range expressible by a signed number with the indicated
 1913   number of BITS; otherwise return bfd_reloc_ok.  */
1914
1915static bfd_reloc_status_type
1916aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1917{
1918 bfd_signed_vma svalue = (bfd_signed_vma) value;
1919 bfd_signed_vma lim;
1920
1921 if (bits >= sizeof (bfd_vma) * 8)
1922 return bfd_reloc_ok;
1923 lim = (bfd_signed_vma) 1 << (bits - 1);
1924 if (svalue < -lim || svalue >= lim)
1925 return bfd_reloc_overflow;
1926 return bfd_reloc_ok;
1927}
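/* For example, with BITS == 16 the unsigned check accepts values up to
   0xffff and the signed check accepts -0x8000 through 0x7fff; anything
   outside those ranges yields bfd_reloc_overflow.  */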
1928
1929/* Create an entry in an AArch64 ELF linker hash table. */
1930
1931static struct bfd_hash_entry *
1932elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1933 struct bfd_hash_table *table,
1934 const char *string)
1935{
1936 struct elf64_aarch64_link_hash_entry *ret =
1937 (struct elf64_aarch64_link_hash_entry *) entry;
1938
1939 /* Allocate the structure if it has not already been allocated by a
1940 subclass. */
1941 if (ret == NULL)
1942 ret = bfd_hash_allocate (table,
1943 sizeof (struct elf64_aarch64_link_hash_entry));
1944 if (ret == NULL)
1945 return (struct bfd_hash_entry *) ret;
1946
1947 /* Call the allocation method of the superclass. */
1948 ret = ((struct elf64_aarch64_link_hash_entry *)
1949 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1950 table, string));
1951 if (ret != NULL)
1952 {
1953 ret->dyn_relocs = NULL;
1954 ret->got_type = GOT_UNKNOWN;
1955 ret->plt_got_offset = (bfd_vma) - 1;
1956 ret->stub_cache = NULL;
1957 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1958 }
1959
1960 return (struct bfd_hash_entry *) ret;
1961}
1962
1963/* Initialize an entry in the stub hash table. */
1964
1965static struct bfd_hash_entry *
1966stub_hash_newfunc (struct bfd_hash_entry *entry,
1967 struct bfd_hash_table *table, const char *string)
1968{
1969 /* Allocate the structure if it has not already been allocated by a
1970 subclass. */
1971 if (entry == NULL)
1972 {
1973 entry = bfd_hash_allocate (table,
1974 sizeof (struct
1975 elf64_aarch64_stub_hash_entry));
1976 if (entry == NULL)
1977 return entry;
1978 }
1979
1980 /* Call the allocation method of the superclass. */
1981 entry = bfd_hash_newfunc (entry, table, string);
1982 if (entry != NULL)
1983 {
1984 struct elf64_aarch64_stub_hash_entry *eh;
1985
1986 /* Initialize the local fields. */
1987 eh = (struct elf64_aarch64_stub_hash_entry *) entry;
1988 eh->stub_sec = NULL;
1989 eh->stub_offset = 0;
1990 eh->target_value = 0;
1991 eh->target_section = NULL;
1992 eh->stub_type = aarch64_stub_none;
1993 eh->h = NULL;
1994 eh->id_sec = NULL;
1995 }
1996
1997 return entry;
1998}
1999
2000/* Compute a hash of a local hash entry. We use elf_link_hash_entry
 2001   for local symbols so that we can handle local STT_GNU_IFUNC symbols
 2002   as global symbols.  We reuse indx and dynstr_index for the local symbol
2003 hash since they aren't used by global symbols in this backend. */
2004
2005static hashval_t
2006elf_aarch64_local_htab_hash (const void *ptr)
2007{
2008 struct elf_link_hash_entry *h
2009 = (struct elf_link_hash_entry *) ptr;
2010 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2011}
2012
2013/* Compare local hash entries. */
2014
2015static int
2016elf_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2017{
2018 struct elf_link_hash_entry *h1
2019 = (struct elf_link_hash_entry *) ptr1;
2020 struct elf_link_hash_entry *h2
2021 = (struct elf_link_hash_entry *) ptr2;
2022
2023 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2024}
2025
 2026/* Find and/or create a hash entry for a local symbol.  */
2027
2028static struct elf_link_hash_entry *
2029elf_aarch64_get_local_sym_hash (struct elf64_aarch64_link_hash_table *htab,
2030 bfd *abfd, const Elf_Internal_Rela *rel,
2031 bfd_boolean create)
2032{
2033 struct elf64_aarch64_link_hash_entry e, *ret;
2034 asection *sec = abfd->sections;
2035 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
 2036				       ELF64_R_SYM (rel->r_info));
2037 void **slot;
2038
2039 e.root.indx = sec->id;
 2040  e.root.dynstr_index = ELF64_R_SYM (rel->r_info);
2041 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2042 create ? INSERT : NO_INSERT);
2043
2044 if (!slot)
2045 return NULL;
2046
2047 if (*slot)
2048 {
2049 ret = (struct elf64_aarch64_link_hash_entry *) *slot;
2050 return &ret->root;
2051 }
2052
2053 ret = (struct elf64_aarch64_link_hash_entry *)
2054 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2055 sizeof (struct elf64_aarch64_link_hash_entry));
2056 if (ret)
2057 {
2058 memset (ret, 0, sizeof (*ret));
2059 ret->root.indx = sec->id;
 2060      ret->root.dynstr_index = ELF64_R_SYM (rel->r_info);
2061 ret->root.dynindx = -1;
2062 *slot = ret;
2063 }
2064 return &ret->root;
2065}
2066
2067/* Copy the extra info we tack onto an elf_link_hash_entry. */
2068
2069static void
2070elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2071 struct elf_link_hash_entry *dir,
2072 struct elf_link_hash_entry *ind)
2073{
2074 struct elf64_aarch64_link_hash_entry *edir, *eind;
2075
2076 edir = (struct elf64_aarch64_link_hash_entry *) dir;
2077 eind = (struct elf64_aarch64_link_hash_entry *) ind;
2078
2079 if (eind->dyn_relocs != NULL)
2080 {
2081 if (edir->dyn_relocs != NULL)
2082 {
2083 struct elf_dyn_relocs **pp;
2084 struct elf_dyn_relocs *p;
2085
2086 /* Add reloc counts against the indirect sym to the direct sym
2087 list. Merge any entries against the same section. */
2088 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2089 {
2090 struct elf_dyn_relocs *q;
2091
2092 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2093 if (q->sec == p->sec)
2094 {
2095 q->pc_count += p->pc_count;
2096 q->count += p->count;
2097 *pp = p->next;
2098 break;
2099 }
2100 if (q == NULL)
2101 pp = &p->next;
2102 }
2103 *pp = edir->dyn_relocs;
2104 }
2105
2106 edir->dyn_relocs = eind->dyn_relocs;
2107 eind->dyn_relocs = NULL;
2108 }
2109
2110 if (ind->root.type == bfd_link_hash_indirect)
2111 {
2112 /* Copy over PLT info. */
2113 if (dir->got.refcount <= 0)
2114 {
2115 edir->got_type = eind->got_type;
2116 eind->got_type = GOT_UNKNOWN;
2117 }
2118 }
2119
2120 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2121}
2122
2123/* Create an AArch64 elf linker hash table. */
2124
2125static struct bfd_link_hash_table *
2126elf64_aarch64_link_hash_table_create (bfd *abfd)
2127{
2128 struct elf64_aarch64_link_hash_table *ret;
2129 bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
2130
 2131  ret = bfd_zmalloc (amt);
2132 if (ret == NULL)
2133 return NULL;
2134
2135 if (!_bfd_elf_link_hash_table_init
2136 (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
2137 sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2138 {
2139 free (ret);
2140 return NULL;
2141 }
2142
2143 ret->plt_header_size = PLT_ENTRY_SIZE;
2144 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
 2145  ret->obfd = abfd;
2146 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2147
2148 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2149 sizeof (struct elf64_aarch64_stub_hash_entry)))
2150 {
2151 free (ret);
2152 return NULL;
2153 }
2154
2155 ret->loc_hash_table = htab_try_create (1024,
2156 elf_aarch64_local_htab_hash,
2157 elf_aarch64_local_htab_eq,
2158 NULL);
2159 ret->loc_hash_memory = objalloc_create ();
2160 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2161 {
2162 free (ret);
2163 return NULL;
2164 }
2165
2166 return &ret->root.root;
2167}
2168
2169/* Free the derived linker hash table. */
2170
2171static void
2172elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2173{
2174 struct elf64_aarch64_link_hash_table *ret
2175 = (struct elf64_aarch64_link_hash_table *) hash;
2176
2177 if (ret->loc_hash_table)
2178 htab_delete (ret->loc_hash_table);
2179 if (ret->loc_hash_memory)
2180 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2181
 2182  bfd_hash_table_free (&ret->stub_hash_table);
 2183  _bfd_elf_link_hash_table_free (hash);
2184}
2185
2186static bfd_vma
2187aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2188 bfd_vma addend, bfd_boolean weak_undef_p)
2189{
2190 switch (r_type)
2191 {
2192 case R_AARCH64_TLSDESC_CALL:
2193 case R_AARCH64_NONE:
2194 case R_AARCH64_NULL:
2195 break;
2196
2197 case R_AARCH64_ADR_PREL_LO21:
2198 case R_AARCH64_CONDBR19:
2199 case R_AARCH64_LD_PREL_LO19:
2200 case R_AARCH64_PREL16:
2201 case R_AARCH64_PREL32:
2202 case R_AARCH64_PREL64:
2203 case R_AARCH64_TSTBR14:
2204 if (weak_undef_p)
2205 value = place;
2206 value = value + addend - place;
2207 break;
2208
2209 case R_AARCH64_CALL26:
2210 case R_AARCH64_JUMP26:
2211 value = value + addend - place;
2212 break;
2213
2214 case R_AARCH64_ABS16:
2215 case R_AARCH64_ABS32:
2216 case R_AARCH64_MOVW_SABS_G0:
2217 case R_AARCH64_MOVW_SABS_G1:
2218 case R_AARCH64_MOVW_SABS_G2:
2219 case R_AARCH64_MOVW_UABS_G0:
2220 case R_AARCH64_MOVW_UABS_G0_NC:
2221 case R_AARCH64_MOVW_UABS_G1:
2222 case R_AARCH64_MOVW_UABS_G1_NC:
2223 case R_AARCH64_MOVW_UABS_G2:
2224 case R_AARCH64_MOVW_UABS_G2_NC:
2225 case R_AARCH64_MOVW_UABS_G3:
2226 value = value + addend;
2227 break;
2228
2229 case R_AARCH64_ADR_PREL_PG_HI21:
2230 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2231 if (weak_undef_p)
2232 value = PG (place);
2233 value = PG (value + addend) - PG (place);
2234 break;
2235
2236 case R_AARCH64_GOT_LD_PREL19:
2237 value = value + addend - place;
2238 break;
2239
 2240    case R_AARCH64_ADR_GOT_PAGE:
 2241    case R_AARCH64_TLSDESC_ADR_PAGE21:
2242 case R_AARCH64_TLSGD_ADR_PAGE21:
2243 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2244 value = PG (value + addend) - PG (place);
2245 break;
2246
2247 case R_AARCH64_ADD_ABS_LO12_NC:
2248 case R_AARCH64_LD64_GOT_LO12_NC:
2249 case R_AARCH64_LDST8_ABS_LO12_NC:
2250 case R_AARCH64_LDST16_ABS_LO12_NC:
2251 case R_AARCH64_LDST32_ABS_LO12_NC:
2252 case R_AARCH64_LDST64_ABS_LO12_NC:
2253 case R_AARCH64_LDST128_ABS_LO12_NC:
2254 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2255 case R_AARCH64_TLSDESC_ADD:
2256 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2257 case R_AARCH64_TLSDESC_LDR:
2258 case R_AARCH64_TLSGD_ADD_LO12_NC:
2259 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2260 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2261 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2262 value = PG_OFFSET (value + addend);
2263 break;
2264
2265 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2266 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2267 value = (value + addend) & (bfd_vma) 0xffff0000;
2268 break;
2269 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2270 value = (value + addend) & (bfd_vma) 0xfff000;
2271 break;
2272
2273 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2274 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2275 value = (value + addend) & (bfd_vma) 0xffff;
2276 break;
2277
2278 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2279 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2280 value -= place & ~(bfd_vma) 0xffffffff;
2281 break;
2282 }
2283 return value;
2284}
2285
2286static bfd_boolean
2287aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2288 bfd_vma offset, bfd_vma value)
2289{
2290 reloc_howto_type *howto;
2291 bfd_vma place;
2292
2293 howto = elf64_aarch64_howto_from_type (r_type);
2294 place = (input_section->output_section->vma + input_section->output_offset
2295 + offset);
2296 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2297 return bfd_elf_aarch64_put_addend (input_bfd,
2298 input_section->contents + offset,
2299 howto, value);
2300}
2301
2302static enum elf64_aarch64_stub_type
2303aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2304{
2305 if (aarch64_valid_for_adrp_p (value, place))
2306 return aarch64_stub_adrp_branch;
2307 return aarch64_stub_long_branch;
2308}
2309
2310/* Determine the type of stub needed, if any, for a call. */
2311
2312static enum elf64_aarch64_stub_type
2313aarch64_type_of_stub (struct bfd_link_info *info,
2314 asection *input_sec,
2315 const Elf_Internal_Rela *rel,
2316 unsigned char st_type,
2317 struct elf64_aarch64_link_hash_entry *hash,
2318 bfd_vma destination)
2319{
2320 bfd_vma location;
2321 bfd_signed_vma branch_offset;
2322 unsigned int r_type;
2323 struct elf64_aarch64_link_hash_table *globals;
2324 enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
2325 bfd_boolean via_plt_p;
2326
2327 if (st_type != STT_FUNC)
2328 return stub_type;
2329
2330 globals = elf64_aarch64_hash_table (info);
2331 via_plt_p = (globals->root.splt != NULL && hash != NULL
2332 && hash->root.plt.offset != (bfd_vma) - 1);
2333
2334 if (via_plt_p)
2335 return stub_type;
2336
2337 /* Determine where the call point is. */
2338 location = (input_sec->output_offset
2339 + input_sec->output_section->vma + rel->r_offset);
2340
2341 branch_offset = (bfd_signed_vma) (destination - location);
2342
2343 r_type = ELF64_R_TYPE (rel->r_info);
2344
2345 /* We don't want to redirect any old unconditional jump in this way,
2346 only one which is being used for a sibcall, where it is
2347 acceptable for the IP0 and IP1 registers to be clobbered. */
2348 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2349 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2350 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2351 {
2352 stub_type = aarch64_stub_long_branch;
2353 }
2354
2355 return stub_type;
2356}
2357
2358/* Build a name for an entry in the stub hash table. */
2359
2360static char *
2361elf64_aarch64_stub_name (const asection *input_section,
2362 const asection *sym_sec,
2363 const struct elf64_aarch64_link_hash_entry *hash,
2364 const Elf_Internal_Rela *rel)
2365{
2366 char *stub_name;
2367 bfd_size_type len;
2368
2369 if (hash)
2370 {
2371 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2372 stub_name = bfd_malloc (len);
2373 if (stub_name != NULL)
2374 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2375 (unsigned int) input_section->id,
2376 hash->root.root.root.string,
2377 rel->r_addend);
2378 }
2379 else
2380 {
2381 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2382 stub_name = bfd_malloc (len);
2383 if (stub_name != NULL)
2384 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2385 (unsigned int) input_section->id,
2386 (unsigned int) sym_sec->id,
2387 (unsigned int) ELF64_R_SYM (rel->r_info),
2388 rel->r_addend);
2389 }
2390
2391 return stub_name;
2392}
2393
2394/* Look up an entry in the stub hash. Stub entries are cached because
2395 creating the stub name takes a bit of time. */
2396
2397static struct elf64_aarch64_stub_hash_entry *
2398elf64_aarch64_get_stub_entry (const asection *input_section,
2399 const asection *sym_sec,
2400 struct elf_link_hash_entry *hash,
2401 const Elf_Internal_Rela *rel,
2402 struct elf64_aarch64_link_hash_table *htab)
2403{
2404 struct elf64_aarch64_stub_hash_entry *stub_entry;
2405 struct elf64_aarch64_link_hash_entry *h =
2406 (struct elf64_aarch64_link_hash_entry *) hash;
2407 const asection *id_sec;
2408
2409 if ((input_section->flags & SEC_CODE) == 0)
2410 return NULL;
2411
2412 /* If this input section is part of a group of sections sharing one
2413 stub section, then use the id of the first section in the group.
2414 Stub names need to include a section id, as there may well be
2415 more than one stub used to reach say, printf, and we need to
2416 distinguish between them. */
2417 id_sec = htab->stub_group[input_section->id].link_sec;
2418
2419 if (h != NULL && h->stub_cache != NULL
2420 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2421 {
2422 stub_entry = h->stub_cache;
2423 }
2424 else
2425 {
2426 char *stub_name;
2427
2428 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2429 if (stub_name == NULL)
2430 return NULL;
2431
2432 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2433 stub_name, FALSE, FALSE);
2434 if (h != NULL)
2435 h->stub_cache = stub_entry;
2436
2437 free (stub_name);
2438 }
2439
2440 return stub_entry;
2441}
2442
2443/* Add a new stub entry to the stub hash. Not all fields of the new
2444 stub entry are initialised. */
2445
2446static struct elf64_aarch64_stub_hash_entry *
2447elf64_aarch64_add_stub (const char *stub_name,
2448 asection *section,
2449 struct elf64_aarch64_link_hash_table *htab)
2450{
2451 asection *link_sec;
2452 asection *stub_sec;
2453 struct elf64_aarch64_stub_hash_entry *stub_entry;
2454
2455 link_sec = htab->stub_group[section->id].link_sec;
2456 stub_sec = htab->stub_group[section->id].stub_sec;
2457 if (stub_sec == NULL)
2458 {
2459 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2460 if (stub_sec == NULL)
2461 {
2462 size_t namelen;
2463 bfd_size_type len;
2464 char *s_name;
2465
2466 namelen = strlen (link_sec->name);
2467 len = namelen + sizeof (STUB_SUFFIX);
2468 s_name = bfd_alloc (htab->stub_bfd, len);
2469 if (s_name == NULL)
2470 return NULL;
2471
2472 memcpy (s_name, link_sec->name, namelen);
2473 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2474 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2475 if (stub_sec == NULL)
2476 return NULL;
2477 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2478 }
2479 htab->stub_group[section->id].stub_sec = stub_sec;
2480 }
2481
2482 /* Enter this entry into the linker stub hash table. */
2483 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2484 TRUE, FALSE);
2485 if (stub_entry == NULL)
2486 {
2487 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2488 section->owner, stub_name);
2489 return NULL;
2490 }
2491
2492 stub_entry->stub_sec = stub_sec;
2493 stub_entry->stub_offset = 0;
2494 stub_entry->id_sec = link_sec;
2495
2496 return stub_entry;
2497}
2498
2499static bfd_boolean
2500aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2501 void *in_arg ATTRIBUTE_UNUSED)
2502{
2503 struct elf64_aarch64_stub_hash_entry *stub_entry;
2504 asection *stub_sec;
2505 bfd *stub_bfd;
2506 bfd_byte *loc;
2507 bfd_vma sym_value;
2508 unsigned int template_size;
2509 const uint32_t *template;
2510 unsigned int i;
2511
2512 /* Massage our args to the form they really have. */
2513 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2514
2515 stub_sec = stub_entry->stub_sec;
2516
2517 /* Make a note of the offset within the stubs for this entry. */
2518 stub_entry->stub_offset = stub_sec->size;
2519 loc = stub_sec->contents + stub_entry->stub_offset;
2520
2521 stub_bfd = stub_sec->owner;
2522
2523 /* This is the address of the stub destination. */
2524 sym_value = (stub_entry->target_value
2525 + stub_entry->target_section->output_offset
2526 + stub_entry->target_section->output_section->vma);
2527
2528 if (stub_entry->stub_type == aarch64_stub_long_branch)
2529 {
2530 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2531 + stub_sec->output_offset);
2532
2533 /* See if we can relax the stub. */
2534 if (aarch64_valid_for_adrp_p (sym_value, place))
2535 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2536 }
2537
2538 switch (stub_entry->stub_type)
2539 {
2540 case aarch64_stub_adrp_branch:
2541 template = aarch64_adrp_branch_stub;
2542 template_size = sizeof (aarch64_adrp_branch_stub);
2543 break;
2544 case aarch64_stub_long_branch:
2545 template = aarch64_long_branch_stub;
2546 template_size = sizeof (aarch64_long_branch_stub);
2547 break;
2548 default:
2549 BFD_FAIL ();
2550 return FALSE;
2551 }
2552
2553 for (i = 0; i < (template_size / sizeof template[0]); i++)
2554 {
2555 bfd_putl32 (template[i], loc);
2556 loc += 4;
2557 }
2558
2559 template_size = (template_size + 7) & ~7;
2560 stub_sec->size += template_size;
2561
2562 switch (stub_entry->stub_type)
2563 {
2564 case aarch64_stub_adrp_branch:
2565 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2566 stub_entry->stub_offset, sym_value))
2567 /* The stub would not have been relaxed if the offset was out
2568 of range. */
2569 BFD_FAIL ();
2570
2571 _bfd_final_link_relocate
2572 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2573 stub_bfd,
2574 stub_sec,
2575 stub_sec->contents,
2576 stub_entry->stub_offset + 4,
2577 sym_value,
2578 0);
2579 break;
2580
2581 case aarch64_stub_long_branch:
2582 /* We want the value relative to the address 12 bytes back from the
2583 value itself. */
2584 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2585 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2586 stub_sec->contents,
2587 stub_entry->stub_offset + 16,
2588 sym_value + 12, 0);
2589 break;
2590 default:
2591 break;
2592 }
2593
2594 return TRUE;
2595}
2596
2597/* As above, but don't actually build the stub. Just bump offset so
2598 we know stub section sizes. */
2599
2600static bfd_boolean
2601aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2602 void *in_arg ATTRIBUTE_UNUSED)
2603{
2604 struct elf64_aarch64_stub_hash_entry *stub_entry;
2605 int size;
2606
2607 /* Massage our args to the form they really have. */
2608 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2609
2610 switch (stub_entry->stub_type)
2611 {
2612 case aarch64_stub_adrp_branch:
2613 size = sizeof (aarch64_adrp_branch_stub);
2614 break;
2615 case aarch64_stub_long_branch:
2616 size = sizeof (aarch64_long_branch_stub);
2617 break;
2618 default:
2619 BFD_FAIL ();
2620 return FALSE;
2621 break;
2622 }
2623
2624 size = (size + 7) & ~7;
2625 stub_entry->stub_sec->size += size;
2626 return TRUE;
2627}
2628
2629/* External entry points for sizing and building linker stubs. */
2630
2631/* Set up various things so that we can make a list of input sections
2632 for each output section included in the link. Returns -1 on error,
2633 0 when no stubs will be needed, and 1 on success. */
2634
2635int
2636elf64_aarch64_setup_section_lists (bfd *output_bfd,
2637 struct bfd_link_info *info)
2638{
2639 bfd *input_bfd;
2640 unsigned int bfd_count;
2641 int top_id, top_index;
2642 asection *section;
2643 asection **input_list, **list;
2644 bfd_size_type amt;
2645 struct elf64_aarch64_link_hash_table *htab =
2646 elf64_aarch64_hash_table (info);
2647
2648 if (!is_elf_hash_table (htab))
2649 return 0;
2650
2651 /* Count the number of input BFDs and find the top input section id. */
2652 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2653 input_bfd != NULL; input_bfd = input_bfd->link_next)
2654 {
2655 bfd_count += 1;
2656 for (section = input_bfd->sections;
2657 section != NULL; section = section->next)
2658 {
2659 if (top_id < section->id)
2660 top_id = section->id;
2661 }
2662 }
2663 htab->bfd_count = bfd_count;
2664
2665 amt = sizeof (struct map_stub) * (top_id + 1);
2666 htab->stub_group = bfd_zmalloc (amt);
2667 if (htab->stub_group == NULL)
2668 return -1;
2669
2670 /* We can't use output_bfd->section_count here to find the top output
2671 section index as some sections may have been removed, and
2672 _bfd_strip_section_from_output doesn't renumber the indices. */
2673 for (section = output_bfd->sections, top_index = 0;
2674 section != NULL; section = section->next)
2675 {
2676 if (top_index < section->index)
2677 top_index = section->index;
2678 }
2679
2680 htab->top_index = top_index;
2681 amt = sizeof (asection *) * (top_index + 1);
2682 input_list = bfd_malloc (amt);
2683 htab->input_list = input_list;
2684 if (input_list == NULL)
2685 return -1;
2686
2687 /* For sections we aren't interested in, mark their entries with a
2688 value we can check later. */
2689 list = input_list + top_index;
2690 do
2691 *list = bfd_abs_section_ptr;
2692 while (list-- != input_list);
2693
2694 for (section = output_bfd->sections;
2695 section != NULL; section = section->next)
2696 {
2697 if ((section->flags & SEC_CODE) != 0)
2698 input_list[section->index] = NULL;
2699 }
2700
2701 return 1;
2702}
2703
2704/* Used by elf64_aarch64_next_input_section and group_sections. */
2705#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2706
2707/* The linker repeatedly calls this function for each input section,
2708 in the order that input sections are linked into output sections.
2709 Build lists of input sections to determine groupings between which
2710 we may insert linker stubs. */
2711
2712void
2713elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2714{
2715 struct elf64_aarch64_link_hash_table *htab =
2716 elf64_aarch64_hash_table (info);
2717
2718 if (isec->output_section->index <= htab->top_index)
2719 {
2720 asection **list = htab->input_list + isec->output_section->index;
2721
2722 if (*list != bfd_abs_section_ptr)
2723 {
2724 /* Steal the link_sec pointer for our list. */
2725 /* This happens to make the list in reverse order,
2726 which is what we want. */
2727 PREV_SEC (isec) = *list;
2728 *list = isec;
2729 }
2730 }
2731}
2732
2733/* See whether we can group stub sections together. Grouping stub
2734 sections may result in fewer stubs. More importantly, we need to
2735 put all .init* and .fini* stubs at the beginning of the .init or
2736 .fini output sections respectively, because glibc splits the
2737 _init and _fini functions into multiple parts. Putting a stub in
2738 the middle of a function is not a good idea. */
2739
2740static void
2741group_sections (struct elf64_aarch64_link_hash_table *htab,
2742 bfd_size_type stub_group_size,
2743 bfd_boolean stubs_always_before_branch)
2744{
2745 asection **list = htab->input_list + htab->top_index;
2746
2747 do
2748 {
2749 asection *tail = *list;
2750
2751 if (tail == bfd_abs_section_ptr)
2752 continue;
2753
2754 while (tail != NULL)
2755 {
2756 asection *curr;
2757 asection *prev;
2758 bfd_size_type total;
2759
2760 curr = tail;
2761 total = tail->size;
2762 while ((prev = PREV_SEC (curr)) != NULL
2763 && ((total += curr->output_offset - prev->output_offset)
2764 < stub_group_size))
2765 curr = prev;
2766
2767 /* OK, the size from the start of CURR to the end is less
2768 than stub_group_size and thus can be handled by one stub
2769 section. (Or the tail section is itself larger than
2770 stub_group_size, in which case we may be toast.)
2771 We should really be keeping track of the total size of
2772 stubs added here, as stubs contribute to the final output
2773 section size. */
2774 do
2775 {
2776 prev = PREV_SEC (tail);
2777 /* Set up this stub group. */
2778 htab->stub_group[tail->id].link_sec = curr;
2779 }
2780 while (tail != curr && (tail = prev) != NULL);
2781
2782 /* But wait, there's more! Input sections up to stub_group_size
2783 bytes before the stub section can be handled by it too. */
2784 if (!stubs_always_before_branch)
2785 {
2786 total = 0;
2787 while (prev != NULL
2788 && ((total += tail->output_offset - prev->output_offset)
2789 < stub_group_size))
2790 {
2791 tail = prev;
2792 prev = PREV_SEC (tail);
2793 htab->stub_group[tail->id].link_sec = curr;
2794 }
2795 }
2796 tail = prev;
2797 }
2798 }
2799 while (list-- != htab->input_list);
2800
2801 free (htab->input_list);
2802}
2803
2804#undef PREV_SEC
2805
2806/* Determine and set the size of the stub section for a final link.
2807
2808 The basic idea here is to examine all the relocations looking for
2809 PC-relative calls to a target that is unreachable with a "bl"
2810 instruction. */
2811
2812bfd_boolean
2813elf64_aarch64_size_stubs (bfd *output_bfd,
2814 bfd *stub_bfd,
2815 struct bfd_link_info *info,
2816 bfd_signed_vma group_size,
2817 asection * (*add_stub_section) (const char *,
2818 asection *),
2819 void (*layout_sections_again) (void))
2820{
2821 bfd_size_type stub_group_size;
2822 bfd_boolean stubs_always_before_branch;
2823 bfd_boolean stub_changed = 0;
2824 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2825
2826 /* Propagate mach to stub bfd, because it may not have been
2827 finalized when we created stub_bfd. */
2828 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2829 bfd_get_mach (output_bfd));
2830
2831 /* Stash our params away. */
2832 htab->stub_bfd = stub_bfd;
2833 htab->add_stub_section = add_stub_section;
2834 htab->layout_sections_again = layout_sections_again;
2835 stubs_always_before_branch = group_size < 0;
2836 if (group_size < 0)
2837 stub_group_size = -group_size;
2838 else
2839 stub_group_size = group_size;
2840
2841 if (stub_group_size == 1)
2842 {
2843 /* Default values. */
 2844       /* AArch64 branch range is +/-128MB.  The value used is 1MB less.  */
2845 stub_group_size = 127 * 1024 * 1024;
2846 }
2847
2848 group_sections (htab, stub_group_size, stubs_always_before_branch);
2849
2850 while (1)
2851 {
2852 bfd *input_bfd;
2853 unsigned int bfd_indx;
2854 asection *stub_sec;
2855
2856 for (input_bfd = info->input_bfds, bfd_indx = 0;
2857 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2858 {
2859 Elf_Internal_Shdr *symtab_hdr;
2860 asection *section;
2861 Elf_Internal_Sym *local_syms = NULL;
2862
2863 /* We'll need the symbol table in a second. */
2864 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2865 if (symtab_hdr->sh_info == 0)
2866 continue;
2867
2868 /* Walk over each section attached to the input bfd. */
2869 for (section = input_bfd->sections;
2870 section != NULL; section = section->next)
2871 {
2872 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2873
2874 /* If there aren't any relocs, then there's nothing more
2875 to do. */
2876 if ((section->flags & SEC_RELOC) == 0
2877 || section->reloc_count == 0
2878 || (section->flags & SEC_CODE) == 0)
2879 continue;
2880
2881 /* If this section is a link-once section that will be
2882 discarded, then don't create any stubs. */
2883 if (section->output_section == NULL
2884 || section->output_section->owner != output_bfd)
2885 continue;
2886
2887 /* Get the relocs. */
2888 internal_relocs
2889 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2890 NULL, info->keep_memory);
2891 if (internal_relocs == NULL)
2892 goto error_ret_free_local;
2893
2894 /* Now examine each relocation. */
2895 irela = internal_relocs;
2896 irelaend = irela + section->reloc_count;
2897 for (; irela < irelaend; irela++)
2898 {
2899 unsigned int r_type, r_indx;
2900 enum elf64_aarch64_stub_type stub_type;
2901 struct elf64_aarch64_stub_hash_entry *stub_entry;
2902 asection *sym_sec;
2903 bfd_vma sym_value;
2904 bfd_vma destination;
2905 struct elf64_aarch64_link_hash_entry *hash;
2906 const char *sym_name;
2907 char *stub_name;
2908 const asection *id_sec;
2909 unsigned char st_type;
2910 bfd_size_type len;
2911
2912 r_type = ELF64_R_TYPE (irela->r_info);
2913 r_indx = ELF64_R_SYM (irela->r_info);
2914
2915 if (r_type >= (unsigned int) R_AARCH64_end)
2916 {
2917 bfd_set_error (bfd_error_bad_value);
2918 error_ret_free_internal:
2919 if (elf_section_data (section)->relocs == NULL)
2920 free (internal_relocs);
2921 goto error_ret_free_local;
2922 }
2923
2924 /* Only look for stubs on unconditional branch and
2925 branch and link instructions. */
2926 if (r_type != (unsigned int) R_AARCH64_CALL26
2927 && r_type != (unsigned int) R_AARCH64_JUMP26)
2928 continue;
2929
2930 /* Now determine the call target, its name, value,
2931 section. */
2932 sym_sec = NULL;
2933 sym_value = 0;
2934 destination = 0;
2935 hash = NULL;
2936 sym_name = NULL;
2937 if (r_indx < symtab_hdr->sh_info)
2938 {
2939 /* It's a local symbol. */
2940 Elf_Internal_Sym *sym;
2941 Elf_Internal_Shdr *hdr;
2942
2943 if (local_syms == NULL)
2944 {
2945 local_syms
2946 = (Elf_Internal_Sym *) symtab_hdr->contents;
2947 if (local_syms == NULL)
2948 local_syms
2949 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2950 symtab_hdr->sh_info, 0,
2951 NULL, NULL, NULL);
2952 if (local_syms == NULL)
2953 goto error_ret_free_internal;
2954 }
2955
2956 sym = local_syms + r_indx;
2957 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2958 sym_sec = hdr->bfd_section;
2959 if (!sym_sec)
2960 /* This is an undefined symbol. It can never
2961 be resolved. */
2962 continue;
2963
2964 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2965 sym_value = sym->st_value;
2966 destination = (sym_value + irela->r_addend
2967 + sym_sec->output_offset
2968 + sym_sec->output_section->vma);
2969 st_type = ELF_ST_TYPE (sym->st_info);
2970 sym_name
2971 = bfd_elf_string_from_elf_section (input_bfd,
2972 symtab_hdr->sh_link,
2973 sym->st_name);
2974 }
2975 else
2976 {
2977 int e_indx;
2978
2979 e_indx = r_indx - symtab_hdr->sh_info;
2980 hash = ((struct elf64_aarch64_link_hash_entry *)
2981 elf_sym_hashes (input_bfd)[e_indx]);
2982
2983 while (hash->root.root.type == bfd_link_hash_indirect
2984 || hash->root.root.type == bfd_link_hash_warning)
2985 hash = ((struct elf64_aarch64_link_hash_entry *)
2986 hash->root.root.u.i.link);
2987
2988 if (hash->root.root.type == bfd_link_hash_defined
2989 || hash->root.root.type == bfd_link_hash_defweak)
2990 {
2991 struct elf64_aarch64_link_hash_table *globals =
2992 elf64_aarch64_hash_table (info);
2993 sym_sec = hash->root.root.u.def.section;
2994 sym_value = hash->root.root.u.def.value;
2995 /* For a destination in a shared library,
2996 use the PLT stub as target address to
2997 decide whether a branch stub is
2998 needed. */
2999 if (globals->root.splt != NULL && hash != NULL
3000 && hash->root.plt.offset != (bfd_vma) - 1)
3001 {
3002 sym_sec = globals->root.splt;
3003 sym_value = hash->root.plt.offset;
3004 if (sym_sec->output_section != NULL)
3005 destination = (sym_value
3006 + sym_sec->output_offset
3007 +
3008 sym_sec->output_section->vma);
3009 }
3010 else if (sym_sec->output_section != NULL)
3011 destination = (sym_value + irela->r_addend
3012 + sym_sec->output_offset
3013 + sym_sec->output_section->vma);
3014 }
3015 else if (hash->root.root.type == bfd_link_hash_undefined
3016 || (hash->root.root.type
3017 == bfd_link_hash_undefweak))
3018 {
3019 /* For a shared library, use the PLT stub as
3020 target address to decide whether a long
3021 branch stub is needed.
3022 For absolute code, they cannot be handled. */
3023 struct elf64_aarch64_link_hash_table *globals =
3024 elf64_aarch64_hash_table (info);
3025
3026 if (globals->root.splt != NULL && hash != NULL
3027 && hash->root.plt.offset != (bfd_vma) - 1)
3028 {
3029 sym_sec = globals->root.splt;
3030 sym_value = hash->root.plt.offset;
3031 if (sym_sec->output_section != NULL)
3032 destination = (sym_value
3033 + sym_sec->output_offset
3034 +
3035 sym_sec->output_section->vma);
3036 }
3037 else
3038 continue;
3039 }
3040 else
3041 {
3042 bfd_set_error (bfd_error_bad_value);
3043 goto error_ret_free_internal;
3044 }
3045 st_type = ELF_ST_TYPE (hash->root.type);
3046 sym_name = hash->root.root.root.string;
3047 }
3048
3049 /* Determine what (if any) linker stub is needed. */
3050 stub_type = aarch64_type_of_stub
3051 (info, section, irela, st_type, hash, destination);
3052 if (stub_type == aarch64_stub_none)
3053 continue;
3054
3055 /* Support for grouping stub sections. */
3056 id_sec = htab->stub_group[section->id].link_sec;
3057
3058 /* Get the name of this stub. */
3059 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
3060 irela);
3061 if (!stub_name)
3062 goto error_ret_free_internal;
3063
3064 stub_entry =
3065 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3066 stub_name, FALSE, FALSE);
3067 if (stub_entry != NULL)
3068 {
3069 /* The proper stub has already been created. */
3070 free (stub_name);
3071 continue;
3072 }
3073
3074 stub_entry = elf64_aarch64_add_stub (stub_name, section,
3075 htab);
3076 if (stub_entry == NULL)
3077 {
3078 free (stub_name);
3079 goto error_ret_free_internal;
3080 }
3081
3082 stub_entry->target_value = sym_value;
3083 stub_entry->target_section = sym_sec;
3084 stub_entry->stub_type = stub_type;
3085 stub_entry->h = hash;
3086 stub_entry->st_type = st_type;
3087
3088 if (sym_name == NULL)
3089 sym_name = "unnamed";
3090 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3091 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3092 if (stub_entry->output_name == NULL)
3093 {
3094 free (stub_name);
3095 goto error_ret_free_internal;
3096 }
3097
3098 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3099 sym_name);
3100
3101 stub_changed = TRUE;
3102 }
3103
3104 /* We're done with the internal relocs, free them. */
3105 if (elf_section_data (section)->relocs == NULL)
3106 free (internal_relocs);
3107 }
3108 }
3109
3110 if (!stub_changed)
3111 break;
3112
3113 /* OK, we've added some stubs. Find out the new size of the
3114 stub sections. */
3115 for (stub_sec = htab->stub_bfd->sections;
3116 stub_sec != NULL; stub_sec = stub_sec->next)
3117 stub_sec->size = 0;
3118
3119 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3120
3121 /* Ask the linker to do its stuff. */
3122 (*htab->layout_sections_again) ();
3123 stub_changed = FALSE;
3124 }
3125
3126 return TRUE;
3127
3128error_ret_free_local:
3129 return FALSE;
3130}
3131
3132/* Build all the stubs associated with the current output file. The
3133 stubs are kept in a hash table attached to the main linker hash
3134 table. We also set up the .plt entries for statically linked PIC
3135 functions here. This function is called via aarch64_elf_finish in the
3136 linker. */
3137
3138bfd_boolean
3139elf64_aarch64_build_stubs (struct bfd_link_info *info)
3140{
3141 asection *stub_sec;
3142 struct bfd_hash_table *table;
3143 struct elf64_aarch64_link_hash_table *htab;
3144
3145 htab = elf64_aarch64_hash_table (info);
3146
3147 for (stub_sec = htab->stub_bfd->sections;
3148 stub_sec != NULL; stub_sec = stub_sec->next)
3149 {
3150 bfd_size_type size;
3151
3152 /* Ignore non-stub sections. */
3153 if (!strstr (stub_sec->name, STUB_SUFFIX))
3154 continue;
3155
3156 /* Allocate memory to hold the linker stubs. */
3157 size = stub_sec->size;
3158 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3159 if (stub_sec->contents == NULL && size != 0)
3160 return FALSE;
3161 stub_sec->size = 0;
3162 }
3163
3164 /* Build the stubs as directed by the stub hash table. */
3165 table = &htab->stub_hash_table;
3166 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3167
3168 return TRUE;
3169}
3170
3171
3172/* Add an entry to the code/data map for section SEC. */
3173
3174static void
3175elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3176{
3177 struct _aarch64_elf_section_data *sec_data =
3178 elf64_aarch64_section_data (sec);
3179 unsigned int newidx;
3180
3181 if (sec_data->map == NULL)
3182 {
3183 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3184 sec_data->mapcount = 0;
3185 sec_data->mapsize = 1;
3186 }
3187
3188 newidx = sec_data->mapcount++;
3189
3190 if (sec_data->mapcount > sec_data->mapsize)
3191 {
3192 sec_data->mapsize *= 2;
3193 sec_data->map = bfd_realloc_or_free
3194 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
3195 }
3196
3197 if (sec_data->map)
3198 {
3199 sec_data->map[newidx].vma = vma;
3200 sec_data->map[newidx].type = type;
3201 }
3202}
3203
3204
3205/* Initialise maps of insn/data for input BFDs. */
3206void
3207bfd_elf64_aarch64_init_maps (bfd *abfd)
3208{
3209 Elf_Internal_Sym *isymbuf;
3210 Elf_Internal_Shdr *hdr;
3211 unsigned int i, localsyms;
3212
3213 /* Make sure that we are dealing with an AArch64 elf binary. */
3214 if (!is_aarch64_elf (abfd))
3215 return;
3216
3217 if ((abfd->flags & DYNAMIC) != 0)
3218 return;
3219
3220 hdr = &elf_symtab_hdr (abfd);
3221 localsyms = hdr->sh_info;
3222
3223 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3224 should contain the number of local symbols, which should come before any
3225 global symbols. Mapping symbols are always local. */
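  /* The AArch64 ELF ABI reserves the names "$x" and "$d" for mapping
     symbols marking the start of A64 code and of data respectively; the
     character after the '$' is recorded below as the map type.  */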
3226 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3227
3228 /* No internal symbols read? Skip this BFD. */
3229 if (isymbuf == NULL)
3230 return;
3231
3232 for (i = 0; i < localsyms; i++)
3233 {
3234 Elf_Internal_Sym *isym = &isymbuf[i];
3235 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3236 const char *name;
3237
3238 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3239 {
3240 name = bfd_elf_string_from_elf_section (abfd,
3241 hdr->sh_link,
3242 isym->st_name);
3243
3244 if (bfd_is_aarch64_special_symbol_name
3245 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3246 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3247 }
3248 }
3249}
3250
3251/* Set option values needed during linking. */
3252void
3253bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3254 struct bfd_link_info *link_info,
3255 int no_enum_warn,
3256 int no_wchar_warn, int pic_veneer)
3257{
3258 struct elf64_aarch64_link_hash_table *globals;
3259
3260 globals = elf64_aarch64_hash_table (link_info);
3261 globals->pic_veneer = pic_veneer;
3262
3263 BFD_ASSERT (is_aarch64_elf (output_bfd));
3264 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3265 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3266}
3267
3268#define MASK(n) ((1u << (n)) - 1)
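/* For example, MASK (26) == 0x03ffffff, the field mask for the 26-bit
   branch offset handled below.  */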
3269
3270/* Decode the 26-bit offset of unconditional branch. */
3271static inline uint32_t
3272decode_branch_ofs_26 (uint32_t insn)
3273{
3274 return insn & MASK (26);
3275}
3276
3277/* Decode the 19-bit offset of conditional branch and compare & branch. */
3278static inline uint32_t
3279decode_cond_branch_ofs_19 (uint32_t insn)
3280{
3281 return (insn >> 5) & MASK (19);
3282}
3283
3284/* Decode the 19-bit offset of load literal. */
3285static inline uint32_t
3286decode_ld_lit_ofs_19 (uint32_t insn)
3287{
3288 return (insn >> 5) & MASK (19);
3289}
3290
3291/* Decode the 14-bit offset of test & branch. */
3292static inline uint32_t
3293decode_tst_branch_ofs_14 (uint32_t insn)
3294{
3295 return (insn >> 5) & MASK (14);
3296}
3297
3298/* Decode the 16-bit imm of move wide. */
3299static inline uint32_t
3300decode_movw_imm (uint32_t insn)
3301{
3302 return (insn >> 5) & MASK (16);
3303}
3304
3305/* Decode the 21-bit imm of adr. */
3306static inline uint32_t
3307decode_adr_imm (uint32_t insn)
3308{
3309 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3310}
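/* The ADR/ADRP immediate is split within the instruction word: immlo sits
   in bits [30:29] and immhi in bits [23:5]; the decoder above and the
   encoder further below reassemble and scatter the 21-bit immhi:immlo
   value accordingly.  */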
3311
3312/* Decode the 12-bit imm of add immediate. */
3313static inline uint32_t
3314decode_add_imm (uint32_t insn)
3315{
3316 return (insn >> 10) & MASK (12);
3317}
3318
3319
3320/* Encode the 26-bit offset of unconditional branch. */
3321static inline uint32_t
3322reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3323{
3324 return (insn & ~MASK (26)) | (ofs & MASK (26));
3325}
3326
3327/* Encode the 19-bit offset of conditional branch and compare & branch. */
3328static inline uint32_t
3329reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3330{
3331 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3332}
3333
 3334/* Encode the 19-bit offset of load literal.  */
3335static inline uint32_t
3336reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3337{
3338 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3339}
3340
3341/* Encode the 14-bit offset of test & branch. */
3342static inline uint32_t
3343reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3344{
3345 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3346}
3347
3348/* Reencode the imm field of move wide. */
3349static inline uint32_t
3350reencode_movw_imm (uint32_t insn, uint32_t imm)
3351{
3352 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3353}
3354
3355/* Reencode the imm field of adr. */
3356static inline uint32_t
3357reencode_adr_imm (uint32_t insn, uint32_t imm)
3358{
3359 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3360 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3361}
3362
3363/* Reencode the imm field of ld/st pos immediate. */
3364static inline uint32_t
3365reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3366{
3367 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3368}
3369
3370/* Reencode the imm field of add immediate. */
3371static inline uint32_t
3372reencode_add_imm (uint32_t insn, uint32_t imm)
3373{
3374 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3375}
3376
3377/* Reencode mov[zn] to movz. */
3378static inline uint32_t
3379reencode_movzn_to_movz (uint32_t opcode)
3380{
3381 return opcode | (1 << 30);
3382}
3383
3384/* Reencode mov[zn] to movn. */
3385static inline uint32_t
3386reencode_movzn_to_movn (uint32_t opcode)
3387{
3388 return opcode & ~(1 << 30);
3389}
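/* In the move wide immediate encoding, bit 30 distinguishes MOVN (clear)
   from MOVZ (set); toggling that single bit is all the two helpers above
   need to do.  */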
3390
3391/* Insert the addend/value into the instruction or data object being
3392 relocated. */
3393static bfd_reloc_status_type
3394bfd_elf_aarch64_put_addend (bfd *abfd,
3395 bfd_byte *address,
3396 reloc_howto_type *howto, bfd_signed_vma addend)
3397{
3398 bfd_reloc_status_type status = bfd_reloc_ok;
3399 bfd_signed_vma old_addend = addend;
3400 bfd_vma contents;
3401 int size;
3402
3403 size = bfd_get_reloc_size (howto);
3404 switch (size)
3405 {
3406 case 2:
3407 contents = bfd_get_16 (abfd, address);
3408 break;
3409 case 4:
3410 if (howto->src_mask != 0xffffffff)
3411 /* Must be 32-bit instruction, always little-endian. */
3412 contents = bfd_getl32 (address);
3413 else
3414 /* Must be 32-bit data (endianness dependent). */
3415 contents = bfd_get_32 (abfd, address);
3416 break;
3417 case 8:
3418 contents = bfd_get_64 (abfd, address);
3419 break;
3420 default:
3421 abort ();
3422 }
3423
3424 switch (howto->complain_on_overflow)
3425 {
3426 case complain_overflow_dont:
3427 break;
3428 case complain_overflow_signed:
3429 status = aarch64_signed_overflow (addend,
3430 howto->bitsize + howto->rightshift);
3431 break;
3432 case complain_overflow_unsigned:
3433 status = aarch64_unsigned_overflow (addend,
3434 howto->bitsize + howto->rightshift);
3435 break;
3436 case complain_overflow_bitfield:
3437 default:
3438 abort ();
3439 }
3440
3441 addend >>= howto->rightshift;
3442
3443 switch (howto->type)
3444 {
3445 case R_AARCH64_JUMP26:
3446 case R_AARCH64_CALL26:
3447 contents = reencode_branch_ofs_26 (contents, addend);
3448 break;
3449
3450 case R_AARCH64_CONDBR19:
3451 contents = reencode_cond_branch_ofs_19 (contents, addend);
3452 break;
3453
3454 case R_AARCH64_TSTBR14:
3455 contents = reencode_tst_branch_ofs_14 (contents, addend);
3456 break;
3457
3458 case R_AARCH64_LD_PREL_LO19:
 3459    case R_AARCH64_GOT_LD_PREL19:
3460 if (old_addend & ((1 << howto->rightshift) - 1))
3461 return bfd_reloc_overflow;
3462 contents = reencode_ld_lit_ofs_19 (contents, addend);
3463 break;
3464
3465 case R_AARCH64_TLSDESC_CALL:
3466 break;
3467
3468 case R_AARCH64_TLSGD_ADR_PAGE21:
3469 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
 3470    case R_AARCH64_TLSDESC_ADR_PAGE21:
3471 case R_AARCH64_ADR_GOT_PAGE:
3472 case R_AARCH64_ADR_PREL_LO21:
3473 case R_AARCH64_ADR_PREL_PG_HI21:
3474 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3475 contents = reencode_adr_imm (contents, addend);
3476 break;
3477
3478 case R_AARCH64_TLSGD_ADD_LO12_NC:
3479 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3480 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3481 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3482 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3483 case R_AARCH64_ADD_ABS_LO12_NC:
3484 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3485 12 bits of the page offset following
3486 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3487 (pc-relative) page base. */
3488 contents = reencode_add_imm (contents, addend);
3489 break;
3490
3491 case R_AARCH64_LDST8_ABS_LO12_NC:
3492 case R_AARCH64_LDST16_ABS_LO12_NC:
3493 case R_AARCH64_LDST32_ABS_LO12_NC:
3494 case R_AARCH64_LDST64_ABS_LO12_NC:
3495 case R_AARCH64_LDST128_ABS_LO12_NC:
3496 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3497 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3498 case R_AARCH64_LD64_GOT_LO12_NC:
3499 if (old_addend & ((1 << howto->rightshift) - 1))
3500 return bfd_reloc_overflow;
3501 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3502 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3503 which computes the (pc-relative) page base. */
3504 contents = reencode_ldst_pos_imm (contents, addend);
3505 break;
3506
3507 /* Group relocations to create high bits of a 16, 32, 48 or 64
3508 bit signed data or abs address inline. Will change
3509 instruction to MOVN or MOVZ depending on sign of calculated
3510 value. */
3511
3512 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3513 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3514 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3515 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3516 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3517 case R_AARCH64_MOVW_SABS_G0:
3518 case R_AARCH64_MOVW_SABS_G1:
3519 case R_AARCH64_MOVW_SABS_G2:
3520 /* NOTE: We can only come here with movz or movn. */
3521 if (addend < 0)
3522 {
3523 /* Force use of MOVN. */
3524 addend = ~addend;
3525 contents = reencode_movzn_to_movn (contents);
3526 }
3527 else
3528 {
3529 /* Force use of MOVZ. */
3530 contents = reencode_movzn_to_movz (contents);
3531 }
3532 /* fall through */
3533
3534 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3535 data or abs address inline. */
3536
3537 case R_AARCH64_MOVW_UABS_G0:
3538 case R_AARCH64_MOVW_UABS_G0_NC:
3539 case R_AARCH64_MOVW_UABS_G1:
3540 case R_AARCH64_MOVW_UABS_G1_NC:
3541 case R_AARCH64_MOVW_UABS_G2:
3542 case R_AARCH64_MOVW_UABS_G2_NC:
3543 case R_AARCH64_MOVW_UABS_G3:
3544 contents = reencode_movw_imm (contents, addend);
3545 break;
3546
3547 default:
3548 /* Repack simple data */
3549 if (howto->dst_mask & (howto->dst_mask + 1))
3550 return bfd_reloc_notsupported;
3551
3552 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3553 break;
3554 }
3555
3556 switch (size)
3557 {
3558 case 2:
3559 bfd_put_16 (abfd, contents, address);
3560 break;
3561 case 4:
3562 if (howto->dst_mask != 0xffffffff)
3563 /* Must be 32-bit instruction, always little-endian. */
3564 bfd_putl32 (contents, address);
3565 else
3566 /* Must be 32-bit data (endianness dependent). */
3567 bfd_put_32 (abfd, contents, address);
3568 break;
3569 case 8:
3570 bfd_put_64 (abfd, contents, address);
3571 break;
3572 default:
3573 abort ();
3574 }
3575
3576 return status;
3577}
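/* Worked example (illustrative only): for R_AARCH64_CALL26 with a
   computed byte offset of 0x1000, the addend is shifted right by
   howto->rightshift (2) to give an imm26 of 0x400, which
   reencode_branch_ofs_26 packs into bits [25:0] of the BL
   instruction.  Note that instruction words are always read and
   written little-endian above, while plain data words follow the
   endianness of the BFD. */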
3578
3579static bfd_vma
3580aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3581 struct elf64_aarch64_link_hash_table
3582 *globals, struct bfd_link_info *info,
3583 bfd_vma value, bfd *output_bfd,
3584 bfd_boolean *unresolved_reloc_p)
3585{
3586 bfd_vma off = (bfd_vma) - 1;
3587 asection *basegot = globals->root.sgot;
3588 bfd_boolean dyn = globals->root.dynamic_sections_created;
3589
3590 if (h != NULL)
3591 {
3592 off = h->got.offset;
3593 BFD_ASSERT (off != (bfd_vma) - 1);
3594 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3595 || (info->shared
3596 && SYMBOL_REFERENCES_LOCAL (info, h))
3597 || (ELF_ST_VISIBILITY (h->other)
3598 && h->root.type == bfd_link_hash_undefweak))
3599 {
3600 /* This is actually a static link, or it is a -Bsymbolic link
3601 and the symbol is defined locally. We must initialize this
3602 entry in the global offset table. Since the offset must
3603 always be a multiple of 8, we use the least significant bit
3604 to record whether we have initialized it already.
3605 When doing a dynamic link, we create a .rel(a).got relocation
3606 entry to initialize the value. This is done in the
3607 finish_dynamic_symbol routine. */
3608 if ((off & 1) != 0)
3609 off &= ~1;
3610 else
3611 {
3612 bfd_put_64 (output_bfd, value, basegot->contents + off);
3613 h->got.offset |= 1;
3614 }
3615 }
3616 else
3617 *unresolved_reloc_p = FALSE;
3618
3619 off = off + basegot->output_section->vma + basegot->output_offset;
3620 }
3621
3622 return off;
3623}
3624
3625/* Change R_TYPE to a more efficient access model where possible,
3626 return the new reloc type. */
3627
3628static unsigned int
3629aarch64_tls_transition_without_check (unsigned int r_type,
3630 struct elf_link_hash_entry *h)
3631{
3632 bfd_boolean is_local = h == NULL;
3633 switch (r_type)
3634 {
3635 case R_AARCH64_TLSGD_ADR_PAGE21:
418009c2 3636 case R_AARCH64_TLSDESC_ADR_PAGE21:
3637 return is_local
3638 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3639
3640 case R_AARCH64_TLSGD_ADD_LO12_NC:
3641 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3642 return is_local
3643 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3644 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3645
3646 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3647 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3648
3649 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3650 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3651
3652 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3653 case R_AARCH64_TLSDESC_CALL:
3654 /* Instructions with these relocations will become NOPs. */
3655 return R_AARCH64_NONE;
3656 }
3657
3658 return r_type;
3659}
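/* For example (a summary of the switch above, illustrative only): in
   an executable, the general-dynamic pair

       adrp x0, :tlsgd:var              R_AARCH64_TLSGD_ADR_PAGE21
       add  x0, x0, #:tlsgd_lo12:var    R_AARCH64_TLSGD_ADD_LO12_NC

   is retyped to the initial-exec relocations
   R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 and
   R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC when the symbol is global,
   or to the local-exec relocations R_AARCH64_TLSLE_MOVW_TPREL_G1 and
   R_AARCH64_TLSLE_MOVW_TPREL_G0_NC when it is local (h == NULL);
   the TLSDESC add/call relocations simply become R_AARCH64_NONE. */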
3660
3661static unsigned int
3662aarch64_reloc_got_type (unsigned int r_type)
3663{
3664 switch (r_type)
3665 {
3666 case R_AARCH64_LD64_GOT_LO12_NC:
3667 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 3668 case R_AARCH64_GOT_LD_PREL19:
3669 return GOT_NORMAL;
3670
3671 case R_AARCH64_TLSGD_ADR_PAGE21:
3672 case R_AARCH64_TLSGD_ADD_LO12_NC:
3673 return GOT_TLS_GD;
3674
3675 case R_AARCH64_TLSDESC_ADD_LO12_NC:
418009c2 3676 case R_AARCH64_TLSDESC_ADR_PAGE21:
3677 case R_AARCH64_TLSDESC_CALL:
3678 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3679 return GOT_TLSDESC_GD;
3680
3681 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3682 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3683 return GOT_TLS_IE;
3684
3685 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3686 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3687 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3688 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3689 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3690 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3691 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3692 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3693 return GOT_UNKNOWN;
3694 }
3695 return GOT_UNKNOWN;
3696}
3697
3698static bfd_boolean
3699aarch64_can_relax_tls (bfd *input_bfd,
3700 struct bfd_link_info *info,
3701 unsigned int r_type,
3702 struct elf_link_hash_entry *h,
3703 unsigned long r_symndx)
3704{
3705 unsigned int symbol_got_type;
3706 unsigned int reloc_got_type;
3707
3708 if (! IS_AARCH64_TLS_RELOC (r_type))
3709 return FALSE;
3710
3711 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3712 reloc_got_type = aarch64_reloc_got_type (r_type);
3713
3714 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3715 return TRUE;
3716
3717 if (info->shared)
3718 return FALSE;
3719
3720 if (h && h->root.type == bfd_link_hash_undefweak)
3721 return FALSE;
3722
3723 return TRUE;
3724}
3725
3726static unsigned int
3727aarch64_tls_transition (bfd *input_bfd,
3728 struct bfd_link_info *info,
3729 unsigned int r_type,
3730 struct elf_link_hash_entry *h,
3731 unsigned long r_symndx)
3732{
3733 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3734 return r_type;
3735
3736 return aarch64_tls_transition_without_check (r_type, h);
3737}
3738
3739/* Return the base VMA address which should be subtracted from real addresses
3740 when resolving R_AARCH64_TLS_DTPREL64 relocations. */
3741
3742static bfd_vma
3743dtpoff_base (struct bfd_link_info *info)
3744{
3745 /* If tls_sec is NULL, we should have signalled an error already. */
3746 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3747 return elf_hash_table (info)->tls_sec->vma;
3748}
3749
3750
3751/* Return the base VMA address which should be subtracted from real addresses
3752 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3753
3754static bfd_vma
3755tpoff_base (struct bfd_link_info *info)
3756{
3757 struct elf_link_hash_table *htab = elf_hash_table (info);
3758
3759 /* If tls_sec is NULL, we should have signalled an error already. */
3760 if (htab->tls_sec == NULL)
3761 return 0;
3762
3763 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3764 htab->tls_sec->alignment_power);
3765 return htab->tls_sec->vma - base;
3766}
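/* Illustrative note: with the definition above, the local-exec
   (TPREL) offset computed for a symbol S is

       S_vma - tpoff_base (info)
         == (S_vma - tls_sec->vma)
            + align_power (TCB_SIZE, tls_sec->alignment_power)

   i.e. the symbol's offset within the TLS segment plus the aligned
   16-byte thread control block that sits at the thread pointer on
   AArch64. */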
3767
3768static bfd_vma *
3769symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3770 unsigned long r_symndx)
3771{
3772 /* Return a pointer to the recorded GOT offset for the symbol
3773 referred to by h, or for the local symbol r_symndx. */
3774 if (h != NULL)
3775 return &h->got.offset;
3776 else
3777 {
3778 /* local symbol */
3779 struct elf_aarch64_local_symbol *l;
3780
3781 l = elf64_aarch64_locals (input_bfd);
3782 return &l[r_symndx].got_offset;
3783 }
3784}
3785
3786static void
3787symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3788 unsigned long r_symndx)
3789{
3790 bfd_vma *p;
3791 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3792 *p |= 1;
3793}
3794
3795static int
3796symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3797 unsigned long r_symndx)
3798{
3799 bfd_vma value;
3800 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3801 return value & 1;
3802}
3803
3804static bfd_vma
3805symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3806 unsigned long r_symndx)
3807{
3808 bfd_vma value;
3809 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3810 value &= ~1;
3811 return value;
3812}
3813
3814static bfd_vma *
3815symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3816 unsigned long r_symndx)
3817{
3818 /* Return a pointer to the recorded TLSDESC GOT offset for the
3819 symbol referred to by h, or for the local symbol r_symndx. */
3820 if (h != NULL)
3821 {
3822 struct elf64_aarch64_link_hash_entry *eh;
3823 eh = (struct elf64_aarch64_link_hash_entry *) h;
3824 return &eh->tlsdesc_got_jump_table_offset;
3825 }
3826 else
3827 {
3828 /* local symbol */
3829 struct elf_aarch64_local_symbol *l;
3830
3831 l = elf64_aarch64_locals (input_bfd);
3832 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3833 }
3834}
3835
3836static void
3837symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3838 unsigned long r_symndx)
3839{
3840 bfd_vma *p;
3841 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3842 *p |= 1;
3843}
3844
3845static int
3846symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3847 struct elf_link_hash_entry *h,
3848 unsigned long r_symndx)
3849{
3850 bfd_vma value;
3851 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3852 return value & 1;
3853}
3854
3855static bfd_vma
3856symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3857 unsigned long r_symndx)
3858{
3859 bfd_vma value;
3860 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3861 value &= ~1;
3862 return value;
3863}
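/* Illustrative example of the marking scheme used by the accessors
   above: GOT offsets are always multiples of 8, so bit 0 is free.
   symbol_got_offset_mark () sets that bit once the entry has been
   emitted; symbol_got_offset () masks it off again, so an offset
   recorded as 0x29 is returned as 0x28, while
   symbol_got_offset_mark_p () reports the entry as already done.
   The tlsdesc variants apply the same convention to the jump-table
   offsets. */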
3864
3865/* Perform a relocation as part of a final link. */
3866static bfd_reloc_status_type
3867elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3868 bfd *input_bfd,
3869 bfd *output_bfd,
3870 asection *input_section,
3871 bfd_byte *contents,
3872 Elf_Internal_Rela *rel,
3873 bfd_vma value,
3874 struct bfd_link_info *info,
3875 asection *sym_sec,
3876 struct elf_link_hash_entry *h,
3877 bfd_boolean *unresolved_reloc_p,
3878 bfd_boolean save_addend,
3879 bfd_vma *saved_addend,
3880 Elf_Internal_Sym *sym)
a06ea964 3881{
692e2b8b 3882 Elf_Internal_Shdr *symtab_hdr;
3883 unsigned int r_type = howto->type;
3884 unsigned long r_symndx;
3885 bfd_byte *hit_data = contents + rel->r_offset;
3886 bfd_vma place;
3887 bfd_signed_vma signed_addend;
3888 struct elf64_aarch64_link_hash_table *globals;
3889 bfd_boolean weak_undef_p;
3890
3891 globals = elf64_aarch64_hash_table (info);
3892
3893 symtab_hdr = &elf_symtab_hdr (input_bfd);
3894
3895 BFD_ASSERT (is_aarch64_elf (input_bfd));
3896
3897 r_symndx = ELF64_R_SYM (rel->r_info);
3898
3899 /* It is possible to have linker relaxations on some TLS access
3900 models. Update our information here. */
3901 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3902
3903 if (r_type != howto->type)
3904 howto = elf64_aarch64_howto_from_type (r_type);
3905
3906 place = input_section->output_section->vma
3907 + input_section->output_offset + rel->r_offset;
3908
3909 /* Get addend, accumulating the addend for consecutive relocs
3910 which refer to the same offset. */
3911 signed_addend = saved_addend ? *saved_addend : 0;
3912 signed_addend += rel->r_addend;
3913
3914 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3915 : bfd_is_und_section (sym_sec));
3916
3917 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3918 it here if it is defined in a non-shared object. */
3919 if (h != NULL
3920 && h->type == STT_GNU_IFUNC
3921 && h->def_regular)
3922 {
3923 asection *plt;
3924 const char *name;
3925 asection *base_got;
3926 bfd_vma off;
3927
3928 if ((input_section->flags & SEC_ALLOC) == 0
3929 || h->plt.offset == (bfd_vma) -1)
3930 abort ();
3931
3932 /* STT_GNU_IFUNC symbol must go through PLT. */
3933 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
3934 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
3935
3936 switch (r_type)
3937 {
3938 default:
3939 if (h->root.root.string)
3940 name = h->root.root.string;
3941 else
3942 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3943 NULL);
3944 (*_bfd_error_handler)
3945 (_("%B: relocation %s against STT_GNU_IFUNC "
3946 "symbol `%s' isn't handled by %s"), input_bfd,
3947 howto->name, name, __FUNCTION__);
3948 bfd_set_error (bfd_error_bad_value);
3949 return FALSE;
3950
3951 case R_AARCH64_ABS64:
3952 if (rel->r_addend != 0)
3953 {
3954 if (h->root.root.string)
3955 name = h->root.root.string;
3956 else
3957 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3958 sym, NULL);
3959 (*_bfd_error_handler)
3960 (_("%B: relocation %s against STT_GNU_IFUNC "
3961 "symbol `%s' has non-zero addend: %d"),
3962 input_bfd, howto->name, name, rel->r_addend);
3963 bfd_set_error (bfd_error_bad_value);
3964 return FALSE;
3965 }
3966
3967 /* Generate dynamic relocation only when there is a
3968 non-GOT reference in a shared object. */
3969 if (info->shared && h->non_got_ref)
3970 {
3971 Elf_Internal_Rela outrel;
3972 asection *sreloc;
3973
3974 /* Need a dynamic relocation to get the real function
3975 address. */
3976 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3977 info,
3978 input_section,
3979 rel->r_offset);
3980 if (outrel.r_offset == (bfd_vma) -1
3981 || outrel.r_offset == (bfd_vma) -2)
3982 abort ();
3983
3984 outrel.r_offset += (input_section->output_section->vma
3985 + input_section->output_offset);
3986
3987 if (h->dynindx == -1
3988 || h->forced_local
3989 || info->executable)
3990 {
3991 /* This symbol is resolved locally. */
3992 outrel.r_info = ELF64_R_INFO (0, R_AARCH64_IRELATIVE);
3993 outrel.r_addend = (h->root.u.def.value
3994 + h->root.u.def.section->output_section->vma
3995 + h->root.u.def.section->output_offset);
3996 }
3997 else
3998 {
3999 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
4000 outrel.r_addend = 0;
4001 }
4002
4003 sreloc = globals->root.irelifunc;
4004 elf_append_rela (output_bfd, sreloc, &outrel);
4005
4006 /* If this reloc is against an external symbol, we
4007 do not want to fiddle with the addend. Otherwise,
4008 we need to include the symbol value so that it
4009 becomes an addend for the dynamic reloc. For an
4010 internal symbol, we have updated addend. */
4011 return bfd_reloc_ok;
4012 }
4013 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4014 contents, rel->r_offset, value,
4015 signed_addend);
4016
4017 case R_AARCH64_JUMP26:
4018 case R_AARCH64_CALL26:
4019 value = aarch64_resolve_relocation (r_type, place, value,
4020 signed_addend, weak_undef_p);
4021 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4022
4023 case R_AARCH64_LD64_GOT_LO12_NC:
4024 case R_AARCH64_ADR_GOT_PAGE:
4025 case R_AARCH64_GOT_LD_PREL19:
4026 base_got = globals->root.sgot;
4027 off = h->got.offset;
4028
4029 if (base_got == NULL)
4030 abort ();
4031
4032 if (off == (bfd_vma) -1)
4033 {
4034 bfd_vma plt_index;
4035
4036 /* We can't use h->got.offset here to save state, or
4037 even just remember the offset, as finish_dynamic_symbol
4038 would use that as offset into .got. */
4039
4040 if (globals->root.splt != NULL)
4041 {
4042 plt_index = h->plt.offset / globals->plt_entry_size - 1;
4043 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4044 base_got = globals->root.sgotplt;
4045 }
4046 else
4047 {
4048 plt_index = h->plt.offset / globals->plt_entry_size;
4049 off = plt_index * GOT_ENTRY_SIZE;
4050 base_got = globals->root.igotplt;
4051 }
4052
4053 if (h->dynindx == -1
4054 || h->forced_local
4055 || info->symbolic)
4056 {
4057 /* This references the local definition. We must
4058 initialize this entry in the global offset table.
4059 Since the offset must always be a multiple of 8,
4060 we use the least significant bit to record
4061 whether we have initialized it already.
4062
4063 When doing a dynamic link, we create a .rela.got
4064 relocation entry to initialize the value. This
4065 is done in the finish_dynamic_symbol routine. */
4066 if ((off & 1) != 0)
4067 off &= ~1;
4068 else
4069 {
4070 bfd_put_64 (output_bfd, value,
4071 base_got->contents + off);
4072 /* Note that this is harmless for the GOTPLT64
4073 case, as -1 | 1 still is -1. */
4074 h->got.offset |= 1;
4075 }
4076 }
4077 value = (base_got->output_section->vma
4078 + base_got->output_offset + off);
4079 }
4080 else
4081 value = aarch64_calculate_got_entry_vma (h, globals, info,
4082 value, output_bfd,
4083 unresolved_reloc_p);
4084 value = aarch64_resolve_relocation (r_type, place, value,
4085 0, weak_undef_p);
4086 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4087
4088 case R_AARCH64_ADR_PREL_PG_HI21:
4089 case R_AARCH64_ADD_ABS_LO12_NC:
4090 break;
4091 }
4092 }
4093
4094 switch (r_type)
4095 {
4096 case R_AARCH64_NONE:
4097 case R_AARCH64_NULL:
4098 case R_AARCH64_TLSDESC_CALL:
4099 *unresolved_reloc_p = FALSE;
4100 return bfd_reloc_ok;
4101
4102 case R_AARCH64_ABS64:
4103
4104 /* When generating a shared object or relocatable executable, these
4105 relocations are copied into the output file to be resolved at
4106 run time. */
4107 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4108 && (input_section->flags & SEC_ALLOC)
4109 && (h == NULL
4110 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4111 || h->root.type != bfd_link_hash_undefweak))
4112 {
4113 Elf_Internal_Rela outrel;
4114 bfd_byte *loc;
4115 bfd_boolean skip, relocate;
4116 asection *sreloc;
4117
4118 *unresolved_reloc_p = FALSE;
4119
4120 skip = FALSE;
4121 relocate = FALSE;
4122
4123 outrel.r_addend = signed_addend;
4124 outrel.r_offset =
4125 _bfd_elf_section_offset (output_bfd, info, input_section,
4126 rel->r_offset);
4127 if (outrel.r_offset == (bfd_vma) - 1)
4128 skip = TRUE;
4129 else if (outrel.r_offset == (bfd_vma) - 2)
4130 {
4131 skip = TRUE;
4132 relocate = TRUE;
4133 }
4134
4135 outrel.r_offset += (input_section->output_section->vma
4136 + input_section->output_offset);
4137
4138 if (skip)
4139 memset (&outrel, 0, sizeof outrel);
4140 else if (h != NULL
4141 && h->dynindx != -1
4142 && (!info->shared || !info->symbolic || !h->def_regular))
4143 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
4144 else
4145 {
4146 int symbol;
4147
4148 /* On SVR4-ish systems, the dynamic loader cannot
4149 relocate the text and data segments independently,
4150 so the symbol does not matter. */
4151 symbol = 0;
4152 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
4153 outrel.r_addend += value;
4154 }
4155
4156 sreloc = elf_section_data (input_section)->sreloc;
4157 if (sreloc == NULL || sreloc->contents == NULL)
4158 return bfd_reloc_notsupported;
4159
4160 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4161 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
4162
692e2b8b 4163 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4164 {
4165 /* Sanity check that we have previously allocated
4166 sufficient space in the relocation section for the
4167 number of relocations we actually want to emit. */
4168 abort ();
4169 }
4170
4171 /* If this reloc is against an external symbol, we do not want to
4172 fiddle with the addend. Otherwise, we need to include the symbol
4173 value so that it becomes an addend for the dynamic reloc. */
4174 if (!relocate)
4175 return bfd_reloc_ok;
4176
4177 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4178 contents, rel->r_offset, value,
4179 signed_addend);
4180 }
4181 else
4182 value += signed_addend;
4183 break;
4184
4185 case R_AARCH64_JUMP26:
4186 case R_AARCH64_CALL26:
4187 {
4188 asection *splt = globals->root.splt;
4189 bfd_boolean via_plt_p =
4190 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4191
4192 /* A call to an undefined weak symbol is converted to a jump to
4193 the next instruction unless a PLT entry will be created.
4194 The jump to the next instruction is optimized as a NOP.
4195 Do the same for local undefined symbols. */
4196 if (weak_undef_p && ! via_plt_p)
4197 {
4198 bfd_putl32 (INSN_NOP, hit_data);
4199 return bfd_reloc_ok;
4200 }
4201
4202 /* If the call goes through a PLT entry, make sure to
4203 check distance to the right destination address. */
4204 if (via_plt_p)
4205 {
4206 value = (splt->output_section->vma
4207 + splt->output_offset + h->plt.offset);
4208 *unresolved_reloc_p = FALSE;
4209 }
4210
4211 /* If the target symbol is global and marked as a function then the
4212 relocation applies to a function call or a tail call. In this
4213 situation we can veneer out of range branches. The veneers
4214 use IP0 and IP1, and hence cannot be used for arbitrary out of
4215 range branches that occur within the body of a function. */
4216 if (h && h->type == STT_FUNC)
4217 {
4218 /* Check if a stub has to be inserted because the destination
4219 is too far away. */
4220 if (! aarch64_valid_branch_p (value, place))
4221 {
4222 /* The target is out of reach, so redirect the branch to
4223 the local stub for this function. */
4224 struct elf64_aarch64_stub_hash_entry *stub_entry;
4225 stub_entry = elf64_aarch64_get_stub_entry (input_section,
4226 sym_sec, h,
4227 rel, globals);
4228 if (stub_entry != NULL)
4229 value = (stub_entry->stub_offset
4230 + stub_entry->stub_sec->output_offset
4231 + stub_entry->stub_sec->output_section->vma);
4232 }
4233 }
4234 }
4235 value = aarch64_resolve_relocation (r_type, place, value,
4236 signed_addend, weak_undef_p);
4237 break;
4238
4239 case R_AARCH64_ABS16:
4240 case R_AARCH64_ABS32:
4241 case R_AARCH64_ADD_ABS_LO12_NC:
4242 case R_AARCH64_ADR_PREL_LO21:
4243 case R_AARCH64_ADR_PREL_PG_HI21:
4244 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4245 case R_AARCH64_CONDBR19:
4246 case R_AARCH64_LD_PREL_LO19:
4247 case R_AARCH64_LDST8_ABS_LO12_NC:
4248 case R_AARCH64_LDST16_ABS_LO12_NC:
4249 case R_AARCH64_LDST32_ABS_LO12_NC:
4250 case R_AARCH64_LDST64_ABS_LO12_NC:
4251 case R_AARCH64_LDST128_ABS_LO12_NC:
4252 case R_AARCH64_MOVW_SABS_G0:
4253 case R_AARCH64_MOVW_SABS_G1:
4254 case R_AARCH64_MOVW_SABS_G2:
4255 case R_AARCH64_MOVW_UABS_G0:
4256 case R_AARCH64_MOVW_UABS_G0_NC:
4257 case R_AARCH64_MOVW_UABS_G1:
4258 case R_AARCH64_MOVW_UABS_G1_NC:
4259 case R_AARCH64_MOVW_UABS_G2:
4260 case R_AARCH64_MOVW_UABS_G2_NC:
4261 case R_AARCH64_MOVW_UABS_G3:
4262 case R_AARCH64_PREL16:
4263 case R_AARCH64_PREL32:
4264 case R_AARCH64_PREL64:
4265 case R_AARCH64_TSTBR14:
4266 value = aarch64_resolve_relocation (r_type, place, value,
4267 signed_addend, weak_undef_p);
4268 break;
4269
4270 case R_AARCH64_LD64_GOT_LO12_NC:
4271 case R_AARCH64_ADR_GOT_PAGE:
f41aef5f 4272 case R_AARCH64_GOT_LD_PREL19:
4273 if (globals->root.sgot == NULL)
4274 BFD_ASSERT (h != NULL);
4275
4276 if (h != NULL)
4277 {
4278 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4279 output_bfd,
4280 unresolved_reloc_p);
4281 value = aarch64_resolve_relocation (r_type, place, value,
4282 0, weak_undef_p);
4283 }
4284 break;
4285
4286 case R_AARCH64_TLSGD_ADR_PAGE21:
4287 case R_AARCH64_TLSGD_ADD_LO12_NC:
4288 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4289 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4290 if (globals->root.sgot == NULL)
4291 return bfd_reloc_notsupported;
4292
4293 value = (symbol_got_offset (input_bfd, h, r_symndx)
4294 + globals->root.sgot->output_section->vma
4295 + globals->root.sgot->output_section->output_offset);
4296
4297 value = aarch64_resolve_relocation (r_type, place, value,
4298 0, weak_undef_p);
4299 *unresolved_reloc_p = FALSE;
4300 break;
4301
4302 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4303 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4304 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4305 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4306 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4307 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4308 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4309 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4310 value = aarch64_resolve_relocation (r_type, place, value,
bb3f9ed8 4311 signed_addend - tpoff_base (info), weak_undef_p);
4312 *unresolved_reloc_p = FALSE;
4313 break;
4314
418009c2 4315 case R_AARCH64_TLSDESC_ADR_PAGE21:
4316 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4317 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4318 case R_AARCH64_TLSDESC_ADD:
4319 case R_AARCH64_TLSDESC_LDR:
4320 if (globals->root.sgot == NULL)
4321 return bfd_reloc_notsupported;
4322
4323 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4324 + globals->root.sgotplt->output_section->vma
4325 + globals->root.sgotplt->output_section->output_offset
4326 + globals->sgotplt_jump_table_size);
4327
4328 value = aarch64_resolve_relocation (r_type, place, value,
4329 0, weak_undef_p);
4330 *unresolved_reloc_p = FALSE;
4331 break;
4332
4333 default:
4334 return bfd_reloc_notsupported;
4335 }
4336
4337 if (saved_addend)
4338 *saved_addend = value;
4339
4340 /* Only apply the final relocation in a sequence. */
4341 if (save_addend)
4342 return bfd_reloc_continue;
4343
4344 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4345}
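/* Note on the saved_addend protocol above (illustrative only): when
   the caller sees several relocations against the same r_offset it
   passes save_addend == TRUE for all but the last of them; the
   intermediate result is handed back through *saved_addend together
   with bfd_reloc_continue, and only the final relocation of the
   sequence actually patches the section contents via
   bfd_elf_aarch64_put_addend. */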
4346
4347/* Handle TLS relaxations. Relaxing is possible for symbols that use
4348 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4349 link.
4350
4351 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4352 is to then call final_link_relocate. Return other values in the
4353 case of error. */
4354
4355static bfd_reloc_status_type
4356elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4357 bfd *input_bfd, bfd_byte *contents,
4358 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4359{
4360 bfd_boolean is_local = h == NULL;
4361 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4362 unsigned long insn;
4363
4364 BFD_ASSERT (globals && input_bfd && contents && rel);
4365
4366 switch (r_type)
4367 {
4368 case R_AARCH64_TLSGD_ADR_PAGE21:
418009c2 4369 case R_AARCH64_TLSDESC_ADR_PAGE21:
4370 if (is_local)
4371 {
4372 /* GD->LE relaxation:
4373 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4374 or
4375 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4376 */
4377 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4378 return bfd_reloc_continue;
4379 }
4380 else
4381 {
4382 /* GD->IE relaxation:
4383 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4384 or
4385 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4386 */
4387 insn = bfd_getl32 (contents + rel->r_offset);
4388 return bfd_reloc_continue;
4389 }
4390
4391 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4392 if (is_local)
4393 {
4394 /* GD->LE relaxation:
4395 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4396 */
4397 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4398 return bfd_reloc_continue;
4399 }
4400 else
4401 {
4402 /* GD->IE relaxation:
4403 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4404 */
4405 insn = bfd_getl32 (contents + rel->r_offset);
4406 insn &= 0xfffffff0;
4407 bfd_putl32 (insn, contents + rel->r_offset);
4408 return bfd_reloc_continue;
4409 }
4410
4411 case R_AARCH64_TLSGD_ADD_LO12_NC:
4412 if (is_local)
4413 {
4414 /* GD->LE relaxation
4415 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4416 bl __tls_get_addr => mrs x1, tpidr_el0
4417 nop => add x0, x1, x0
4418 */
4419
4420 /* First kill the tls_get_addr reloc on the bl instruction. */
4421 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4422 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4423
4424 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4425 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4426 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4427 return bfd_reloc_continue;
4428 }
4429 else
4430 {
4431 /* GD->IE relaxation
4432 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4433 BL __tls_get_addr => mrs x1, tpidr_el0
4434 R_AARCH64_CALL26
4435 NOP => add x0, x1, x0
4436 */
4437
4438 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4439
4440 /* Remove the relocation on the BL instruction. */
4441 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4442
4443 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4444
4445 /* We choose to fixup the BL and NOP instructions using the
4446 offset from the second relocation to allow flexibility in
4447 scheduling instructions between the ADD and BL. */
4448 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4449 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4450 return bfd_reloc_continue;
4451 }
4452
4453 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4454 case R_AARCH64_TLSDESC_CALL:
4455 /* GD->IE/LE relaxation:
4456 add x0, x0, #:tlsdesc_lo12:var => nop
4457 blr xd => nop
4458 */
4459 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4460 return bfd_reloc_ok;
4461
4462 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4463 /* IE->LE relaxation:
4464 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4465 */
4466 if (is_local)
4467 {
4468 insn = bfd_getl32 (contents + rel->r_offset);
4469 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4470 }
4471 return bfd_reloc_continue;
4472
4473 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4474 /* IE->LE relaxation:
4475 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4476 */
4477 if (is_local)
4478 {
4479 insn = bfd_getl32 (contents + rel->r_offset);
4480 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4481 }
4482 return bfd_reloc_continue;
4483
4484 default:
4485 return bfd_reloc_continue;
4486 }
4487
4488 return bfd_reloc_ok;
4489}
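/* Putting the cases above together (illustrative only), a complete
   GD->LE relaxation rewrites

       adrp x0, :tlsgd:var
       add  x0, x0, #:tlsgd_lo12:var
       bl   __tls_get_addr
       nop

   into

       movz x0, :tprel_g1:var
       movk x0, :tprel_g0_nc:var
       mrs  x1, tpidr_el0
       add  x0, x1, x0

   with the retyped TPREL relocations then resolved by
   elf64_aarch64_final_link_relocate. */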
4490
4491/* Relocate an AArch64 ELF section. */
4492
4493static bfd_boolean
4494elf64_aarch64_relocate_section (bfd *output_bfd,
4495 struct bfd_link_info *info,
4496 bfd *input_bfd,
4497 asection *input_section,
4498 bfd_byte *contents,
4499 Elf_Internal_Rela *relocs,
4500 Elf_Internal_Sym *local_syms,
4501 asection **local_sections)
4502{
4503 Elf_Internal_Shdr *symtab_hdr;
4504 struct elf_link_hash_entry **sym_hashes;
4505 Elf_Internal_Rela *rel;
4506 Elf_Internal_Rela *relend;
4507 const char *name;
4508 struct elf64_aarch64_link_hash_table *globals;
4509 bfd_boolean save_addend = FALSE;
4510 bfd_vma addend = 0;
4511
4512 globals = elf64_aarch64_hash_table (info);
4513
4514 symtab_hdr = &elf_symtab_hdr (input_bfd);
4515 sym_hashes = elf_sym_hashes (input_bfd);
4516
4517 rel = relocs;
4518 relend = relocs + input_section->reloc_count;
4519 for (; rel < relend; rel++)
4520 {
4521 unsigned int r_type;
4522 unsigned int relaxed_r_type;
4523 reloc_howto_type *howto;
4524 unsigned long r_symndx;
4525 Elf_Internal_Sym *sym;
4526 asection *sec;
4527 struct elf_link_hash_entry *h;
4528 bfd_vma relocation;
4529 bfd_reloc_status_type r;
4530 arelent bfd_reloc;
4531 char sym_type;
4532 bfd_boolean unresolved_reloc = FALSE;
4533 char *error_message = NULL;
4534
4535 r_symndx = ELF64_R_SYM (rel->r_info);
4536 r_type = ELF64_R_TYPE (rel->r_info);
4537
4538 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4539 howto = bfd_reloc.howto;
4540
4541 if (howto == NULL)
4542 {
4543 (*_bfd_error_handler)
4544 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4545 input_bfd, input_section, r_type);
4546 return FALSE;
4547 }
4548
4549 h = NULL;
4550 sym = NULL;
4551 sec = NULL;
4552
4553 if (r_symndx < symtab_hdr->sh_info)
4554 {
4555 sym = local_syms + r_symndx;
4556 sym_type = ELF64_ST_TYPE (sym->st_info);
4557 sec = local_sections[r_symndx];
4558
4559 /* An object file might have a reference to a local
4560 undefined symbol. This is a daft object file, but we
4561 should at least do something about it. */
4562 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4563 && bfd_is_und_section (sec)
4564 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4565 {
4566 if (!info->callbacks->undefined_symbol
4567 (info, bfd_elf_string_from_elf_section
4568 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4569 input_bfd, input_section, rel->r_offset, TRUE))
4570 return FALSE;
4571 }
4572
a06ea964 4573 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4574
4575 /* Relocate against local STT_GNU_IFUNC symbol. */
4576 if (!info->relocatable
4577 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4578 {
4579 h = elf_aarch64_get_local_sym_hash (globals, input_bfd,
4580 rel, FALSE);
4581 if (h == NULL)
4582 abort ();
4583
4584 /* Set STT_GNU_IFUNC symbol value. */
4585 h->root.u.def.value = sym->st_value;
4586 h->root.u.def.section = sec;
4587 }
4588 }
4589 else
4590 {
4591 bfd_boolean warned;
4592
4593 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4594 r_symndx, symtab_hdr, sym_hashes,
4595 h, sec, relocation,
4596 unresolved_reloc, warned);
4597
4598 sym_type = h->type;
4599 }
4600
4601 if (sec != NULL && discarded_section (sec))
4602 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4603 rel, 1, relend, howto, 0, contents);
4604
4605 if (info->relocatable)
4606 {
4607 /* This is a relocatable link. We don't have to change
4608 anything, unless the reloc is against a section symbol,
4609 in which case we have to adjust according to where the
4610 section symbol winds up in the output section. */
4611 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4612 rel->r_addend += sec->output_offset;
4613 continue;
4614 }
4615
4616 if (h != NULL)
4617 name = h->root.root.string;
4618 else
4619 {
4620 name = (bfd_elf_string_from_elf_section
4621 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4622 if (name == NULL || *name == '\0')
4623 name = bfd_section_name (input_bfd, sec);
4624 }
4625
4626 if (r_symndx != 0
4627 && r_type != R_AARCH64_NONE
4628 && r_type != R_AARCH64_NULL
4629 && (h == NULL
4630 || h->root.type == bfd_link_hash_defined
4631 || h->root.type == bfd_link_hash_defweak)
4632 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4633 {
4634 (*_bfd_error_handler)
4635 ((sym_type == STT_TLS
4636 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4637 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4638 input_bfd,
4639 input_section, (long) rel->r_offset, howto->name, name);
4640 }
4641
4642
4643 /* We relax only if we can see that there can be a valid transition
4644 from a reloc type to another.
4645 We call elf64_aarch64_final_link_relocate unless we're completely
4646 done, i.e., the relaxation produced the final output we want. */
4647
4648 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4649 h, r_symndx);
4650 if (relaxed_r_type != r_type)
4651 {
4652 r_type = relaxed_r_type;
4653 howto = elf64_aarch64_howto_from_type (r_type);
4654
4655 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4656 unresolved_reloc = 0;
4657 }
4658 else
4659 r = bfd_reloc_continue;
4660
4661 /* There may be multiple consecutive relocations for the
4662 same offset. In that case we are supposed to treat the
4663 output of each relocation as the addend for the next. */
4664 if (rel + 1 < relend
4665 && rel->r_offset == rel[1].r_offset
4666 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4667 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4668 save_addend = TRUE;
4669 else
4670 save_addend = FALSE;
4671
4672 if (r == bfd_reloc_continue)
4673 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4674 input_section, contents, rel,
4675 relocation, info, sec,
4676 h, &unresolved_reloc,
692e2b8b 4677 save_addend, &addend, sym);
4678
4679 switch (r_type)
4680 {
4681 case R_AARCH64_TLSGD_ADR_PAGE21:
4682 case R_AARCH64_TLSGD_ADD_LO12_NC:
4683 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4684 {
4685 bfd_boolean need_relocs = FALSE;
4686 bfd_byte *loc;
4687 int indx;
4688 bfd_vma off;
4689
4690 off = symbol_got_offset (input_bfd, h, r_symndx);
4691 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4692
4693 need_relocs =
4694 (info->shared || indx != 0) &&
4695 (h == NULL
4696 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4697 || h->root.type != bfd_link_hash_undefweak);
4698
4699 BFD_ASSERT (globals->root.srelgot != NULL);
4700
4701 if (need_relocs)
4702 {
4703 Elf_Internal_Rela rela;
4704 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4705 rela.r_addend = 0;
4706 rela.r_offset = globals->root.sgot->output_section->vma +
4707 globals->root.sgot->output_offset + off;
4708
4709
4710 loc = globals->root.srelgot->contents;
4711 loc += globals->root.srelgot->reloc_count++
4712 * RELOC_SIZE (globals);
4713 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4714
4715 if (indx == 0)
4716 {
4717 bfd_put_64 (output_bfd,
4718 relocation - dtpoff_base (info),
4719 globals->root.sgot->contents + off
4720 + GOT_ENTRY_SIZE);
4721 }
4722 else
4723 {
4724 /* This TLS symbol is global. We emit a
4725 relocation to fixup the tls offset at load
4726 time. */
4727 rela.r_info =
4728 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4729 rela.r_addend = 0;
4730 rela.r_offset =
4731 (globals->root.sgot->output_section->vma
4732 + globals->root.sgot->output_offset + off
4733 + GOT_ENTRY_SIZE);
4734
4735 loc = globals->root.srelgot->contents;
4736 loc += globals->root.srelgot->reloc_count++
4737 * RELOC_SIZE (globals);
4738 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4739 bfd_put_64 (output_bfd, (bfd_vma) 0,
4740 globals->root.sgot->contents + off
4741 + GOT_ENTRY_SIZE);
4742 }
4743 }
4744 else
4745 {
4746 bfd_put_64 (output_bfd, (bfd_vma) 1,
4747 globals->root.sgot->contents + off);
4748 bfd_put_64 (output_bfd,
4749 relocation - dtpoff_base (info),
4750 globals->root.sgot->contents + off
4751 + GOT_ENTRY_SIZE);
4752 }
4753
4754 symbol_got_offset_mark (input_bfd, h, r_symndx);
4755 }
4756 break;
4757
4758 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4759 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4760 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4761 {
4762 bfd_boolean need_relocs = FALSE;
4763 bfd_byte *loc;
4764 int indx;
4765 bfd_vma off;
4766
4767 off = symbol_got_offset (input_bfd, h, r_symndx);
4768
4769 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4770
4771 need_relocs =
4772 (info->shared || indx != 0) &&
4773 (h == NULL
4774 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4775 || h->root.type != bfd_link_hash_undefweak);
4776
4777 BFD_ASSERT (globals->root.srelgot != NULL);
4778
4779 if (need_relocs)
4780 {
4781 Elf_Internal_Rela rela;
4782
4783 if (indx == 0)
4784 rela.r_addend = relocation - dtpoff_base (info);
4785 else
4786 rela.r_addend = 0;
4787
4788 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4789 rela.r_offset = globals->root.sgot->output_section->vma +
4790 globals->root.sgot->output_offset + off;
4791
4792 loc = globals->root.srelgot->contents;
4793 loc += globals->root.srelgot->reloc_count++
4794 * RELOC_SIZE (globals);
4795
4796 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4797
4798 bfd_put_64 (output_bfd, rela.r_addend,
4799 globals->root.sgot->contents + off);
4800 }
4801 else
4802 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4803 globals->root.sgot->contents + off);
4804
4805 symbol_got_offset_mark (input_bfd, h, r_symndx);
4806 }
4807 break;
4808
4809 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4810 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4811 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4812 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4813 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4814 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4815 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4816 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4817 break;
4818
418009c2 4819 case R_AARCH64_TLSDESC_ADR_PAGE21:
4820 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4821 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4822 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4823 {
4824 bfd_boolean need_relocs = FALSE;
4825 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4826 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4827
4828 need_relocs = (h == NULL
4829 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4830 || h->root.type != bfd_link_hash_undefweak);
4831
4832 BFD_ASSERT (globals->root.srelgot != NULL);
4833 BFD_ASSERT (globals->root.sgot != NULL);
4834
4835 if (need_relocs)
4836 {
4837 bfd_byte *loc;
4838 Elf_Internal_Rela rela;
4839 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4840 rela.r_addend = 0;
4841 rela.r_offset = (globals->root.sgotplt->output_section->vma
4842 + globals->root.sgotplt->output_offset
4843 + off + globals->sgotplt_jump_table_size);
4844
4845 if (indx == 0)
4846 rela.r_addend = relocation - dtpoff_base (info);
4847
4848 /* Allocate the next available slot in the PLT reloc
4849 section to hold our R_AARCH64_TLSDESC, the next
4850 available slot is determined from reloc_count,
4851 which we step. But note, reloc_count was
4852 artificially moved down while allocating slots for
4853 real PLT relocs such that all of the PLT relocs
4854 will fit above the initial reloc_count and the
4855 extra stuff will fit below. */
4856 loc = globals->root.srelplt->contents;
4857 loc += globals->root.srelplt->reloc_count++
4858 * RELOC_SIZE (globals);
4859
4860 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4861
4862 bfd_put_64 (output_bfd, (bfd_vma) 0,
4863 globals->root.sgotplt->contents + off +
4864 globals->sgotplt_jump_table_size);
4865 bfd_put_64 (output_bfd, (bfd_vma) 0,
4866 globals->root.sgotplt->contents + off +
4867 globals->sgotplt_jump_table_size +
4868 GOT_ENTRY_SIZE);
4869 }
4870
4871 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4872 }
4873 break;
4874 }
4875
4876 if (!save_addend)
4877 addend = 0;
4878
4879
4880 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4881 because such sections are not SEC_ALLOC and thus ld.so will
4882 not process them. */
4883 if (unresolved_reloc
4884 && !((input_section->flags & SEC_DEBUGGING) != 0
4885 && h->def_dynamic)
4886 && _bfd_elf_section_offset (output_bfd, info, input_section,
4887 +rel->r_offset) != (bfd_vma) - 1)
4888 {
4889 (*_bfd_error_handler)
4890 (_
4891 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4892 input_bfd, input_section, (long) rel->r_offset, howto->name,
4893 h->root.root.string);
4894 return FALSE;
4895 }
4896
4897 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4898 {
4899 switch (r)
4900 {
4901 case bfd_reloc_overflow:
4902 /* If the overflowing reloc was to an undefined symbol,
4903 we have already printed one error message and there
4904 is no point complaining again. */
4905 if ((!h ||
4906 h->root.type != bfd_link_hash_undefined)
4907 && (!((*info->callbacks->reloc_overflow)
4908 (info, (h ? &h->root : NULL), name, howto->name,
4909 (bfd_vma) 0, input_bfd, input_section,
4910 rel->r_offset))))
4911 return FALSE;
4912 break;
4913
4914 case bfd_reloc_undefined:
4915 if (!((*info->callbacks->undefined_symbol)
4916 (info, name, input_bfd, input_section,
4917 rel->r_offset, TRUE)))
4918 return FALSE;
4919 break;
4920
4921 case bfd_reloc_outofrange:
4922 error_message = _("out of range");
4923 goto common_error;
4924
4925 case bfd_reloc_notsupported:
4926 error_message = _("unsupported relocation");
4927 goto common_error;
4928
4929 case bfd_reloc_dangerous:
4930 /* error_message should already be set. */
4931 goto common_error;
4932
4933 default:
4934 error_message = _("unknown error");
4935 /* Fall through. */
4936
4937 common_error:
4938 BFD_ASSERT (error_message != NULL);
4939 if (!((*info->callbacks->reloc_dangerous)
4940 (info, error_message, input_bfd, input_section,
4941 rel->r_offset)))
4942 return FALSE;
4943 break;
4944 }
4945 }
4946 }
4947
4948 return TRUE;
4949}
4950
4951/* Set the right machine number. */
4952
4953static bfd_boolean
4954elf64_aarch64_object_p (bfd *abfd)
4955{
4956 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4957 return TRUE;
4958}
4959
4960/* Function to keep AArch64 specific flags in the ELF header. */
4961
4962static bfd_boolean
4963elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
4964{
4965 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4966 {
4967 }
4968 else
4969 {
4970 elf_elfheader (abfd)->e_flags = flags;
4971 elf_flags_init (abfd) = TRUE;
4972 }
4973
4974 return TRUE;
4975}
4976
4977/* Copy backend specific data from one object module to another. */
4978
4979static bfd_boolean
4980elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4981{
4982 flagword in_flags;
4983
4984 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4985 return TRUE;
4986
4987 in_flags = elf_elfheader (ibfd)->e_flags;
4988
4989 elf_elfheader (obfd)->e_flags = in_flags;
4990 elf_flags_init (obfd) = TRUE;
4991
4992 /* Also copy the EI_OSABI field. */
4993 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4994 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4995
4996 /* Copy object attributes. */
4997 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4998
4999 return TRUE;
5000}
5001
5002/* Merge backend specific data from an object file to the output
5003 object file when linking. */
5004
5005static bfd_boolean
5006elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5007{
5008 flagword out_flags;
5009 flagword in_flags;
5010 bfd_boolean flags_compatible = TRUE;
5011 asection *sec;
5012
5013 /* Check if we have the same endianness. */
5014 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5015 return FALSE;
5016
5017 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5018 return TRUE;
5019
5020 /* The input BFD must have had its flags initialised. */
5021 /* The following seems bogus to me -- The flags are initialized in
5022 the assembler but I don't think an elf_flags_init field is
5023 written into the object. */
5024 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5025
5026 in_flags = elf_elfheader (ibfd)->e_flags;
5027 out_flags = elf_elfheader (obfd)->e_flags;
5028
5029 if (!elf_flags_init (obfd))
5030 {
5031 /* If the input is the default architecture and had the default
5032 flags then do not bother setting the flags for the output
5033 architecture, instead allow future merges to do this. If no
5034 future merges ever set these flags then they will retain their
5035 uninitialised values, which, surprise surprise, correspond
5036 to the default values. */
5037 if (bfd_get_arch_info (ibfd)->the_default
5038 && elf_elfheader (ibfd)->e_flags == 0)
5039 return TRUE;
5040
5041 elf_flags_init (obfd) = TRUE;
5042 elf_elfheader (obfd)->e_flags = in_flags;
5043
5044 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5045 && bfd_get_arch_info (obfd)->the_default)
5046 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5047 bfd_get_mach (ibfd));
5048
5049 return TRUE;
5050 }
5051
5052 /* Identical flags must be compatible. */
5053 if (in_flags == out_flags)
5054 return TRUE;
5055
5056 /* Check to see if the input BFD actually contains any sections. If
5057 not, its flags may not have been initialised either, but it
5058 cannot actually cause any incompatibility. Do not short-circuit
5059 dynamic objects; their section list may be emptied by
5060 elf_link_add_object_symbols.
5061
5062 Also check to see if there are no code sections in the input.
5063 In this case there is no need to check for code specific flags.
5064 XXX - do we need to worry about floating-point format compatibility
5065 in data sections? */
5066 if (!(ibfd->flags & DYNAMIC))
5067 {
5068 bfd_boolean null_input_bfd = TRUE;
5069 bfd_boolean only_data_sections = TRUE;
5070
5071 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5072 {
5073 if ((bfd_get_section_flags (ibfd, sec)
5074 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5075 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5076 only_data_sections = FALSE;
5077
5078 null_input_bfd = FALSE;
5079 break;
5080 }
5081
5082 if (null_input_bfd || only_data_sections)
5083 return TRUE;
5084 }
5085
5086 return flags_compatible;
5087}
5088
5089/* Display the flags field. */
5090
5091static bfd_boolean
5092elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5093{
5094 FILE *file = (FILE *) ptr;
5095 unsigned long flags;
5096
5097 BFD_ASSERT (abfd != NULL && ptr != NULL);
5098
5099 /* Print normal ELF private data. */
5100 _bfd_elf_print_private_bfd_data (abfd, ptr);
5101
5102 flags = elf_elfheader (abfd)->e_flags;
5103 /* Ignore init flag - it may not be set, despite the flags field
5104 containing valid data. */
5105
5106 /* xgettext:c-format */
5107 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5108
5109 if (flags)
5110 fprintf (file, _("<Unrecognised flag bits set>"));
5111
5112 fputc ('\n', file);
5113
5114 return TRUE;
5115}
5116
5117/* Update the got entry reference counts for the section being removed. */
5118
5119static bfd_boolean
5120elf64_aarch64_gc_sweep_hook (bfd *abfd,
5121 struct bfd_link_info *info,
5122 asection *sec,
5123 const Elf_Internal_Rela * relocs)
a06ea964 5124{
5125 struct elf64_aarch64_link_hash_table *htab;
5126 Elf_Internal_Shdr *symtab_hdr;
5127 struct elf_link_hash_entry **sym_hashes;
cb8af559 5128 struct elf_aarch64_local_symbol *locals;
5129 const Elf_Internal_Rela *rel, *relend;
5130
5131 if (info->relocatable)
5132 return TRUE;
5133
5134 htab = elf64_aarch64_hash_table (info);
5135
5136 if (htab == NULL)
5137 return FALSE;
5138
5139 elf_section_data (sec)->local_dynrel = NULL;
5140
5141 symtab_hdr = &elf_symtab_hdr (abfd);
5142 sym_hashes = elf_sym_hashes (abfd);
5143
cb8af559 5144 locals = elf64_aarch64_locals (abfd);
5145
5146 relend = relocs + sec->reloc_count;
5147 for (rel = relocs; rel < relend; rel++)
5148 {
5149 unsigned long r_symndx;
5150 unsigned int r_type;
5151 struct elf_link_hash_entry *h = NULL;
5152
5153 r_symndx = ELF64_R_SYM (rel->r_info);
5154 if (r_symndx >= symtab_hdr->sh_info)
5155 {
5156 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5157 while (h->root.type == bfd_link_hash_indirect
5158 || h->root.type == bfd_link_hash_warning)
5159 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5160 }
5161 else
5162 {
692e2b8b 5163 /* A local symbol. */
5164 Elf_Internal_Sym *isym;
5165
5166 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5167 abfd, r_symndx);
5168
5169 /* Check relocation against local STT_GNU_IFUNC symbol. */
5170 if (isym != NULL
5171 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5172 {
5173 h = elf_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5174 if (h == NULL)
5175 abort ();
5176 }
5177 }
5178
5179 if (h)
5180 {
5181 struct elf64_aarch64_link_hash_entry *eh;
5182 struct elf_dyn_relocs **pp;
5183 struct elf_dyn_relocs *p;
5184
5185 eh = (struct elf64_aarch64_link_hash_entry *) h;
5186
5187 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5188 if (p->sec == sec)
5189 {
5190 /* Everything must go for SEC. */
5191 *pp = p->next;
5192 break;
5193 }
5194 }
5195
5196 r_type = ELF64_R_TYPE (rel->r_info);
5197 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5198 switch (r_type)
5199 {
5200 case R_AARCH64_LD64_GOT_LO12_NC:
5201 case R_AARCH64_GOT_LD_PREL19:
5202 case R_AARCH64_ADR_GOT_PAGE:
5203 case R_AARCH64_TLSGD_ADR_PAGE21:
5204 case R_AARCH64_TLSGD_ADD_LO12_NC:
5205 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5206 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5207 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5208 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5209 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5210 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5211 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5212 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5213 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5214 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
418009c2 5215 case R_AARCH64_TLSDESC_ADR_PAGE21:
5216 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5217 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5218 if (h != NULL)
5219 {
5220 if (h->got.refcount > 0)
5221 h->got.refcount -= 1;
5222
5223 if (h->type == STT_GNU_IFUNC)
5224 {
5225 if (h->plt.refcount > 0)
5226 h->plt.refcount -= 1;
5227 }
59c108f7 5228 }
cb8af559 5229 else if (locals != NULL)
59c108f7 5230 {
5231 if (locals[r_symndx].got_refcount > 0)
5232 locals[r_symndx].got_refcount -= 1;
5233 }
5234 break;
5235
5236 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5237 case R_AARCH64_ADR_PREL_PG_HI21:
5238 case R_AARCH64_ADR_PREL_LO21:
5239 if (h != NULL && info->executable)
5240 {
5241 if (h->plt.refcount > 0)
5242 h->plt.refcount -= 1;
5243 }
5244 break;
5245
5246 case R_AARCH64_CALL26:
5247 case R_AARCH64_JUMP26:
5248 /* If this is a local symbol then we resolve it
5249 directly without creating a PLT entry. */
5250 if (h == NULL)
5251 continue;
5252
5253 if (h->plt.refcount > 0)
5254 h->plt.refcount -= 1;
5255 break;
5256
5257 case R_AARCH64_ABS64:
692e2b8b 5258 if (h != NULL)
5259 {
5260 if (h->plt.refcount > 0)
5261 h->plt.refcount -= 1;
5262 }
5263 break;
5264
5265 default:
5266 break;
5267 }
5268 }
5269
5270 return TRUE;
5271}
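/* Illustrative summary: the hook above undoes the reference counting
   performed by elf64_aarch64_check_relocs for each relocation in a
   section that the garbage collector discards, so GOT and PLT
   entries are not allocated for symbols referenced only from
   dropped sections. */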
5272
5273/* Adjust a symbol defined by a dynamic object and referenced by a
5274 regular object. The current definition is in some section of the
5275 dynamic object, but we're not including those sections. We have to
5276 change the definition to something the rest of the link can
5277 understand. */
5278
5279static bfd_boolean
5280elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5281 struct elf_link_hash_entry *h)
5282{
5283 struct elf64_aarch64_link_hash_table *htab;
5284 asection *s;
5285
5286 /* If this is a function, put it in the procedure linkage table. We
5287 will fill in the contents of the procedure linkage table later,
5288 when we know the address of the .got section. */
692e2b8b 5289 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5290 {
5291 if (h->plt.refcount <= 0
5292 || (h->type != STT_GNU_IFUNC
5293 && (SYMBOL_CALLS_LOCAL (info, h)
5294 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5295 && h->root.type == bfd_link_hash_undefweak))))
5296 {
5297 /* This case can occur if we saw a CALL26 reloc in
5298 an input file, but the symbol wasn't referred to
5299 by a dynamic object, or all references were
5300 garbage collected. In that case we can end up
5301 resolving the calls directly, so no PLT entry is needed. */
5302 h->plt.offset = (bfd_vma) - 1;
5303 h->needs_plt = 0;
5304 }
5305
5306 return TRUE;
5307 }
5308 else
5309 /* It's possible that we incorrectly decided a .plt reloc was
5310 needed for an R_X86_64_PC32 reloc to a non-function sym in
5311 check_relocs. We can't decide accurately between function and
5312 non-function syms in check-relocs; Objects loaded later in
5313 the link may change h->type. So fix it now. */
5314 h->plt.offset = (bfd_vma) - 1;
5315
5316
5317 /* If this is a weak symbol, and there is a real definition, the
5318 processor independent code will have arranged for us to see the
5319 real definition first, and we can just use the same value. */
5320 if (h->u.weakdef != NULL)
5321 {
5322 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5323 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5324 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5325 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5326 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5327 h->non_got_ref = h->u.weakdef->non_got_ref;
5328 return TRUE;
5329 }
5330
5331 /* If we are creating a shared library, we must presume that the
5332 only references to the symbol are via the global offset table.
5333 For such cases we need not do anything here; the relocations will
5334 be handled correctly by relocate_section. */
5335 if (info->shared)
5336 return TRUE;
5337
5338 /* If there are no references to this symbol that do not use the
5339 GOT, we don't need to generate a copy reloc. */
5340 if (!h->non_got_ref)
5341 return TRUE;
5342
5343 /* If -z nocopyreloc was given, we won't generate them either. */
5344 if (info->nocopyreloc)
5345 {
5346 h->non_got_ref = 0;
5347 return TRUE;
5348 }
5349
5350 /* We must allocate the symbol in our .dynbss section, which will
5351 become part of the .bss section of the executable. There will be
5352 an entry for this symbol in the .dynsym section. The dynamic
5353 object will contain position independent code, so all references
5354 from the dynamic object to this symbol will go through the global
5355 offset table. The dynamic linker will use the .dynsym entry to
5356 determine the address it must put in the global offset table, so
5357 both the dynamic object and the regular object will refer to the
5358 same memory location for the variable. */
5359
5360 htab = elf64_aarch64_hash_table (info);
5361
5362 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5363 to copy the initial value out of the dynamic object and into the
5364 runtime process image. */
5365 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5366 {
5367 htab->srelbss->size += RELOC_SIZE (htab);
5368 h->needs_copy = 1;
5369 }
5370
5371 s = htab->sdynbss;
5372
5373 return _bfd_elf_adjust_dynamic_copy (h, s);
5374
5375}
5376
5377static bfd_boolean
5378elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5379{
5380 struct elf_aarch64_local_symbol *locals;
5381 locals = elf64_aarch64_locals (abfd);
5382 if (locals == NULL)
5383 {
5384 locals = (struct elf_aarch64_local_symbol *)
5385 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5386 if (locals == NULL)
5387 return FALSE;
5388 elf64_aarch64_locals (abfd) = locals;
5389 }
5390 return TRUE;
5391}
5392
5393/* Look through the relocs for a section during the first phase. */
5394
5395static bfd_boolean
5396elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5397 asection *sec, const Elf_Internal_Rela *relocs)
5398{
5399 Elf_Internal_Shdr *symtab_hdr;
5400 struct elf_link_hash_entry **sym_hashes;
5401 const Elf_Internal_Rela *rel;
5402 const Elf_Internal_Rela *rel_end;
5403 asection *sreloc;
5404
5405 struct elf64_aarch64_link_hash_table *htab;
5406
a06ea964
NC
5407 if (info->relocatable)
5408 return TRUE;
5409
5410 BFD_ASSERT (is_aarch64_elf (abfd));
5411
5412 htab = elf64_aarch64_hash_table (info);
5413 sreloc = NULL;
5414
5415 symtab_hdr = &elf_symtab_hdr (abfd);
5416 sym_hashes = elf_sym_hashes (abfd);
a06ea964
NC
5417
5418 rel_end = relocs + sec->reloc_count;
5419 for (rel = relocs; rel < rel_end; rel++)
5420 {
5421 struct elf_link_hash_entry *h;
5422 unsigned long r_symndx;
5423 unsigned int r_type;
692e2b8b 5424 Elf_Internal_Sym *isym;
a06ea964
NC
5425
5426 r_symndx = ELF64_R_SYM (rel->r_info);
5427 r_type = ELF64_R_TYPE (rel->r_info);
5428
5429 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5430 {
5431 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5432 r_symndx);
5433 return FALSE;
5434 }
5435
ed5acf27 5436 if (r_symndx < symtab_hdr->sh_info)
692e2b8b
WN
5437 {
5438 /* A local symbol. */
5439 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5440 abfd, r_symndx);
5441 if (isym == NULL)
5442 return FALSE;
5443
5444 /* Check relocation against local STT_GNU_IFUNC symbol. */
5445 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5446 {
5447 h = elf_aarch64_get_local_sym_hash (htab, abfd, rel,
5448 TRUE);
5449 if (h == NULL)
5450 return FALSE;
5451
5452 /* Fake a STT_GNU_IFUNC symbol. */
5453 h->type = STT_GNU_IFUNC;
5454 h->def_regular = 1;
5455 h->ref_regular = 1;
5456 h->forced_local = 1;
5457 h->root.type = bfd_link_hash_defined;
5458 }
5459 else
5460 h = NULL;
5461 }
a06ea964
NC
5462 else
5463 {
5464 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5465 while (h->root.type == bfd_link_hash_indirect
5466 || h->root.type == bfd_link_hash_warning)
5467 h = (struct elf_link_hash_entry *) h->root.u.i.link;
81fbe831
AM
5468
5469 /* PR15323, ref flags aren't set for references in the same
5470 object. */
5471 h->root.non_ir_ref = 1;
a06ea964
NC
5472 }
5473
692e2b8b
WN
5474 if (h != NULL)
5475 {
5476 /* Create the ifunc sections for static executables. If we
5477 never see an indirect function symbol nor are we building
5478 a static executable, those sections will be empty and
5479 won't appear in output. */
5480 switch (r_type)
5481 {
5482 default:
5483 break;
5484
5485 case R_AARCH64_ABS64:
5486 case R_AARCH64_CALL26:
5487 case R_AARCH64_JUMP26:
5488 case R_AARCH64_LD64_GOT_LO12_NC:
5489 case R_AARCH64_ADR_GOT_PAGE:
5490 case R_AARCH64_GOT_LD_PREL19:
5491 case R_AARCH64_ADR_PREL_PG_HI21:
5492 case R_AARCH64_ADD_ABS_LO12_NC:
5493 if (htab->root.dynobj == NULL)
5494 htab->root.dynobj = abfd;
5495 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5496 return FALSE;
5497 break;
5498 }
5499
5500 /* It is referenced by a non-shared object. */
5501 h->ref_regular = 1;
5502 h->root.non_ir_ref = 1;
5503 }
5504
a06ea964
NC
5505 /* Could be done earlier, if h were already available. */
5506 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5507
5508 switch (r_type)
5509 {
5510 case R_AARCH64_ABS64:
5511
5512 /* We don't need to handle relocs into sections not going into
5513 the "real" output. */
5514 if ((sec->flags & SEC_ALLOC) == 0)
5515 break;
5516
5517 if (h != NULL)
5518 {
5519 if (!info->shared)
5520 h->non_got_ref = 1;
5521
5522 h->plt.refcount += 1;
5523 h->pointer_equality_needed = 1;
5524 }
5525
5526 /* No need to do anything if we're not creating a shared
5527 object. */
5528 if (! info->shared)
5529 break;
5530
5531 {
5532 struct elf_dyn_relocs *p;
5533 struct elf_dyn_relocs **head;
5534
5535 /* We must copy these reloc types into the output file.
5536 Create a reloc section in dynobj and make room for
5537 this reloc. */
5538 if (sreloc == NULL)
5539 {
5540 if (htab->root.dynobj == NULL)
5541 htab->root.dynobj = abfd;
5542
5543 sreloc = _bfd_elf_make_dynamic_reloc_section
5544 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5545
5546 if (sreloc == NULL)
5547 return FALSE;
5548 }
5549
5550 /* If this is a global symbol, we count the number of
5551 relocations we need for this symbol. */
5552 if (h != NULL)
5553 {
5554 struct elf64_aarch64_link_hash_entry *eh;
5555 eh = (struct elf64_aarch64_link_hash_entry *) h;
5556 head = &eh->dyn_relocs;
5557 }
5558 else
5559 {
5560 /* Track dynamic relocs needed for local syms too.
5561 We really need local syms available to do this
5562 easily. Oh well. */
5563
5564 asection *s;
5565 void **vpp;
a06ea964
NC
5566
5567 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5568 abfd, r_symndx);
5569 if (isym == NULL)
5570 return FALSE;
5571
5572 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5573 if (s == NULL)
5574 s = sec;
5575
5576 /* Beware of type punned pointers vs strict aliasing
5577 rules. */
5578 vpp = &(elf_section_data (s)->local_dynrel);
5579 head = (struct elf_dyn_relocs **) vpp;
5580 }
5581
5582 p = *head;
5583 if (p == NULL || p->sec != sec)
5584 {
5585 bfd_size_type amt = sizeof *p;
5586 p = ((struct elf_dyn_relocs *)
5587 bfd_zalloc (htab->root.dynobj, amt));
5588 if (p == NULL)
5589 return FALSE;
5590 p->next = *head;
5591 *head = p;
5592 p->sec = sec;
5593 }
5594
5595 p->count += 1;
5596
5597 }
5598 break;
5599
5600 /* RR: We probably want to keep a consistency check that
5601 there are no dangling GOT_PAGE relocs. */
5602 case R_AARCH64_LD64_GOT_LO12_NC:
f41aef5f 5603 case R_AARCH64_GOT_LD_PREL19:
a06ea964
NC
5604 case R_AARCH64_ADR_GOT_PAGE:
5605 case R_AARCH64_TLSGD_ADR_PAGE21:
5606 case R_AARCH64_TLSGD_ADD_LO12_NC:
5607 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5608 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5609 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5610 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5611 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5612 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5613 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5614 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5615 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5616 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
418009c2 5617 case R_AARCH64_TLSDESC_ADR_PAGE21:
a06ea964
NC
5618 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5619 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5620 {
5621 unsigned got_type;
5622 unsigned old_got_type;
5623
5624 got_type = aarch64_reloc_got_type (r_type);
5625
5626 if (h)
5627 {
5628 h->got.refcount += 1;
5629 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5630 }
5631 else
5632 {
5633 struct elf_aarch64_local_symbol *locals;
5634
5635 if (!elf64_aarch64_allocate_local_symbols
5636 (abfd, symtab_hdr->sh_info))
5637 return FALSE;
5638
5639 locals = elf64_aarch64_locals (abfd);
5640 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5641 locals[r_symndx].got_refcount += 1;
5642 old_got_type = locals[r_symndx].got_type;
5643 }
5644
5645 /* If a variable is accessed with both general dynamic TLS
5646 methods, two slots may be created. */
5647 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5648 got_type |= old_got_type;
5649
5650 /* We will already have issued an error message if there
5651 is a TLS/non-TLS mismatch, based on the symbol type.
5652 So just combine any TLS types needed. */
5653 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5654 && got_type != GOT_NORMAL)
5655 got_type |= old_got_type;
5656
5657 /* If the symbol is accessed by both IE and GD methods, we
5658 are able to relax. Turn off the GD flag, without
5659 messing up any other kind of TLS type that may be
5660 involved. */
5661 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5662 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5663
5664 if (old_got_type != got_type)
5665 {
5666 if (h != NULL)
5667 elf64_aarch64_hash_entry (h)->got_type = got_type;
5668 else
5669 {
5670 struct elf_aarch64_local_symbol *locals;
5671 locals = elf64_aarch64_locals (abfd);
5672 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5673 locals[r_symndx].got_type = got_type;
5674 }
5675 }
5676
5677 if (htab->root.sgot == NULL)
5678 {
5679 if (htab->root.dynobj == NULL)
5680 htab->root.dynobj = abfd;
5681 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5682 return FALSE;
5683 }
5684 break;
5685 }
5686
5687 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5688 case R_AARCH64_ADR_PREL_PG_HI21:
f41aef5f 5689 case R_AARCH64_ADR_PREL_LO21:
a06ea964
NC
5690 if (h != NULL && info->executable)
5691 {
5692 /* If this reloc is in a read-only section, we might
5693 need a copy reloc. We can't check reliably at this
5694 stage whether the section is read-only, as input
5695 sections have not yet been mapped to output sections.
5696 Tentatively set the flag for now, and correct in
5697 adjust_dynamic_symbol. */
5698 h->non_got_ref = 1;
5699 h->plt.refcount += 1;
5700 h->pointer_equality_needed = 1;
5701 }
5702 /* FIXME: RR: we need to handle these in shared libraries
5703 and essentially bomb out, as these are non-PIC
5704 relocations in shared libraries. */
5705 break;
5706
5707 case R_AARCH64_CALL26:
5708 case R_AARCH64_JUMP26:
5709 /* If this is a local symbol then we resolve it
5710 directly without creating a PLT entry. */
5711 if (h == NULL)
5712 continue;
5713
5714 h->needs_plt = 1;
692e2b8b
WN
5715 if (h->plt.refcount <= 0)
5716 h->plt.refcount = 1;
5717 else
5718 h->plt.refcount += 1;
a06ea964
NC
5719 break;
5720 }
5721 }
5722 return TRUE;
5723}
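
A minimal stand-alone sketch of the GOT-type merging done in elf64_aarch64_check_relocs above: the access models seen for a symbol are OR-ed together, and the GD flavours are dropped once an IE access is also present. The X_GOT_* values below are illustrative stand-ins, not the file's real GOT_UNKNOWN/GOT_NORMAL/GOT_TLS_*/GOT_TLSDESC_GD masks.

   /* Illustrative flag values only.  */
   enum
   {
     X_GOT_UNKNOWN    = 0,
     X_GOT_NORMAL     = 1 << 0,
     X_GOT_TLS_GD     = 1 << 1,
     X_GOT_TLS_IE     = 1 << 2,
     X_GOT_TLSDESC_GD = 1 << 3
   };

   static unsigned
   x_combine_got_type (unsigned old_type, unsigned new_type)
   {
     unsigned got = new_type;

     /* Accumulate the TLS access models requested so far; plain
        GOT_NORMAL accesses are left alone.  */
     if (old_type != X_GOT_UNKNOWN && old_type != X_GOT_NORMAL
         && got != X_GOT_NORMAL)
       got |= old_type;

     /* A symbol accessed by both IE and a GD flavour can be relaxed
        to IE only, so the GD bits are dropped.  */
     if ((got & X_GOT_TLS_IE) != 0)
       got &= ~(unsigned) (X_GOT_TLS_GD | X_GOT_TLSDESC_GD);

     return got;
   }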
5724
5725/* Treat mapping symbols as special target symbols. */
5726
5727static bfd_boolean
5728elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5729 asymbol *sym)
5730{
5731 return bfd_is_aarch64_special_symbol_name (sym->name,
5732 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5733}
5734
5735/* This is a copy of elf_find_function () from elf.c except that
5736 AArch64 mapping symbols are ignored when looking for function names. */
5737
5738static bfd_boolean
5739aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5740 asection *section,
5741 asymbol **symbols,
5742 bfd_vma offset,
5743 const char **filename_ptr,
5744 const char **functionname_ptr)
5745{
5746 const char *filename = NULL;
5747 asymbol *func = NULL;
5748 bfd_vma low_func = 0;
5749 asymbol **p;
5750
5751 for (p = symbols; *p != NULL; p++)
5752 {
5753 elf_symbol_type *q;
5754
5755 q = (elf_symbol_type *) * p;
5756
5757 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5758 {
5759 default:
5760 break;
5761 case STT_FILE:
5762 filename = bfd_asymbol_name (&q->symbol);
5763 break;
5764 case STT_FUNC:
5765 case STT_NOTYPE:
5766 /* Skip mapping symbols. */
5767 if ((q->symbol.flags & BSF_LOCAL)
5768 && (bfd_is_aarch64_special_symbol_name
5769 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5770 continue;
5771 /* Fall through. */
5772 if (bfd_get_section (&q->symbol) == section
5773 && q->symbol.value >= low_func && q->symbol.value <= offset)
5774 {
5775 func = (asymbol *) q;
5776 low_func = q->symbol.value;
5777 }
5778 break;
5779 }
5780 }
5781
5782 if (func == NULL)
5783 return FALSE;
5784
5785 if (filename_ptr)
5786 *filename_ptr = filename;
5787 if (functionname_ptr)
5788 *functionname_ptr = bfd_asymbol_name (func);
5789
5790 return TRUE;
5791}
5792
5793
5794/* Find the nearest line to a particular section and offset, for error
5795 reporting. This code is a duplicate of the code in elf.c, except
5796 that it uses aarch64_elf_find_function. */
5797
5798static bfd_boolean
5799elf64_aarch64_find_nearest_line (bfd *abfd,
5800 asection *section,
5801 asymbol **symbols,
5802 bfd_vma offset,
5803 const char **filename_ptr,
5804 const char **functionname_ptr,
5805 unsigned int *line_ptr)
5806{
5807 bfd_boolean found = FALSE;
5808
5809 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5810 toolchain uses it. */
5811
5812 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5813 section, symbols, offset,
5814 filename_ptr, functionname_ptr,
5815 line_ptr, NULL, 0,
5816 &elf_tdata (abfd)->dwarf2_find_line_info))
5817 {
5818 if (!*functionname_ptr)
5819 aarch64_elf_find_function (abfd, section, symbols, offset,
5820 *filename_ptr ? NULL : filename_ptr,
5821 functionname_ptr);
5822
5823 return TRUE;
5824 }
5825
5826 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5827 &found, filename_ptr,
5828 functionname_ptr, line_ptr,
5829 &elf_tdata (abfd)->line_info))
5830 return FALSE;
5831
5832 if (found && (*functionname_ptr || *line_ptr))
5833 return TRUE;
5834
5835 if (symbols == NULL)
5836 return FALSE;
5837
5838 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5839 filename_ptr, functionname_ptr))
5840 return FALSE;
5841
5842 *line_ptr = 0;
5843 return TRUE;
5844}
5845
5846static bfd_boolean
5847elf64_aarch64_find_inliner_info (bfd *abfd,
5848 const char **filename_ptr,
5849 const char **functionname_ptr,
5850 unsigned int *line_ptr)
5851{
5852 bfd_boolean found;
5853 found = _bfd_dwarf2_find_inliner_info
5854 (abfd, filename_ptr,
5855 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5856 return found;
5857}
5858
5859
5860static void
5861elf64_aarch64_post_process_headers (bfd *abfd,
692e2b8b 5862 struct bfd_link_info *link_info)
a06ea964
NC
5863{
5864 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5865
5866 i_ehdrp = elf_elfheader (abfd);
a06ea964 5867 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
692e2b8b
WN
5868
5869 _bfd_elf_set_osabi (abfd, link_info);
a06ea964
NC
5870}
5871
5872static enum elf_reloc_type_class
7e612e98
AM
5873elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5874 const asection *rel_sec ATTRIBUTE_UNUSED,
5875 const Elf_Internal_Rela *rela)
a06ea964
NC
5876{
5877 switch ((int) ELF64_R_TYPE (rela->r_info))
5878 {
5879 case R_AARCH64_RELATIVE:
5880 return reloc_class_relative;
5881 case R_AARCH64_JUMP_SLOT:
5882 return reloc_class_plt;
5883 case R_AARCH64_COPY:
5884 return reloc_class_copy;
5885 default:
5886 return reloc_class_normal;
5887 }
5888}
5889
5890/* Adjust a section's flags when reading an AArch64 ELF file. */
5891
5892static bfd_boolean
5893elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5894{
5895 if (hdr->sh_type == SHT_NOTE)
5896 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5897
5898 return TRUE;
5899}
5900
5901/* Handle an AArch64 specific section when reading an object file. This is
5902 called when bfd_section_from_shdr finds a section with an unknown
5903 type. */
5904
5905static bfd_boolean
5906elf64_aarch64_section_from_shdr (bfd *abfd,
5907 Elf_Internal_Shdr *hdr,
5908 const char *name, int shindex)
5909{
5910 /* There ought to be a place to keep ELF backend specific flags, but
5911 at the moment there isn't one. We just keep track of the
5912 sections by their name, instead. Fortunately, the ABI gives
5913 names for all the AArch64 specific sections, so we will probably get
5914 away with this. */
5915 switch (hdr->sh_type)
5916 {
5917 case SHT_AARCH64_ATTRIBUTES:
5918 break;
5919
5920 default:
5921 return FALSE;
5922 }
5923
5924 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5925 return FALSE;
5926
5927 return TRUE;
5928}
5929
5930/* A structure used to record a list of sections, independently
5931 of the next and prev fields in the asection structure. */
5932typedef struct section_list
5933{
5934 asection *sec;
5935 struct section_list *next;
5936 struct section_list *prev;
5937}
5938section_list;
5939
5940/* Unfortunately we need to keep a list of sections for which
5941 an _aarch64_elf_section_data structure has been allocated. This
5942 is because it is possible for functions like elf64_aarch64_write_section
5943 to be called on a section which has had an elf_data_structure
5944 allocated for it (and so the used_by_bfd field is valid) but
5945 for which the AArch64 extended version of this structure - the
5946 _aarch64_elf_section_data structure - has not been allocated. */
5947static section_list *sections_with_aarch64_elf_section_data = NULL;
5948
5949static void
5950record_section_with_aarch64_elf_section_data (asection *sec)
5951{
5952 struct section_list *entry;
5953
5954 entry = bfd_malloc (sizeof (*entry));
5955 if (entry == NULL)
5956 return;
5957 entry->sec = sec;
5958 entry->next = sections_with_aarch64_elf_section_data;
5959 entry->prev = NULL;
5960 if (entry->next != NULL)
5961 entry->next->prev = entry;
5962 sections_with_aarch64_elf_section_data = entry;
5963}
5964
5965static struct section_list *
5966find_aarch64_elf_section_entry (asection *sec)
5967{
5968 struct section_list *entry;
5969 static struct section_list *last_entry = NULL;
5970
5971 /* This is a short cut for the typical case where the sections are added
5972 to the sections_with_aarch64_elf_section_data list in forward order and
5973 then looked up here in backwards order. This makes a real difference
5974 to the ld-srec/sec64k.exp linker test. */
5975 entry = sections_with_aarch64_elf_section_data;
5976 if (last_entry != NULL)
5977 {
5978 if (last_entry->sec == sec)
5979 entry = last_entry;
5980 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5981 entry = last_entry->next;
5982 }
5983
5984 for (; entry; entry = entry->next)
5985 if (entry->sec == sec)
5986 break;
5987
5988 if (entry)
5989 /* Record the entry prior to this one - it is the entry we are
5990 most likely to want to locate next time. Also this way if we
5991 have been called from
5992 unrecord_section_with_aarch64_elf_section_data () we will not
5993 be caching a pointer that is about to be freed. */
5994 last_entry = entry->prev;
5995
5996 return entry;
5997}
5998
5999static void
6000unrecord_section_with_aarch64_elf_section_data (asection *sec)
6001{
6002 struct section_list *entry;
6003
6004 entry = find_aarch64_elf_section_entry (sec);
6005
6006 if (entry)
6007 {
6008 if (entry->prev != NULL)
6009 entry->prev->next = entry->next;
6010 if (entry->next != NULL)
6011 entry->next->prev = entry->prev;
6012 if (entry == sections_with_aarch64_elf_section_data)
6013 sections_with_aarch64_elf_section_data = entry->next;
6014 free (entry);
6015 }
6016}
6017
6018
6019typedef struct
6020{
6021 void *finfo;
6022 struct bfd_link_info *info;
6023 asection *sec;
6024 int sec_shndx;
6025 int (*func) (void *, const char *, Elf_Internal_Sym *,
6026 asection *, struct elf_link_hash_entry *);
6027} output_arch_syminfo;
6028
6029enum map_symbol_type
6030{
6031 AARCH64_MAP_INSN,
6032 AARCH64_MAP_DATA
6033};
6034
6035
6036/* Output a single mapping symbol. */
6037
6038static bfd_boolean
6039elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
6040 enum map_symbol_type type, bfd_vma offset)
6041{
6042 static const char *names[2] = { "$x", "$d" };
6043 Elf_Internal_Sym sym;
6044
6045 sym.st_value = (osi->sec->output_section->vma
6046 + osi->sec->output_offset + offset);
6047 sym.st_size = 0;
6048 sym.st_other = 0;
6049 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6050 sym.st_shndx = osi->sec_shndx;
6051 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6052}
6053
6054
6055
6056/* Output mapping symbols for PLT entries associated with H. */
6057
6058static bfd_boolean
6059elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6060{
6061 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6062 bfd_vma addr;
6063
6064 if (h->root.type == bfd_link_hash_indirect)
6065 return TRUE;
6066
6067 if (h->root.type == bfd_link_hash_warning)
6068 /* When warning symbols are created, they **replace** the "real"
6069 entry in the hash table, thus we never get to see the real
6070 symbol in a hash traversal. So look at it now. */
6071 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6072
6073 if (h->plt.offset == (bfd_vma) - 1)
6074 return TRUE;
6075
6076 addr = h->plt.offset;
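  /* The first PLT slot after the PLT0 header (presumably 32 bytes on
     this target) sits at offset 32, so a single $x mapping symbol is
     emitted at the start of the PLTn area; the entries that follow are
     contiguous code and need no further mapping symbols.  */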
6077 if (addr == 32)
6078 {
6079 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6080 return FALSE;
6081 }
6082 return TRUE;
6083}
6084
6085
6086/* Output a single local symbol for a generated stub. */
6087
6088static bfd_boolean
6089elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6090 bfd_vma offset, bfd_vma size)
6091{
6092 Elf_Internal_Sym sym;
6093
6094 sym.st_value = (osi->sec->output_section->vma
6095 + osi->sec->output_offset + offset);
6096 sym.st_size = size;
6097 sym.st_other = 0;
6098 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6099 sym.st_shndx = osi->sec_shndx;
6100 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6101}
6102
6103static bfd_boolean
6104aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6105{
6106 struct elf64_aarch64_stub_hash_entry *stub_entry;
6107 asection *stub_sec;
6108 bfd_vma addr;
6109 char *stub_name;
6110 output_arch_syminfo *osi;
6111
6112 /* Massage our args to the form they really have. */
6113 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
6114 osi = (output_arch_syminfo *) in_arg;
6115
6116 stub_sec = stub_entry->stub_sec;
6117
6118 /* Ensure this stub is attached to the current section being
6119 processed. */
6120 if (stub_sec != osi->sec)
6121 return TRUE;
6122
6123 addr = (bfd_vma) stub_entry->stub_offset;
6124
6125 stub_name = stub_entry->output_name;
6126
6127 switch (stub_entry->stub_type)
6128 {
6129 case aarch64_stub_adrp_branch:
6130 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
6131 sizeof (aarch64_adrp_branch_stub)))
6132 return FALSE;
6133 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6134 return FALSE;
6135 break;
6136 case aarch64_stub_long_branch:
6137 if (!elf64_aarch64_output_stub_sym
6138 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6139 return FALSE;
6140 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6141 return FALSE;
6142 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6143 return FALSE;
6144 break;
6145 default:
6146 BFD_FAIL ();
6147 }
6148
6149 return TRUE;
6150}
6151
6152/* Output mapping symbols for linker generated sections. */
6153
6154static bfd_boolean
6155elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
6156 struct bfd_link_info *info,
6157 void *finfo,
6158 int (*func) (void *, const char *,
6159 Elf_Internal_Sym *,
6160 asection *,
6161 struct elf_link_hash_entry
6162 *))
6163{
6164 output_arch_syminfo osi;
6165 struct elf64_aarch64_link_hash_table *htab;
6166
6167 htab = elf64_aarch64_hash_table (info);
6168
6169 osi.finfo = finfo;
6170 osi.info = info;
6171 osi.func = func;
6172
6173 /* Long calls stubs. */
6174 if (htab->stub_bfd && htab->stub_bfd->sections)
6175 {
6176 asection *stub_sec;
6177
6178 for (stub_sec = htab->stub_bfd->sections;
6179 stub_sec != NULL; stub_sec = stub_sec->next)
6180 {
6181 /* Ignore non-stub sections. */
6182 if (!strstr (stub_sec->name, STUB_SUFFIX))
6183 continue;
6184
6185 osi.sec = stub_sec;
6186
6187 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6188 (output_bfd, osi.sec->output_section);
6189
6190 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6191 &osi);
6192 }
6193 }
6194
6195 /* Finally, output mapping symbols for the PLT. */
6196 if (!htab->root.splt || htab->root.splt->size == 0)
6197 return TRUE;
6198
6199 /* For now we only emit a minimal set of mapping symbols for the PLT. */
6200 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6201 (output_bfd, htab->root.splt->output_section);
6202 osi.sec = htab->root.splt;
6203
6204 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
6205 (void *) &osi);
6206
6207 return TRUE;
6208
6209}
6210
6211/* Allocate target specific section data. */
6212
6213static bfd_boolean
6214elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
6215{
6216 if (!sec->used_by_bfd)
6217 {
6218 _aarch64_elf_section_data *sdata;
6219 bfd_size_type amt = sizeof (*sdata);
6220
6221 sdata = bfd_zalloc (abfd, amt);
6222 if (sdata == NULL)
6223 return FALSE;
6224 sec->used_by_bfd = sdata;
6225 }
6226
6227 record_section_with_aarch64_elf_section_data (sec);
6228
6229 return _bfd_elf_new_section_hook (abfd, sec);
6230}
6231
6232
6233static void
6234unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6235 asection *sec,
6236 void *ignore ATTRIBUTE_UNUSED)
6237{
6238 unrecord_section_with_aarch64_elf_section_data (sec);
6239}
6240
6241static bfd_boolean
6242elf64_aarch64_close_and_cleanup (bfd *abfd)
6243{
6244 if (abfd->sections)
6245 bfd_map_over_sections (abfd,
6246 unrecord_section_via_map_over_sections, NULL);
6247
6248 return _bfd_elf_close_and_cleanup (abfd);
6249}
6250
6251static bfd_boolean
6252elf64_aarch64_bfd_free_cached_info (bfd *abfd)
6253{
6254 if (abfd->sections)
6255 bfd_map_over_sections (abfd,
6256 unrecord_section_via_map_over_sections, NULL);
6257
6258 return _bfd_free_cached_info (abfd);
6259}
6260
a06ea964
NC
6261/* Create dynamic sections. This is different from the ARM backend in that
6262 the got, plt, gotplt and their relocation sections are all created in the
6263 standard part of the bfd elf backend. */
6264
6265static bfd_boolean
6266elf64_aarch64_create_dynamic_sections (bfd *dynobj,
6267 struct bfd_link_info *info)
6268{
6269 struct elf64_aarch64_link_hash_table *htab;
6270 struct elf_link_hash_entry *h;
6271
6272 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6273 return FALSE;
6274
6275 htab = elf64_aarch64_hash_table (info);
6276 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6277 if (!info->shared)
6278 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6279
6280 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6281 abort ();
6282
6283 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
6284 dynobj's .got section. We don't do this in the linker script
6285 because we don't want to define the symbol if we are not creating
6286 a global offset table. */
6287 h = _bfd_elf_define_linkage_sym (dynobj, info,
6288 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
6289 elf_hash_table (info)->hgot = h;
6290 if (h == NULL)
6291 return FALSE;
6292
6293 return TRUE;
6294}
6295
6296
6297/* Allocate space in .plt, .got and associated reloc sections for
6298 dynamic relocs. */
6299
6300static bfd_boolean
6301elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6302{
6303 struct bfd_link_info *info;
6304 struct elf64_aarch64_link_hash_table *htab;
6305 struct elf64_aarch64_link_hash_entry *eh;
6306 struct elf_dyn_relocs *p;
6307
6308 /* An example of a bfd_link_hash_indirect symbol is a versioned
6309 symbol: __gxx_personality_v0(bfd_link_hash_indirect)
6310 -> __gxx_personality_v0(bfd_link_hash_defined)
6311
6312 There is no need to process bfd_link_hash_indirect symbols here
6313 because we will also be presented with the concrete instance of
6314 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
6315 called to copy all relevant data from the generic to the concrete
6316 symbol instance.
6317 */
6318 if (h->root.type == bfd_link_hash_indirect)
6319 return TRUE;
6320
6321 if (h->root.type == bfd_link_hash_warning)
6322 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6323
6324 info = (struct bfd_link_info *) inf;
6325 htab = elf64_aarch64_hash_table (info);
6326
692e2b8b
WN
6327 eh = (struct elf64_aarch64_link_hash_entry *) h;
6328
6329 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
6330 it here if it is defined and referenced in a non-shared object. */
6331 if (h->type == STT_GNU_IFUNC
6332 && h->def_regular)
6333 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
6334 &eh->dyn_relocs,
6335 htab->plt_entry_size,
6336 htab->plt_header_size,
6337 GOT_ENTRY_SIZE);
6338 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
a06ea964
NC
6339 {
6340 /* Make sure this symbol is output as a dynamic symbol.
6341 Undefined weak syms won't yet be marked as dynamic. */
6342 if (h->dynindx == -1 && !h->forced_local)
6343 {
6344 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6345 return FALSE;
6346 }
6347
6348 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6349 {
6350 asection *s = htab->root.splt;
6351
6352 /* If this is the first .plt entry, make room for the special
6353 first entry. */
6354 if (s->size == 0)
6355 s->size += htab->plt_header_size;
6356
6357 h->plt.offset = s->size;
6358
6359 /* If this symbol is not defined in a regular file, and we are
6360 not generating a shared library, then set the symbol to this
6361 location in the .plt. This is required to make function
6362 pointers compare as equal between the normal executable and
6363 the shared library. */
6364 if (!info->shared && !h->def_regular)
6365 {
6366 h->root.u.def.section = s;
6367 h->root.u.def.value = h->plt.offset;
6368 }
6369
6370 /* Make room for this entry. For now we only create the
6371 small model PLT entries. We later need to find a way
6372 of relaxing into these from the large model PLT entries. */
6373 s->size += PLT_SMALL_ENTRY_SIZE;
6374
6375 /* We also need to make an entry in the .got.plt section, which
6376 will be placed in the .got section by the linker script. */
6377 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6378
6379 /* We also need to make an entry in the .rela.plt section. */
6380 htab->root.srelplt->size += RELOC_SIZE (htab);
6381
6382 /* We need to ensure that all GOT entries that serve the PLT
6383 are consecutive with the special GOT slots [0] [1] and
6384 [2]. Any additional relocations, such as
6385 R_AARCH64_TLSDESC, must be placed after the PLT related
6386 entries. We abuse the reloc_count such that during
6387 sizing we adjust reloc_count to indicate the number of
6388 PLT related reserved entries. In subsequent phases when
6389 filling in the contents of the reloc entries, PLT related
6390 entries are placed by computing their PLT index (0
6391 .. reloc_count), while other non-PLT relocs are placed
6392 at the slot indicated by reloc_count, and reloc_count is
6393 then updated. */
6394
6395 htab->root.srelplt->reloc_count++;
6396 }
6397 else
6398 {
6399 h->plt.offset = (bfd_vma) - 1;
6400 h->needs_plt = 0;
6401 }
6402 }
6403 else
6404 {
6405 h->plt.offset = (bfd_vma) - 1;
6406 h->needs_plt = 0;
6407 }
6408
a06ea964
NC
6409 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6410
6411 if (h->got.refcount > 0)
6412 {
6413 bfd_boolean dyn;
6414 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
6415
6416 h->got.offset = (bfd_vma) - 1;
6417
6418 dyn = htab->root.dynamic_sections_created;
6419
6420 /* Make sure this symbol is output as a dynamic symbol.
6421 Undefined weak syms won't yet be marked as dynamic. */
6422 if (dyn && h->dynindx == -1 && !h->forced_local)
6423 {
6424 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6425 return FALSE;
6426 }
6427
6428 if (got_type == GOT_UNKNOWN)
6429 {
6430 }
6431 else if (got_type == GOT_NORMAL)
6432 {
6433 h->got.offset = htab->root.sgot->size;
6434 htab->root.sgot->size += GOT_ENTRY_SIZE;
6435 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6436 || h->root.type != bfd_link_hash_undefweak)
6437 && (info->shared
6438 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6439 {
6440 htab->root.srelgot->size += RELOC_SIZE (htab);
6441 }
6442 }
6443 else
6444 {
6445 int indx;
6446 if (got_type & GOT_TLSDESC_GD)
6447 {
6448 eh->tlsdesc_got_jump_table_offset =
6449 (htab->root.sgotplt->size
6450 - aarch64_compute_jump_table_size (htab));
6451 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6452 h->got.offset = (bfd_vma) - 2;
6453 }
6454
6455 if (got_type & GOT_TLS_GD)
6456 {
6457 h->got.offset = htab->root.sgot->size;
6458 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6459 }
6460
6461 if (got_type & GOT_TLS_IE)
6462 {
6463 h->got.offset = htab->root.sgot->size;
6464 htab->root.sgot->size += GOT_ENTRY_SIZE;
6465 }
6466
6467 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6468 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6469 || h->root.type != bfd_link_hash_undefweak)
6470 && (info->shared
6471 || indx != 0
6472 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6473 {
6474 if (got_type & GOT_TLSDESC_GD)
6475 {
6476 htab->root.srelplt->size += RELOC_SIZE (htab);
6477 /* Note reloc_count not incremented here! We have
6478 already adjusted reloc_count for this relocation
6479 type. */
6480
6481 /* TLSDESC PLT is now needed, but not yet determined. */
6482 htab->tlsdesc_plt = (bfd_vma) - 1;
6483 }
6484
6485 if (got_type & GOT_TLS_GD)
6486 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6487
6488 if (got_type & GOT_TLS_IE)
6489 htab->root.srelgot->size += RELOC_SIZE (htab);
6490 }
6491 }
6492 }
6493 else
6494 {
6495 h->got.offset = (bfd_vma) - 1;
6496 }
6497
6498 if (eh->dyn_relocs == NULL)
6499 return TRUE;
6500
6501 /* In the shared -Bsymbolic case, discard space allocated for
6502 dynamic pc-relative relocs against symbols which turn out to be
6503 defined in regular objects. For the normal shared case, discard
6504 space for pc-relative relocs that have become local due to symbol
6505 visibility changes. */
6506
6507 if (info->shared)
6508 {
6509 /* Relocs that use pc_count are those that appear on a call
6510 insn, or certain REL relocs that can be generated via assembly.
6511 We want calls to protected symbols to resolve directly to the
6512 function rather than going via the plt. If people want
6513 function pointer comparisons to work as expected then they
6514 should avoid writing weird assembly. */
6515 if (SYMBOL_CALLS_LOCAL (info, h))
6516 {
6517 struct elf_dyn_relocs **pp;
6518
6519 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6520 {
6521 p->count -= p->pc_count;
6522 p->pc_count = 0;
6523 if (p->count == 0)
6524 *pp = p->next;
6525 else
6526 pp = &p->next;
6527 }
6528 }
6529
6530 /* Also discard relocs on undefined weak syms with non-default
6531 visibility. */
6532 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6533 {
6534 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6535 eh->dyn_relocs = NULL;
6536
6537 /* Make sure undefined weak symbols are output as dynamic
6538 symbols in PIEs. */
6539 else if (h->dynindx == -1
6540 && !h->forced_local
6541 && !bfd_elf_link_record_dynamic_symbol (info, h))
6542 return FALSE;
6543 }
6544
6545 }
6546 else if (ELIMINATE_COPY_RELOCS)
6547 {
6548 /* For the non-shared case, discard space for relocs against
6549 symbols which turn out to need copy relocs or are not
6550 dynamic. */
6551
6552 if (!h->non_got_ref
6553 && ((h->def_dynamic
6554 && !h->def_regular)
6555 || (htab->root.dynamic_sections_created
6556 && (h->root.type == bfd_link_hash_undefweak
6557 || h->root.type == bfd_link_hash_undefined))))
6558 {
6559 /* Make sure this symbol is output as a dynamic symbol.
6560 Undefined weak syms won't yet be marked as dynamic. */
6561 if (h->dynindx == -1
6562 && !h->forced_local
6563 && !bfd_elf_link_record_dynamic_symbol (info, h))
6564 return FALSE;
6565
6566 /* If that succeeded, we know we'll be keeping all the
6567 relocs. */
6568 if (h->dynindx != -1)
6569 goto keep;
6570 }
6571
6572 eh->dyn_relocs = NULL;
6573
6574 keep:;
6575 }
6576
6577 /* Finally, allocate space. */
6578 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6579 {
6580 asection *sreloc;
6581
6582 sreloc = elf_section_data (p->sec)->sreloc;
6583
6584 BFD_ASSERT (sreloc != NULL);
6585
6586 sreloc->size += p->count * RELOC_SIZE (htab);
6587 }
6588
6589 return TRUE;
6590}
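
A rough worked example of the accounting above, with hedged sizes (an 8-byte GOT entry, a 16-byte small-model PLT entry, a 32-byte PLT0 header and a 24-byte ELF64 RELA entry): the first global symbol that needs a PLT entry costs

   .plt       32 (PLT0 header) + 16 (its own PLTn entry)
   .got.plt    8 (one jump slot)
   .rela.plt  24 (one R_AARCH64_JUMP_SLOT relocation)

while a purely GOT-accessed data symbol costs only 8 bytes of .got plus, when a dynamic reloc is needed, one 24-byte R_AARCH64_GLOB_DAT entry in .rela.got.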
6591
6592
692e2b8b
WN
6593/* Allocate space in .plt, .got and associated reloc sections for
6594 local dynamic relocs. */
6595
6596static bfd_boolean
6597elf_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
6598{
6599 struct elf_link_hash_entry *h
6600 = (struct elf_link_hash_entry *) *slot;
6601
6602 if (h->type != STT_GNU_IFUNC
6603 || !h->def_regular
6604 || !h->ref_regular
6605 || !h->forced_local
6606 || h->root.type != bfd_link_hash_defined)
6607 abort ();
6608
6609 return elf64_aarch64_allocate_dynrelocs (h, inf);
6610}
a06ea964
NC
6611
6612
6613/* This is the most important function of all. Innocuously named,
6614 though! */
6615static bfd_boolean
6616elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6617 struct bfd_link_info *info)
6618{
6619 struct elf64_aarch64_link_hash_table *htab;
6620 bfd *dynobj;
6621 asection *s;
6622 bfd_boolean relocs;
6623 bfd *ibfd;
6624
6625 htab = elf64_aarch64_hash_table ((info));
6626 dynobj = htab->root.dynobj;
6627
6628 BFD_ASSERT (dynobj != NULL);
6629
6630 if (htab->root.dynamic_sections_created)
6631 {
6632 if (info->executable)
6633 {
6634 s = bfd_get_linker_section (dynobj, ".interp");
6635 if (s == NULL)
6636 abort ();
6637 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6638 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6639 }
6640 }
6641
6642 /* Set up .got offsets for local syms, and space for local dynamic
6643 relocs. */
6644 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6645 {
6646 struct elf_aarch64_local_symbol *locals = NULL;
6647 Elf_Internal_Shdr *symtab_hdr;
6648 asection *srel;
6649 unsigned int i;
6650
6651 if (!is_aarch64_elf (ibfd))
6652 continue;
6653
6654 for (s = ibfd->sections; s != NULL; s = s->next)
6655 {
6656 struct elf_dyn_relocs *p;
6657
6658 for (p = (struct elf_dyn_relocs *)
6659 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6660 {
6661 if (!bfd_is_abs_section (p->sec)
6662 && bfd_is_abs_section (p->sec->output_section))
6663 {
6664 /* Input section has been discarded, either because
6665 it is a copy of a linkonce section or due to
6666 linker script /DISCARD/, so we'll be discarding
6667 the relocs too. */
6668 }
6669 else if (p->count != 0)
6670 {
6671 srel = elf_section_data (p->sec)->sreloc;
6672 srel->size += p->count * RELOC_SIZE (htab);
6673 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6674 info->flags |= DF_TEXTREL;
6675 }
6676 }
6677 }
6678
6679 locals = elf64_aarch64_locals (ibfd);
6680 if (!locals)
6681 continue;
6682
6683 symtab_hdr = &elf_symtab_hdr (ibfd);
6684 srel = htab->root.srelgot;
6685 for (i = 0; i < symtab_hdr->sh_info; i++)
6686 {
6687 locals[i].got_offset = (bfd_vma) - 1;
6688 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6689 if (locals[i].got_refcount > 0)
6690 {
6691 unsigned got_type = locals[i].got_type;
6692 if (got_type & GOT_TLSDESC_GD)
6693 {
6694 locals[i].tlsdesc_got_jump_table_offset =
6695 (htab->root.sgotplt->size
6696 - aarch64_compute_jump_table_size (htab));
6697 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6698 locals[i].got_offset = (bfd_vma) - 2;
6699 }
6700
6701 if (got_type & GOT_TLS_GD)
6702 {
6703 locals[i].got_offset = htab->root.sgot->size;
6704 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6705 }
6706
6707 if (got_type & GOT_TLS_IE)
6708 {
6709 locals[i].got_offset = htab->root.sgot->size;
6710 htab->root.sgot->size += GOT_ENTRY_SIZE;
6711 }
6712
6713 if (got_type == GOT_UNKNOWN)
6714 {
6715 }
6716
6717 if (got_type == GOT_NORMAL)
6718 {
6719 }
6720
6721 if (info->shared)
6722 {
6723 if (got_type & GOT_TLSDESC_GD)
6724 {
6725 htab->root.srelplt->size += RELOC_SIZE (htab);
6726 /* Note RELOC_COUNT not incremented here! */
6727 htab->tlsdesc_plt = (bfd_vma) - 1;
6728 }
6729
6730 if (got_type & GOT_TLS_GD)
6731 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6732
6733 if (got_type & GOT_TLS_IE)
6734 htab->root.srelgot->size += RELOC_SIZE (htab);
6735 }
6736 }
6737 else
6738 {
6739 locals[i].got_refcount = (bfd_vma) - 1;
6740 }
6741 }
6742 }
6743
6744
6745 /* Allocate global sym .plt and .got entries, and space for global
6746 sym dynamic relocs. */
6747 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6748 info);
6749
692e2b8b
WN
6750 /* Allocate .plt and .got entries, and space for local symbols. */
6751 htab_traverse (htab->loc_hash_table,
6752 elf_aarch64_allocate_local_dynrelocs,
6753 info);
a06ea964
NC
6754
6755 /* For every jump slot reserved in the sgotplt, reloc_count is
6756 incremented. However, when we reserve space for TLS descriptors,
6757 it's not incremented, so in order to compute the space reserved
6758 for them, it suffices to multiply the reloc count by the jump
6759 slot size. */
6760
6761 if (htab->root.srelplt)
692e2b8b
WN
6762 {
6763 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6764 htab->next_irelative_index = htab->root.srelplt->reloc_count - 1;
6765 }
6766 else if (htab->root.irelplt)
6767 htab->next_irelative_index = htab->root.irelplt->reloc_count - 1;
a06ea964
NC
6768
6769 if (htab->tlsdesc_plt)
6770 {
6771 if (htab->root.splt->size == 0)
6772 htab->root.splt->size += PLT_ENTRY_SIZE;
6773
6774 htab->tlsdesc_plt = htab->root.splt->size;
6775 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6776
6777 /* If we're not using lazy TLS relocations, don't generate the
6778 GOT entry required. */
6779 if (!(info->flags & DF_BIND_NOW))
6780 {
6781 htab->dt_tlsdesc_got = htab->root.sgot->size;
6782 htab->root.sgot->size += GOT_ENTRY_SIZE;
6783 }
6784 }
6785
6786 /* We now have determined the sizes of the various dynamic sections.
6787 Allocate memory for them. */
6788 relocs = FALSE;
6789 for (s = dynobj->sections; s != NULL; s = s->next)
6790 {
6791 if ((s->flags & SEC_LINKER_CREATED) == 0)
6792 continue;
6793
6794 if (s == htab->root.splt
6795 || s == htab->root.sgot
6796 || s == htab->root.sgotplt
6797 || s == htab->root.iplt
6798 || s == htab->root.igotplt || s == htab->sdynbss)
6799 {
6800 /* Strip this section if we don't need it; see the
6801 comment below. */
6802 }
6803 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6804 {
6805 if (s->size != 0 && s != htab->root.srelplt)
6806 relocs = TRUE;
6807
6808 /* We use the reloc_count field as a counter if we need
6809 to copy relocs into the output file. */
6810 if (s != htab->root.srelplt)
6811 s->reloc_count = 0;
6812 }
6813 else
6814 {
6815 /* It's not one of our sections, so don't allocate space. */
6816 continue;
6817 }
6818
6819 if (s->size == 0)
6820 {
6821 /* If we don't need this section, strip it from the
6822 output file. This is mostly to handle .rela.bss and
6823 .rela.plt. We must create both sections in
6824 create_dynamic_sections, because they must be created
6825 before the linker maps input sections to output
6826 sections. The linker does that before
6827 adjust_dynamic_symbol is called, and it is that
6828 function which decides whether anything needs to go
6829 into these sections. */
6830
6831 s->flags |= SEC_EXCLUDE;
6832 continue;
6833 }
6834
6835 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6836 continue;
6837
6838 /* Allocate memory for the section contents. We use bfd_zalloc
6839 here in case unused entries are not reclaimed before the
6840 section's contents are written out. This should not happen,
6841 but this way if it does, we get a R_AARCH64_NONE reloc instead
6842 of garbage. */
6843 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6844 if (s->contents == NULL)
6845 return FALSE;
6846 }
6847
6848 if (htab->root.dynamic_sections_created)
6849 {
6850 /* Add some entries to the .dynamic section. We fill in the
6851 values later, in elf64_aarch64_finish_dynamic_sections, but we
6852 must add the entries now so that we get the correct size for
6853 the .dynamic section. The DT_DEBUG entry is filled in by the
6854 dynamic linker and used by the debugger. */
6855#define add_dynamic_entry(TAG, VAL) \
6856 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6857
6858 if (info->executable)
6859 {
6860 if (!add_dynamic_entry (DT_DEBUG, 0))
6861 return FALSE;
6862 }
6863
6864 if (htab->root.splt->size != 0)
6865 {
6866 if (!add_dynamic_entry (DT_PLTGOT, 0)
6867 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6868 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6869 || !add_dynamic_entry (DT_JMPREL, 0))
6870 return FALSE;
6871
6872 if (htab->tlsdesc_plt
6873 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6874 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6875 return FALSE;
6876 }
6877
6878 if (relocs)
6879 {
6880 if (!add_dynamic_entry (DT_RELA, 0)
6881 || !add_dynamic_entry (DT_RELASZ, 0)
6882 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6883 return FALSE;
6884
6885 /* If any dynamic relocs apply to a read-only section,
6886 then we need a DT_TEXTREL entry. */
6887 if ((info->flags & DF_TEXTREL) != 0)
6888 {
6889 if (!add_dynamic_entry (DT_TEXTREL, 0))
6890 return FALSE;
6891 }
6892 }
6893 }
6894#undef add_dynamic_entry
6895
6896 return TRUE;
6897
6898
6899}
6900
6901static inline void
6902elf64_aarch64_update_plt_entry (bfd *output_bfd,
6903 unsigned int r_type,
6904 bfd_byte *plt_entry, bfd_vma value)
6905{
6906 reloc_howto_type *howto;
6907 howto = elf64_aarch64_howto_from_type (r_type);
6908 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6909}
6910
6911static void
6912elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6913 struct elf64_aarch64_link_hash_table
692e2b8b
WN
6914 *htab, bfd *output_bfd,
6915 struct bfd_link_info *info)
a06ea964
NC
6916{
6917 bfd_byte *plt_entry;
6918 bfd_vma plt_index;
6919 bfd_vma got_offset;
6920 bfd_vma gotplt_entry_address;
6921 bfd_vma plt_entry_address;
6922 Elf_Internal_Rela rela;
6923 bfd_byte *loc;
692e2b8b 6924 asection *plt, *gotplt, *relplt;
a06ea964 6925
692e2b8b
WN
6926 /* When building a static executable, use .iplt, .igot.plt and
6927 .rela.iplt sections for STT_GNU_IFUNC symbols. */
6928 if (htab->root.splt != NULL)
6929 {
6930 plt = htab->root.splt;
6931 gotplt = htab->root.sgotplt;
6932 relplt = htab->root.srelplt;
6933 }
6934 else
6935 {
6936 plt = htab->root.iplt;
6937 gotplt = htab->root.igotplt;
6938 relplt = htab->root.irelplt;
6939 }
a06ea964 6940
692e2b8b
WN
6941 /* Get the index in the procedure linkage table which
6942 corresponds to this symbol. This is the index of this symbol
6943 in all the symbols for which we are making plt entries. The
6944 first entry in the procedure linkage table is reserved.
6945
6946 Get the offset into the .got table of the entry that
6947 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
6948 bytes. The first three are reserved for the dynamic linker.
6949
6950 For static executables, we don't reserve anything. */
6951
6952 if (plt == htab->root.splt)
6953 {
6954 got_offset = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6955 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
6956 }
6957 else
6958 {
6959 got_offset = h->plt.offset / htab->plt_entry_size;
6960 got_offset = got_offset * GOT_ENTRY_SIZE;
6961 }
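  /* Worked example with hedged sizes (32-byte PLT0 header, 16-byte small
     PLT entries, 8-byte GOT entries): the first PLT entry has
     h->plt.offset == 32, giving (32 - 32) / 16 == 0 and thus
     got_offset == (0 + 3) * 8 == 24, i.e. the first .got.plt slot after
     the three reserved for the dynamic linker.  */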
6962
6963 plt_entry = plt->contents + h->plt.offset;
6964 plt_entry_address = plt->output_section->vma
6965 + plt->output_section->output_offset + h->plt.offset;
6966 gotplt_entry_address = gotplt->output_section->vma +
6967 gotplt->output_offset + got_offset;
a06ea964
NC
6968
6969 /* Copy in the boiler-plate for the PLTn entry. */
6970 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6971
6972 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6973 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6974 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6975 plt_entry,
6976 PG (gotplt_entry_address) -
6977 PG (plt_entry_address));
6978
6979 /* Fill in the lo12 bits for the load from the pltgot. */
6980 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6981 plt_entry + 4,
6982 PG_OFFSET (gotplt_entry_address));
6983
6984 /* Fill in the lo12 bits for the add from the pltgot entry. */
6985 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6986 plt_entry + 8,
6987 PG_OFFSET (gotplt_entry_address));
6988
6989 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6990 bfd_put_64 (output_bfd,
692e2b8b
WN
6991 plt->output_section->vma + plt->output_offset,
6992 gotplt->contents + got_offset);
a06ea964 6993
a06ea964 6994 rela.r_offset = gotplt_entry_address;
692e2b8b
WN
6995
6996 if (h->dynindx == -1
6997 || ((info->executable
6998 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6999 && h->def_regular
7000 && h->type == STT_GNU_IFUNC))
7001 {
7002 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7003 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7004 rela.r_info = ELF64_R_INFO (0, R_AARCH64_IRELATIVE);
7005 rela.r_addend = (h->root.u.def.value
7006 + h->root.u.def.section->output_section->vma
7007 + h->root.u.def.section->output_offset);
7008 /* R_AARCH64_IRELATIVE comes last. */
7009 plt_index = htab->next_irelative_index--;
7010 }
7011 else
7012 {
7013 /* Fill in the entry in the .rela.plt section. */
7014 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
7015 rela.r_addend = 0;
7016 plt_index = htab->next_jump_slot_index++;
7017 }
a06ea964
NC
7018
7019 /* Compute the relocation entry to use based on the PLT index and do
7020 not adjust reloc_count. The reloc_count has already been adjusted
7021 to account for this entry. */
692e2b8b 7022 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
a06ea964
NC
7023 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
7024}
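
Taken together, the three relocations patched in above (ADR_PREL_PG_HI21 at offset 0, LDST64_ABS_LO12_NC at offset 4 and ADD_ABS_LO12_NC at offset 8) correspond to a small-model PLTn stub of roughly the following shape; this is a sketch only, the authoritative boiler-plate being the elf64_aarch64_small_plt_entry template defined earlier in the file:

   adrp x16, PLT_GOT + n * 8               // page of this symbol's .got.plt slot
   ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
   add  x16, x16, #:lo12:PLT_GOT + n * 8
   br   x17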
7025
7026/* Size sections even though they're not dynamic. We use it to set up
7027 _TLS_MODULE_BASE_, if needed. */
7028
7029static bfd_boolean
7030elf64_aarch64_always_size_sections (bfd *output_bfd,
7031 struct bfd_link_info *info)
7032{
7033 asection *tls_sec;
7034
7035 if (info->relocatable)
7036 return TRUE;
7037
7038 tls_sec = elf_hash_table (info)->tls_sec;
7039
7040 if (tls_sec)
7041 {
7042 struct elf_link_hash_entry *tlsbase;
7043
7044 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7045 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7046
7047 if (tlsbase)
7048 {
7049 struct bfd_link_hash_entry *h = NULL;
7050 const struct elf_backend_data *bed =
7051 get_elf_backend_data (output_bfd);
7052
7053 if (!(_bfd_generic_link_add_one_symbol
7054 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7055 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7056 return FALSE;
7057
7058 tlsbase->type = STT_TLS;
7059 tlsbase = (struct elf_link_hash_entry *) h;
7060 tlsbase->def_regular = 1;
7061 tlsbase->other = STV_HIDDEN;
7062 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7063 }
7064 }
7065
7066 return TRUE;
7067}
7068
7069/* Finish up dynamic symbol handling. We set the contents of various
7070 dynamic sections here. */
7071static bfd_boolean
7072elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7073 struct bfd_link_info *info,
7074 struct elf_link_hash_entry *h,
7075 Elf_Internal_Sym *sym)
7076{
7077 struct elf64_aarch64_link_hash_table *htab;
7078 htab = elf64_aarch64_hash_table (info);
7079
7080 if (h->plt.offset != (bfd_vma) - 1)
7081 {
692e2b8b
WN
7082 asection *plt, *gotplt, *relplt;
7083
a06ea964
NC
7084 /* This symbol has an entry in the procedure linkage table. Set
7085 it up. */
7086
692e2b8b
WN
7087 /* When building a static executable, use .iplt, .igot.plt and
7088 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7089 if (htab->root.splt != NULL)
7090 {
7091 plt = htab->root.splt;
7092 gotplt = htab->root.sgotplt;
7093 relplt = htab->root.srelplt;
7094 }
7095 else
7096 {
7097 plt = htab->root.iplt;
7098 gotplt = htab->root.igotplt;
7099 relplt = htab->root.irelplt;
7100 }
7101
7102 /* This symbol has an entry in the procedure linkage table. Set
7103 it up. */
7104 if ((h->dynindx == -1
7105 && !((h->forced_local || info->executable)
7106 && h->def_regular
7107 && h->type == STT_GNU_IFUNC))
7108 || plt == NULL
7109 || gotplt == NULL
7110 || relplt == NULL)
a06ea964
NC
7111 abort ();
7112
692e2b8b 7113 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
a06ea964
NC
7114 if (!h->def_regular)
7115 {
7116 /* Mark the symbol as undefined, rather than as defined in
7117 the .plt section. Leave the value alone. This is a clue
7118 for the dynamic linker, to make function pointer
7119 comparisons work between an application and shared
7120 library. */
7121 sym->st_shndx = SHN_UNDEF;
7122 }
7123 }
7124
7125 if (h->got.offset != (bfd_vma) - 1
7126 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7127 {
7128 Elf_Internal_Rela rela;
7129 bfd_byte *loc;
7130
7131 /* This symbol has an entry in the global offset table. Set it
7132 up. */
7133 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7134 abort ();
7135
7136 rela.r_offset = (htab->root.sgot->output_section->vma
7137 + htab->root.sgot->output_offset
7138 + (h->got.offset & ~(bfd_vma) 1));
7139
7140 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7141 {
7142 if (!h->def_regular)
7143 return FALSE;
7144
7145 BFD_ASSERT ((h->got.offset & 1) != 0);
7146 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
7147 rela.r_addend = (h->root.u.def.value
7148 + h->root.u.def.section->output_section->vma
7149 + h->root.u.def.section->output_offset);
7150 }
7151 else
7152 {
7153 BFD_ASSERT ((h->got.offset & 1) == 0);
7154 bfd_put_64 (output_bfd, (bfd_vma) 0,
7155 htab->root.sgot->contents + h->got.offset);
7156 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
7157 rela.r_addend = 0;
7158 }
7159
7160 loc = htab->root.srelgot->contents;
7161 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7162 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
7163 }
7164
7165 if (h->needs_copy)
7166 {
7167 Elf_Internal_Rela rela;
7168 bfd_byte *loc;
7169
7170 /* This symbol needs a copy reloc. Set it up. */
7171
7172 if (h->dynindx == -1
7173 || (h->root.type != bfd_link_hash_defined
7174 && h->root.type != bfd_link_hash_defweak)
7175 || htab->srelbss == NULL)
7176 abort ();
7177
7178 rela.r_offset = (h->root.u.def.value
7179 + h->root.u.def.section->output_section->vma
7180 + h->root.u.def.section->output_offset);
7181 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
7182 rela.r_addend = 0;
7183 loc = htab->srelbss->contents;
7184 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7185 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
7186 }
7187
7188 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7189 be NULL for local symbols. */
7190 if (sym != NULL
9637f6ef 7191 && (h == elf_hash_table (info)->hdynamic
a06ea964
NC
7192 || h == elf_hash_table (info)->hgot))
7193 sym->st_shndx = SHN_ABS;
7194
7195 return TRUE;
7196}
7197
692e2b8b
WN
7198/* Finish up local dynamic symbol handling. We set the contents of
7199 various dynamic sections here. */
7200
7201static bfd_boolean
7202elf_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7203{
7204 struct elf_link_hash_entry *h
7205 = (struct elf_link_hash_entry *) *slot;
7206 struct bfd_link_info *info
7207 = (struct bfd_link_info *) inf;
7208
7209 return elf64_aarch64_finish_dynamic_symbol (info->output_bfd,
7210 info, h, NULL);
7211}
7212
a06ea964
NC
7213static void
7214elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7215 struct elf64_aarch64_link_hash_table
7216 *htab)
7217{
7218 /* Fill in PLT0. FIXME: RR: note this doesn't distinguish between
7219 small and large PLTs and at the moment just generates
7220 the small PLT. */
7221
7222 /* PLT0 of the small PLT looks like this -
7223 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7224 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7225 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7226 // symbol resolver
7227 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7228 // GOTPLT entry for this.
7229 br x17
7230 */
7231 bfd_vma plt_got_base;
7232 bfd_vma plt_base;
7233
7234
7235 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
7236 PLT_ENTRY_SIZE);
7237 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7238 PLT_ENTRY_SIZE;
7239
7240 plt_got_base = (htab->root.sgotplt->output_section->vma
7241 + htab->root.sgotplt->output_offset);
7242
  plt_base = htab->root.splt->output_section->vma
    + htab->root.splt->output_offset;

  /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
     ADRP:   ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
  elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
                                  htab->root.splt->contents + 4,
                                  PG (plt_got_base + 16) - PG (plt_base + 4));

  elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
                                  htab->root.splt->contents + 8,
                                  PG_OFFSET (plt_got_base + 16));

  elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
                                  htab->root.splt->contents + 12,
                                  PG_OFFSET (plt_got_base + 16));
}
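
/* Illustrative note (added; not part of the original source): with
   hypothetical addresses GOTPLT = 0x412000 and PLT = 0x400400, the ADRP
   at PLT0 + 4 targets GOTPLT + 16 = 0x412010, so its 21-bit immediate is

     (PG (0x412010) - PG (0x400404)) >> 12
       = (0x412000 - 0x400000) >> 12
       = 0x12   (18 pages),

   while the following LDR and ADD carry the remaining low 12 bits,
   PG_OFFSET (0x412010) = 0x010.  The shift and masking down to 21 bits
   are applied when the immediate is re-encoded into the instruction.  */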

static bfd_boolean
elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
                                       struct bfd_link_info *info)
{
  struct elf64_aarch64_link_hash_table *htab;
  bfd *dynobj;
  asection *sdyn;

  htab = elf64_aarch64_hash_table (info);
  dynobj = htab->root.dynobj;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (htab->root.dynamic_sections_created)
    {
      Elf64_External_Dyn *dyncon, *dynconend;

      if (sdyn == NULL || htab->root.sgot == NULL)
        abort ();

      dyncon = (Elf64_External_Dyn *) sdyn->contents;
      dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
      for (; dyncon < dynconend; dyncon++)
        {
          Elf_Internal_Dyn dyn;
          asection *s;

          bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);

          switch (dyn.d_tag)
            {
            default:
              continue;

            case DT_PLTGOT:
              s = htab->root.sgotplt;
              dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
              break;

            case DT_JMPREL:
              dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
              break;

            case DT_PLTRELSZ:
              s = htab->root.srelplt->output_section;
              dyn.d_un.d_val = s->size;
              break;

            case DT_RELASZ:
              /* The procedure linkage table relocs (DT_JMPREL) should
                 not be included in the overall relocs (DT_RELA).
                 Therefore, we override the DT_RELASZ entry here to
                 make it not include the JMPREL relocs.  Since the
                 linker script arranges for .rela.plt to follow all
                 other relocation sections, we don't have to worry
                 about changing the DT_RELA entry.  */
              if (htab->root.srelplt != NULL)
                {
                  s = htab->root.srelplt->output_section;
                  dyn.d_un.d_val -= s->size;
                }
              break;
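
              /* Illustrative note (added; not part of the original
                 source): with a hypothetical layout where the combined
                 dynamic relocations span 0x210 bytes and .rela.plt
                 accounts for 0x90 of them, the DT_RELASZ reported to
                 the dynamic linker becomes 0x210 - 0x90 = 0x180, while
                 DT_PLTRELSZ above still reports the full 0x90.  */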

            case DT_TLSDESC_PLT:
              s = htab->root.splt;
              dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
                + htab->tlsdesc_plt;
              break;

            case DT_TLSDESC_GOT:
              s = htab->root.sgot;
              dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
                + htab->dt_tlsdesc_got;
              break;
            }

          bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
        }

    }

  /* Fill in the special first entry in the procedure linkage table.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      elf64_aarch64_init_small_plt0_entry (output_bfd, htab);

      elf_section_data (htab->root.splt->output_section)->
        this_hdr.sh_entsize = htab->plt_entry_size;


      if (htab->tlsdesc_plt)
        {
          bfd_put_64 (output_bfd, (bfd_vma) 0,
                      htab->root.sgot->contents + htab->dt_tlsdesc_got);

          memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
                  elf64_aarch64_tlsdesc_small_plt_entry,
                  sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
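
          /* Illustrative note (added; not part of the original source):
             the template copied above is the lazy TLS-descriptor
             trampoline.  The patches below give its ADRP/LDR/ADD
             instructions their real operands: one pair loads the
             resolver address that the dynamic linker will deposit in
             the DT_TLSDESC_GOT slot, the other materialises the
             address of the GOT/PLT, after which the stub branches to
             the resolver.  */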

          {
            bfd_vma adrp1_addr =
              htab->root.splt->output_section->vma
              + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;

            bfd_vma adrp2_addr =
              htab->root.splt->output_section->vma
              + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;

            bfd_vma got_addr =
              htab->root.sgot->output_section->vma
              + htab->root.sgot->output_offset;

            bfd_vma pltgot_addr =
              htab->root.sgotplt->output_section->vma
              + htab->root.sgotplt->output_offset;

            bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
            bfd_vma opcode;

            /* adrp x2, DT_TLSDESC_GOT */
            opcode = bfd_get_32 (output_bfd,
                                 htab->root.splt->contents
                                 + htab->tlsdesc_plt + 4);
            opcode = reencode_adr_imm
              (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
            bfd_put_32 (output_bfd, opcode,
                        htab->root.splt->contents + htab->tlsdesc_plt + 4);

            /* adrp x3, 0 */
            opcode = bfd_get_32 (output_bfd,
                                 htab->root.splt->contents
                                 + htab->tlsdesc_plt + 8);
            opcode = reencode_adr_imm
              (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
            bfd_put_32 (output_bfd, opcode,
                        htab->root.splt->contents + htab->tlsdesc_plt + 8);

            /* ldr x2, [x2, #0] */
            opcode = bfd_get_32 (output_bfd,
                                 htab->root.splt->contents
                                 + htab->tlsdesc_plt + 12);
            opcode = reencode_ldst_pos_imm (opcode,
                                            PG_OFFSET (dt_tlsdesc_got) >> 3);
            bfd_put_32 (output_bfd, opcode,
                        htab->root.splt->contents + htab->tlsdesc_plt + 12);

            /* add x3, x3, 0 */
            opcode = bfd_get_32 (output_bfd,
                                 htab->root.splt->contents
                                 + htab->tlsdesc_plt + 16);
            opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
            bfd_put_32 (output_bfd, opcode,
                        htab->root.splt->contents + htab->tlsdesc_plt + 16);
          }
        }
    }

  if (htab->root.sgotplt)
    {
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
        {
          (*_bfd_error_handler)
            (_("discarded output section: `%A'"), htab->root.sgotplt);
          return FALSE;
        }

      /* Fill in the first three entries in the global offset table.  */
      if (htab->root.sgotplt->size > 0)
        {
          /* Set the first entry in the global offset table to the address of
             the dynamic section.  */
          if (sdyn == NULL)
            bfd_put_64 (output_bfd, (bfd_vma) 0,
                        htab->root.sgotplt->contents);
          else
            bfd_put_64 (output_bfd,
                        sdyn->output_section->vma + sdyn->output_offset,
                        htab->root.sgotplt->contents);
          /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
          bfd_put_64 (output_bfd,
                      (bfd_vma) 0,
                      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
          bfd_put_64 (output_bfd,
                      (bfd_vma) 0,
                      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
        }
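
      /* Illustrative note (added; not part of the original source):
         GOT[0] is conventionally the address of _DYNAMIC, while GOT[1]
         and GOT[2] are left zero here and filled in at run time by the
         dynamic linker (typically with its link-map pointer and the
         address of its lazy resolver) for use by the PLT0 stub.  */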

      elf_section_data (htab->root.sgotplt->output_section)->
        this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols.  */
  htab_traverse (htab->loc_hash_table,
                 elf_aarch64_finish_local_dynamic_symbol,
                 info);

  return TRUE;
}

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
                           const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
}
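
/* Illustrative note (added; not part of the original source): assuming
   the usual sizes for this target of a 32-byte PLT0 header
   (PLT_ENTRY_SIZE) and 16-byte small PLT stubs (PLT_SMALL_ENTRY_SIZE),
   stub I = 2 in a .plt placed at 0x400430 would be reported at
   0x400430 + 32 + 2 * 16 = 0x400470.  */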

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf64_aarch64_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
                               Elf_Internal_Sym *sym,
                               const char **namep ATTRIBUTE_UNUSED,
                               flagword *flagsp ATTRIBUTE_UNUSED,
                               asection **secp ATTRIBUTE_UNUSED,
                               bfd_vma *valp ATTRIBUTE_UNUSED)
{
  if ((abfd->flags & DYNAMIC) == 0
      && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
          || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
    elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;

  return TRUE;
}
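
/* Illustrative note (added; not part of the original source): the
   has_gnu_symbols flag recorded above is consulted when the output ELF
   header is post-processed, so that EI_OSABI can be marked ELFOSABI_GNU,
   since STT_GNU_IFUNC and STB_GNU_UNIQUE are GNU extensions that a
   plain ELFOSABI_NONE consumer would not understand.  */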

/* We use this so we can override certain functions
   (though currently we don't).  */

const struct elf_size_info elf64_aarch64_size_info =
{
  sizeof (Elf64_External_Ehdr),
  sizeof (Elf64_External_Phdr),
  sizeof (Elf64_External_Shdr),
  sizeof (Elf64_External_Rel),
  sizeof (Elf64_External_Rela),
  sizeof (Elf64_External_Sym),
  sizeof (Elf64_External_Dyn),
  sizeof (Elf_External_Note),
  4,				/* Hash table entry size.  */
  1,				/* Internal relocs per external relocs.  */
  64,				/* Arch size.  */
  3,				/* Log_file_align.  */
  ELFCLASS64, EV_CURRENT,
  bfd_elf64_write_out_phdrs,
  bfd_elf64_write_shdrs_and_ehdr,
  bfd_elf64_checksum_contents,
  bfd_elf64_write_relocs,
  bfd_elf64_swap_symbol_in,
  bfd_elf64_swap_symbol_out,
  bfd_elf64_slurp_reloc_table,
  bfd_elf64_slurp_symbol_table,
  bfd_elf64_swap_dyn_in,
  bfd_elf64_swap_dyn_out,
  bfd_elf64_swap_reloc_in,
  bfd_elf64_swap_reloc_out,
  bfd_elf64_swap_reloca_in,
  bfd_elf64_swap_reloca_out
};

#define ELF_ARCH			bfd_arch_aarch64
#define ELF_MACHINE_CODE		EM_AARCH64
#define ELF_MAXPAGESIZE			0x10000
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf64_close_and_cleanup \
  elf64_aarch64_close_and_cleanup

#define bfd_elf64_bfd_copy_private_bfd_data \
  elf64_aarch64_copy_private_bfd_data

#define bfd_elf64_bfd_free_cached_info \
  elf64_aarch64_bfd_free_cached_info

#define bfd_elf64_bfd_is_target_special_symbol \
  elf64_aarch64_is_target_special_symbol

#define bfd_elf64_bfd_link_hash_table_create \
  elf64_aarch64_link_hash_table_create

#define bfd_elf64_bfd_link_hash_table_free \
  elf64_aarch64_hash_table_free

#define bfd_elf64_bfd_merge_private_bfd_data \
  elf64_aarch64_merge_private_bfd_data

#define bfd_elf64_bfd_print_private_bfd_data \
  elf64_aarch64_print_private_bfd_data

#define bfd_elf64_bfd_reloc_type_lookup \
  elf64_aarch64_reloc_type_lookup

#define bfd_elf64_bfd_reloc_name_lookup \
  elf64_aarch64_reloc_name_lookup

#define bfd_elf64_bfd_set_private_flags \
  elf64_aarch64_set_private_flags

#define bfd_elf64_find_inliner_info \
  elf64_aarch64_find_inliner_info

#define bfd_elf64_find_nearest_line \
  elf64_aarch64_find_nearest_line

#define bfd_elf64_mkobject \
  elf64_aarch64_mkobject

#define bfd_elf64_new_section_hook \
  elf64_aarch64_new_section_hook

#define elf_backend_adjust_dynamic_symbol \
  elf64_aarch64_adjust_dynamic_symbol

#define elf_backend_always_size_sections \
  elf64_aarch64_always_size_sections

#define elf_backend_check_relocs \
  elf64_aarch64_check_relocs

#define elf_backend_copy_indirect_symbol \
  elf64_aarch64_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash table.  */
#define elf_backend_create_dynamic_sections \
  elf64_aarch64_create_dynamic_sections

#define elf_backend_init_index_section \
  _bfd_elf_init_2_index_sections

#define elf_backend_finish_dynamic_sections \
  elf64_aarch64_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol \
  elf64_aarch64_finish_dynamic_symbol

#define elf_backend_gc_sweep_hook \
  elf64_aarch64_gc_sweep_hook

#define elf_backend_object_p \
  elf64_aarch64_object_p

#define elf_backend_output_arch_local_syms \
  elf64_aarch64_output_arch_local_syms

#define elf_backend_plt_sym_val \
  elf64_aarch64_plt_sym_val

#define elf_backend_post_process_headers \
  elf64_aarch64_post_process_headers

#define elf_backend_relocate_section \
  elf64_aarch64_relocate_section

#define elf_backend_reloc_type_class \
  elf64_aarch64_reloc_type_class

#define elf_backend_section_flags \
  elf64_aarch64_section_flags

#define elf_backend_section_from_shdr \
  elf64_aarch64_section_from_shdr

#define elf_backend_size_dynamic_sections \
  elf64_aarch64_size_dynamic_sections

#define elf_backend_size_info \
  elf64_aarch64_size_info

#define elf_backend_add_symbol_hook \
  elf64_aarch64_add_symbol_hook

#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_may_use_rel_p	0
#define elf_backend_may_use_rela_p	1
#define elf_backend_default_use_rela_p	1
#define elf_backend_got_header_size	(GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack	0

#undef  elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section	".ARM.attributes"

#include "elf64-target.h"