1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
 23    Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51    traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62    global and local TLS symbols. Note that this is different from
 63    non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
 68    on the module entry. The loader will subsequently fix up this
 69    relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
 73    will subsequently fix up the offset. For local TLS symbols the static
 74    linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
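
    As a purely illustrative aside (added commentary: the layouts below are
    assumed from the generic ELF TLS ABI rather than taken from this file,
    and the names are invented), the two GOT slots can be pictured as:

      struct got_tls_index_sketch { bfd_vma module;   bfd_vma offset;   };
      struct got_tlsdesc_sketch   { bfd_vma resolver; bfd_vma argument; };

    with the relocations described above asking the loader to fill in
    whichever fields are not fixed up at static link time.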
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
 101   spotted. The local symbol data structures are created once, when the
 102   first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
 109   For each global with a positive reference count we allocate a double
 110   GOT slot. For a traditional TLS symbol we allocate space for two
 111   relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
 117   Iterate over all input BFDs, look in the local symbol data structures
 118   constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128   relocations are emitted only the first time a TLS symbol is
 129   encountered. The implementation uses the LSB of the GOT offset to
 130   flag that the relevant GOT relocations for a symbol have been
 131   emitted. All TLS code that uses the GOT offset must mask out this
 132   flag bit before using the offset (see the sketch after this comment).
133
134 elfNN_aarch64_final_link_relocate ()
135
 136   Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
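
/* Purely illustrative sketch, not part of the original source: the low bit
   of a recorded GOT offset doubles as a "GOT relocations already emitted"
   flag, so code consuming the offset strips it first.  A hypothetical
   helper making that explicit could look like the following (the name is
   invented; the real code simply masks the bit inline).  */
#if 0 /* Example only, never compiled.  */
static bfd_vma
aarch64_got_offset_without_emitted_flag (bfd_vma recorded)
{
  /* Bit 0 is the "emitted" flag; GOT entries are word aligned, so the
     genuine offset is always even.  */
  return recorded & ~(bfd_vma) 1;
}
#endif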
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
190 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
191
192 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
193 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
205
206 #define ELIMINATE_COPY_RELOCS 0
207
 208 /* Return the size of a relocation entry. HTAB is the bfd's
 209    elf_aarch64_link_hash_table. */
210 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
211
212 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
213 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
214 #define PLT_ENTRY_SIZE (32)
215 #define PLT_SMALL_ENTRY_SIZE (16)
216 #define PLT_TLSDESC_ENTRY_SIZE (32)
217
 218 /* Encoding of the nop instruction. */
219 #define INSN_NOP 0xd503201f
220
221 #define aarch64_compute_jump_table_size(htab) \
222 (((htab)->root.srelplt == NULL) ? 0 \
223 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
224
 225 /* The first entry in a procedure linkage table looks like this. These
 226    PLT entries are used if the distance between the PLTGOT and the PLT
 227    is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
 228    in x16 and needs to work out PLTGOT[1] by using an address of
 229    [x16,#-GOT_ENTRY_SIZE].  */
230 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
231 {
232 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
233 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
234 #if ARCH_SIZE == 64
235 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
236 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
237 #else
238 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
239 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
240 #endif
241 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
244 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 };
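
/* Illustrative note (added commentary): the byte arrays above and below are
   little-endian encodings of 32-bit A64 instructions; for example INSN_NOP,
   0xd503201f, appears as the bytes 0x1f, 0x20, 0x03, 0xd5.  A hypothetical
   helper writing one such instruction word could look like this.  */
#if 0 /* Example only, never compiled.  */
static void
put_a64_insn_sketch (uint32_t insn, bfd_byte *where)
{
  /* A64 instructions are stored little-endian regardless of the data
     endianness of the target.  */
  bfd_putl32 (insn, where);
}
#endif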
246
 247 /* A per-function entry in a procedure linkage table looks like this.
 248    These PLT entries are used if the distance between the PLTGOT and
 249    the PLT is < 4GB. */
250 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
251 {
252 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
253 #if ARCH_SIZE == 64
254 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
255 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
256 #else
257 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
258 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
259 #endif
260 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
261 };
262
263 static const bfd_byte
264 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
265 {
266 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
267 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
268 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
269 #if ARCH_SIZE == 64
270 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
271 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
272 #else
273 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
274 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
275 #endif
276 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
277 0x1f, 0x20, 0x03, 0xd5, /* nop */
278 0x1f, 0x20, 0x03, 0xd5, /* nop */
279 };
280
281 #define elf_info_to_howto elfNN_aarch64_info_to_howto
282 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
283
284 #define AARCH64_ELF_ABI_VERSION 0
285
286 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
287 #define ALL_ONES (~ (bfd_vma) 0)
288
 289 /* Indexed by the bfd internal reloc enumerators.
290 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
291 in reloc.c. */
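
/* Added note: entry I of this table corresponds to the bfd enumerator
   BFD_RELOC_AARCH64_RELOC_START + I, and the lookup helpers further down
   (elfNN_aarch64_bfd_reloc_from_howto and friends) rely on exactly that
   correspondence, e.g.

     howto = &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
*/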
292
293 static reloc_howto_type elfNN_aarch64_howto_table[] =
294 {
295 EMPTY_HOWTO (0),
296
297 /* Basic data relocations. */
298
299 #if ARCH_SIZE == 64
300 HOWTO (R_AARCH64_NULL, /* type */
301 0, /* rightshift */
302 3, /* size (0 = byte, 1 = short, 2 = long) */
303 0, /* bitsize */
304 FALSE, /* pc_relative */
305 0, /* bitpos */
306 complain_overflow_dont, /* complain_on_overflow */
307 bfd_elf_generic_reloc, /* special_function */
308 "R_AARCH64_NULL", /* name */
309 FALSE, /* partial_inplace */
310 0, /* src_mask */
311 0, /* dst_mask */
312 FALSE), /* pcrel_offset */
313 #else
314 HOWTO (R_AARCH64_NONE, /* type */
315 0, /* rightshift */
316 3, /* size (0 = byte, 1 = short, 2 = long) */
317 0, /* bitsize */
318 FALSE, /* pc_relative */
319 0, /* bitpos */
320 complain_overflow_dont, /* complain_on_overflow */
321 bfd_elf_generic_reloc, /* special_function */
322 "R_AARCH64_NONE", /* name */
323 FALSE, /* partial_inplace */
324 0, /* src_mask */
325 0, /* dst_mask */
326 FALSE), /* pcrel_offset */
327 #endif
328
329 /* .xword: (S+A) */
330 HOWTO64 (AARCH64_R (ABS64), /* type */
331 0, /* rightshift */
332 4, /* size (4 = long long) */
333 64, /* bitsize */
334 FALSE, /* pc_relative */
335 0, /* bitpos */
336 complain_overflow_unsigned, /* complain_on_overflow */
337 bfd_elf_generic_reloc, /* special_function */
338 AARCH64_R_STR (ABS64), /* name */
339 FALSE, /* partial_inplace */
340 ALL_ONES, /* src_mask */
341 ALL_ONES, /* dst_mask */
342 FALSE), /* pcrel_offset */
343
344 /* .word: (S+A) */
345 HOWTO (AARCH64_R (ABS32), /* type */
346 0, /* rightshift */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
348 32, /* bitsize */
349 FALSE, /* pc_relative */
350 0, /* bitpos */
351 complain_overflow_unsigned, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 AARCH64_R_STR (ABS32), /* name */
354 FALSE, /* partial_inplace */
355 0xffffffff, /* src_mask */
356 0xffffffff, /* dst_mask */
357 FALSE), /* pcrel_offset */
358
359 /* .half: (S+A) */
360 HOWTO (AARCH64_R (ABS16), /* type */
361 0, /* rightshift */
362 1, /* size (0 = byte, 1 = short, 2 = long) */
363 16, /* bitsize */
364 FALSE, /* pc_relative */
365 0, /* bitpos */
366 complain_overflow_unsigned, /* complain_on_overflow */
367 bfd_elf_generic_reloc, /* special_function */
368 AARCH64_R_STR (ABS16), /* name */
369 FALSE, /* partial_inplace */
370 0xffff, /* src_mask */
371 0xffff, /* dst_mask */
372 FALSE), /* pcrel_offset */
373
374 /* .xword: (S+A-P) */
375 HOWTO64 (AARCH64_R (PREL64), /* type */
376 0, /* rightshift */
377 4, /* size (4 = long long) */
378 64, /* bitsize */
379 TRUE, /* pc_relative */
380 0, /* bitpos */
381 complain_overflow_signed, /* complain_on_overflow */
382 bfd_elf_generic_reloc, /* special_function */
383 AARCH64_R_STR (PREL64), /* name */
384 FALSE, /* partial_inplace */
385 ALL_ONES, /* src_mask */
386 ALL_ONES, /* dst_mask */
387 TRUE), /* pcrel_offset */
388
389 /* .word: (S+A-P) */
390 HOWTO (AARCH64_R (PREL32), /* type */
391 0, /* rightshift */
392 2, /* size (0 = byte, 1 = short, 2 = long) */
393 32, /* bitsize */
394 TRUE, /* pc_relative */
395 0, /* bitpos */
396 complain_overflow_signed, /* complain_on_overflow */
397 bfd_elf_generic_reloc, /* special_function */
398 AARCH64_R_STR (PREL32), /* name */
399 FALSE, /* partial_inplace */
400 0xffffffff, /* src_mask */
401 0xffffffff, /* dst_mask */
402 TRUE), /* pcrel_offset */
403
404 /* .half: (S+A-P) */
405 HOWTO (AARCH64_R (PREL16), /* type */
406 0, /* rightshift */
407 1, /* size (0 = byte, 1 = short, 2 = long) */
408 16, /* bitsize */
409 TRUE, /* pc_relative */
410 0, /* bitpos */
411 complain_overflow_signed, /* complain_on_overflow */
412 bfd_elf_generic_reloc, /* special_function */
413 AARCH64_R_STR (PREL16), /* name */
414 FALSE, /* partial_inplace */
415 0xffff, /* src_mask */
416 0xffff, /* dst_mask */
417 TRUE), /* pcrel_offset */
418
419 /* Group relocations to create a 16, 32, 48 or 64 bit
420 unsigned data or abs address inline. */
421
422 /* MOVZ: ((S+A) >> 0) & 0xffff */
423 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
424 0, /* rightshift */
425 2, /* size (0 = byte, 1 = short, 2 = long) */
426 16, /* bitsize */
427 FALSE, /* pc_relative */
428 0, /* bitpos */
429 complain_overflow_unsigned, /* complain_on_overflow */
430 bfd_elf_generic_reloc, /* special_function */
431 AARCH64_R_STR (MOVW_UABS_G0), /* name */
432 FALSE, /* partial_inplace */
433 0xffff, /* src_mask */
434 0xffff, /* dst_mask */
435 FALSE), /* pcrel_offset */
436
437 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
438 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 16, /* bitsize */
442 FALSE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_dont, /* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
447 FALSE, /* partial_inplace */
448 0xffff, /* src_mask */
449 0xffff, /* dst_mask */
450 FALSE), /* pcrel_offset */
451
452 /* MOVZ: ((S+A) >> 16) & 0xffff */
453 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
454 16, /* rightshift */
455 2, /* size (0 = byte, 1 = short, 2 = long) */
456 16, /* bitsize */
457 FALSE, /* pc_relative */
458 0, /* bitpos */
459 complain_overflow_unsigned, /* complain_on_overflow */
460 bfd_elf_generic_reloc, /* special_function */
461 AARCH64_R_STR (MOVW_UABS_G1), /* name */
462 FALSE, /* partial_inplace */
463 0xffff, /* src_mask */
464 0xffff, /* dst_mask */
465 FALSE), /* pcrel_offset */
466
467 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
468 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
469 16, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 16, /* bitsize */
472 FALSE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_dont, /* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
477 FALSE, /* partial_inplace */
478 0xffff, /* src_mask */
479 0xffff, /* dst_mask */
480 FALSE), /* pcrel_offset */
481
482 /* MOVZ: ((S+A) >> 32) & 0xffff */
483 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
484 32, /* rightshift */
485 2, /* size (0 = byte, 1 = short, 2 = long) */
486 16, /* bitsize */
487 FALSE, /* pc_relative */
488 0, /* bitpos */
489 complain_overflow_unsigned, /* complain_on_overflow */
490 bfd_elf_generic_reloc, /* special_function */
491 AARCH64_R_STR (MOVW_UABS_G2), /* name */
492 FALSE, /* partial_inplace */
493 0xffff, /* src_mask */
494 0xffff, /* dst_mask */
495 FALSE), /* pcrel_offset */
496
497 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
498 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
499 32, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 16, /* bitsize */
502 FALSE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_dont, /* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
507 FALSE, /* partial_inplace */
508 0xffff, /* src_mask */
509 0xffff, /* dst_mask */
510 FALSE), /* pcrel_offset */
511
512 /* MOVZ: ((S+A) >> 48) & 0xffff */
513 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
514 48, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 16, /* bitsize */
517 FALSE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_unsigned, /* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 AARCH64_R_STR (MOVW_UABS_G3), /* name */
522 FALSE, /* partial_inplace */
523 0xffff, /* src_mask */
524 0xffff, /* dst_mask */
525 FALSE), /* pcrel_offset */
526
527 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
528 signed data or abs address inline. Will change instruction
529 to MOVN or MOVZ depending on sign of calculated value. */
530
531 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
532 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
533 0, /* rightshift */
534 2, /* size (0 = byte, 1 = short, 2 = long) */
535 16, /* bitsize */
536 FALSE, /* pc_relative */
537 0, /* bitpos */
538 complain_overflow_signed, /* complain_on_overflow */
539 bfd_elf_generic_reloc, /* special_function */
540 AARCH64_R_STR (MOVW_SABS_G0), /* name */
541 FALSE, /* partial_inplace */
542 0xffff, /* src_mask */
543 0xffff, /* dst_mask */
544 FALSE), /* pcrel_offset */
545
546 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
547 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
548 16, /* rightshift */
549 2, /* size (0 = byte, 1 = short, 2 = long) */
550 16, /* bitsize */
551 FALSE, /* pc_relative */
552 0, /* bitpos */
553 complain_overflow_signed, /* complain_on_overflow */
554 bfd_elf_generic_reloc, /* special_function */
555 AARCH64_R_STR (MOVW_SABS_G1), /* name */
556 FALSE, /* partial_inplace */
557 0xffff, /* src_mask */
558 0xffff, /* dst_mask */
559 FALSE), /* pcrel_offset */
560
561 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
562 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
563 32, /* rightshift */
564 2, /* size (0 = byte, 1 = short, 2 = long) */
565 16, /* bitsize */
566 FALSE, /* pc_relative */
567 0, /* bitpos */
568 complain_overflow_signed, /* complain_on_overflow */
569 bfd_elf_generic_reloc, /* special_function */
570 AARCH64_R_STR (MOVW_SABS_G2), /* name */
571 FALSE, /* partial_inplace */
572 0xffff, /* src_mask */
573 0xffff, /* dst_mask */
574 FALSE), /* pcrel_offset */
575
576 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
577 addresses: PG(x) is (x & ~0xfff). */
578
579 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
580 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
581 2, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 19, /* bitsize */
584 TRUE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_signed, /* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 AARCH64_R_STR (LD_PREL_LO19), /* name */
589 FALSE, /* partial_inplace */
590 0x7ffff, /* src_mask */
591 0x7ffff, /* dst_mask */
592 TRUE), /* pcrel_offset */
593
594 /* ADR: (S+A-P) & 0x1fffff */
595 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
596 0, /* rightshift */
597 2, /* size (0 = byte, 1 = short, 2 = long) */
598 21, /* bitsize */
599 TRUE, /* pc_relative */
600 0, /* bitpos */
601 complain_overflow_signed, /* complain_on_overflow */
602 bfd_elf_generic_reloc, /* special_function */
603 AARCH64_R_STR (ADR_PREL_LO21), /* name */
604 FALSE, /* partial_inplace */
605 0x1fffff, /* src_mask */
606 0x1fffff, /* dst_mask */
607 TRUE), /* pcrel_offset */
608
609 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
610 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
611 12, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 21, /* bitsize */
614 TRUE, /* pc_relative */
615 0, /* bitpos */
616 complain_overflow_signed, /* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
619 FALSE, /* partial_inplace */
620 0x1fffff, /* src_mask */
621 0x1fffff, /* dst_mask */
622 TRUE), /* pcrel_offset */
623
624 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
625 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
626 12, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 21, /* bitsize */
629 TRUE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont, /* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
634 FALSE, /* partial_inplace */
635 0x1fffff, /* src_mask */
636 0x1fffff, /* dst_mask */
637 TRUE), /* pcrel_offset */
638
639 /* ADD: (S+A) & 0xfff [no overflow check] */
640 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
641 0, /* rightshift */
642 2, /* size (0 = byte, 1 = short, 2 = long) */
643 12, /* bitsize */
644 FALSE, /* pc_relative */
645 10, /* bitpos */
646 complain_overflow_dont, /* complain_on_overflow */
647 bfd_elf_generic_reloc, /* special_function */
648 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
649 FALSE, /* partial_inplace */
650 0x3ffc00, /* src_mask */
651 0x3ffc00, /* dst_mask */
652 FALSE), /* pcrel_offset */
653
654 /* LD/ST8: (S+A) & 0xfff */
655 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
656 0, /* rightshift */
657 2, /* size (0 = byte, 1 = short, 2 = long) */
658 12, /* bitsize */
659 FALSE, /* pc_relative */
660 0, /* bitpos */
661 complain_overflow_dont, /* complain_on_overflow */
662 bfd_elf_generic_reloc, /* special_function */
663 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
664 FALSE, /* partial_inplace */
665 0xfff, /* src_mask */
666 0xfff, /* dst_mask */
667 FALSE), /* pcrel_offset */
668
669 /* Relocations for control-flow instructions. */
670
671 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
672 HOWTO (AARCH64_R (TSTBR14), /* type */
673 2, /* rightshift */
674 2, /* size (0 = byte, 1 = short, 2 = long) */
675 14, /* bitsize */
676 TRUE, /* pc_relative */
677 0, /* bitpos */
678 complain_overflow_signed, /* complain_on_overflow */
679 bfd_elf_generic_reloc, /* special_function */
680 AARCH64_R_STR (TSTBR14), /* name */
681 FALSE, /* partial_inplace */
682 0x3fff, /* src_mask */
683 0x3fff, /* dst_mask */
684 TRUE), /* pcrel_offset */
685
686 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
687 HOWTO (AARCH64_R (CONDBR19), /* type */
688 2, /* rightshift */
689 2, /* size (0 = byte, 1 = short, 2 = long) */
690 19, /* bitsize */
691 TRUE, /* pc_relative */
692 0, /* bitpos */
693 complain_overflow_signed, /* complain_on_overflow */
694 bfd_elf_generic_reloc, /* special_function */
695 AARCH64_R_STR (CONDBR19), /* name */
696 FALSE, /* partial_inplace */
697 0x7ffff, /* src_mask */
698 0x7ffff, /* dst_mask */
699 TRUE), /* pcrel_offset */
700
701 /* B: ((S+A-P) >> 2) & 0x3ffffff */
702 HOWTO (AARCH64_R (JUMP26), /* type */
703 2, /* rightshift */
704 2, /* size (0 = byte, 1 = short, 2 = long) */
705 26, /* bitsize */
706 TRUE, /* pc_relative */
707 0, /* bitpos */
708 complain_overflow_signed, /* complain_on_overflow */
709 bfd_elf_generic_reloc, /* special_function */
710 AARCH64_R_STR (JUMP26), /* name */
711 FALSE, /* partial_inplace */
712 0x3ffffff, /* src_mask */
713 0x3ffffff, /* dst_mask */
714 TRUE), /* pcrel_offset */
715
716 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
717 HOWTO (AARCH64_R (CALL26), /* type */
718 2, /* rightshift */
719 2, /* size (0 = byte, 1 = short, 2 = long) */
720 26, /* bitsize */
721 TRUE, /* pc_relative */
722 0, /* bitpos */
723 complain_overflow_signed, /* complain_on_overflow */
724 bfd_elf_generic_reloc, /* special_function */
725 AARCH64_R_STR (CALL26), /* name */
726 FALSE, /* partial_inplace */
727 0x3ffffff, /* src_mask */
728 0x3ffffff, /* dst_mask */
729 TRUE), /* pcrel_offset */
730
731 /* LD/ST16: (S+A) & 0xffe */
732 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
733 1, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 12, /* bitsize */
736 FALSE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_dont, /* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
741 FALSE, /* partial_inplace */
742 0xffe, /* src_mask */
743 0xffe, /* dst_mask */
744 FALSE), /* pcrel_offset */
745
746 /* LD/ST32: (S+A) & 0xffc */
747 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
748 2, /* rightshift */
749 2, /* size (0 = byte, 1 = short, 2 = long) */
750 12, /* bitsize */
751 FALSE, /* pc_relative */
752 0, /* bitpos */
753 complain_overflow_dont, /* complain_on_overflow */
754 bfd_elf_generic_reloc, /* special_function */
755 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
756 FALSE, /* partial_inplace */
757 0xffc, /* src_mask */
758 0xffc, /* dst_mask */
759 FALSE), /* pcrel_offset */
760
761 /* LD/ST64: (S+A) & 0xff8 */
762 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
763 3, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 12, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_dont, /* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
771 FALSE, /* partial_inplace */
772 0xff8, /* src_mask */
773 0xff8, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 /* LD/ST128: (S+A) & 0xff0 */
777 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
778 4, /* rightshift */
779 2, /* size (0 = byte, 1 = short, 2 = long) */
780 12, /* bitsize */
781 FALSE, /* pc_relative */
782 0, /* bitpos */
783 complain_overflow_dont, /* complain_on_overflow */
784 bfd_elf_generic_reloc, /* special_function */
785 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
786 FALSE, /* partial_inplace */
787 0xff0, /* src_mask */
788 0xff0, /* dst_mask */
789 FALSE), /* pcrel_offset */
790
791 /* Set a load-literal immediate field to bits
792 0x1FFFFC of G(S)-P */
793 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
794 2, /* rightshift */
795 2, /* size (0 = byte,1 = short,2 = long) */
796 19, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_signed, /* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 AARCH64_R_STR (GOT_LD_PREL19), /* name */
802 FALSE, /* partial_inplace */
803 0xffffe0, /* src_mask */
804 0xffffe0, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 /* Get to the page for the GOT entry for the symbol
808 (G(S) - P) using an ADRP instruction. */
809 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
810 12, /* rightshift */
811 2, /* size (0 = byte, 1 = short, 2 = long) */
812 21, /* bitsize */
813 TRUE, /* pc_relative */
814 0, /* bitpos */
815 complain_overflow_dont, /* complain_on_overflow */
816 bfd_elf_generic_reloc, /* special_function */
817 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
818 FALSE, /* partial_inplace */
819 0x1fffff, /* src_mask */
820 0x1fffff, /* dst_mask */
821 TRUE), /* pcrel_offset */
822
823 /* LD64: GOT offset G(S) & 0xff8 */
824 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
825 3, /* rightshift */
826 2, /* size (0 = byte, 1 = short, 2 = long) */
827 12, /* bitsize */
828 FALSE, /* pc_relative */
829 0, /* bitpos */
830 complain_overflow_dont, /* complain_on_overflow */
831 bfd_elf_generic_reloc, /* special_function */
832 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
833 FALSE, /* partial_inplace */
834 0xff8, /* src_mask */
835 0xff8, /* dst_mask */
836 FALSE), /* pcrel_offset */
837
838 /* LD32: GOT offset G(S) & 0xffc */
839 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
840 2, /* rightshift */
841 2, /* size (0 = byte, 1 = short, 2 = long) */
842 12, /* bitsize */
843 FALSE, /* pc_relative */
844 0, /* bitpos */
845 complain_overflow_dont, /* complain_on_overflow */
846 bfd_elf_generic_reloc, /* special_function */
847 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
848 FALSE, /* partial_inplace */
849 0xffc, /* src_mask */
850 0xffc, /* dst_mask */
851 FALSE), /* pcrel_offset */
852
853 /* LD64: GOT offset for the symbol. */
854 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
855 3, /* rightshift */
856 2, /* size (0 = byte, 1 = short, 2 = long) */
857 12, /* bitsize */
858 FALSE, /* pc_relative */
859 0, /* bitpos */
860 complain_overflow_unsigned, /* complain_on_overflow */
861 bfd_elf_generic_reloc, /* special_function */
862 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
863 FALSE, /* partial_inplace */
864 0x7ff8, /* src_mask */
865 0x7ff8, /* dst_mask */
866 FALSE), /* pcrel_offset */
867
868 /* LD32: GOT offset to the page address of GOT table.
869 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
870 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
871 2, /* rightshift */
872 2, /* size (0 = byte, 1 = short, 2 = long) */
873 12, /* bitsize */
874 FALSE, /* pc_relative */
875 0, /* bitpos */
876 complain_overflow_unsigned, /* complain_on_overflow */
877 bfd_elf_generic_reloc, /* special_function */
878 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
879 FALSE, /* partial_inplace */
880 0x5ffc, /* src_mask */
881 0x5ffc, /* dst_mask */
882 FALSE), /* pcrel_offset */
883
884 /* LD64: GOT offset to the page address of GOT table.
885 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
886 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
887 3, /* rightshift */
888 2, /* size (0 = byte, 1 = short, 2 = long) */
889 12, /* bitsize */
890 FALSE, /* pc_relative */
891 0, /* bitpos */
892 complain_overflow_unsigned, /* complain_on_overflow */
893 bfd_elf_generic_reloc, /* special_function */
894 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
895 FALSE, /* partial_inplace */
896 0x7ff8, /* src_mask */
897 0x7ff8, /* dst_mask */
898 FALSE), /* pcrel_offset */
899
900 /* Get to the page for the GOT entry for the symbol
901 (G(S) - P) using an ADRP instruction. */
902 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
903 12, /* rightshift */
904 2, /* size (0 = byte, 1 = short, 2 = long) */
905 21, /* bitsize */
906 TRUE, /* pc_relative */
907 0, /* bitpos */
908 complain_overflow_dont, /* complain_on_overflow */
909 bfd_elf_generic_reloc, /* special_function */
910 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
911 FALSE, /* partial_inplace */
912 0x1fffff, /* src_mask */
913 0x1fffff, /* dst_mask */
914 TRUE), /* pcrel_offset */
915
916 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
917 0, /* rightshift */
918 2, /* size (0 = byte, 1 = short, 2 = long) */
919 21, /* bitsize */
920 TRUE, /* pc_relative */
921 0, /* bitpos */
922 complain_overflow_dont, /* complain_on_overflow */
923 bfd_elf_generic_reloc, /* special_function */
924 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
925 FALSE, /* partial_inplace */
926 0x1fffff, /* src_mask */
927 0x1fffff, /* dst_mask */
928 TRUE), /* pcrel_offset */
929
930 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
931 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
932 0, /* rightshift */
933 2, /* size (0 = byte, 1 = short, 2 = long) */
934 12, /* bitsize */
935 FALSE, /* pc_relative */
936 0, /* bitpos */
937 complain_overflow_dont, /* complain_on_overflow */
938 bfd_elf_generic_reloc, /* special_function */
939 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
940 FALSE, /* partial_inplace */
941 0xfff, /* src_mask */
942 0xfff, /* dst_mask */
943 FALSE), /* pcrel_offset */
944
945 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
946 16, /* rightshift */
947 2, /* size (0 = byte, 1 = short, 2 = long) */
948 16, /* bitsize */
949 FALSE, /* pc_relative */
950 0, /* bitpos */
951 complain_overflow_dont, /* complain_on_overflow */
952 bfd_elf_generic_reloc, /* special_function */
953 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
954 FALSE, /* partial_inplace */
955 0xffff, /* src_mask */
956 0xffff, /* dst_mask */
957 FALSE), /* pcrel_offset */
958
959 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
960 0, /* rightshift */
961 2, /* size (0 = byte, 1 = short, 2 = long) */
962 16, /* bitsize */
963 FALSE, /* pc_relative */
964 0, /* bitpos */
965 complain_overflow_dont, /* complain_on_overflow */
966 bfd_elf_generic_reloc, /* special_function */
967 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
968 FALSE, /* partial_inplace */
969 0xffff, /* src_mask */
970 0xffff, /* dst_mask */
971 FALSE), /* pcrel_offset */
972
973 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
974 12, /* rightshift */
975 2, /* size (0 = byte, 1 = short, 2 = long) */
976 21, /* bitsize */
977 FALSE, /* pc_relative */
978 0, /* bitpos */
979 complain_overflow_dont, /* complain_on_overflow */
980 bfd_elf_generic_reloc, /* special_function */
981 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
982 FALSE, /* partial_inplace */
983 0x1fffff, /* src_mask */
984 0x1fffff, /* dst_mask */
985 FALSE), /* pcrel_offset */
986
987 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
988 3, /* rightshift */
989 2, /* size (0 = byte, 1 = short, 2 = long) */
990 12, /* bitsize */
991 FALSE, /* pc_relative */
992 0, /* bitpos */
993 complain_overflow_dont, /* complain_on_overflow */
994 bfd_elf_generic_reloc, /* special_function */
995 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
996 FALSE, /* partial_inplace */
997 0xff8, /* src_mask */
998 0xff8, /* dst_mask */
999 FALSE), /* pcrel_offset */
1000
1001 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1002 2, /* rightshift */
1003 2, /* size (0 = byte, 1 = short, 2 = long) */
1004 12, /* bitsize */
1005 FALSE, /* pc_relative */
1006 0, /* bitpos */
1007 complain_overflow_dont, /* complain_on_overflow */
1008 bfd_elf_generic_reloc, /* special_function */
1009 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1010 FALSE, /* partial_inplace */
1011 0xffc, /* src_mask */
1012 0xffc, /* dst_mask */
1013 FALSE), /* pcrel_offset */
1014
1015 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1016 2, /* rightshift */
1017 2, /* size (0 = byte, 1 = short, 2 = long) */
1018 19, /* bitsize */
1019 FALSE, /* pc_relative */
1020 0, /* bitpos */
1021 complain_overflow_dont, /* complain_on_overflow */
1022 bfd_elf_generic_reloc, /* special_function */
1023 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1024 FALSE, /* partial_inplace */
1025 0x1ffffc, /* src_mask */
1026 0x1ffffc, /* dst_mask */
1027 FALSE), /* pcrel_offset */
1028
1029 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1030 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1031 0, /* rightshift */
1032 2, /* size (0 = byte, 1 = short, 2 = long) */
1033 12, /* bitsize */
1034 FALSE, /* pc_relative */
1035 0, /* bitpos */
1036 complain_overflow_dont, /* complain_on_overflow */
1037 bfd_elf_generic_reloc, /* special_function */
1038 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1039 FALSE, /* partial_inplace */
1040 0xfff, /* src_mask */
1041 0xfff, /* dst_mask */
1042 FALSE), /* pcrel_offset */
1043
1044 /* Get to the page for the GOT entry for the symbol
1045 (G(S) - P) using an ADRP instruction. */
1046 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1047 12, /* rightshift */
1048 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 21, /* bitsize */
1050 TRUE, /* pc_relative */
1051 0, /* bitpos */
1052 complain_overflow_signed, /* complain_on_overflow */
1053 bfd_elf_generic_reloc, /* special_function */
1054 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1055 FALSE, /* partial_inplace */
1056 0x1fffff, /* src_mask */
1057 0x1fffff, /* dst_mask */
1058 TRUE), /* pcrel_offset */
1059
1060 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1061 0, /* rightshift */
1062 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 21, /* bitsize */
1064 TRUE, /* pc_relative */
1065 0, /* bitpos */
1066 complain_overflow_signed, /* complain_on_overflow */
1067 bfd_elf_generic_reloc, /* special_function */
1068 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1069 FALSE, /* partial_inplace */
1070 0x1fffff, /* src_mask */
1071 0x1fffff, /* dst_mask */
1072 TRUE), /* pcrel_offset */
1073
1074 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1075 32, /* rightshift */
1076 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 16, /* bitsize */
1078 FALSE, /* pc_relative */
1079 0, /* bitpos */
1080 complain_overflow_unsigned, /* complain_on_overflow */
1081 bfd_elf_generic_reloc, /* special_function */
1082 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1083 FALSE, /* partial_inplace */
1084 0xffff, /* src_mask */
1085 0xffff, /* dst_mask */
1086 FALSE), /* pcrel_offset */
1087
1088 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1089 16, /* rightshift */
1090 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 16, /* bitsize */
1092 FALSE, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1097 FALSE, /* partial_inplace */
1098 0xffff, /* src_mask */
1099 0xffff, /* dst_mask */
1100 FALSE), /* pcrel_offset */
1101
1102 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1103 16, /* rightshift */
1104 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 16, /* bitsize */
1106 FALSE, /* pc_relative */
1107 0, /* bitpos */
1108 complain_overflow_dont, /* complain_on_overflow */
1109 bfd_elf_generic_reloc, /* special_function */
1110 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1111 FALSE, /* partial_inplace */
1112 0xffff, /* src_mask */
1113 0xffff, /* dst_mask */
1114 FALSE), /* pcrel_offset */
1115
1116 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1117 0, /* rightshift */
1118 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 16, /* bitsize */
1120 FALSE, /* pc_relative */
1121 0, /* bitpos */
1122 complain_overflow_dont, /* complain_on_overflow */
1123 bfd_elf_generic_reloc, /* special_function */
1124 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1125 FALSE, /* partial_inplace */
1126 0xffff, /* src_mask */
1127 0xffff, /* dst_mask */
1128 FALSE), /* pcrel_offset */
1129
1130 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1131 0, /* rightshift */
1132 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 16, /* bitsize */
1134 FALSE, /* pc_relative */
1135 0, /* bitpos */
1136 complain_overflow_dont, /* complain_on_overflow */
1137 bfd_elf_generic_reloc, /* special_function */
1138 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1139 FALSE, /* partial_inplace */
1140 0xffff, /* src_mask */
1141 0xffff, /* dst_mask */
1142 FALSE), /* pcrel_offset */
1143
1144 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1145 12, /* rightshift */
1146 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 12, /* bitsize */
1148 FALSE, /* pc_relative */
1149 0, /* bitpos */
1150 complain_overflow_unsigned, /* complain_on_overflow */
1151 bfd_elf_generic_reloc, /* special_function */
1152 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1153 FALSE, /* partial_inplace */
1154 0xfff, /* src_mask */
1155 0xfff, /* dst_mask */
1156 FALSE), /* pcrel_offset */
1157
1158 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1159 0, /* rightshift */
1160 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 12, /* bitsize */
1162 FALSE, /* pc_relative */
1163 0, /* bitpos */
1164 complain_overflow_unsigned, /* complain_on_overflow */
1165 bfd_elf_generic_reloc, /* special_function */
1166 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1167 FALSE, /* partial_inplace */
1168 0xfff, /* src_mask */
1169 0xfff, /* dst_mask */
1170 FALSE), /* pcrel_offset */
1171
1172 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1173 0, /* rightshift */
1174 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 12, /* bitsize */
1176 FALSE, /* pc_relative */
1177 0, /* bitpos */
1178 complain_overflow_dont, /* complain_on_overflow */
1179 bfd_elf_generic_reloc, /* special_function */
1180 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1181 FALSE, /* partial_inplace */
1182 0xfff, /* src_mask */
1183 0xfff, /* dst_mask */
1184 FALSE), /* pcrel_offset */
1185
1186 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1187 2, /* rightshift */
1188 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 19, /* bitsize */
1190 TRUE, /* pc_relative */
1191 0, /* bitpos */
1192 complain_overflow_dont, /* complain_on_overflow */
1193 bfd_elf_generic_reloc, /* special_function */
1194 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1195 FALSE, /* partial_inplace */
1196 0x0ffffe0, /* src_mask */
1197 0x0ffffe0, /* dst_mask */
1198 TRUE), /* pcrel_offset */
1199
1200 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1201 0, /* rightshift */
1202 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 21, /* bitsize */
1204 TRUE, /* pc_relative */
1205 0, /* bitpos */
1206 complain_overflow_dont, /* complain_on_overflow */
1207 bfd_elf_generic_reloc, /* special_function */
1208 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1209 FALSE, /* partial_inplace */
1210 0x1fffff, /* src_mask */
1211 0x1fffff, /* dst_mask */
1212 TRUE), /* pcrel_offset */
1213
1214 /* Get to the page for the GOT entry for the symbol
1215 (G(S) - P) using an ADRP instruction. */
1216 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1217 12, /* rightshift */
1218 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 21, /* bitsize */
1220 TRUE, /* pc_relative */
1221 0, /* bitpos */
1222 complain_overflow_dont, /* complain_on_overflow */
1223 bfd_elf_generic_reloc, /* special_function */
1224 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1225 FALSE, /* partial_inplace */
1226 0x1fffff, /* src_mask */
1227 0x1fffff, /* dst_mask */
1228 TRUE), /* pcrel_offset */
1229
1230 /* LD64: GOT offset G(S) & 0xff8. */
1231 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1232 3, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 12, /* bitsize */
1235 FALSE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont, /* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1240 FALSE, /* partial_inplace */
1241 0xff8, /* src_mask */
1242 0xff8, /* dst_mask */
1243 FALSE), /* pcrel_offset */
1244
1245 /* LD32: GOT offset G(S) & 0xffc. */
1246 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1247 2, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 12, /* bitsize */
1250 FALSE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont, /* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1255 FALSE, /* partial_inplace */
1256 0xffc, /* src_mask */
1257 0xffc, /* dst_mask */
1258 FALSE), /* pcrel_offset */
1259
1260 /* ADD: GOT offset G(S) & 0xfff. */
1261 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1262 0, /* rightshift */
1263 2, /* size (0 = byte, 1 = short, 2 = long) */
1264 12, /* bitsize */
1265 FALSE, /* pc_relative */
1266 0, /* bitpos */
1267 complain_overflow_dont, /* complain_on_overflow */
1268 bfd_elf_generic_reloc, /* special_function */
1269 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1270 FALSE, /* partial_inplace */
1271 0xfff, /* src_mask */
1272 0xfff, /* dst_mask */
1273 FALSE), /* pcrel_offset */
1274
1275 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1276 16, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 12, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont, /* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1284 FALSE, /* partial_inplace */
1285 0xffff, /* src_mask */
1286 0xffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 12, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_dont, /* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1298 FALSE, /* partial_inplace */
1299 0xffff, /* src_mask */
1300 0xffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 12, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont, /* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 AARCH64_R_STR (TLSDESC_LDR), /* name */
1312 FALSE, /* partial_inplace */
1313 0x0, /* src_mask */
1314 0x0, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 12, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont, /* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 AARCH64_R_STR (TLSDESC_ADD), /* name */
1326 FALSE, /* partial_inplace */
1327 0x0, /* src_mask */
1328 0x0, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 0, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_dont, /* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 AARCH64_R_STR (TLSDESC_CALL), /* name */
1340 FALSE, /* partial_inplace */
1341 0x0, /* src_mask */
1342 0x0, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (AARCH64_R (COPY), /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 64, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_bitfield, /* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 AARCH64_R_STR (COPY), /* name */
1354 TRUE, /* partial_inplace */
1355 0xffffffff, /* src_mask */
1356 0xffffffff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 64, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield, /* complain_on_overflow */
1366 bfd_elf_generic_reloc, /* special_function */
1367 AARCH64_R_STR (GLOB_DAT), /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 64, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_bitfield, /* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 AARCH64_R_STR (JUMP_SLOT), /* name */
1382 TRUE, /* partial_inplace */
1383 0xffffffff, /* src_mask */
1384 0xffffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (AARCH64_R (RELATIVE), /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 64, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield, /* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 AARCH64_R_STR (RELATIVE), /* name */
1396 TRUE, /* partial_inplace */
1397 ALL_ONES, /* src_mask */
1398 ALL_ONES, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 64, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont, /* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 #if ARCH_SIZE == 64
1410 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1411 #else
1412 AARCH64_R_STR (TLS_DTPMOD), /* name */
1413 #endif
1414 FALSE, /* partial_inplace */
1415 0, /* src_mask */
1416 ALL_ONES, /* dst_mask */
 1417         FALSE),                /* pcrel_offset */
1418
1419 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1420 0, /* rightshift */
1421 2, /* size (0 = byte, 1 = short, 2 = long) */
1422 64, /* bitsize */
1423 FALSE, /* pc_relative */
1424 0, /* bitpos */
1425 complain_overflow_dont, /* complain_on_overflow */
1426 bfd_elf_generic_reloc, /* special_function */
1427 #if ARCH_SIZE == 64
1428 AARCH64_R_STR (TLS_DTPREL64), /* name */
1429 #else
1430 AARCH64_R_STR (TLS_DTPREL), /* name */
1431 #endif
1432 FALSE, /* partial_inplace */
1433 0, /* src_mask */
1434 ALL_ONES, /* dst_mask */
1435 FALSE), /* pcrel_offset */
1436
1437 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1438 0, /* rightshift */
1439 2, /* size (0 = byte, 1 = short, 2 = long) */
1440 64, /* bitsize */
1441 FALSE, /* pc_relative */
1442 0, /* bitpos */
1443 complain_overflow_dont, /* complain_on_overflow */
1444 bfd_elf_generic_reloc, /* special_function */
1445 #if ARCH_SIZE == 64
1446 AARCH64_R_STR (TLS_TPREL64), /* name */
1447 #else
1448 AARCH64_R_STR (TLS_TPREL), /* name */
1449 #endif
1450 FALSE, /* partial_inplace */
1451 0, /* src_mask */
1452 ALL_ONES, /* dst_mask */
1453 FALSE), /* pcrel_offset */
1454
1455 HOWTO (AARCH64_R (TLSDESC), /* type */
1456 0, /* rightshift */
1457 2, /* size (0 = byte, 1 = short, 2 = long) */
1458 64, /* bitsize */
1459 FALSE, /* pc_relative */
1460 0, /* bitpos */
1461 complain_overflow_dont, /* complain_on_overflow */
1462 bfd_elf_generic_reloc, /* special_function */
1463 AARCH64_R_STR (TLSDESC), /* name */
1464 FALSE, /* partial_inplace */
1465 0, /* src_mask */
1466 ALL_ONES, /* dst_mask */
1467 FALSE), /* pcrel_offset */
1468
1469 HOWTO (AARCH64_R (IRELATIVE), /* type */
1470 0, /* rightshift */
1471 2, /* size (0 = byte, 1 = short, 2 = long) */
1472 64, /* bitsize */
1473 FALSE, /* pc_relative */
1474 0, /* bitpos */
1475 complain_overflow_bitfield, /* complain_on_overflow */
1476 bfd_elf_generic_reloc, /* special_function */
1477 AARCH64_R_STR (IRELATIVE), /* name */
1478 FALSE, /* partial_inplace */
1479 0, /* src_mask */
1480 ALL_ONES, /* dst_mask */
1481 FALSE), /* pcrel_offset */
1482
1483 EMPTY_HOWTO (0),
1484 };
1485
1486 static reloc_howto_type elfNN_aarch64_howto_none =
1487 HOWTO (R_AARCH64_NONE, /* type */
1488 0, /* rightshift */
1489 3, /* size (0 = byte, 1 = short, 2 = long) */
1490 0, /* bitsize */
1491 FALSE, /* pc_relative */
1492 0, /* bitpos */
1493 complain_overflow_dont,/* complain_on_overflow */
1494 bfd_elf_generic_reloc, /* special_function */
1495 "R_AARCH64_NONE", /* name */
1496 FALSE, /* partial_inplace */
1497 0, /* src_mask */
1498 0, /* dst_mask */
1499 FALSE); /* pcrel_offset */
1500
1501 /* Given HOWTO, return the bfd internal relocation enumerator. */
1502
1503 static bfd_reloc_code_real_type
1504 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1505 {
1506 const int size
1507 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1508 const ptrdiff_t offset
1509 = howto - elfNN_aarch64_howto_table;
1510
1511 if (offset > 0 && offset < size - 1)
1512 return BFD_RELOC_AARCH64_RELOC_START + offset;
1513
1514 if (howto == &elfNN_aarch64_howto_none)
1515 return BFD_RELOC_AARCH64_NONE;
1516
1517 return BFD_RELOC_AARCH64_RELOC_START;
1518 }
1519
1520 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1521
1522 static bfd_reloc_code_real_type
1523 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1524 {
1525 static bfd_boolean initialized_p = FALSE;
1526 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1527 static unsigned int offsets[R_AARCH64_end];
1528
1529 if (initialized_p == FALSE)
1530 {
1531 unsigned int i;
1532
1533 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1534 if (elfNN_aarch64_howto_table[i].type != 0)
1535 offsets[elfNN_aarch64_howto_table[i].type] = i;
1536
1537 initialized_p = TRUE;
1538 }
1539
1540 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1541 return BFD_RELOC_AARCH64_NONE;
1542
1543 /* PR 17512: file: b371e70a. */
1544 if (r_type >= R_AARCH64_end)
1545 {
1546 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1547 bfd_set_error (bfd_error_bad_value);
1548 return BFD_RELOC_AARCH64_NONE;
1549 }
1550
1551 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1552 }
1553
1554 struct elf_aarch64_reloc_map
1555 {
1556 bfd_reloc_code_real_type from;
1557 bfd_reloc_code_real_type to;
1558 };
1559
1560 /* Map bfd generic reloc to AArch64-specific reloc. */
1561 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1562 {
1563 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1564
1565 /* Basic data relocations. */
1566 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1567 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1568 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1569 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1570 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1571 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1572 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1573 };
1574
1575 /* Given the bfd internal relocation enumerator in CODE, return the
1576 corresponding howto entry. */
1577
1578 static reloc_howto_type *
1579 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1580 {
1581 unsigned int i;
1582
1583 /* Convert bfd generic reloc to AArch64-specific reloc. */
1584 if (code < BFD_RELOC_AARCH64_RELOC_START
1585 || code > BFD_RELOC_AARCH64_RELOC_END)
1586 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1587 if (elf_aarch64_reloc_map[i].from == code)
1588 {
1589 code = elf_aarch64_reloc_map[i].to;
1590 break;
1591 }
1592
1593 if (code > BFD_RELOC_AARCH64_RELOC_START
1594 && code < BFD_RELOC_AARCH64_RELOC_END)
1595 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1596 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1597
1598 if (code == BFD_RELOC_AARCH64_NONE)
1599 return &elfNN_aarch64_howto_none;
1600
1601 return NULL;
1602 }
1603
1604 static reloc_howto_type *
1605 elfNN_aarch64_howto_from_type (unsigned int r_type)
1606 {
1607 bfd_reloc_code_real_type val;
1608 reloc_howto_type *howto;
1609
1610 #if ARCH_SIZE == 32
1611 if (r_type > 256)
1612 {
1613 bfd_set_error (bfd_error_bad_value);
1614 return NULL;
1615 }
1616 #endif
1617
1618 if (r_type == R_AARCH64_NONE)
1619 return &elfNN_aarch64_howto_none;
1620
1621 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1622 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1623
1624 if (howto != NULL)
1625 return howto;
1626
1627 bfd_set_error (bfd_error_bad_value);
1628 return NULL;
1629 }
1630
1631 static void
1632 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1633 Elf_Internal_Rela *elf_reloc)
1634 {
1635 unsigned int r_type;
1636
1637 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1638 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1639 }
1640
1641 static reloc_howto_type *
1642 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1643 bfd_reloc_code_real_type code)
1644 {
1645 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1646
1647 if (howto != NULL)
1648 return howto;
1649
1650 bfd_set_error (bfd_error_bad_value);
1651 return NULL;
1652 }
1653
1654 static reloc_howto_type *
1655 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1656 const char *r_name)
1657 {
1658 unsigned int i;
1659
1660 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1661 if (elfNN_aarch64_howto_table[i].name != NULL
1662 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1663 return &elfNN_aarch64_howto_table[i];
1664
1665 return NULL;
1666 }
1667
1668 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1669 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1670 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1671 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1672
1673 /* The linker script knows the section names for placement.
1674 The entry_names are used to do simple name mangling on the stubs.
1675 Given a function name, and its type, the stub can be found. The
1676 name can be changed. The only requirement is that the %s be present. */
1677 #define STUB_ENTRY_NAME "__%s_veneer"
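
/* A minimal sketch of how the template above can be used to mangle a
   veneer name; the helper name and the example symbol are hypothetical
   and the function is compiled out.  */
#if 0
static char *
stub_entry_name_sketch (const char *sym_name)
{
  /* e.g. "memcpy" -> "__memcpy_veneer".  */
  size_t len = strlen (sym_name) + strlen (STUB_ENTRY_NAME) + 1;
  char *name = bfd_malloc (len);
  if (name != NULL)
    snprintf (name, len, STUB_ENTRY_NAME, sym_name);
  return name;
}
#endif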
1678
1679 /* The name of the dynamic interpreter. This is put in the .interp
1680 section. */
1681 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1682
1683 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1684 (((1 << 25) - 1) << 2)
1685 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1686 (-((1 << 25) << 2))
1687
1688 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1689 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1690
1691 static int
1692 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1693 {
1694 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1695 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1696 }
1697
1698 static int
1699 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1700 {
1701 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1702 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1703 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1704 }
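
/* A minimal self-contained sanity sketch of the two range checks
   above; the helper name and sample addresses are arbitrary and the
   function is compiled out.  */
#if 0
static void
aarch64_range_check_sketch (void)
{
  bfd_vma place = 0x400000;

  /* A direct B/BL reaches +/-128MB: the forward limit is
     ((1 << 25) - 1) << 2 = 0x7fffffc, the backward limit -(1 << 27).  */
  BFD_ASSERT (aarch64_valid_branch_p (place + AARCH64_MAX_FWD_BRANCH_OFFSET,
                                      place));
  BFD_ASSERT (!aarch64_valid_branch_p (place + (1 << 27), place));

  /* ADRP addresses pages within +/-4GB: the 21-bit signed page
     immediate spans [-(1 << 20), (1 << 20) - 1] 4KB pages, so a
     target 2MB away is comfortably in range.  */
  BFD_ASSERT (aarch64_valid_for_adrp_p (place + (1 << 21), place));
}
#endif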
1705
1706 static const uint32_t aarch64_adrp_branch_stub [] =
1707 {
1708 0x90000010, /* adrp ip0, X */
1709 /* R_AARCH64_ADR_HI21_PCREL(X) */
1710 0x91000210, /* add ip0, ip0, :lo12:X */
1711 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1712 0xd61f0200, /* br ip0 */
1713 };
1714
1715 static const uint32_t aarch64_long_branch_stub[] =
1716 {
1717 #if ARCH_SIZE == 64
1718 0x58000090, /* ldr ip0, 1f */
1719 #else
1720 0x18000090, /* ldr wip0, 1f */
1721 #endif
1722 0x10000011, /* adr ip1, #0 */
1723 0x8b110210, /* add ip0, ip0, ip1 */
1724 0xd61f0200, /* br ip0 */
1725 0x00000000, /* 1: .xword or .word
1726 R_AARCH64_PRELNN(X) + 12
1727 */
1728 0x00000000,
1729 };
1730
1731 static const uint32_t aarch64_erratum_835769_stub[] =
1732 {
1733 0x00000000, /* Placeholder for multiply accumulate. */
1734 0x14000000, /* b <label> */
1735 };
1736
1737 static const uint32_t aarch64_erratum_843419_stub[] =
1738 {
1739 0x00000000, /* Placeholder for LDR instruction. */
1740 0x14000000, /* b <label> */
1741 };
1742
1743 /* Section name for stubs is the associated section name plus this
1744 string. */
1745 #define STUB_SUFFIX ".stub"
1746
1747 enum elf_aarch64_stub_type
1748 {
1749 aarch64_stub_none,
1750 aarch64_stub_adrp_branch,
1751 aarch64_stub_long_branch,
1752 aarch64_stub_erratum_835769_veneer,
1753 aarch64_stub_erratum_843419_veneer,
1754 };
1755
1756 struct elf_aarch64_stub_hash_entry
1757 {
1758 /* Base hash table entry structure. */
1759 struct bfd_hash_entry root;
1760
1761 /* The stub section. */
1762 asection *stub_sec;
1763
1764 /* Offset within stub_sec of the beginning of this stub. */
1765 bfd_vma stub_offset;
1766
1767 /* Given the symbol's value and its section we can determine its final
1768 value when building the stubs (so the stub knows where to jump). */
1769 bfd_vma target_value;
1770 asection *target_section;
1771
1772 enum elf_aarch64_stub_type stub_type;
1773
1774 /* The symbol table entry, if any, that this was derived from. */
1775 struct elf_aarch64_link_hash_entry *h;
1776
1777 /* Destination symbol type */
1778 unsigned char st_type;
1779
1780 /* Where this stub is being called from, or, in the case of combined
1781 stub sections, the first input section in the group. */
1782 asection *id_sec;
1783
1784 /* The name for the local symbol at the start of this stub. The
1785 stub name in the hash table has to be unique; this does not, so
1786 it can be friendlier. */
1787 char *output_name;
1788
1789 /* The instruction which caused this stub to be generated (only valid for
1790 erratum 835769 workaround stubs at present). */
1791 uint32_t veneered_insn;
1792
1793 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1794 bfd_vma adrp_offset;
1795 };
1796
1797 /* Used to build a map of a section. This is required for mixed-endian
1798 code/data. */
1799
1800 typedef struct elf_elf_section_map
1801 {
1802 bfd_vma vma;
1803 char type;
1804 }
1805 elf_aarch64_section_map;
1806
1807
1808 typedef struct _aarch64_elf_section_data
1809 {
1810 struct bfd_elf_section_data elf;
1811 unsigned int mapcount;
1812 unsigned int mapsize;
1813 elf_aarch64_section_map *map;
1814 }
1815 _aarch64_elf_section_data;
1816
1817 #define elf_aarch64_section_data(sec) \
1818 ((_aarch64_elf_section_data *) elf_section_data (sec))
1819
1820 /* The size of the thread control block which is defined to be two pointers. */
1821 #define TCB_SIZE (ARCH_SIZE/8)*2
1822
1823 struct elf_aarch64_local_symbol
1824 {
1825 unsigned int got_type;
1826 bfd_signed_vma got_refcount;
1827 bfd_vma got_offset;
1828
1829 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1830 offset is from the end of the jump table and reserved entries
1831 within the PLTGOT.
1832
1833 The magic value (bfd_vma) -1 indicates that an offset has not been
1834 allocated. */
1835 bfd_vma tlsdesc_got_jump_table_offset;
1836 };
1837
1838 struct elf_aarch64_obj_tdata
1839 {
1840 struct elf_obj_tdata root;
1841
1842 /* local symbol descriptors */
1843 struct elf_aarch64_local_symbol *locals;
1844
1845 /* Zero to warn when linking objects with incompatible enum sizes. */
1846 int no_enum_size_warning;
1847
1848 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1849 int no_wchar_size_warning;
1850 };
1851
1852 #define elf_aarch64_tdata(bfd) \
1853 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1854
1855 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1856
1857 #define is_aarch64_elf(bfd) \
1858 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1859 && elf_tdata (bfd) != NULL \
1860 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1861
1862 static bfd_boolean
1863 elfNN_aarch64_mkobject (bfd *abfd)
1864 {
1865 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1866 AARCH64_ELF_DATA);
1867 }
1868
1869 #define elf_aarch64_hash_entry(ent) \
1870 ((struct elf_aarch64_link_hash_entry *)(ent))
1871
1872 #define GOT_UNKNOWN 0
1873 #define GOT_NORMAL 1
1874 #define GOT_TLS_GD 2
1875 #define GOT_TLS_IE 4
1876 #define GOT_TLSDESC_GD 8
1877
1878 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
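
/* For example (illustrative only): a TLS symbol referenced through
   both the descriptor and the traditional general dynamic sequences
   accumulates got_type = GOT_TLS_GD | GOT_TLSDESC_GD (= 2 | 8 = 10),
   and GOT_TLS_GD_ANY_P reports true for either bit; a plain data
   reference would instead record GOT_NORMAL.  */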
1879
1880 /* AArch64 ELF linker hash entry. */
1881 struct elf_aarch64_link_hash_entry
1882 {
1883 struct elf_link_hash_entry root;
1884
1885 /* Track dynamic relocs copied for this symbol. */
1886 struct elf_dyn_relocs *dyn_relocs;
1887
1888 /* Since PLT entries have variable size, we need to record the
1889 index into .got.plt instead of recomputing it from the PLT
1890 offset. */
1891 bfd_signed_vma plt_got_offset;
1892
1893 /* Bit mask representing the type of GOT entry(s) if any required by
1894 this symbol. */
1895 unsigned int got_type;
1896
1897 /* A pointer to the most recently used stub hash entry against this
1898 symbol. */
1899 struct elf_aarch64_stub_hash_entry *stub_cache;
1900
1901 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1902 is from the end of the jump table and reserved entries within the PLTGOT.
1903
1904 The magic value (bfd_vma) -1 indicates that an offset has not
1905 been allocated. */
1906 bfd_vma tlsdesc_got_jump_table_offset;
1907 };
1908
1909 static unsigned int
1910 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1911 bfd *abfd,
1912 unsigned long r_symndx)
1913 {
1914 if (h)
1915 return elf_aarch64_hash_entry (h)->got_type;
1916
1917 if (! elf_aarch64_locals (abfd))
1918 return GOT_UNKNOWN;
1919
1920 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1921 }
1922
1923 /* Get the AArch64 elf linker hash table from a link_info structure. */
1924 #define elf_aarch64_hash_table(info) \
1925 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1926
1927 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1928 ((struct elf_aarch64_stub_hash_entry *) \
1929 bfd_hash_lookup ((table), (string), (create), (copy)))
1930
1931 /* AArch64 ELF linker hash table. */
1932 struct elf_aarch64_link_hash_table
1933 {
1934 /* The main hash table. */
1935 struct elf_link_hash_table root;
1936
1937 /* Nonzero to force PIC branch veneers. */
1938 int pic_veneer;
1939
1940 /* Fix erratum 835769. */
1941 int fix_erratum_835769;
1942
1943 /* Fix erratum 843419. */
1944 int fix_erratum_843419;
1945
1946 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1947 int fix_erratum_843419_adr;
1948
1949 /* The number of bytes in the initial entry in the PLT. */
1950 bfd_size_type plt_header_size;
1951
1952 /* The number of bytes in the subsequent PLT entries. */
1953 bfd_size_type plt_entry_size;
1954
1955 /* Short-cuts to get to dynamic linker sections. */
1956 asection *sdynbss;
1957 asection *srelbss;
1958
1959 /* Small local sym cache. */
1960 struct sym_cache sym_cache;
1961
1962 /* For convenience in allocate_dynrelocs. */
1963 bfd *obfd;
1964
1965 /* The amount of space used by the reserved portion of the sgotplt
1966 section, plus whatever space is used by the jump slots. */
1967 bfd_vma sgotplt_jump_table_size;
1968
1969 /* The stub hash table. */
1970 struct bfd_hash_table stub_hash_table;
1971
1972 /* Linker stub bfd. */
1973 bfd *stub_bfd;
1974
1975 /* Linker call-backs. */
1976 asection *(*add_stub_section) (const char *, asection *);
1977 void (*layout_sections_again) (void);
1978
1979 /* Array to keep track of which stub sections have been created, and
1980 information on stub grouping. */
1981 struct map_stub
1982 {
1983 /* This is the section to which stubs in the group will be
1984 attached. */
1985 asection *link_sec;
1986 /* The stub section. */
1987 asection *stub_sec;
1988 } *stub_group;
1989
1990 /* Assorted information used by elfNN_aarch64_size_stubs. */
1991 unsigned int bfd_count;
1992 int top_index;
1993 asection **input_list;
1994
1995 /* The offset into splt of the PLT entry for the TLS descriptor
1996 resolver. Special values are 0, if not necessary (or not found
1997 to be necessary yet), and -1 if needed but not determined
1998 yet. */
1999 bfd_vma tlsdesc_plt;
2000
2001 /* The GOT offset for the lazy trampoline. Communicated to the
2002 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2003 indicates an offset is not allocated. */
2004 bfd_vma dt_tlsdesc_got;
2005
2006 /* Used by local STT_GNU_IFUNC symbols. */
2007 htab_t loc_hash_table;
2008 void * loc_hash_memory;
2009 };
2010
2011 /* Create an entry in an AArch64 ELF linker hash table. */
2012
2013 static struct bfd_hash_entry *
2014 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2015 struct bfd_hash_table *table,
2016 const char *string)
2017 {
2018 struct elf_aarch64_link_hash_entry *ret =
2019 (struct elf_aarch64_link_hash_entry *) entry;
2020
2021 /* Allocate the structure if it has not already been allocated by a
2022 subclass. */
2023 if (ret == NULL)
2024 ret = bfd_hash_allocate (table,
2025 sizeof (struct elf_aarch64_link_hash_entry));
2026 if (ret == NULL)
2027 return (struct bfd_hash_entry *) ret;
2028
2029 /* Call the allocation method of the superclass. */
2030 ret = ((struct elf_aarch64_link_hash_entry *)
2031 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2032 table, string));
2033 if (ret != NULL)
2034 {
2035 ret->dyn_relocs = NULL;
2036 ret->got_type = GOT_UNKNOWN;
2037 ret->plt_got_offset = (bfd_vma) - 1;
2038 ret->stub_cache = NULL;
2039 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2040 }
2041
2042 return (struct bfd_hash_entry *) ret;
2043 }
2044
2045 /* Initialize an entry in the stub hash table. */
2046
2047 static struct bfd_hash_entry *
2048 stub_hash_newfunc (struct bfd_hash_entry *entry,
2049 struct bfd_hash_table *table, const char *string)
2050 {
2051 /* Allocate the structure if it has not already been allocated by a
2052 subclass. */
2053 if (entry == NULL)
2054 {
2055 entry = bfd_hash_allocate (table,
2056 sizeof (struct
2057 elf_aarch64_stub_hash_entry));
2058 if (entry == NULL)
2059 return entry;
2060 }
2061
2062 /* Call the allocation method of the superclass. */
2063 entry = bfd_hash_newfunc (entry, table, string);
2064 if (entry != NULL)
2065 {
2066 struct elf_aarch64_stub_hash_entry *eh;
2067
2068 /* Initialize the local fields. */
2069 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2070 eh->adrp_offset = 0;
2071 eh->stub_sec = NULL;
2072 eh->stub_offset = 0;
2073 eh->target_value = 0;
2074 eh->target_section = NULL;
2075 eh->stub_type = aarch64_stub_none;
2076 eh->h = NULL;
2077 eh->id_sec = NULL;
2078 }
2079
2080 return entry;
2081 }
2082
2083 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2084 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2085 as global symbols. We reuse indx and dynstr_index for the local symbol
2086 hash since they aren't used by global symbols in this backend. */
2087
2088 static hashval_t
2089 elfNN_aarch64_local_htab_hash (const void *ptr)
2090 {
2091 struct elf_link_hash_entry *h
2092 = (struct elf_link_hash_entry *) ptr;
2093 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2094 }
2095
2096 /* Compare local hash entries. */
2097
2098 static int
2099 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2100 {
2101 struct elf_link_hash_entry *h1
2102 = (struct elf_link_hash_entry *) ptr1;
2103 struct elf_link_hash_entry *h2
2104 = (struct elf_link_hash_entry *) ptr2;
2105
2106 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2107 }
2108
2109 /* Find and/or create a hash entry for a local symbol. */
2110
2111 static struct elf_link_hash_entry *
2112 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2113 bfd *abfd, const Elf_Internal_Rela *rel,
2114 bfd_boolean create)
2115 {
2116 struct elf_aarch64_link_hash_entry e, *ret;
2117 asection *sec = abfd->sections;
2118 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2119 ELFNN_R_SYM (rel->r_info));
2120 void **slot;
2121
2122 e.root.indx = sec->id;
2123 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2124 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2125 create ? INSERT : NO_INSERT);
2126
2127 if (!slot)
2128 return NULL;
2129
2130 if (*slot)
2131 {
2132 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2133 return &ret->root;
2134 }
2135
2136 ret = (struct elf_aarch64_link_hash_entry *)
2137 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2138 sizeof (struct elf_aarch64_link_hash_entry));
2139 if (ret)
2140 {
2141 memset (ret, 0, sizeof (*ret));
2142 ret->root.indx = sec->id;
2143 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2144 ret->root.dynindx = -1;
2145 *slot = ret;
2146 }
2147 return &ret->root;
2148 }
2149
2150 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2151
2152 static void
2153 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2154 struct elf_link_hash_entry *dir,
2155 struct elf_link_hash_entry *ind)
2156 {
2157 struct elf_aarch64_link_hash_entry *edir, *eind;
2158
2159 edir = (struct elf_aarch64_link_hash_entry *) dir;
2160 eind = (struct elf_aarch64_link_hash_entry *) ind;
2161
2162 if (eind->dyn_relocs != NULL)
2163 {
2164 if (edir->dyn_relocs != NULL)
2165 {
2166 struct elf_dyn_relocs **pp;
2167 struct elf_dyn_relocs *p;
2168
2169 /* Add reloc counts against the indirect sym to the direct sym
2170 list. Merge any entries against the same section. */
2171 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2172 {
2173 struct elf_dyn_relocs *q;
2174
2175 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2176 if (q->sec == p->sec)
2177 {
2178 q->pc_count += p->pc_count;
2179 q->count += p->count;
2180 *pp = p->next;
2181 break;
2182 }
2183 if (q == NULL)
2184 pp = &p->next;
2185 }
2186 *pp = edir->dyn_relocs;
2187 }
2188
2189 edir->dyn_relocs = eind->dyn_relocs;
2190 eind->dyn_relocs = NULL;
2191 }
2192
2193 if (ind->root.type == bfd_link_hash_indirect)
2194 {
2195 /* Copy over PLT info. */
2196 if (dir->got.refcount <= 0)
2197 {
2198 edir->got_type = eind->got_type;
2199 eind->got_type = GOT_UNKNOWN;
2200 }
2201 }
2202
2203 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2204 }
2205
2206 /* Destroy an AArch64 elf linker hash table. */
2207
2208 static void
2209 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2210 {
2211 struct elf_aarch64_link_hash_table *ret
2212 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2213
2214 if (ret->loc_hash_table)
2215 htab_delete (ret->loc_hash_table);
2216 if (ret->loc_hash_memory)
2217 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2218
2219 bfd_hash_table_free (&ret->stub_hash_table);
2220 _bfd_elf_link_hash_table_free (obfd);
2221 }
2222
2223 /* Create an AArch64 elf linker hash table. */
2224
2225 static struct bfd_link_hash_table *
2226 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2227 {
2228 struct elf_aarch64_link_hash_table *ret;
2229 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2230
2231 ret = bfd_zmalloc (amt);
2232 if (ret == NULL)
2233 return NULL;
2234
2235 if (!_bfd_elf_link_hash_table_init
2236 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2237 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2238 {
2239 free (ret);
2240 return NULL;
2241 }
2242
2243 ret->plt_header_size = PLT_ENTRY_SIZE;
2244 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2245 ret->obfd = abfd;
2246 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2247
2248 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2249 sizeof (struct elf_aarch64_stub_hash_entry)))
2250 {
2251 _bfd_elf_link_hash_table_free (abfd);
2252 return NULL;
2253 }
2254
2255 ret->loc_hash_table = htab_try_create (1024,
2256 elfNN_aarch64_local_htab_hash,
2257 elfNN_aarch64_local_htab_eq,
2258 NULL);
2259 ret->loc_hash_memory = objalloc_create ();
2260 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2261 {
2262 elfNN_aarch64_link_hash_table_free (abfd);
2263 return NULL;
2264 }
2265 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2266
2267 return &ret->root.root;
2268 }
2269
2270 static bfd_boolean
2271 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2272 bfd_vma offset, bfd_vma value)
2273 {
2274 reloc_howto_type *howto;
2275 bfd_vma place;
2276
2277 howto = elfNN_aarch64_howto_from_type (r_type);
2278 place = (input_section->output_section->vma + input_section->output_offset
2279 + offset);
2280
2281 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2282 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2283 return _bfd_aarch64_elf_put_addend (input_bfd,
2284 input_section->contents + offset, r_type,
2285 howto, value);
2286 }
2287
2288 static enum elf_aarch64_stub_type
2289 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2290 {
2291 if (aarch64_valid_for_adrp_p (value, place))
2292 return aarch64_stub_adrp_branch;
2293 return aarch64_stub_long_branch;
2294 }
2295
2296 /* Determine the type of stub needed, if any, for a call. */
2297
2298 static enum elf_aarch64_stub_type
2299 aarch64_type_of_stub (struct bfd_link_info *info,
2300 asection *input_sec,
2301 const Elf_Internal_Rela *rel,
2302 unsigned char st_type,
2303 struct elf_aarch64_link_hash_entry *hash,
2304 bfd_vma destination)
2305 {
2306 bfd_vma location;
2307 bfd_signed_vma branch_offset;
2308 unsigned int r_type;
2309 struct elf_aarch64_link_hash_table *globals;
2310 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2311 bfd_boolean via_plt_p;
2312
2313 if (st_type != STT_FUNC)
2314 return stub_type;
2315
2316 globals = elf_aarch64_hash_table (info);
2317 via_plt_p = (globals->root.splt != NULL && hash != NULL
2318 && hash->root.plt.offset != (bfd_vma) - 1);
2319
2320 if (via_plt_p)
2321 return stub_type;
2322
2323 /* Determine where the call point is. */
2324 location = (input_sec->output_offset
2325 + input_sec->output_section->vma + rel->r_offset);
2326
2327 branch_offset = (bfd_signed_vma) (destination - location);
2328
2329 r_type = ELFNN_R_TYPE (rel->r_info);
2330
2331 /* We don't want to redirect any old unconditional jump in this way,
2332 only one which is being used for a sibcall, where it is
2333 acceptable for the IP0 and IP1 registers to be clobbered. */
2334 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2335 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2336 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2337 {
2338 stub_type = aarch64_stub_long_branch;
2339 }
2340
2341 return stub_type;
2342 }
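
/* Illustrative example (the addresses are made up): a BL carrying
   R_AARCH64_CALL26 from 0x400000 to an STT_FUNC destination at
   0x10400000 has a branch offset of 0x10000000, which exceeds
   AARCH64_MAX_FWD_BRANCH_OFFSET (0x7fffffc), so a long-branch stub is
   requested; the same call resolved through a PLT entry (via_plt_p)
   gets no stub here.  */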
2343
2344 /* Build a name for an entry in the stub hash table. */
2345
2346 static char *
2347 elfNN_aarch64_stub_name (const asection *input_section,
2348 const asection *sym_sec,
2349 const struct elf_aarch64_link_hash_entry *hash,
2350 const Elf_Internal_Rela *rel)
2351 {
2352 char *stub_name;
2353 bfd_size_type len;
2354
2355 if (hash)
2356 {
2357 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2358 stub_name = bfd_malloc (len);
2359 if (stub_name != NULL)
2360 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2361 (unsigned int) input_section->id,
2362 hash->root.root.root.string,
2363 rel->r_addend);
2364 }
2365 else
2366 {
2367 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2368 stub_name = bfd_malloc (len);
2369 if (stub_name != NULL)
2370 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2371 (unsigned int) input_section->id,
2372 (unsigned int) sym_sec->id,
2373 (unsigned int) ELFNN_R_SYM (rel->r_info),
2374 rel->r_addend);
2375 }
2376
2377 return stub_name;
2378 }
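
/* Illustrative examples of the names built above (section ids, symbol
   index and addend are made up):
     global symbol:  "0000002a_memcpy+0"
     local symbol:   "0000002a_1f:3+8"
   The leading input section id keeps stubs reaching the same target
   distinct between stub groups.  */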
2379
2380 /* Look up an entry in the stub hash. Stub entries are cached because
2381 creating the stub name takes a bit of time. */
2382
2383 static struct elf_aarch64_stub_hash_entry *
2384 elfNN_aarch64_get_stub_entry (const asection *input_section,
2385 const asection *sym_sec,
2386 struct elf_link_hash_entry *hash,
2387 const Elf_Internal_Rela *rel,
2388 struct elf_aarch64_link_hash_table *htab)
2389 {
2390 struct elf_aarch64_stub_hash_entry *stub_entry;
2391 struct elf_aarch64_link_hash_entry *h =
2392 (struct elf_aarch64_link_hash_entry *) hash;
2393 const asection *id_sec;
2394
2395 if ((input_section->flags & SEC_CODE) == 0)
2396 return NULL;
2397
2398 /* If this input section is part of a group of sections sharing one
2399 stub section, then use the id of the first section in the group.
2400 Stub names need to include a section id, as there may well be
2401 more than one stub used to reach, say, printf, and we need to
2402 distinguish between them. */
2403 id_sec = htab->stub_group[input_section->id].link_sec;
2404
2405 if (h != NULL && h->stub_cache != NULL
2406 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2407 {
2408 stub_entry = h->stub_cache;
2409 }
2410 else
2411 {
2412 char *stub_name;
2413
2414 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2415 if (stub_name == NULL)
2416 return NULL;
2417
2418 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2419 stub_name, FALSE, FALSE);
2420 if (h != NULL)
2421 h->stub_cache = stub_entry;
2422
2423 free (stub_name);
2424 }
2425
2426 return stub_entry;
2427 }
2428
2429
2430 /* Create a stub section. */
2431
2432 static asection *
2433 _bfd_aarch64_create_stub_section (asection *section,
2434 struct elf_aarch64_link_hash_table *htab)
2435 {
2436 size_t namelen;
2437 bfd_size_type len;
2438 char *s_name;
2439
2440 namelen = strlen (section->name);
2441 len = namelen + sizeof (STUB_SUFFIX);
2442 s_name = bfd_alloc (htab->stub_bfd, len);
2443 if (s_name == NULL)
2444 return NULL;
2445
2446 memcpy (s_name, section->name, namelen);
2447 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2448 return (*htab->add_stub_section) (s_name, section);
2449 }
2450
2451
2452 /* Find or create a stub section for a link section.
2453
2454 Fix or create the stub section used to collect stubs attached to
2455 the specified link section. */
2456
2457 static asection *
2458 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2459 struct elf_aarch64_link_hash_table *htab)
2460 {
2461 if (htab->stub_group[link_section->id].stub_sec == NULL)
2462 htab->stub_group[link_section->id].stub_sec
2463 = _bfd_aarch64_create_stub_section (link_section, htab);
2464 return htab->stub_group[link_section->id].stub_sec;
2465 }
2466
2467
2468 /* Find or create a stub section in the stub group for an input
2469 section. */
2470
2471 static asection *
2472 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2473 struct elf_aarch64_link_hash_table *htab)
2474 {
2475 asection *link_sec = htab->stub_group[section->id].link_sec;
2476 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2477 }
2478
2479
2480 /* Add a new stub entry in the stub group associated with an input
2481 section to the stub hash. Not all fields of the new stub entry are
2482 initialised. */
2483
2484 static struct elf_aarch64_stub_hash_entry *
2485 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2486 asection *section,
2487 struct elf_aarch64_link_hash_table *htab)
2488 {
2489 asection *link_sec;
2490 asection *stub_sec;
2491 struct elf_aarch64_stub_hash_entry *stub_entry;
2492
2493 link_sec = htab->stub_group[section->id].link_sec;
2494 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2495
2496 /* Enter this entry into the linker stub hash table. */
2497 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2498 TRUE, FALSE);
2499 if (stub_entry == NULL)
2500 {
2501 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2502 section->owner, stub_name);
2503 return NULL;
2504 }
2505
2506 stub_entry->stub_sec = stub_sec;
2507 stub_entry->stub_offset = 0;
2508 stub_entry->id_sec = link_sec;
2509
2510 return stub_entry;
2511 }
2512
2513 /* Add a new stub entry in the final stub section to the stub hash.
2514 Not all fields of the new stub entry are initialised. */
2515
2516 static struct elf_aarch64_stub_hash_entry *
2517 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2518 asection *link_section,
2519 struct elf_aarch64_link_hash_table *htab)
2520 {
2521 asection *stub_sec;
2522 struct elf_aarch64_stub_hash_entry *stub_entry;
2523
2524 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2525 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2526 TRUE, FALSE);
2527 if (stub_entry == NULL)
2528 {
2529 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2530 return NULL;
2531 }
2532
2533 stub_entry->stub_sec = stub_sec;
2534 stub_entry->stub_offset = 0;
2535 stub_entry->id_sec = link_section;
2536
2537 return stub_entry;
2538 }
2539
2540
2541 static bfd_boolean
2542 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2543 void *in_arg ATTRIBUTE_UNUSED)
2544 {
2545 struct elf_aarch64_stub_hash_entry *stub_entry;
2546 asection *stub_sec;
2547 bfd *stub_bfd;
2548 bfd_byte *loc;
2549 bfd_vma sym_value;
2550 bfd_vma veneered_insn_loc;
2551 bfd_vma veneer_entry_loc;
2552 bfd_signed_vma branch_offset = 0;
2553 unsigned int template_size;
2554 const uint32_t *template;
2555 unsigned int i;
2556
2557 /* Massage our args to the form they really have. */
2558 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2559
2560 stub_sec = stub_entry->stub_sec;
2561
2562 /* Make a note of the offset within the stubs for this entry. */
2563 stub_entry->stub_offset = stub_sec->size;
2564 loc = stub_sec->contents + stub_entry->stub_offset;
2565
2566 stub_bfd = stub_sec->owner;
2567
2568 /* This is the address of the stub destination. */
2569 sym_value = (stub_entry->target_value
2570 + stub_entry->target_section->output_offset
2571 + stub_entry->target_section->output_section->vma);
2572
2573 if (stub_entry->stub_type == aarch64_stub_long_branch)
2574 {
2575 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2576 + stub_sec->output_offset);
2577
2578 /* See if we can relax the stub. */
2579 if (aarch64_valid_for_adrp_p (sym_value, place))
2580 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2581 }
2582
2583 switch (stub_entry->stub_type)
2584 {
2585 case aarch64_stub_adrp_branch:
2586 template = aarch64_adrp_branch_stub;
2587 template_size = sizeof (aarch64_adrp_branch_stub);
2588 break;
2589 case aarch64_stub_long_branch:
2590 template = aarch64_long_branch_stub;
2591 template_size = sizeof (aarch64_long_branch_stub);
2592 break;
2593 case aarch64_stub_erratum_835769_veneer:
2594 template = aarch64_erratum_835769_stub;
2595 template_size = sizeof (aarch64_erratum_835769_stub);
2596 break;
2597 case aarch64_stub_erratum_843419_veneer:
2598 template = aarch64_erratum_843419_stub;
2599 template_size = sizeof (aarch64_erratum_843419_stub);
2600 break;
2601 default:
2602 abort ();
2603 }
2604
2605 for (i = 0; i < (template_size / sizeof template[0]); i++)
2606 {
2607 bfd_putl32 (template[i], loc);
2608 loc += 4;
2609 }
2610
2611 template_size = (template_size + 7) & ~7;
2612 stub_sec->size += template_size;
2613
2614 switch (stub_entry->stub_type)
2615 {
2616 case aarch64_stub_adrp_branch:
2617 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2618 stub_entry->stub_offset, sym_value))
2619 /* The stub would not have been relaxed if the offset was out
2620 of range. */
2621 BFD_FAIL ();
2622
2623 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2624 stub_entry->stub_offset + 4, sym_value))
2625 BFD_FAIL ();
2626 break;
2627
2628 case aarch64_stub_long_branch:
2629 /* We want the value relative to the address 12 bytes back from the
2630 value itself. */
2631 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2632 stub_entry->stub_offset + 16, sym_value + 12))
2633 BFD_FAIL ();
2634 break;
2635
2636 case aarch64_stub_erratum_835769_veneer:
2637 veneered_insn_loc = stub_entry->target_section->output_section->vma
2638 + stub_entry->target_section->output_offset
2639 + stub_entry->target_value;
2640 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2641 + stub_entry->stub_sec->output_offset
2642 + stub_entry->stub_offset;
2643 branch_offset = veneered_insn_loc - veneer_entry_loc;
2644 branch_offset >>= 2;
2645 branch_offset &= 0x3ffffff;
2646 bfd_putl32 (stub_entry->veneered_insn,
2647 stub_sec->contents + stub_entry->stub_offset);
2648 bfd_putl32 (template[1] | branch_offset,
2649 stub_sec->contents + stub_entry->stub_offset + 4);
2650 break;
2651
2652 case aarch64_stub_erratum_843419_veneer:
2653 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2654 stub_entry->stub_offset + 4, sym_value + 4))
2655 BFD_FAIL ();
2656 break;
2657
2658 default:
2659 abort ();
2660 }
2661
2662 return TRUE;
2663 }
2664
2665 /* As above, but don't actually build the stub. Just bump offset so
2666 we know stub section sizes. */
2667
2668 static bfd_boolean
2669 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2670 void *in_arg ATTRIBUTE_UNUSED)
2671 {
2672 struct elf_aarch64_stub_hash_entry *stub_entry;
2673 int size;
2674
2675 /* Massage our args to the form they really have. */
2676 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2677
2678 switch (stub_entry->stub_type)
2679 {
2680 case aarch64_stub_adrp_branch:
2681 size = sizeof (aarch64_adrp_branch_stub);
2682 break;
2683 case aarch64_stub_long_branch:
2684 size = sizeof (aarch64_long_branch_stub);
2685 break;
2686 case aarch64_stub_erratum_835769_veneer:
2687 size = sizeof (aarch64_erratum_835769_stub);
2688 break;
2689 case aarch64_stub_erratum_843419_veneer:
2690 size = sizeof (aarch64_erratum_843419_stub);
2691 break;
2692 default:
2693 abort ();
2694 }
2695
2696 size = (size + 7) & ~7;
2697 stub_entry->stub_sec->size += size;
2698 return TRUE;
2699 }
2700
2701 /* External entry points for sizing and building linker stubs. */
2702
2703 /* Set up various things so that we can make a list of input sections
2704 for each output section included in the link. Returns -1 on error,
2705 0 when no stubs will be needed, and 1 on success. */
2706
2707 int
2708 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2709 struct bfd_link_info *info)
2710 {
2711 bfd *input_bfd;
2712 unsigned int bfd_count;
2713 int top_id, top_index;
2714 asection *section;
2715 asection **input_list, **list;
2716 bfd_size_type amt;
2717 struct elf_aarch64_link_hash_table *htab =
2718 elf_aarch64_hash_table (info);
2719
2720 if (!is_elf_hash_table (htab))
2721 return 0;
2722
2723 /* Count the number of input BFDs and find the top input section id. */
2724 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2725 input_bfd != NULL; input_bfd = input_bfd->link.next)
2726 {
2727 bfd_count += 1;
2728 for (section = input_bfd->sections;
2729 section != NULL; section = section->next)
2730 {
2731 if (top_id < section->id)
2732 top_id = section->id;
2733 }
2734 }
2735 htab->bfd_count = bfd_count;
2736
2737 amt = sizeof (struct map_stub) * (top_id + 1);
2738 htab->stub_group = bfd_zmalloc (amt);
2739 if (htab->stub_group == NULL)
2740 return -1;
2741
2742 /* We can't use output_bfd->section_count here to find the top output
2743 section index as some sections may have been removed, and
2744 _bfd_strip_section_from_output doesn't renumber the indices. */
2745 for (section = output_bfd->sections, top_index = 0;
2746 section != NULL; section = section->next)
2747 {
2748 if (top_index < section->index)
2749 top_index = section->index;
2750 }
2751
2752 htab->top_index = top_index;
2753 amt = sizeof (asection *) * (top_index + 1);
2754 input_list = bfd_malloc (amt);
2755 htab->input_list = input_list;
2756 if (input_list == NULL)
2757 return -1;
2758
2759 /* For sections we aren't interested in, mark their entries with a
2760 value we can check later. */
2761 list = input_list + top_index;
2762 do
2763 *list = bfd_abs_section_ptr;
2764 while (list-- != input_list);
2765
2766 for (section = output_bfd->sections;
2767 section != NULL; section = section->next)
2768 {
2769 if ((section->flags & SEC_CODE) != 0)
2770 input_list[section->index] = NULL;
2771 }
2772
2773 return 1;
2774 }
2775
2776 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2777 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2778
2779 /* The linker repeatedly calls this function for each input section,
2780 in the order that input sections are linked into output sections.
2781 Build lists of input sections to determine groupings between which
2782 we may insert linker stubs. */
2783
2784 void
2785 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2786 {
2787 struct elf_aarch64_link_hash_table *htab =
2788 elf_aarch64_hash_table (info);
2789
2790 if (isec->output_section->index <= htab->top_index)
2791 {
2792 asection **list = htab->input_list + isec->output_section->index;
2793
2794 if (*list != bfd_abs_section_ptr)
2795 {
2796 /* Steal the link_sec pointer for our list. */
2797 /* This happens to make the list in reverse order,
2798 which is what we want. */
2799 PREV_SEC (isec) = *list;
2800 *list = isec;
2801 }
2802 }
2803 }
2804
2805 /* See whether we can group stub sections together. Grouping stub
2806 sections may result in fewer stubs. More importantly, we need to
2807 put all .init* and .fini* stubs at the beginning of the .init or
2808 .fini output sections respectively, because glibc splits the
2809 _init and _fini functions into multiple parts. Putting a stub in
2810 the middle of a function is not a good idea. */
2811
2812 static void
2813 group_sections (struct elf_aarch64_link_hash_table *htab,
2814 bfd_size_type stub_group_size,
2815 bfd_boolean stubs_always_before_branch)
2816 {
2817 asection **list = htab->input_list + htab->top_index;
2818
2819 do
2820 {
2821 asection *tail = *list;
2822
2823 if (tail == bfd_abs_section_ptr)
2824 continue;
2825
2826 while (tail != NULL)
2827 {
2828 asection *curr;
2829 asection *prev;
2830 bfd_size_type total;
2831
2832 curr = tail;
2833 total = tail->size;
2834 while ((prev = PREV_SEC (curr)) != NULL
2835 && ((total += curr->output_offset - prev->output_offset)
2836 < stub_group_size))
2837 curr = prev;
2838
2839 /* OK, the size from the start of CURR to the end is less
2840 than stub_group_size and thus can be handled by one stub
2841 section. (Or the tail section is itself larger than
2842 stub_group_size, in which case we may be toast.)
2843 We should really be keeping track of the total size of
2844 stubs added here, as stubs contribute to the final output
2845 section size. */
2846 do
2847 {
2848 prev = PREV_SEC (tail);
2849 /* Set up this stub group. */
2850 htab->stub_group[tail->id].link_sec = curr;
2851 }
2852 while (tail != curr && (tail = prev) != NULL);
2853
2854 /* But wait, there's more! Input sections up to stub_group_size
2855 bytes before the stub section can be handled by it too. */
2856 if (!stubs_always_before_branch)
2857 {
2858 total = 0;
2859 while (prev != NULL
2860 && ((total += tail->output_offset - prev->output_offset)
2861 < stub_group_size))
2862 {
2863 tail = prev;
2864 prev = PREV_SEC (tail);
2865 htab->stub_group[tail->id].link_sec = curr;
2866 }
2867 }
2868 tail = prev;
2869 }
2870 }
2871 while (list-- != htab->input_list);
2872
2873 free (htab->input_list);
2874 }
2875
2876 #undef PREV_SEC
2877
2878 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2879
2880 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2881 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2882 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2883 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2884 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2885 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2886
2887 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2888 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2889 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2890 #define AARCH64_ZR 0x1f
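
/* Worked example (the encoding was computed by hand and is
   illustrative only): for insn = 0x9b031041, i.e.
   "madd x1, x2, x3, x4",
     AARCH64_MAC (insn)  -> true  (top byte 0x9b)
     AARCH64_RD (insn)   -> 1     (bits [4:0])
     AARCH64_RN (insn)   -> 2     (bits [9:5])
     AARCH64_RA (insn)   -> 4     (bits [14:10])
     AARCH64_RM (insn)   -> 3     (bits [20:16])
     AARCH64_OP31 (insn) -> 0     (bits [23:21]).  */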
2891
2892 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2893 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2894
2895 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2896 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2897 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2898 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2899 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2900 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2901 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2902 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2903 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2904 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2905 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2906 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2907 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2908 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2909 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2910 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2911 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2912 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2913
2914 /* Classify INSN if it is indeed a load/store instruction.
2915
2916 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2917
2918 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2919 is set equal to RT.
2920
2921 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2922
2923 */
2924
2925 static bfd_boolean
2926 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2927 bfd_boolean *pair, bfd_boolean *load)
2928 {
2929 uint32_t opcode;
2930 unsigned int r;
2931 uint32_t opc = 0;
2932 uint32_t v = 0;
2933 uint32_t opc_v = 0;
2934
2935 /* Bail out quickly if INSN doesn't fall into the load-store
2936 encoding space. */
2937 if (!AARCH64_LDST (insn))
2938 return FALSE;
2939
2940 *pair = FALSE;
2941 *load = FALSE;
2942 if (AARCH64_LDST_EX (insn))
2943 {
2944 *rt = AARCH64_RT (insn);
2945 *rt2 = *rt;
2946 if (AARCH64_BIT (insn, 21) == 1)
2947 {
2948 *pair = TRUE;
2949 *rt2 = AARCH64_RT2 (insn);
2950 }
2951 *load = AARCH64_LD (insn);
2952 return TRUE;
2953 }
2954 else if (AARCH64_LDST_NAP (insn)
2955 || AARCH64_LDSTP_PI (insn)
2956 || AARCH64_LDSTP_O (insn)
2957 || AARCH64_LDSTP_PRE (insn))
2958 {
2959 *pair = TRUE;
2960 *rt = AARCH64_RT (insn);
2961 *rt2 = AARCH64_RT2 (insn);
2962 *load = AARCH64_LD (insn);
2963 return TRUE;
2964 }
2965 else if (AARCH64_LDST_PCREL (insn)
2966 || AARCH64_LDST_UI (insn)
2967 || AARCH64_LDST_PIIMM (insn)
2968 || AARCH64_LDST_U (insn)
2969 || AARCH64_LDST_PREIMM (insn)
2970 || AARCH64_LDST_RO (insn)
2971 || AARCH64_LDST_UIMM (insn))
2972 {
2973 *rt = AARCH64_RT (insn);
2974 *rt2 = *rt;
2975 if (AARCH64_LDST_PCREL (insn))
2976 *load = TRUE;
2977 opc = AARCH64_BITS (insn, 22, 2);
2978 v = AARCH64_BIT (insn, 26);
2979 opc_v = opc | (v << 2);
2980 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2981 || opc_v == 5 || opc_v == 7);
2982 return TRUE;
2983 }
2984 else if (AARCH64_LDST_SIMD_M (insn)
2985 || AARCH64_LDST_SIMD_M_PI (insn))
2986 {
2987 *rt = AARCH64_RT (insn);
2988 *load = AARCH64_BIT (insn, 22);
2989 opcode = (insn >> 12) & 0xf;
2990 switch (opcode)
2991 {
2992 case 0:
2993 case 2:
2994 *rt2 = *rt + 3;
2995 break;
2996
2997 case 4:
2998 case 6:
2999 *rt2 = *rt + 2;
3000 break;
3001
3002 case 7:
3003 *rt2 = *rt;
3004 break;
3005
3006 case 8:
3007 case 10:
3008 *rt2 = *rt + 1;
3009 break;
3010
3011 default:
3012 return FALSE;
3013 }
3014 return TRUE;
3015 }
3016 else if (AARCH64_LDST_SIMD_S (insn)
3017 || AARCH64_LDST_SIMD_S_PI (insn))
3018 {
3019 *rt = AARCH64_RT (insn);
3020 r = (insn >> 21) & 1;
3021 *load = AARCH64_BIT (insn, 22);
3022 opcode = (insn >> 13) & 0x7;
3023 switch (opcode)
3024 {
3025 case 0:
3026 case 2:
3027 case 4:
3028 *rt2 = *rt + r;
3029 break;
3030
3031 case 1:
3032 case 3:
3033 case 5:
3034 *rt2 = *rt + (r == 0 ? 2 : 3);
3035 break;
3036
3037 case 6:
3038 *rt2 = *rt + r;
3039 break;
3040
3041 case 7:
3042 *rt2 = *rt + (r == 0 ? 2 : 3);
3043 break;
3044
3045 default:
3046 return FALSE;
3047 }
3048 return TRUE;
3049 }
3050
3051 return FALSE;
3052 }
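
/* Usage sketch for the classifier above; the instruction encodings
   were worked out by hand and are illustrative only, and the helper is
   compiled out.  */
#if 0
static void
aarch64_mem_op_p_sketch (void)
{
  unsigned int rt, rt2;
  bfd_boolean pair, load;

  /* "ldp x0, x1, [x2]": a load pair, rt = 0, rt2 = 1.  */
  BFD_ASSERT (aarch64_mem_op_p (0xa9400440, &rt, &rt2, &pair, &load)
              && pair && load && rt == 0 && rt2 == 1);

  /* "ldr x0, [x1]": a scalar load, rt == rt2 == 0.  */
  BFD_ASSERT (aarch64_mem_op_p (0xf9400020, &rt, &rt2, &pair, &load)
              && !pair && load && rt == 0 && rt2 == 0);

  /* "add x0, x0, #1" is not in the load/store encoding space.  */
  BFD_ASSERT (!aarch64_mem_op_p (0x91000400, &rt, &rt2, &pair, &load));
}
#endif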
3053
3054 /* Return TRUE if INSN is multiply-accumulate. */
3055
3056 static bfd_boolean
3057 aarch64_mlxl_p (uint32_t insn)
3058 {
3059 uint32_t op31 = AARCH64_OP31 (insn);
3060
3061 if (AARCH64_MAC (insn)
3062 && (op31 == 0 || op31 == 1 || op31 == 5)
3063 /* Exclude MUL instructions which are encoded as a multiple accumulate
3064 with RA = XZR. */
3065 && AARCH64_RA (insn) != AARCH64_ZR)
3066 return TRUE;
3067
3068 return FALSE;
3069 }
3070
3071 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3072 it is possible for a 64-bit multiply-accumulate instruction to generate an
3073 incorrect result. The details are quite complex and hard to
3074 determine statically, since branches in the code may exist in some
3075 circumstances, but all cases end with a memory (load, store, or
3076 prefetch) instruction followed immediately by the multiply-accumulate
3077 operation. We employ a linker patching technique, by moving the potentially
3078 affected multiply-accumulate instruction into a patch region and replacing
3079 the original instruction with a branch to the patch. This function checks
3080 if INSN_1 is the memory operation followed by a multiply-accumulate
3081 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3082 if INSN_1 and INSN_2 are safe. */
3083
3084 static bfd_boolean
3085 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3086 {
3087 uint32_t rt;
3088 uint32_t rt2;
3089 uint32_t rn;
3090 uint32_t rm;
3091 uint32_t ra;
3092 bfd_boolean pair;
3093 bfd_boolean load;
3094
3095 if (aarch64_mlxl_p (insn_2)
3096 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3097 {
3098 /* Any SIMD memory op is independent of the subsequent MLA
3099 by definition of the erratum. */
3100 if (AARCH64_BIT (insn_1, 26))
3101 return TRUE;
3102
3103 /* If not SIMD, check for integer memory ops and MLA relationship. */
3104 rn = AARCH64_RN (insn_2);
3105 ra = AARCH64_RA (insn_2);
3106 rm = AARCH64_RM (insn_2);
3107
3108 /* If this is a load and there's a true (RAW) dependency, we are safe
3109 and this is not an erratum sequence. */
3110 if (load &&
3111 (rt == rn || rt == rm || rt == ra
3112 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3113 return FALSE;
3114
3115 /* We conservatively put out stubs for all other cases (including
3116 writebacks). */
3117 return TRUE;
3118 }
3119
3120 return FALSE;
3121 }
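
/* Illustrative sequences (the register choices are arbitrary):

     ldr  x0, [x10]           <- memory op (INSN_1)
     madd x1, x2, x3, x4      <- 64-bit multiply-accumulate (INSN_2)

   is flagged, because the MADD does not read the loaded register x0,
   so there is no RAW dependency to rely on.  By contrast

     ldr  x2, [x10]
     madd x1, x2, x3, x4

   is left alone: the load writes x2, which the MADD then reads.  */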
3122
3123 /* Used to order a list of mapping symbols by address. */
3124
3125 static int
3126 elf_aarch64_compare_mapping (const void *a, const void *b)
3127 {
3128 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3129 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3130
3131 if (amap->vma > bmap->vma)
3132 return 1;
3133 else if (amap->vma < bmap->vma)
3134 return -1;
3135 else if (amap->type > bmap->type)
3136 /* Ensure results do not depend on the host qsort for objects with
3137 multiple mapping symbols at the same address by sorting on type
3138 after vma. */
3139 return 1;
3140 else if (amap->type < bmap->type)
3141 return -1;
3142 else
3143 return 0;
3144 }
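
/* For example (the offsets are made up), a section whose mapping
   symbols are
     $x at 0x0    (code)
     $d at 0x40   (literal pool)
     $x at 0x60   (code)
   sorts into three spans; the erratum scans below skip the 'd' span
   covering [0x40, 0x60).  */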
3145
3146
3147 static char *
3148 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3149 {
3150 char *stub_name = (char *) bfd_malloc
3151 (strlen ("__erratum_835769_veneer_") + 16);
3152 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3153 return stub_name;
3154 }
3155
3156 /* Scan for Cortex-A53 erratum 835769 sequence.
3157
3158 Return TRUE on success, FALSE on abnormal termination. */
3159
3160 static bfd_boolean
3161 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3162 struct bfd_link_info *info,
3163 unsigned int *num_fixes_p)
3164 {
3165 asection *section;
3166 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3167 unsigned int num_fixes = *num_fixes_p;
3168
3169 if (htab == NULL)
3170 return TRUE;
3171
3172 for (section = input_bfd->sections;
3173 section != NULL;
3174 section = section->next)
3175 {
3176 bfd_byte *contents = NULL;
3177 struct _aarch64_elf_section_data *sec_data;
3178 unsigned int span;
3179
3180 if (elf_section_type (section) != SHT_PROGBITS
3181 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3182 || (section->flags & SEC_EXCLUDE) != 0
3183 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3184 || (section->output_section == bfd_abs_section_ptr))
3185 continue;
3186
3187 if (elf_section_data (section)->this_hdr.contents != NULL)
3188 contents = elf_section_data (section)->this_hdr.contents;
3189 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3190 return FALSE;
3191
3192 sec_data = elf_aarch64_section_data (section);
3193
3194 qsort (sec_data->map, sec_data->mapcount,
3195 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3196
3197 for (span = 0; span < sec_data->mapcount; span++)
3198 {
3199 unsigned int span_start = sec_data->map[span].vma;
3200 unsigned int span_end = ((span == sec_data->mapcount - 1)
3201 ? sec_data->map[0].vma + section->size
3202 : sec_data->map[span + 1].vma);
3203 unsigned int i;
3204 char span_type = sec_data->map[span].type;
3205
3206 if (span_type == 'd')
3207 continue;
3208
3209 for (i = span_start; i + 4 < span_end; i += 4)
3210 {
3211 uint32_t insn_1 = bfd_getl32 (contents + i);
3212 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3213
3214 if (aarch64_erratum_sequence (insn_1, insn_2))
3215 {
3216 struct elf_aarch64_stub_hash_entry *stub_entry;
3217 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3218 if (! stub_name)
3219 return FALSE;
3220
3221 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3222 section,
3223 htab);
3224 if (! stub_entry)
3225 return FALSE;
3226
3227 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3228 stub_entry->target_section = section;
3229 stub_entry->target_value = i + 4;
3230 stub_entry->veneered_insn = insn_2;
3231 stub_entry->output_name = stub_name;
3232 num_fixes++;
3233 }
3234 }
3235 }
3236 if (elf_section_data (section)->this_hdr.contents == NULL)
3237 free (contents);
3238 }
3239
3240 *num_fixes_p = num_fixes;
3241
3242 return TRUE;
3243 }
3244
3245
3246 /* Test if instruction INSN is ADRP. */
3247
3248 static bfd_boolean
3249 _bfd_aarch64_adrp_p (uint32_t insn)
3250 {
3251 return ((insn & 0x9f000000) == 0x90000000);
3252 }
3253
3254
3255 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3256
3257 static bfd_boolean
3258 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3259 uint32_t insn_3)
3260 {
3261 uint32_t rt;
3262 uint32_t rt2;
3263 bfd_boolean pair;
3264 bfd_boolean load;
3265
3266 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3267 && (!pair
3268 || (pair && !load))
3269 && AARCH64_LDST_UIMM (insn_3)
3270 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3271 }
3272
3273
3274 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3275
3276 Return TRUE if section CONTENTS at offset I contains one of the
3277 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3278 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3279 instruction in the sequence.
3280 */
3281
3282 static bfd_boolean
3283 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3284 bfd_vma i, bfd_vma span_end,
3285 bfd_vma *p_veneer_i)
3286 {
3287 uint32_t insn_1 = bfd_getl32 (contents + i);
3288
3289 if (!_bfd_aarch64_adrp_p (insn_1))
3290 return FALSE;
3291
3292 if (span_end < i + 12)
3293 return FALSE;
3294
3295 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3296 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3297
3298 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3299 return FALSE;
3300
3301 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3302 {
3303 *p_veneer_i = i + 8;
3304 return TRUE;
3305 }
3306
3307 if (span_end < i + 16)
3308 return FALSE;
3309
3310 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3311
3312 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3313 {
3314 *p_veneer_i = i + 12;
3315 return TRUE;
3316 }
3317
3318 return FALSE;
3319 }
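
/* Illustrative shape of the sequences matched above (addresses,
   registers and the symbol are made up).  With the ADRP at a page
   offset of 0xff8 or 0xffc:

     0xffc:  adrp x0, sym                 <- INSN_1
             ldr  x1, [x2, #8]            <- INSN_2, a mem op that is
                                             not a load pair
            (add  x0, x0, #:lo12:sym         optional intervening insn)
             ldr  x3, [x0, #:lo12:sym]    <- final unsigned-immediate
                                             load/store based on the
                                             ADRP destination register

   The final load/store is the instruction that is later copied out
   into an e843419 veneer.  */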
3320
3321
3322 /* Resize all stub sections. */
3323
3324 static void
3325 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3326 {
3327 asection *section;
3328
3329 /* OK, we've added some stubs. Find out the new size of the
3330 stub sections. */
3331 for (section = htab->stub_bfd->sections;
3332 section != NULL; section = section->next)
3333 {
3334 /* Ignore non-stub sections. */
3335 if (!strstr (section->name, STUB_SUFFIX))
3336 continue;
3337 section->size = 0;
3338 }
3339
3340 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3341
3342 for (section = htab->stub_bfd->sections;
3343 section != NULL; section = section->next)
3344 {
3345 if (!strstr (section->name, STUB_SUFFIX))
3346 continue;
3347
3348 if (section->size)
3349 section->size += 4;
3350
3351 /* Ensure all stub sections have a size which is a multiple of
3352 4096. This is important in order to ensure that the insertion
3353 of stub sections does not in itself move existing code around
3354 in such a way that new errata sequences are created. */
3355 if (htab->fix_erratum_843419)
3356 if (section->size)
3357 section->size = BFD_ALIGN (section->size, 0x1000);
3358 }
3359 }
3360
3361
3362 /* Construct an erratum 843419 workaround stub name.
3363 */
3364
3365 static char *
3366 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3367 bfd_vma offset)
3368 {
3369 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3370 char *stub_name = bfd_malloc (len);
3371
3372 if (stub_name != NULL)
3373 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3374 input_section->owner->id,
3375 input_section->id,
3376 offset);
3377 return stub_name;
3378 }
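
/* Example of a resulting name (the ids and offset are made up):
   "e843419@0002_0000001a_ff8".  */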
3379
3380 /* Build a stub_entry structure describing an 843419 fixup.
3381
3382 The stub_entry constructed is populated with the bit pattern INSN
3383 of the instruction located at OFFSET within input SECTION.
3384
3385 Returns TRUE on success. */
3386
3387 static bfd_boolean
3388 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3389 bfd_vma adrp_offset,
3390 bfd_vma ldst_offset,
3391 asection *section,
3392 struct bfd_link_info *info)
3393 {
3394 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3395 char *stub_name;
3396 struct elf_aarch64_stub_hash_entry *stub_entry;
3397
3398 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3399 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3400 FALSE, FALSE);
3401 if (stub_entry)
3402 {
3403 free (stub_name);
3404 return TRUE;
3405 }
3406
3407 /* We always place an 843419 workaround veneer in the stub section
3408 attached to the input section in which an erratum sequence has
3409 been found. This ensures that later in the link process (in
3410 elfNN_aarch64_write_section) when we copy the veneered
3411 instruction from the input section into the stub section the
3412 copied instruction will have had any relocations applied to it.
3413 If we placed workaround veneers in any other stub section then we
3414 could not assume that all relocations have been processed on the
3415 corresponding input section at the point we output the stub
3416 section.
3417 */
3418
3419 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3420 if (stub_entry == NULL)
3421 {
3422 free (stub_name);
3423 return FALSE;
3424 }
3425
3426 stub_entry->adrp_offset = adrp_offset;
3427 stub_entry->target_value = ldst_offset;
3428 stub_entry->target_section = section;
3429 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3430 stub_entry->veneered_insn = insn;
3431 stub_entry->output_name = stub_name;
3432
3433 return TRUE;
3434 }
3435
3436
3437 /* Scan an input section looking for the signature of erratum 843419.
3438
3439 Scans input SECTION in INPUT_BFD looking for erratum 843419
3440 signatures, for each signature found a stub_entry is created
3441 describing the location of the erratum for subsequent fixup.
3442
3443 Return TRUE on successful scan, FALSE on failure to scan.
3444 */
3445
3446 static bfd_boolean
3447 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3448 struct bfd_link_info *info)
3449 {
3450 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3451
3452 if (htab == NULL)
3453 return TRUE;
3454
3455 if (elf_section_type (section) != SHT_PROGBITS
3456 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3457 || (section->flags & SEC_EXCLUDE) != 0
3458 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3459 || (section->output_section == bfd_abs_section_ptr))
3460 return TRUE;
3461
3462 do
3463 {
3464 bfd_byte *contents = NULL;
3465 struct _aarch64_elf_section_data *sec_data;
3466 unsigned int span;
3467
3468 if (elf_section_data (section)->this_hdr.contents != NULL)
3469 contents = elf_section_data (section)->this_hdr.contents;
3470 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3471 return FALSE;
3472
3473 sec_data = elf_aarch64_section_data (section);
3474
3475 qsort (sec_data->map, sec_data->mapcount,
3476 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3477
3478 for (span = 0; span < sec_data->mapcount; span++)
3479 {
3480 unsigned int span_start = sec_data->map[span].vma;
3481 unsigned int span_end = ((span == sec_data->mapcount - 1)
3482 ? sec_data->map[0].vma + section->size
3483 : sec_data->map[span + 1].vma);
3484 unsigned int i;
3485 char span_type = sec_data->map[span].type;
3486
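/* Spans of type 'd' were recorded from $d mapping symbols and contain
   data rather than instructions, so they cannot contain the erratum
   sequence and are skipped.  */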
3487 if (span_type == 'd')
3488 continue;
3489
3490 for (i = span_start; i + 8 < span_end; i += 4)
3491 {
3492 bfd_vma vma = (section->output_section->vma
3493 + section->output_offset
3494 + i);
3495 bfd_vma veneer_i;
3496
3497 if (_bfd_aarch64_erratum_843419_p
3498 (contents, vma, i, span_end, &veneer_i))
3499 {
3500 uint32_t insn = bfd_getl32 (contents + veneer_i);
3501
3502 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3503 section, info))
3504 return FALSE;
3505 }
3506 }
3507 }
3508
3509 if (elf_section_data (section)->this_hdr.contents == NULL)
3510 free (contents);
3511 }
3512 while (0);
3513
3514 return TRUE;
3515 }
3516
3517
3518 /* Determine and set the size of the stub section for a final link.
3519
3520 The basic idea here is to examine all the relocations looking for
3521 PC-relative calls to a target that is unreachable with a "bl"
3522 instruction. */
3523
3524 bfd_boolean
3525 elfNN_aarch64_size_stubs (bfd *output_bfd,
3526 bfd *stub_bfd,
3527 struct bfd_link_info *info,
3528 bfd_signed_vma group_size,
3529 asection * (*add_stub_section) (const char *,
3530 asection *),
3531 void (*layout_sections_again) (void))
3532 {
3533 bfd_size_type stub_group_size;
3534 bfd_boolean stubs_always_before_branch;
3535 bfd_boolean stub_changed = FALSE;
3536 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3537 unsigned int num_erratum_835769_fixes = 0;
3538
3539 /* Propagate mach to stub bfd, because it may not have been
3540 finalized when we created stub_bfd. */
3541 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3542 bfd_get_mach (output_bfd));
3543
3544 /* Stash our params away. */
3545 htab->stub_bfd = stub_bfd;
3546 htab->add_stub_section = add_stub_section;
3547 htab->layout_sections_again = layout_sections_again;
3548 stubs_always_before_branch = group_size < 0;
3549 if (group_size < 0)
3550 stub_group_size = -group_size;
3551 else
3552 stub_group_size = group_size;
3553
3554 if (stub_group_size == 1)
3555 {
3556 /* Default values. */
3557 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3558 stub_group_size = 127 * 1024 * 1024;
3559 }
3560
3561 group_sections (htab, stub_group_size, stubs_always_before_branch);
3562
3563 (*htab->layout_sections_again) ();
3564
3565 if (htab->fix_erratum_835769)
3566 {
3567 bfd *input_bfd;
3568
3569 for (input_bfd = info->input_bfds;
3570 input_bfd != NULL; input_bfd = input_bfd->link.next)
3571 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3572 &num_erratum_835769_fixes))
3573 return FALSE;
3574
3575 _bfd_aarch64_resize_stubs (htab);
3576 (*htab->layout_sections_again) ();
3577 }
3578
3579 if (htab->fix_erratum_843419)
3580 {
3581 bfd *input_bfd;
3582
3583 for (input_bfd = info->input_bfds;
3584 input_bfd != NULL;
3585 input_bfd = input_bfd->link.next)
3586 {
3587 asection *section;
3588
3589 for (section = input_bfd->sections;
3590 section != NULL;
3591 section = section->next)
3592 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3593 return FALSE;
3594 }
3595
3596 _bfd_aarch64_resize_stubs (htab);
3597 (*htab->layout_sections_again) ();
3598 }
3599
3600 while (1)
3601 {
3602 bfd *input_bfd;
3603
3604 for (input_bfd = info->input_bfds;
3605 input_bfd != NULL; input_bfd = input_bfd->link.next)
3606 {
3607 Elf_Internal_Shdr *symtab_hdr;
3608 asection *section;
3609 Elf_Internal_Sym *local_syms = NULL;
3610
3611 /* We'll need the symbol table in a second. */
3612 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3613 if (symtab_hdr->sh_info == 0)
3614 continue;
3615
3616 /* Walk over each section attached to the input bfd. */
3617 for (section = input_bfd->sections;
3618 section != NULL; section = section->next)
3619 {
3620 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3621
3622 /* If there aren't any relocs, then there's nothing more
3623 to do. */
3624 if ((section->flags & SEC_RELOC) == 0
3625 || section->reloc_count == 0
3626 || (section->flags & SEC_CODE) == 0)
3627 continue;
3628
3629 /* If this section is a link-once section that will be
3630 discarded, then don't create any stubs. */
3631 if (section->output_section == NULL
3632 || section->output_section->owner != output_bfd)
3633 continue;
3634
3635 /* Get the relocs. */
3636 internal_relocs
3637 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3638 NULL, info->keep_memory);
3639 if (internal_relocs == NULL)
3640 goto error_ret_free_local;
3641
3642 /* Now examine each relocation. */
3643 irela = internal_relocs;
3644 irelaend = irela + section->reloc_count;
3645 for (; irela < irelaend; irela++)
3646 {
3647 unsigned int r_type, r_indx;
3648 enum elf_aarch64_stub_type stub_type;
3649 struct elf_aarch64_stub_hash_entry *stub_entry;
3650 asection *sym_sec;
3651 bfd_vma sym_value;
3652 bfd_vma destination;
3653 struct elf_aarch64_link_hash_entry *hash;
3654 const char *sym_name;
3655 char *stub_name;
3656 const asection *id_sec;
3657 unsigned char st_type;
3658 bfd_size_type len;
3659
3660 r_type = ELFNN_R_TYPE (irela->r_info);
3661 r_indx = ELFNN_R_SYM (irela->r_info);
3662
3663 if (r_type >= (unsigned int) R_AARCH64_end)
3664 {
3665 bfd_set_error (bfd_error_bad_value);
3666 error_ret_free_internal:
3667 if (elf_section_data (section)->relocs == NULL)
3668 free (internal_relocs);
3669 goto error_ret_free_local;
3670 }
3671
3672 /* Only look for stubs on unconditional branch and
3673 branch and link instructions. */
3674 if (r_type != (unsigned int) AARCH64_R (CALL26)
3675 && r_type != (unsigned int) AARCH64_R (JUMP26))
3676 continue;
3677
3678 /* Now determine the call target, its name, value,
3679 section. */
3680 sym_sec = NULL;
3681 sym_value = 0;
3682 destination = 0;
3683 hash = NULL;
3684 sym_name = NULL;
3685 if (r_indx < symtab_hdr->sh_info)
3686 {
3687 /* It's a local symbol. */
3688 Elf_Internal_Sym *sym;
3689 Elf_Internal_Shdr *hdr;
3690
3691 if (local_syms == NULL)
3692 {
3693 local_syms
3694 = (Elf_Internal_Sym *) symtab_hdr->contents;
3695 if (local_syms == NULL)
3696 local_syms
3697 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3698 symtab_hdr->sh_info, 0,
3699 NULL, NULL, NULL);
3700 if (local_syms == NULL)
3701 goto error_ret_free_internal;
3702 }
3703
3704 sym = local_syms + r_indx;
3705 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3706 sym_sec = hdr->bfd_section;
3707 if (!sym_sec)
3708 /* This is an undefined symbol. It can never
3709 be resolved. */
3710 continue;
3711
3712 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3713 sym_value = sym->st_value;
3714 destination = (sym_value + irela->r_addend
3715 + sym_sec->output_offset
3716 + sym_sec->output_section->vma);
3717 st_type = ELF_ST_TYPE (sym->st_info);
3718 sym_name
3719 = bfd_elf_string_from_elf_section (input_bfd,
3720 symtab_hdr->sh_link,
3721 sym->st_name);
3722 }
3723 else
3724 {
3725 int e_indx;
3726
3727 e_indx = r_indx - symtab_hdr->sh_info;
3728 hash = ((struct elf_aarch64_link_hash_entry *)
3729 elf_sym_hashes (input_bfd)[e_indx]);
3730
3731 while (hash->root.root.type == bfd_link_hash_indirect
3732 || hash->root.root.type == bfd_link_hash_warning)
3733 hash = ((struct elf_aarch64_link_hash_entry *)
3734 hash->root.root.u.i.link);
3735
3736 if (hash->root.root.type == bfd_link_hash_defined
3737 || hash->root.root.type == bfd_link_hash_defweak)
3738 {
3739 struct elf_aarch64_link_hash_table *globals =
3740 elf_aarch64_hash_table (info);
3741 sym_sec = hash->root.root.u.def.section;
3742 sym_value = hash->root.root.u.def.value;
3743 /* For a destination in a shared library,
3744 use the PLT stub as target address to
3745 decide whether a branch stub is
3746 needed. */
3747 if (globals->root.splt != NULL && hash != NULL
3748 && hash->root.plt.offset != (bfd_vma) - 1)
3749 {
3750 sym_sec = globals->root.splt;
3751 sym_value = hash->root.plt.offset;
3752 if (sym_sec->output_section != NULL)
3753 destination = (sym_value
3754 + sym_sec->output_offset
3755 +
3756 sym_sec->output_section->vma);
3757 }
3758 else if (sym_sec->output_section != NULL)
3759 destination = (sym_value + irela->r_addend
3760 + sym_sec->output_offset
3761 + sym_sec->output_section->vma);
3762 }
3763 else if (hash->root.root.type == bfd_link_hash_undefined
3764 || (hash->root.root.type
3765 == bfd_link_hash_undefweak))
3766 {
3767 /* For a shared library, use the PLT stub as
3768 target address to decide whether a long
3769 branch stub is needed.
3770 For absolute code, undefined symbols cannot be handled, so they are skipped. */
3771 struct elf_aarch64_link_hash_table *globals =
3772 elf_aarch64_hash_table (info);
3773
3774 if (globals->root.splt != NULL && hash != NULL
3775 && hash->root.plt.offset != (bfd_vma) - 1)
3776 {
3777 sym_sec = globals->root.splt;
3778 sym_value = hash->root.plt.offset;
3779 if (sym_sec->output_section != NULL)
3780 destination = (sym_value
3781 + sym_sec->output_offset
3782 +
3783 sym_sec->output_section->vma);
3784 }
3785 else
3786 continue;
3787 }
3788 else
3789 {
3790 bfd_set_error (bfd_error_bad_value);
3791 goto error_ret_free_internal;
3792 }
3793 st_type = ELF_ST_TYPE (hash->root.type);
3794 sym_name = hash->root.root.root.string;
3795 }
3796
3797 /* Determine what (if any) linker stub is needed. */
3798 stub_type = aarch64_type_of_stub
3799 (info, section, irela, st_type, hash, destination);
3800 if (stub_type == aarch64_stub_none)
3801 continue;
3802
3803 /* Support for grouping stub sections. */
3804 id_sec = htab->stub_group[section->id].link_sec;
3805
3806 /* Get the name of this stub. */
3807 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3808 irela);
3809 if (!stub_name)
3810 goto error_ret_free_internal;
3811
3812 stub_entry =
3813 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3814 stub_name, FALSE, FALSE);
3815 if (stub_entry != NULL)
3816 {
3817 /* The proper stub has already been created. */
3818 free (stub_name);
3819 continue;
3820 }
3821
3822 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3823 (stub_name, section, htab);
3824 if (stub_entry == NULL)
3825 {
3826 free (stub_name);
3827 goto error_ret_free_internal;
3828 }
3829
3830 stub_entry->target_value = sym_value;
3831 stub_entry->target_section = sym_sec;
3832 stub_entry->stub_type = stub_type;
3833 stub_entry->h = hash;
3834 stub_entry->st_type = st_type;
3835
3836 if (sym_name == NULL)
3837 sym_name = "unnamed";
3838 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3839 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3840 if (stub_entry->output_name == NULL)
3841 {
3842 free (stub_name);
3843 goto error_ret_free_internal;
3844 }
3845
3846 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3847 sym_name);
3848
3849 stub_changed = TRUE;
3850 }
3851
3852 /* We're done with the internal relocs, free them. */
3853 if (elf_section_data (section)->relocs == NULL)
3854 free (internal_relocs);
3855 }
3856 }
3857
3858 if (!stub_changed)
3859 break;
3860
3861 _bfd_aarch64_resize_stubs (htab);
3862
3863 /* Ask the linker to do its stuff. */
3864 (*htab->layout_sections_again) ();
3865 stub_changed = FALSE;
3866 }
3867
3868 return TRUE;
3869
3870 error_ret_free_local:
3871 return FALSE;
3872 }
3873
3874 /* Build all the stubs associated with the current output file. The
3875 stubs are kept in a hash table attached to the main linker hash
3876 table. We also set up the .plt entries for statically linked PIC
3877 functions here. This function is called via aarch64_elf_finish in the
3878 linker. */
3879
3880 bfd_boolean
3881 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3882 {
3883 asection *stub_sec;
3884 struct bfd_hash_table *table;
3885 struct elf_aarch64_link_hash_table *htab;
3886
3887 htab = elf_aarch64_hash_table (info);
3888
3889 for (stub_sec = htab->stub_bfd->sections;
3890 stub_sec != NULL; stub_sec = stub_sec->next)
3891 {
3892 bfd_size_type size;
3893
3894 /* Ignore non-stub sections. */
3895 if (!strstr (stub_sec->name, STUB_SUFFIX))
3896 continue;
3897
3898 /* Allocate memory to hold the linker stubs. */
3899 size = stub_sec->size;
3900 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3901 if (stub_sec->contents == NULL && size != 0)
3902 return FALSE;
3903 stub_sec->size = 0;
3904
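/* Emit an unconditional branch at the start of the stub area: 0x14000000
   is the B opcode and size >> 2 is the section size in instructions, so
   the branch lands at the end of the section, presumably so that
   execution falling in from the preceding text skips the veneers.  */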
3905 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3906 stub_sec->size += 4;
3907 }
3908
3909 /* Build the stubs as directed by the stub hash table. */
3910 table = &htab->stub_hash_table;
3911 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3912
3913 return TRUE;
3914 }
3915
3916
3917 /* Add an entry to the code/data map for section SEC. */
3918
3919 static void
3920 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3921 {
3922 struct _aarch64_elf_section_data *sec_data =
3923 elf_aarch64_section_data (sec);
3924 unsigned int newidx;
3925
3926 if (sec_data->map == NULL)
3927 {
3928 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3929 sec_data->mapcount = 0;
3930 sec_data->mapsize = 1;
3931 }
3932
3933 newidx = sec_data->mapcount++;
3934
3935 if (sec_data->mapcount > sec_data->mapsize)
3936 {
3937 sec_data->mapsize *= 2;
3938 sec_data->map = bfd_realloc_or_free
3939 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3940 }
3941
3942 if (sec_data->map)
3943 {
3944 sec_data->map[newidx].vma = vma;
3945 sec_data->map[newidx].type = type;
3946 }
3947 }
3948
3949
3950 /* Initialise maps of insn/data for input BFDs. */
3951 void
3952 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3953 {
3954 Elf_Internal_Sym *isymbuf;
3955 Elf_Internal_Shdr *hdr;
3956 unsigned int i, localsyms;
3957
3958 /* Make sure that we are dealing with an AArch64 elf binary. */
3959 if (!is_aarch64_elf (abfd))
3960 return;
3961
3962 if ((abfd->flags & DYNAMIC) != 0)
3963 return;
3964
3965 hdr = &elf_symtab_hdr (abfd);
3966 localsyms = hdr->sh_info;
3967
3968 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3969 should contain the number of local symbols, which should come before any
3970 global symbols. Mapping symbols are always local. */
3971 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3972
3973 /* No internal symbols read? Skip this BFD. */
3974 if (isymbuf == NULL)
3975 return;
3976
3977 for (i = 0; i < localsyms; i++)
3978 {
3979 Elf_Internal_Sym *isym = &isymbuf[i];
3980 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3981 const char *name;
3982
3983 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3984 {
3985 name = bfd_elf_string_from_elf_section (abfd,
3986 hdr->sh_link,
3987 isym->st_name);
3988
3989 if (bfd_is_aarch64_special_symbol_name
3990 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3991 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3992 }
3993 }
3994 }
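/* The map entries recorded above follow the AArch64 ELF mapping symbol
   convention: "$x" marks the start of a run of A64 instructions and "$d"
   the start of a run of data; only the character after the '$' (name[1])
   is stored as the span type.  */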
3995
3996 /* Set option values needed during linking. */
3997 void
3998 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3999 struct bfd_link_info *link_info,
4000 int no_enum_warn,
4001 int no_wchar_warn, int pic_veneer,
4002 int fix_erratum_835769,
4003 int fix_erratum_843419)
4004 {
4005 struct elf_aarch64_link_hash_table *globals;
4006
4007 globals = elf_aarch64_hash_table (link_info);
4008 globals->pic_veneer = pic_veneer;
4009 globals->fix_erratum_835769 = fix_erratum_835769;
4010 globals->fix_erratum_843419 = fix_erratum_843419;
4011 globals->fix_erratum_843419_adr = TRUE;
4012
4013 BFD_ASSERT (is_aarch64_elf (output_bfd));
4014 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4015 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4016 }
4017
4018 static bfd_vma
4019 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4020 struct elf_aarch64_link_hash_table
4021 *globals, struct bfd_link_info *info,
4022 bfd_vma value, bfd *output_bfd,
4023 bfd_boolean *unresolved_reloc_p)
4024 {
4025 bfd_vma off = (bfd_vma) - 1;
4026 asection *basegot = globals->root.sgot;
4027 bfd_boolean dyn = globals->root.dynamic_sections_created;
4028
4029 if (h != NULL)
4030 {
4031 BFD_ASSERT (basegot != NULL);
4032 off = h->got.offset;
4033 BFD_ASSERT (off != (bfd_vma) - 1);
4034 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4035 || (info->shared
4036 && SYMBOL_REFERENCES_LOCAL (info, h))
4037 || (ELF_ST_VISIBILITY (h->other)
4038 && h->root.type == bfd_link_hash_undefweak))
4039 {
4040 /* This is actually a static link, or it is a -Bsymbolic link
4041 and the symbol is defined locally. We must initialize this
4042 entry in the global offset table. Since the offset must
4043 always be a multiple of 8 (4 in the case of ILP32), we use
4044 the least significant bit to record whether we have
4045 initialized it already.
4046 When doing a dynamic link, we create a .rel(a).got relocation
4047 entry to initialize the value. This is done in the
4048 finish_dynamic_symbol routine. */
4049 if ((off & 1) != 0)
4050 off &= ~1;
4051 else
4052 {
4053 bfd_put_NN (output_bfd, value, basegot->contents + off);
4054 h->got.offset |= 1;
4055 }
4056 }
4057 else
4058 *unresolved_reloc_p = FALSE;
4059
4060 off = off + basegot->output_section->vma + basegot->output_offset;
4061 }
4062
4063 return off;
4064 }
4065
4066 /* Change R_TYPE to a more efficient access model where possible,
4067 and return the new reloc type. */
4068
4069 static bfd_reloc_code_real_type
4070 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4071 struct elf_link_hash_entry *h)
4072 {
4073 bfd_boolean is_local = h == NULL;
4074
4075 switch (r_type)
4076 {
4077 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4078 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4079 return (is_local
4080 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4081 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4082
4083 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4084 return (is_local
4085 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4086 : r_type);
4087
4088 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4089 return (is_local
4090 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4091 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4092
4093 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4094 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4095 return (is_local
4096 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4097 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4098
4099 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4100 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4101
4102 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4103 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4104
4105 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4106 return r_type;
4107
4108 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4109 return (is_local
4110 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4111 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4112
4113 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4114 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4115 /* Instructions with these relocations will become NOPs. */
4116 return BFD_RELOC_AARCH64_NONE;
4117
4118 default:
4119 break;
4120 }
4121
4122 return r_type;
4123 }
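/* For example, for a symbol that resolves locally the small-model GD pair
   BFD_RELOC_AARCH64_TLSGD_{ADR_PAGE21, ADD_LO12_NC} relaxes to the LE pair
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_{G1, G0_NC}, while for other symbols
   it relaxes to the IE pair
   BFD_RELOC_AARCH64_TLSIE_{ADR_GOTTPREL_PAGE21, LDNN_GOTTPREL_LO12_NC}.  */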
4124
4125 static unsigned int
4126 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4127 {
4128 switch (r_type)
4129 {
4130 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4131 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4132 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4133 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4134 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4135 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4136 return GOT_NORMAL;
4137
4138 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4139 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4140 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4141 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4142 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4143 return GOT_TLS_GD;
4144
4145 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4146 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4147 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4148 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4149 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4150 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4151 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4152 return GOT_TLSDESC_GD;
4153
4154 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4155 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4156 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4157 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4158 return GOT_TLS_IE;
4159
4160 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4161 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4162 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4163 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4164 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4165 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4166 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4167 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4168 return GOT_UNKNOWN;
4169
4170 default:
4171 break;
4172 }
4173 return GOT_UNKNOWN;
4174 }
4175
4176 static bfd_boolean
4177 aarch64_can_relax_tls (bfd *input_bfd,
4178 struct bfd_link_info *info,
4179 bfd_reloc_code_real_type r_type,
4180 struct elf_link_hash_entry *h,
4181 unsigned long r_symndx)
4182 {
4183 unsigned int symbol_got_type;
4184 unsigned int reloc_got_type;
4185
4186 if (! IS_AARCH64_TLS_RELOC (r_type))
4187 return FALSE;
4188
4189 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4190 reloc_got_type = aarch64_reloc_got_type (r_type);
4191
4192 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4193 return TRUE;
4194
4195 if (info->shared)
4196 return FALSE;
4197
4198 if (h && h->root.type == bfd_link_hash_undefweak)
4199 return FALSE;
4200
4201 return TRUE;
4202 }
4203
4204 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4205 enumerator. */
4206
4207 static bfd_reloc_code_real_type
4208 aarch64_tls_transition (bfd *input_bfd,
4209 struct bfd_link_info *info,
4210 unsigned int r_type,
4211 struct elf_link_hash_entry *h,
4212 unsigned long r_symndx)
4213 {
4214 bfd_reloc_code_real_type bfd_r_type
4215 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4216
4217 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4218 return bfd_r_type;
4219
4220 return aarch64_tls_transition_without_check (bfd_r_type, h);
4221 }
4222
4223 /* Return the base VMA address which should be subtracted from real addresses
4224 when resolving R_AARCH64_TLS_DTPREL relocations. */
4225
4226 static bfd_vma
4227 dtpoff_base (struct bfd_link_info *info)
4228 {
4229 /* If tls_sec is NULL, we should have signalled an error already. */
4230 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4231 return elf_hash_table (info)->tls_sec->vma;
4232 }
4233
4234 /* Return the base VMA address which should be subtracted from real addresses
4235 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4236
4237 static bfd_vma
4238 tpoff_base (struct bfd_link_info *info)
4239 {
4240 struct elf_link_hash_table *htab = elf_hash_table (info);
4241
4242 /* If tls_sec is NULL, we should have signalled an error already. */
4243 BFD_ASSERT (htab->tls_sec != NULL);
4244
4245 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4246 htab->tls_sec->alignment_power);
4247 return htab->tls_sec->vma - base;
4248 }
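/* Worked example with assumed values: with TCB_SIZE equal to 16 and a
   16-byte aligned TLS segment at vma 0x20000, tpoff_base returns
   0x20000 - 16 = 0x1fff0, so a TLS variable at vma 0x20008 resolves to
   the thread-pointer offset 0x20008 - 0x1fff0 = 0x18.  */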
4249
4250 static bfd_vma *
4251 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4252 unsigned long r_symndx)
4253 {
4254 /* Return a pointer to the stored GOT offset for the symbol
4255 referred to by H, or for the local symbol R_SYMNDX when H is NULL. */
4256 if (h != NULL)
4257 return &h->got.offset;
4258 else
4259 {
4260 /* local symbol */
4261 struct elf_aarch64_local_symbol *l;
4262
4263 l = elf_aarch64_locals (input_bfd);
4264 return &l[r_symndx].got_offset;
4265 }
4266 }
4267
4268 static void
4269 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4270 unsigned long r_symndx)
4271 {
4272 bfd_vma *p;
4273 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4274 *p |= 1;
4275 }
4276
4277 static int
4278 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4279 unsigned long r_symndx)
4280 {
4281 bfd_vma value;
4282 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4283 return value & 1;
4284 }
4285
4286 static bfd_vma
4287 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4288 unsigned long r_symndx)
4289 {
4290 bfd_vma value;
4291 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4292 value &= ~1;
4293 return value;
4294 }
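/* The helpers above use the least significant bit of the stored offset as
   an "entry already initialised" flag; GOT offsets are always a multiple
   of the entry size, so the bit is otherwise unused.  */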
4295
4296 static bfd_vma *
4297 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4298 unsigned long r_symndx)
4299 {
4300 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
4301 referred to by H, or for the local symbol R_SYMNDX when H is NULL. */
4302 if (h != NULL)
4303 {
4304 struct elf_aarch64_link_hash_entry *eh;
4305 eh = (struct elf_aarch64_link_hash_entry *) h;
4306 return &eh->tlsdesc_got_jump_table_offset;
4307 }
4308 else
4309 {
4310 /* local symbol */
4311 struct elf_aarch64_local_symbol *l;
4312
4313 l = elf_aarch64_locals (input_bfd);
4314 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4315 }
4316 }
4317
4318 static void
4319 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4320 unsigned long r_symndx)
4321 {
4322 bfd_vma *p;
4323 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4324 *p |= 1;
4325 }
4326
4327 static int
4328 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4329 struct elf_link_hash_entry *h,
4330 unsigned long r_symndx)
4331 {
4332 bfd_vma value;
4333 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4334 return value & 1;
4335 }
4336
4337 static bfd_vma
4338 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4339 unsigned long r_symndx)
4340 {
4341 bfd_vma value;
4342 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4343 value &= ~1;
4344 return value;
4345 }
4346
4347 /* Data for make_branch_to_erratum_835769_stub(). */
4348
4349 struct erratum_835769_branch_to_stub_data
4350 {
4351 struct bfd_link_info *info;
4352 asection *output_section;
4353 bfd_byte *contents;
4354 };
4355
4356 /* Helper to insert branches to erratum 835769 stubs in the right
4357 places for a particular section. */
4358
4359 static bfd_boolean
4360 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4361 void *in_arg)
4362 {
4363 struct elf_aarch64_stub_hash_entry *stub_entry;
4364 struct erratum_835769_branch_to_stub_data *data;
4365 bfd_byte *contents;
4366 unsigned long branch_insn = 0;
4367 bfd_vma veneered_insn_loc, veneer_entry_loc;
4368 bfd_signed_vma branch_offset;
4369 unsigned int target;
4370 bfd *abfd;
4371
4372 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4373 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4374
4375 if (stub_entry->target_section != data->output_section
4376 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4377 return TRUE;
4378
4379 contents = data->contents;
4380 veneered_insn_loc = stub_entry->target_section->output_section->vma
4381 + stub_entry->target_section->output_offset
4382 + stub_entry->target_value;
4383 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4384 + stub_entry->stub_sec->output_offset
4385 + stub_entry->stub_offset;
4386 branch_offset = veneer_entry_loc - veneered_insn_loc;
4387
4388 abfd = stub_entry->target_section->owner;
4389 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4390 (*_bfd_error_handler)
4391 (_("%B: error: Erratum 835769 stub out "
4392 "of range (input file too large)"), abfd);
4393
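/* Overwrite the veneered instruction with an unconditional branch to the
   veneer: 0x14000000 is the B opcode and the low 26 bits hold the signed
   branch offset in words (bytes >> 2).  */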
4394 target = stub_entry->target_value;
4395 branch_insn = 0x14000000;
4396 branch_offset >>= 2;
4397 branch_offset &= 0x3ffffff;
4398 branch_insn |= branch_offset;
4399 bfd_putl32 (branch_insn, &contents[target]);
4400
4401 return TRUE;
4402 }
4403
4404
4405 static bfd_boolean
4406 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4407 void *in_arg)
4408 {
4409 struct elf_aarch64_stub_hash_entry *stub_entry
4410 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4411 struct erratum_835769_branch_to_stub_data *data
4412 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4413 struct bfd_link_info *info;
4414 struct elf_aarch64_link_hash_table *htab;
4415 bfd_byte *contents;
4416 asection *section;
4417 bfd *abfd;
4418 bfd_vma place;
4419 uint32_t insn;
4420
4421 info = data->info;
4422 contents = data->contents;
4423 section = data->output_section;
4424
4425 htab = elf_aarch64_hash_table (info);
4426
4427 if (stub_entry->target_section != section
4428 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4429 return TRUE;
4430
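/* Copy the (already relocated) veneered load/store instruction into its
   slot in the stub section; see the placement comment in
   _bfd_aarch64_erratum_843419_fixup.  */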
4431 insn = bfd_getl32 (contents + stub_entry->target_value);
4432 bfd_putl32 (insn,
4433 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4434
4435 place = (section->output_section->vma + section->output_offset
4436 + stub_entry->adrp_offset);
4437 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4438
4439 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4440 abort ();
4441
4442 bfd_signed_vma imm =
4443 (_bfd_aarch64_sign_extend
4444 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4445 - (place & 0xfff));
4446
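/* Two workarounds are possible: if the required immediate fits the ADR
   range, rewrite the offending ADRP in place as an ADR keeping its
   destination register; otherwise overwrite the veneered load/store with
   a branch to the out-of-line veneer copied above.  */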
4447 if (htab->fix_erratum_843419_adr
4448 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4449 {
4450 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4451 | AARCH64_RT (insn));
4452 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4453 }
4454 else
4455 {
4456 bfd_vma veneered_insn_loc;
4457 bfd_vma veneer_entry_loc;
4458 bfd_signed_vma branch_offset;
4459 uint32_t branch_insn;
4460
4461 veneered_insn_loc = stub_entry->target_section->output_section->vma
4462 + stub_entry->target_section->output_offset
4463 + stub_entry->target_value;
4464 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4465 + stub_entry->stub_sec->output_offset
4466 + stub_entry->stub_offset;
4467 branch_offset = veneer_entry_loc - veneered_insn_loc;
4468
4469 abfd = stub_entry->target_section->owner;
4470 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4471 (*_bfd_error_handler)
4472 (_("%B: error: Erratum 843419 stub out "
4473 "of range (input file too large)"), abfd);
4474
4475 branch_insn = 0x14000000;
4476 branch_offset >>= 2;
4477 branch_offset &= 0x3ffffff;
4478 branch_insn |= branch_offset;
4479 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4480 }
4481 return TRUE;
4482 }
4483
4484
4485 static bfd_boolean
4486 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4487 struct bfd_link_info *link_info,
4488 asection *sec,
4489 bfd_byte *contents)
4490
4491 {
4492 struct elf_aarch64_link_hash_table *globals =
4493 elf_aarch64_hash_table (link_info);
4494
4495 if (globals == NULL)
4496 return FALSE;
4497
4498 /* Fix code to point to erratum 835769 stubs. */
4499 if (globals->fix_erratum_835769)
4500 {
4501 struct erratum_835769_branch_to_stub_data data;
4502
4503 data.info = link_info;
4504 data.output_section = sec;
4505 data.contents = contents;
4506 bfd_hash_traverse (&globals->stub_hash_table,
4507 make_branch_to_erratum_835769_stub, &data);
4508 }
4509
4510 if (globals->fix_erratum_843419)
4511 {
4512 struct erratum_835769_branch_to_stub_data data;
4513
4514 data.info = link_info;
4515 data.output_section = sec;
4516 data.contents = contents;
4517 bfd_hash_traverse (&globals->stub_hash_table,
4518 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4519 }
4520
4521 return FALSE;
4522 }
4523
4524 /* Perform a relocation as part of a final link. */
4525 static bfd_reloc_status_type
4526 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4527 bfd *input_bfd,
4528 bfd *output_bfd,
4529 asection *input_section,
4530 bfd_byte *contents,
4531 Elf_Internal_Rela *rel,
4532 bfd_vma value,
4533 struct bfd_link_info *info,
4534 asection *sym_sec,
4535 struct elf_link_hash_entry *h,
4536 bfd_boolean *unresolved_reloc_p,
4537 bfd_boolean save_addend,
4538 bfd_vma *saved_addend,
4539 Elf_Internal_Sym *sym)
4540 {
4541 Elf_Internal_Shdr *symtab_hdr;
4542 unsigned int r_type = howto->type;
4543 bfd_reloc_code_real_type bfd_r_type
4544 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4545 bfd_reloc_code_real_type new_bfd_r_type;
4546 unsigned long r_symndx;
4547 bfd_byte *hit_data = contents + rel->r_offset;
4548 bfd_vma place, off;
4549 bfd_signed_vma signed_addend;
4550 struct elf_aarch64_link_hash_table *globals;
4551 bfd_boolean weak_undef_p;
4552 asection *base_got;
4553
4554 globals = elf_aarch64_hash_table (info);
4555
4556 symtab_hdr = &elf_symtab_hdr (input_bfd);
4557
4558 BFD_ASSERT (is_aarch64_elf (input_bfd));
4559
4560 r_symndx = ELFNN_R_SYM (rel->r_info);
4561
4562 /* It is possible to have linker relaxations on some TLS access
4563 models. Update our information here. */
4564 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4565 if (new_bfd_r_type != bfd_r_type)
4566 {
4567 bfd_r_type = new_bfd_r_type;
4568 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4569 BFD_ASSERT (howto != NULL);
4570 r_type = howto->type;
4571 }
4572
4573 place = input_section->output_section->vma
4574 + input_section->output_offset + rel->r_offset;
4575
4576 /* Get addend, accumulating the addend for consecutive relocs
4577 which refer to the same offset. */
4578 signed_addend = saved_addend ? *saved_addend : 0;
4579 signed_addend += rel->r_addend;
4580
4581 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4582 : bfd_is_und_section (sym_sec));
4583
4584 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4585 it here if it is defined in a non-shared object. */
4586 if (h != NULL
4587 && h->type == STT_GNU_IFUNC
4588 && h->def_regular)
4589 {
4590 asection *plt;
4591 const char *name;
4592 bfd_vma addend = 0;
4593
4594 if ((input_section->flags & SEC_ALLOC) == 0
4595 || h->plt.offset == (bfd_vma) -1)
4596 abort ();
4597
4598 /* STT_GNU_IFUNC symbol must go through PLT. */
4599 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4600 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4601
4602 switch (bfd_r_type)
4603 {
4604 default:
4605 if (h->root.root.string)
4606 name = h->root.root.string;
4607 else
4608 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4609 NULL);
4610 (*_bfd_error_handler)
4611 (_("%B: relocation %s against STT_GNU_IFUNC "
4612 "symbol `%s' isn't handled by %s"), input_bfd,
4613 howto->name, name, __FUNCTION__);
4614 bfd_set_error (bfd_error_bad_value);
4615 return FALSE;
4616
4617 case BFD_RELOC_AARCH64_NN:
4618 if (rel->r_addend != 0)
4619 {
4620 if (h->root.root.string)
4621 name = h->root.root.string;
4622 else
4623 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4624 sym, NULL);
4625 (*_bfd_error_handler)
4626 (_("%B: relocation %s against STT_GNU_IFUNC "
4627 "symbol `%s' has non-zero addend: %d"),
4628 input_bfd, howto->name, name, rel->r_addend);
4629 bfd_set_error (bfd_error_bad_value);
4630 return FALSE;
4631 }
4632
4633 /* Generate dynamic relocation only when there is a
4634 non-GOT reference in a shared object. */
4635 if (info->shared && h->non_got_ref)
4636 {
4637 Elf_Internal_Rela outrel;
4638 asection *sreloc;
4639
4640 /* Need a dynamic relocation to get the real function
4641 address. */
4642 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4643 info,
4644 input_section,
4645 rel->r_offset);
4646 if (outrel.r_offset == (bfd_vma) -1
4647 || outrel.r_offset == (bfd_vma) -2)
4648 abort ();
4649
4650 outrel.r_offset += (input_section->output_section->vma
4651 + input_section->output_offset);
4652
4653 if (h->dynindx == -1
4654 || h->forced_local
4655 || info->executable)
4656 {
4657 /* This symbol is resolved locally. */
4658 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4659 outrel.r_addend = (h->root.u.def.value
4660 + h->root.u.def.section->output_section->vma
4661 + h->root.u.def.section->output_offset);
4662 }
4663 else
4664 {
4665 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4666 outrel.r_addend = 0;
4667 }
4668
4669 sreloc = globals->root.irelifunc;
4670 elf_append_rela (output_bfd, sreloc, &outrel);
4671
4672 /* If this reloc is against an external symbol, we
4673 do not want to fiddle with the addend. Otherwise,
4674 we need to include the symbol value so that it
4675 becomes an addend for the dynamic reloc. For an
4676 internal symbol, we have updated addend. */
4677 return bfd_reloc_ok;
4678 }
4679 /* FALLTHROUGH */
4680 case BFD_RELOC_AARCH64_CALL26:
4681 case BFD_RELOC_AARCH64_JUMP26:
4682 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4683 signed_addend,
4684 weak_undef_p);
4685 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4686 howto, value);
4687 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4688 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4689 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4690 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4691 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4692 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4693 base_got = globals->root.sgot;
4694 off = h->got.offset;
4695
4696 if (base_got == NULL)
4697 abort ();
4698
4699 if (off == (bfd_vma) -1)
4700 {
4701 bfd_vma plt_index;
4702
4703 /* We can't use h->got.offset here to save state, or
4704 even just remember the offset, as finish_dynamic_symbol
4705 would use that as offset into .got. */
4706
4707 if (globals->root.splt != NULL)
4708 {
4709 plt_index = ((h->plt.offset - globals->plt_header_size) /
4710 globals->plt_entry_size);
4711 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4712 base_got = globals->root.sgotplt;
4713 }
4714 else
4715 {
4716 plt_index = h->plt.offset / globals->plt_entry_size;
4717 off = plt_index * GOT_ENTRY_SIZE;
4718 base_got = globals->root.igotplt;
4719 }
4720
4721 if (h->dynindx == -1
4722 || h->forced_local
4723 || info->symbolic)
4724 {
4725 /* This references the local definition. We must
4726 initialize this entry in the global offset table.
4727 Since the offset must always be a multiple of 8,
4728 we use the least significant bit to record
4729 whether we have initialized it already.
4730
4731 When doing a dynamic link, we create a .rela.got
4732 relocation entry to initialize the value. This
4733 is done in the finish_dynamic_symbol routine. */
4734 if ((off & 1) != 0)
4735 off &= ~1;
4736 else
4737 {
4738 bfd_put_NN (output_bfd, value,
4739 base_got->contents + off);
4740 /* Note that this is harmless as -1 | 1 still is -1. */
4741 h->got.offset |= 1;
4742 }
4743 }
4744 value = (base_got->output_section->vma
4745 + base_got->output_offset + off);
4746 }
4747 else
4748 value = aarch64_calculate_got_entry_vma (h, globals, info,
4749 value, output_bfd,
4750 unresolved_reloc_p);
4751 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4752 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4753 addend = (globals->root.sgot->output_section->vma
4754 + globals->root.sgot->output_offset);
4755 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4756 addend, weak_undef_p);
4757 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4758 case BFD_RELOC_AARCH64_ADD_LO12:
4759 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4760 break;
4761 }
4762 }
4763
4764 switch (bfd_r_type)
4765 {
4766 case BFD_RELOC_AARCH64_NONE:
4767 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4768 *unresolved_reloc_p = FALSE;
4769 return bfd_reloc_ok;
4770
4771 case BFD_RELOC_AARCH64_NN:
4772
4773 /* When generating a shared object or relocatable executable, these
4774 relocations are copied into the output file to be resolved at
4775 run time. */
4776 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4777 && (input_section->flags & SEC_ALLOC)
4778 && (h == NULL
4779 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4780 || h->root.type != bfd_link_hash_undefweak))
4781 {
4782 Elf_Internal_Rela outrel;
4783 bfd_byte *loc;
4784 bfd_boolean skip, relocate;
4785 asection *sreloc;
4786
4787 *unresolved_reloc_p = FALSE;
4788
4789 skip = FALSE;
4790 relocate = FALSE;
4791
4792 outrel.r_addend = signed_addend;
4793 outrel.r_offset =
4794 _bfd_elf_section_offset (output_bfd, info, input_section,
4795 rel->r_offset);
4796 if (outrel.r_offset == (bfd_vma) - 1)
4797 skip = TRUE;
4798 else if (outrel.r_offset == (bfd_vma) - 2)
4799 {
4800 skip = TRUE;
4801 relocate = TRUE;
4802 }
4803
4804 outrel.r_offset += (input_section->output_section->vma
4805 + input_section->output_offset);
4806
4807 if (skip)
4808 memset (&outrel, 0, sizeof outrel);
4809 else if (h != NULL
4810 && h->dynindx != -1
4811 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4812 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4813 else
4814 {
4815 int symbol;
4816
4817 /* On SVR4-ish systems, the dynamic loader cannot
4818 relocate the text and data segments independently,
4819 so the symbol does not matter. */
4820 symbol = 0;
4821 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4822 outrel.r_addend += value;
4823 }
4824
4825 sreloc = elf_section_data (input_section)->sreloc;
4826 if (sreloc == NULL || sreloc->contents == NULL)
4827 return bfd_reloc_notsupported;
4828
4829 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4830 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4831
4832 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4833 {
4834 /* Sanity check that we have previously allocated
4835 sufficient space in the relocation section for the
4836 number of relocations we actually want to emit. */
4837 abort ();
4838 }
4839
4840 /* If this reloc is against an external symbol, we do not want to
4841 fiddle with the addend. Otherwise, we need to include the symbol
4842 value so that it becomes an addend for the dynamic reloc. */
4843 if (!relocate)
4844 return bfd_reloc_ok;
4845
4846 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4847 contents, rel->r_offset, value,
4848 signed_addend);
4849 }
4850 else
4851 value += signed_addend;
4852 break;
4853
4854 case BFD_RELOC_AARCH64_CALL26:
4855 case BFD_RELOC_AARCH64_JUMP26:
4856 {
4857 asection *splt = globals->root.splt;
4858 bfd_boolean via_plt_p =
4859 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4860
4861 /* A call to an undefined weak symbol is converted to a jump to
4862 the next instruction unless a PLT entry will be created.
4863 The jump to the next instruction is optimized as a NOP.
4864 Do the same for local undefined symbols. */
4865 if (weak_undef_p && ! via_plt_p)
4866 {
4867 bfd_putl32 (INSN_NOP, hit_data);
4868 return bfd_reloc_ok;
4869 }
4870
4871 /* If the call goes through a PLT entry, make sure to
4872 check the distance to the right destination address. */
4873 if (via_plt_p)
4874 {
4875 value = (splt->output_section->vma
4876 + splt->output_offset + h->plt.offset);
4877 *unresolved_reloc_p = FALSE;
4878 }
4879
4880 /* If the target symbol is global and marked as a function, the
4881 relocation applies to a function call or a tail call. In this
4882 situation we can veneer out-of-range branches. The veneers
4883 use IP0 and IP1, hence they cannot be used for arbitrary
4884 out-of-range branches that occur within the body of a function. */
4885 if (h && h->type == STT_FUNC)
4886 {
4887 /* Check if a stub has to be inserted because the destination
4888 is too far away. */
4889 if (! aarch64_valid_branch_p (value, place))
4890 {
4891 /* The target is out of reach, so redirect the branch to
4892 the local stub for this function. */
4893 struct elf_aarch64_stub_hash_entry *stub_entry;
4894 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4895 sym_sec, h,
4896 rel, globals);
4897 if (stub_entry != NULL)
4898 value = (stub_entry->stub_offset
4899 + stub_entry->stub_sec->output_offset
4900 + stub_entry->stub_sec->output_section->vma);
4901 }
4902 }
4903 }
4904 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4905 signed_addend, weak_undef_p);
4906 break;
4907
4908 case BFD_RELOC_AARCH64_16_PCREL:
4909 case BFD_RELOC_AARCH64_32_PCREL:
4910 case BFD_RELOC_AARCH64_64_PCREL:
4911 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4912 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4913 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4914 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4915 if (info->shared
4916 && (input_section->flags & SEC_ALLOC) != 0
4917 && (input_section->flags & SEC_READONLY) != 0
4918 && h != NULL
4919 && !h->def_regular)
4920 {
4921 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4922
4923 (*_bfd_error_handler)
4924 (_("%B: relocation %s against external symbol `%s' can not be used"
4925 " when making a shared object; recompile with -fPIC"),
4926 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4927 h->root.root.string);
4928 bfd_set_error (bfd_error_bad_value);
4929 return FALSE;
4930 }
4931
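/* FALLTHROUGH */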
4932 case BFD_RELOC_AARCH64_16:
4933 #if ARCH_SIZE == 64
4934 case BFD_RELOC_AARCH64_32:
4935 #endif
4936 case BFD_RELOC_AARCH64_ADD_LO12:
4937 case BFD_RELOC_AARCH64_BRANCH19:
4938 case BFD_RELOC_AARCH64_LDST128_LO12:
4939 case BFD_RELOC_AARCH64_LDST16_LO12:
4940 case BFD_RELOC_AARCH64_LDST32_LO12:
4941 case BFD_RELOC_AARCH64_LDST64_LO12:
4942 case BFD_RELOC_AARCH64_LDST8_LO12:
4943 case BFD_RELOC_AARCH64_MOVW_G0:
4944 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4945 case BFD_RELOC_AARCH64_MOVW_G0_S:
4946 case BFD_RELOC_AARCH64_MOVW_G1:
4947 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4948 case BFD_RELOC_AARCH64_MOVW_G1_S:
4949 case BFD_RELOC_AARCH64_MOVW_G2:
4950 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4951 case BFD_RELOC_AARCH64_MOVW_G2_S:
4952 case BFD_RELOC_AARCH64_MOVW_G3:
4953 case BFD_RELOC_AARCH64_TSTBR14:
4954 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4955 signed_addend, weak_undef_p);
4956 break;
4957
4958 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4959 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4960 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4961 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4962 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4963 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4964 if (globals->root.sgot == NULL)
4965 BFD_ASSERT (h != NULL);
4966
4967 if (h != NULL)
4968 {
4969 bfd_vma addend = 0;
4970 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4971 output_bfd,
4972 unresolved_reloc_p);
4973 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4974 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4975 addend = (globals->root.sgot->output_section->vma
4976 + globals->root.sgot->output_offset);
4977 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4978 addend, weak_undef_p);
4979 }
4980 else
4981 {
4982 bfd_vma addend = 0;
4983 struct elf_aarch64_local_symbol *locals
4984 = elf_aarch64_locals (input_bfd);
4985
4986 if (locals == NULL)
4987 {
4988 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4989 (*_bfd_error_handler)
4990 (_("%B: Local symbol descriptor table be NULL when applying "
4991 "relocation %s against local symbol"),
4992 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
4993 abort ();
4994 }
4995
4996 off = symbol_got_offset (input_bfd, h, r_symndx);
4997 base_got = globals->root.sgot;
4998 bfd_vma got_entry_addr = (base_got->output_section->vma
4999 + base_got->output_offset + off);
5000
5001 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5002 {
5003 bfd_put_64 (output_bfd, value, base_got->contents + off);
5004
5005 if (info->shared)
5006 {
5007 asection *s;
5008 Elf_Internal_Rela outrel;
5009
5010 /* For a local symbol the absolute relocation has already been
5011 done at static linking stage. For a shared library, however,
5012 the GOT entry content must be updated according to the shared
5013 object's load base address, so we generate an
5014 R_AARCH64_RELATIVE reloc for the dynamic linker. */
5015 s = globals->root.srelgot;
5016 if (s == NULL)
5017 abort ();
5018
5019 outrel.r_offset = got_entry_addr;
5020 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5021 outrel.r_addend = value;
5022 elf_append_rela (output_bfd, s, &outrel);
5023 }
5024
5025 symbol_got_offset_mark (input_bfd, h, r_symndx);
5026 }
5027
5028 /* Update the relocation value to the GOT entry address, as we have
5029 transformed the direct data access into an indirect access through the GOT. */
5030 value = got_entry_addr;
5031
5032 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5033 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5034 addend = base_got->output_section->vma + base_got->output_offset;
5035
5036 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5037 addend, weak_undef_p);
5038 }
5039
5040 break;
5041
5042 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5043 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5044 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5045 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5046 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5047 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5048 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5049 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5050 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5051 if (globals->root.sgot == NULL)
5052 return bfd_reloc_notsupported;
5053
5054 value = (symbol_got_offset (input_bfd, h, r_symndx)
5055 + globals->root.sgot->output_section->vma
5056 + globals->root.sgot->output_offset);
5057
5058 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5059 0, weak_undef_p);
5060 *unresolved_reloc_p = FALSE;
5061 break;
5062
5063 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5064 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5065 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5066 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5067 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5068 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5069 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5070 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5071 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5072 signed_addend - tpoff_base (info),
5073 weak_undef_p);
5074 *unresolved_reloc_p = FALSE;
5075 break;
5076
5077 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5078 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5079 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5080 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5081 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5082 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5083 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5084 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5085 if (globals->root.sgot == NULL)
5086 return bfd_reloc_notsupported;
5087 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5088 + globals->root.sgotplt->output_section->vma
5089 + globals->root.sgotplt->output_offset
5090 + globals->sgotplt_jump_table_size);
5091
5092 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5093 0, weak_undef_p);
5094 *unresolved_reloc_p = FALSE;
5095 break;
5096
5097 default:
5098 return bfd_reloc_notsupported;
5099 }
5100
5101 if (saved_addend)
5102 *saved_addend = value;
5103
5104 /* Only apply the final relocation in a sequence. */
5105 if (save_addend)
5106 return bfd_reloc_continue;
5107
5108 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5109 howto, value);
5110 }
5111
5112 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5113 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a
5114 static link.
5115
5116 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5117 should then call final_link_relocate. Return other values in the
5118 case of error. */
5119
5120 static bfd_reloc_status_type
5121 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5122 bfd *input_bfd, bfd_byte *contents,
5123 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5124 {
5125 bfd_boolean is_local = h == NULL;
5126 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5127 unsigned long insn;
5128
5129 BFD_ASSERT (globals && input_bfd && contents && rel);
5130
5131 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5132 {
5133 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5134 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5135 if (is_local)
5136 {
5137 /* GD->LE relaxation:
5138 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5139 or
5140 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5141 */
5142 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5143 return bfd_reloc_continue;
5144 }
5145 else
5146 {
5147 /* GD->IE relaxation:
5148 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5149 or
5150 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5151 */
5152 return bfd_reloc_continue;
5153 }
5154
5155 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5156 BFD_ASSERT (0);
5157 break;
5158
5159 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5160 if (is_local)
5161 {
5162 /* Tiny TLSDESC->LE relaxation:
5163 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5164 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5165 .tlsdesccall var
5166 blr x1 => nop
5167 */
5168 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5169 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5170
5171 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5172 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5173 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5174
5175 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5176 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5177 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5178 return bfd_reloc_continue;
5179 }
5180 else
5181 {
5182 /* Tiny TLSDESC->IE relaxation:
5183 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5184 adr x0, :tlsdesc:var => nop
5185 .tlsdesccall var
5186 blr x1 => nop
5187 */
5188 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5189 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5190
5191 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5192 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5193
5194 bfd_putl32 (0x58000000, contents + rel->r_offset);
5195 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5196 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5197 return bfd_reloc_continue;
5198 }
5199
5200 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5201 if (is_local)
5202 {
5203 /* Tiny GD->LE relaxation:
5204 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5205 bl __tls_get_addr => add x0, x1, #:tprel_hi12:var, lsl #12
5206 nop => add x0, x0, #:tprel_lo12_nc:var
5207 */
5208
5209 /* First kill the tls_get_addr reloc on the bl instruction. */
5210 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5211
5212 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5213 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5214 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5215
5216 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5217 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5218 rel[1].r_offset = rel->r_offset + 8;
5219
5220 /* Move the current relocation to the second instruction in
5221 the sequence. */
5222 rel->r_offset += 4;
5223 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5224 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5225 return bfd_reloc_continue;
5226 }
5227 else
5228 {
5229 /* Tiny GD->IE relaxation:
5230 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5231 bl __tls_get_addr => mrs x1, tpidr_el0
5232 nop => add x0, x0, x1
5233 */
5234
5235 /* First kill the tls_get_addr reloc on the bl instruction. */
5236 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5237 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5238
5239 bfd_putl32 (0x58000000, contents + rel->r_offset);
5240 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5241 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5242 return bfd_reloc_continue;
5243 }
5244
5245 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5246 return bfd_reloc_continue;
5247
5248 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5249 if (is_local)
5250 {
5251 /* GD->LE relaxation:
5252 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5253 */
5254 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5255 return bfd_reloc_continue;
5256 }
5257 else
5258 {
5259 /* GD->IE relaxation:
5260 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5261 */
5262 insn = bfd_getl32 (contents + rel->r_offset);
5263 insn &= 0xffffffe0;
5264 bfd_putl32 (insn, contents + rel->r_offset);
5265 return bfd_reloc_continue;
5266 }
5267
5268 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5269 if (is_local)
5270 {
5271 /* GD->LE relaxation
5272 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5273 bl __tls_get_addr => mrs x1, tpidr_el0
5274 nop => add x0, x1, x0
5275 */
5276
5277 /* First kill the tls_get_addr reloc on the bl instruction. */
5278 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5279 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5280
5281 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5282 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5283 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5284 return bfd_reloc_continue;
5285 }
5286 else
5287 {
5288 /* GD->IE relaxation
5289 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5290 BL __tls_get_addr => mrs x1, tpidr_el0
5291 R_AARCH64_CALL26
5292 NOP => add x0, x1, x0
5293 */
5294
5295 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5296
5297 /* Remove the relocation on the BL instruction. */
5298 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5299
5300 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5301
5302 /* We choose to fixup the BL and NOP instructions using the
5303 offset from the second relocation to allow flexibility in
5304 scheduling instructions between the ADD and BL. */
5305 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5306 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5307 return bfd_reloc_continue;
5308 }
5309
5310 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5311 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5312 /* GD->IE/LE relaxation:
5313 add x0, x0, #:tlsdesc_lo12:var => nop
5314 blr xd => nop
5315 */
5316 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5317 return bfd_reloc_ok;
5318
5319 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5320 /* IE->LE relaxation:
5321 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5322 */
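/* (insn & 0x1f) extracts the destination register xd of the original adrp
so that the replacement movz targets the same register. */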
5323 if (is_local)
5324 {
5325 insn = bfd_getl32 (contents + rel->r_offset);
5326 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5327 }
5328 return bfd_reloc_continue;
5329
5330 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5331 /* IE->LE relaxation:
5332 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5333 */
5334 if (is_local)
5335 {
5336 insn = bfd_getl32 (contents + rel->r_offset);
5337 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5338 }
5339 return bfd_reloc_continue;
5340
5341 default:
5342 return bfd_reloc_continue;
5343 }
5344
5345 return bfd_reloc_ok;
5346 }
5347
5348 /* Relocate an AArch64 ELF section. */
5349
5350 static bfd_boolean
5351 elfNN_aarch64_relocate_section (bfd *output_bfd,
5352 struct bfd_link_info *info,
5353 bfd *input_bfd,
5354 asection *input_section,
5355 bfd_byte *contents,
5356 Elf_Internal_Rela *relocs,
5357 Elf_Internal_Sym *local_syms,
5358 asection **local_sections)
5359 {
5360 Elf_Internal_Shdr *symtab_hdr;
5361 struct elf_link_hash_entry **sym_hashes;
5362 Elf_Internal_Rela *rel;
5363 Elf_Internal_Rela *relend;
5364 const char *name;
5365 struct elf_aarch64_link_hash_table *globals;
5366 bfd_boolean save_addend = FALSE;
5367 bfd_vma addend = 0;
5368
5369 globals = elf_aarch64_hash_table (info);
5370
5371 symtab_hdr = &elf_symtab_hdr (input_bfd);
5372 sym_hashes = elf_sym_hashes (input_bfd);
5373
5374 rel = relocs;
5375 relend = relocs + input_section->reloc_count;
5376 for (; rel < relend; rel++)
5377 {
5378 unsigned int r_type;
5379 bfd_reloc_code_real_type bfd_r_type;
5380 bfd_reloc_code_real_type relaxed_bfd_r_type;
5381 reloc_howto_type *howto;
5382 unsigned long r_symndx;
5383 Elf_Internal_Sym *sym;
5384 asection *sec;
5385 struct elf_link_hash_entry *h;
5386 bfd_vma relocation;
5387 bfd_reloc_status_type r;
5388 arelent bfd_reloc;
5389 char sym_type;
5390 bfd_boolean unresolved_reloc = FALSE;
5391 char *error_message = NULL;
5392
5393 r_symndx = ELFNN_R_SYM (rel->r_info);
5394 r_type = ELFNN_R_TYPE (rel->r_info);
5395
5396 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5397 howto = bfd_reloc.howto;
5398
5399 if (howto == NULL)
5400 {
5401 (*_bfd_error_handler)
5402 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5403 input_bfd, input_section, r_type);
5404 return FALSE;
5405 }
5406 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5407
5408 h = NULL;
5409 sym = NULL;
5410 sec = NULL;
5411
5412 if (r_symndx < symtab_hdr->sh_info)
5413 {
5414 sym = local_syms + r_symndx;
5415 sym_type = ELFNN_ST_TYPE (sym->st_info);
5416 sec = local_sections[r_symndx];
5417
5418 /* An object file might have a reference to a local
5419 undefined symbol. This is a daft object file, but we
5420 should at least do something about it. */
5421 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5422 && bfd_is_und_section (sec)
5423 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5424 {
5425 if (!info->callbacks->undefined_symbol
5426 (info, bfd_elf_string_from_elf_section
5427 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5428 input_bfd, input_section, rel->r_offset, TRUE))
5429 return FALSE;
5430 }
5431
5432 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5433
5434 /* Relocate against local STT_GNU_IFUNC symbol. */
5435 if (!info->relocatable
5436 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5437 {
5438 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5439 rel, FALSE);
5440 if (h == NULL)
5441 abort ();
5442
5443 /* Set STT_GNU_IFUNC symbol value. */
5444 h->root.u.def.value = sym->st_value;
5445 h->root.u.def.section = sec;
5446 }
5447 }
5448 else
5449 {
5450 bfd_boolean warned, ignored;
5451
5452 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5453 r_symndx, symtab_hdr, sym_hashes,
5454 h, sec, relocation,
5455 unresolved_reloc, warned, ignored);
5456
5457 sym_type = h->type;
5458 }
5459
5460 if (sec != NULL && discarded_section (sec))
5461 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5462 rel, 1, relend, howto, 0, contents);
5463
5464 if (info->relocatable)
5465 continue;
5466
5467 if (h != NULL)
5468 name = h->root.root.string;
5469 else
5470 {
5471 name = (bfd_elf_string_from_elf_section
5472 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5473 if (name == NULL || *name == '\0')
5474 name = bfd_section_name (input_bfd, sec);
5475 }
5476
5477 if (r_symndx != 0
5478 && r_type != R_AARCH64_NONE
5479 && r_type != R_AARCH64_NULL
5480 && (h == NULL
5481 || h->root.type == bfd_link_hash_defined
5482 || h->root.type == bfd_link_hash_defweak)
5483 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5484 {
5485 (*_bfd_error_handler)
5486 ((sym_type == STT_TLS
5487 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5488 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5489 input_bfd,
5490 input_section, (long) rel->r_offset, howto->name, name);
5491 }
5492
5493 /* We relax only if we can see that there can be a valid transition
5494 from one reloc type to another.
5495 We call elfNN_aarch64_final_link_relocate unless we're completely
5496 done, i.e., the relaxation produced the final output we want. */
5497
5498 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5499 h, r_symndx);
5500 if (relaxed_bfd_r_type != bfd_r_type)
5501 {
5502 bfd_r_type = relaxed_bfd_r_type;
5503 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5504 BFD_ASSERT (howto != NULL);
5505 r_type = howto->type;
5506 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5507 unresolved_reloc = 0;
5508 }
5509 else
5510 r = bfd_reloc_continue;
5511
5512 /* There may be multiple consecutive relocations for the
5513 same offset. In that case we are supposed to treat the
5514 output of each relocation as the addend for the next. */
5515 if (rel + 1 < relend
5516 && rel->r_offset == rel[1].r_offset
5517 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5518 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5519 save_addend = TRUE;
5520 else
5521 save_addend = FALSE;
5522
5523 if (r == bfd_reloc_continue)
5524 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5525 input_section, contents, rel,
5526 relocation, info, sec,
5527 h, &unresolved_reloc,
5528 save_addend, &addend, sym);
5529
5530 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5531 {
5532 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5533 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5534 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5535 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5536 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5537 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5538 {
5539 bfd_boolean need_relocs = FALSE;
5540 bfd_byte *loc;
5541 int indx;
5542 bfd_vma off;
5543
5544 off = symbol_got_offset (input_bfd, h, r_symndx);
5545 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5546
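/* A dynamic relocation is needed when producing a shared object or when
the symbol is dynamic (indx != 0), except for an undefined weak symbol
with non-default visibility, which is known to resolve to zero. */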
5547 need_relocs =
5548 (info->shared || indx != 0) &&
5549 (h == NULL
5550 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5551 || h->root.type != bfd_link_hash_undefweak);
5552
5553 BFD_ASSERT (globals->root.srelgot != NULL);
5554
5555 if (need_relocs)
5556 {
5557 Elf_Internal_Rela rela;
5558 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5559 rela.r_addend = 0;
5560 rela.r_offset = globals->root.sgot->output_section->vma +
5561 globals->root.sgot->output_offset + off;
5562
5563
5564 loc = globals->root.srelgot->contents;
5565 loc += globals->root.srelgot->reloc_count++
5566 * RELOC_SIZE (globals);
5567 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5568
5569 bfd_reloc_code_real_type real_type =
5570 elfNN_aarch64_bfd_reloc_from_type (r_type);
5571
5572 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5573 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21)
5574 {
5575 /* For local dynamic, don't generate DTPREL in any case.
5576 Initialize the DTPREL slot to zero, so we get the module
5577 base address when we invoke the runtime TLS resolver. */
5578 bfd_put_NN (output_bfd, 0,
5579 globals->root.sgot->contents + off
5580 + GOT_ENTRY_SIZE);
5581 }
5582 else if (indx == 0)
5583 {
5584 bfd_put_NN (output_bfd,
5585 relocation - dtpoff_base (info),
5586 globals->root.sgot->contents + off
5587 + GOT_ENTRY_SIZE);
5588 }
5589 else
5590 {
5591 /* This TLS symbol is global. We emit a
5592 relocation to fixup the tls offset at load
5593 time. */
5594 rela.r_info =
5595 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5596 rela.r_addend = 0;
5597 rela.r_offset =
5598 (globals->root.sgot->output_section->vma
5599 + globals->root.sgot->output_offset + off
5600 + GOT_ENTRY_SIZE);
5601
5602 loc = globals->root.srelgot->contents;
5603 loc += globals->root.srelgot->reloc_count++
5604 * RELOC_SIZE (globals);
5605 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5606 bfd_put_NN (output_bfd, (bfd_vma) 0,
5607 globals->root.sgot->contents + off
5608 + GOT_ENTRY_SIZE);
5609 }
5610 }
5611 else
5612 {
5613 bfd_put_NN (output_bfd, (bfd_vma) 1,
5614 globals->root.sgot->contents + off);
5615 bfd_put_NN (output_bfd,
5616 relocation - dtpoff_base (info),
5617 globals->root.sgot->contents + off
5618 + GOT_ENTRY_SIZE);
5619 }
5620
5621 symbol_got_offset_mark (input_bfd, h, r_symndx);
5622 }
5623 break;
5624
5625 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5626 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5627 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5628 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5629 {
5630 bfd_boolean need_relocs = FALSE;
5631 bfd_byte *loc;
5632 int indx;
5633 bfd_vma off;
5634
5635 off = symbol_got_offset (input_bfd, h, r_symndx);
5636
5637 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5638
5639 need_relocs =
5640 (info->shared || indx != 0) &&
5641 (h == NULL
5642 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5643 || h->root.type != bfd_link_hash_undefweak);
5644
5645 BFD_ASSERT (globals->root.srelgot != NULL);
5646
5647 if (need_relocs)
5648 {
5649 Elf_Internal_Rela rela;
5650
5651 if (indx == 0)
5652 rela.r_addend = relocation - dtpoff_base (info);
5653 else
5654 rela.r_addend = 0;
5655
5656 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5657 rela.r_offset = globals->root.sgot->output_section->vma +
5658 globals->root.sgot->output_offset + off;
5659
5660 loc = globals->root.srelgot->contents;
5661 loc += globals->root.srelgot->reloc_count++
5662 * RELOC_SIZE (globals);
5663
5664 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5665
5666 bfd_put_NN (output_bfd, rela.r_addend,
5667 globals->root.sgot->contents + off);
5668 }
5669 else
5670 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5671 globals->root.sgot->contents + off);
5672
5673 symbol_got_offset_mark (input_bfd, h, r_symndx);
5674 }
5675 break;
5676
5677 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5678 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5679 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5680 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5681 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5682 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5683 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5684 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5685 break;
5686
5687 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5688 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5689 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5690 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5691 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5692 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5693 {
5694 bfd_boolean need_relocs = FALSE;
5695 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5696 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5697
5698 need_relocs = (h == NULL
5699 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5700 || h->root.type != bfd_link_hash_undefweak);
5701
5702 BFD_ASSERT (globals->root.srelgot != NULL);
5703 BFD_ASSERT (globals->root.sgot != NULL);
5704
5705 if (need_relocs)
5706 {
5707 bfd_byte *loc;
5708 Elf_Internal_Rela rela;
5709 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5710
5711 rela.r_addend = 0;
5712 rela.r_offset = (globals->root.sgotplt->output_section->vma
5713 + globals->root.sgotplt->output_offset
5714 + off + globals->sgotplt_jump_table_size);
5715
5716 if (indx == 0)
5717 rela.r_addend = relocation - dtpoff_base (info);
5718
5719 /* Allocate the next available slot in the PLT reloc
5720 section to hold our R_AARCH64_TLSDESC; the next
5721 available slot is determined from reloc_count,
5722 which we step. Note that reloc_count was
5723 artificially moved down while allocating slots for
5724 the real PLT relocs, so that all of the PLT relocs
5725 fit above the initial reloc_count and the extra
5726 TLSDESC relocs fit below. */
5727 loc = globals->root.srelplt->contents;
5728 loc += globals->root.srelplt->reloc_count++
5729 * RELOC_SIZE (globals);
5730
5731 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5732
5733 bfd_put_NN (output_bfd, (bfd_vma) 0,
5734 globals->root.sgotplt->contents + off +
5735 globals->sgotplt_jump_table_size);
5736 bfd_put_NN (output_bfd, (bfd_vma) 0,
5737 globals->root.sgotplt->contents + off +
5738 globals->sgotplt_jump_table_size +
5739 GOT_ENTRY_SIZE);
5740 }
5741
5742 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5743 }
5744 break;
5745 default:
5746 break;
5747 }
5748
5749 if (!save_addend)
5750 addend = 0;
5751
5752
5753 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5754 because such sections are not SEC_ALLOC and thus ld.so will
5755 not process them. */
5756 if (unresolved_reloc
5757 && !((input_section->flags & SEC_DEBUGGING) != 0
5758 && h->def_dynamic)
5759 && _bfd_elf_section_offset (output_bfd, info, input_section,
5760 rel->r_offset) != (bfd_vma) -1)
5761 {
5762 (*_bfd_error_handler)
5763 (_
5764 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5765 input_bfd, input_section, (long) rel->r_offset, howto->name,
5766 h->root.root.string);
5767 return FALSE;
5768 }
5769
5770 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5771 {
5772 switch (r)
5773 {
5774 case bfd_reloc_overflow:
5775 if (!(*info->callbacks->reloc_overflow)
5776 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5777 input_bfd, input_section, rel->r_offset))
5778 return FALSE;
5779 break;
5780
5781 case bfd_reloc_undefined:
5782 if (!((*info->callbacks->undefined_symbol)
5783 (info, name, input_bfd, input_section,
5784 rel->r_offset, TRUE)))
5785 return FALSE;
5786 break;
5787
5788 case bfd_reloc_outofrange:
5789 error_message = _("out of range");
5790 goto common_error;
5791
5792 case bfd_reloc_notsupported:
5793 error_message = _("unsupported relocation");
5794 goto common_error;
5795
5796 case bfd_reloc_dangerous:
5797 /* error_message should already be set. */
5798 goto common_error;
5799
5800 default:
5801 error_message = _("unknown error");
5802 /* Fall through. */
5803
5804 common_error:
5805 BFD_ASSERT (error_message != NULL);
5806 if (!((*info->callbacks->reloc_dangerous)
5807 (info, error_message, input_bfd, input_section,
5808 rel->r_offset)))
5809 return FALSE;
5810 break;
5811 }
5812 }
5813 }
5814
5815 return TRUE;
5816 }
5817
5818 /* Set the right machine number. */
5819
5820 static bfd_boolean
5821 elfNN_aarch64_object_p (bfd *abfd)
5822 {
5823 #if ARCH_SIZE == 32
5824 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5825 #else
5826 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5827 #endif
5828 return TRUE;
5829 }
5830
5831 /* Function to keep AArch64 specific flags in the ELF header. */
5832
5833 static bfd_boolean
5834 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5835 {
5836 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5837 {
5838 }
5839 else
5840 {
5841 elf_elfheader (abfd)->e_flags = flags;
5842 elf_flags_init (abfd) = TRUE;
5843 }
5844
5845 return TRUE;
5846 }
5847
5848 /* Merge backend specific data from an object file to the output
5849 object file when linking. */
5850
5851 static bfd_boolean
5852 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5853 {
5854 flagword out_flags;
5855 flagword in_flags;
5856 bfd_boolean flags_compatible = TRUE;
5857 asection *sec;
5858
5859 /* Check if we have the same endianness. */
5860 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5861 return FALSE;
5862
5863 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5864 return TRUE;
5865
5866 /* The input BFD must have had its flags initialised. */
5867 /* The following seems bogus to me -- The flags are initialized in
5868 the assembler but I don't think an elf_flags_init field is
5869 written into the object. */
5870 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5871
5872 in_flags = elf_elfheader (ibfd)->e_flags;
5873 out_flags = elf_elfheader (obfd)->e_flags;
5874
5875 if (!elf_flags_init (obfd))
5876 {
5877 /* If the input is the default architecture and had the default
5878 flags then do not bother setting the flags for the output
5879 architecture, instead allow future merges to do this. If no
5880 future merges ever set these flags then they will retain their
5881 uninitialised values, which, surprise surprise, correspond
5882 to the default values. */
5883 if (bfd_get_arch_info (ibfd)->the_default
5884 && elf_elfheader (ibfd)->e_flags == 0)
5885 return TRUE;
5886
5887 elf_flags_init (obfd) = TRUE;
5888 elf_elfheader (obfd)->e_flags = in_flags;
5889
5890 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5891 && bfd_get_arch_info (obfd)->the_default)
5892 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5893 bfd_get_mach (ibfd));
5894
5895 return TRUE;
5896 }
5897
5898 /* Identical flags must be compatible. */
5899 if (in_flags == out_flags)
5900 return TRUE;
5901
5902 /* Check to see if the input BFD actually contains any sections. If
5903 not, its flags may not have been initialised either, but it
5904 cannot actually cause any incompatibility. Do not short-circuit
5905 dynamic objects; their section list may be emptied by
5906 elf_link_add_object_symbols.
5907
5908 Also check to see if there are no code sections in the input.
5909 In this case there is no need to check for code specific flags.
5910 XXX - do we need to worry about floating-point format compatibility
5911 in data sections? */
5912 if (!(ibfd->flags & DYNAMIC))
5913 {
5914 bfd_boolean null_input_bfd = TRUE;
5915 bfd_boolean only_data_sections = TRUE;
5916
5917 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5918 {
5919 if ((bfd_get_section_flags (ibfd, sec)
5920 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5921 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5922 only_data_sections = FALSE;
5923
5924 null_input_bfd = FALSE;
5925 break;
5926 }
5927
5928 if (null_input_bfd || only_data_sections)
5929 return TRUE;
5930 }
5931
5932 return flags_compatible;
5933 }
5934
5935 /* Display the flags field. */
5936
5937 static bfd_boolean
5938 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5939 {
5940 FILE *file = (FILE *) ptr;
5941 unsigned long flags;
5942
5943 BFD_ASSERT (abfd != NULL && ptr != NULL);
5944
5945 /* Print normal ELF private data. */
5946 _bfd_elf_print_private_bfd_data (abfd, ptr);
5947
5948 flags = elf_elfheader (abfd)->e_flags;
5949 /* Ignore init flag - it may not be set, despite the flags field
5950 containing valid data. */
5951
5952 /* xgettext:c-format */
5953 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5954
5955 if (flags)
5956 fprintf (file, _("<Unrecognised flag bits set>"));
5957
5958 fputc ('\n', file);
5959
5960 return TRUE;
5961 }
5962
5963 /* Update the got entry reference counts for the section being removed. */
5964
5965 static bfd_boolean
5966 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5967 struct bfd_link_info *info,
5968 asection *sec,
5969 const Elf_Internal_Rela * relocs)
5970 {
5971 struct elf_aarch64_link_hash_table *htab;
5972 Elf_Internal_Shdr *symtab_hdr;
5973 struct elf_link_hash_entry **sym_hashes;
5974 struct elf_aarch64_local_symbol *locals;
5975 const Elf_Internal_Rela *rel, *relend;
5976
5977 if (info->relocatable)
5978 return TRUE;
5979
5980 htab = elf_aarch64_hash_table (info);
5981
5982 if (htab == NULL)
5983 return FALSE;
5984
5985 elf_section_data (sec)->local_dynrel = NULL;
5986
5987 symtab_hdr = &elf_symtab_hdr (abfd);
5988 sym_hashes = elf_sym_hashes (abfd);
5989
5990 locals = elf_aarch64_locals (abfd);
5991
5992 relend = relocs + sec->reloc_count;
5993 for (rel = relocs; rel < relend; rel++)
5994 {
5995 unsigned long r_symndx;
5996 unsigned int r_type;
5997 struct elf_link_hash_entry *h = NULL;
5998
5999 r_symndx = ELFNN_R_SYM (rel->r_info);
6000
6001 if (r_symndx >= symtab_hdr->sh_info)
6002 {
6003
6004 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6005 while (h->root.type == bfd_link_hash_indirect
6006 || h->root.type == bfd_link_hash_warning)
6007 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6008 }
6009 else
6010 {
6011 Elf_Internal_Sym *isym;
6012
6013 /* A local symbol. */
6014 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6015 abfd, r_symndx);
6016
6017 /* Check relocation against local STT_GNU_IFUNC symbol. */
6018 if (isym != NULL
6019 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6020 {
6021 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6022 if (h == NULL)
6023 abort ();
6024 }
6025 }
6026
6027 if (h)
6028 {
6029 struct elf_aarch64_link_hash_entry *eh;
6030 struct elf_dyn_relocs **pp;
6031 struct elf_dyn_relocs *p;
6032
6033 eh = (struct elf_aarch64_link_hash_entry *) h;
6034
6035 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6036 if (p->sec == sec)
6037 {
6038 /* Everything must go for SEC. */
6039 *pp = p->next;
6040 break;
6041 }
6042 }
6043
6044 r_type = ELFNN_R_TYPE (rel->r_info);
6045 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6046 {
6047 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6048 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6049 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6050 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6051 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6052 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6053 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6054 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6055 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6056 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6057 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6058 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6059 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6060 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6061 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6062 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6063 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6064 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6065 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6066 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6067 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6068 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6069 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6070 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6071 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6072 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6073 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6074 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6075 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6076 if (h != NULL)
6077 {
6078 if (h->got.refcount > 0)
6079 h->got.refcount -= 1;
6080
6081 if (h->type == STT_GNU_IFUNC)
6082 {
6083 if (h->plt.refcount > 0)
6084 h->plt.refcount -= 1;
6085 }
6086 }
6087 else if (locals != NULL)
6088 {
6089 if (locals[r_symndx].got_refcount > 0)
6090 locals[r_symndx].got_refcount -= 1;
6091 }
6092 break;
6093
6094 case BFD_RELOC_AARCH64_CALL26:
6095 case BFD_RELOC_AARCH64_JUMP26:
6096 /* If this is a local symbol then we resolve it
6097 directly without creating a PLT entry. */
6098 if (h == NULL)
6099 continue;
6100
6101 if (h->plt.refcount > 0)
6102 h->plt.refcount -= 1;
6103 break;
6104
6105 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6106 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6107 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6108 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6109 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6110 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6111 case BFD_RELOC_AARCH64_MOVW_G3:
6112 case BFD_RELOC_AARCH64_NN:
6113 if (h != NULL && info->executable)
6114 {
6115 if (h->plt.refcount > 0)
6116 h->plt.refcount -= 1;
6117 }
6118 break;
6119
6120 default:
6121 break;
6122 }
6123 }
6124
6125 return TRUE;
6126 }
6127
6128 /* Adjust a symbol defined by a dynamic object and referenced by a
6129 regular object. The current definition is in some section of the
6130 dynamic object, but we're not including those sections. We have to
6131 change the definition to something the rest of the link can
6132 understand. */
6133
6134 static bfd_boolean
6135 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6136 struct elf_link_hash_entry *h)
6137 {
6138 struct elf_aarch64_link_hash_table *htab;
6139 asection *s;
6140
6141 /* If this is a function, put it in the procedure linkage table. We
6142 will fill in the contents of the procedure linkage table later,
6143 when we know the address of the .got section. */
6144 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6145 {
6146 if (h->plt.refcount <= 0
6147 || (h->type != STT_GNU_IFUNC
6148 && (SYMBOL_CALLS_LOCAL (info, h)
6149 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6150 && h->root.type == bfd_link_hash_undefweak))))
6151 {
6152 /* This case can occur if we saw a CALL26 reloc in
6153 an input file, but the symbol wasn't referred to
6154 by a dynamic object or all references were
6155 garbage collected. In which case we can end up
6156 resolving the calls directly without a PLT entry. */
6157 h->plt.offset = (bfd_vma) - 1;
6158 h->needs_plt = 0;
6159 }
6160
6161 return TRUE;
6162 }
6163 else
6164 /* Otherwise, reset to -1. */
6165 h->plt.offset = (bfd_vma) - 1;
6166
6167
6168 /* If this is a weak symbol, and there is a real definition, the
6169 processor independent code will have arranged for us to see the
6170 real definition first, and we can just use the same value. */
6171 if (h->u.weakdef != NULL)
6172 {
6173 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6174 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6175 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6176 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6177 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6178 h->non_got_ref = h->u.weakdef->non_got_ref;
6179 return TRUE;
6180 }
6181
6182 /* If we are creating a shared library, we must presume that the
6183 only references to the symbol are via the global offset table.
6184 For such cases we need not do anything here; the relocations will
6185 be handled correctly by relocate_section. */
6186 if (info->shared)
6187 return TRUE;
6188
6189 /* If there are no references to this symbol that do not use the
6190 GOT, we don't need to generate a copy reloc. */
6191 if (!h->non_got_ref)
6192 return TRUE;
6193
6194 /* If -z nocopyreloc was given, we won't generate them either. */
6195 if (info->nocopyreloc)
6196 {
6197 h->non_got_ref = 0;
6198 return TRUE;
6199 }
6200
6201 /* We must allocate the symbol in our .dynbss section, which will
6202 become part of the .bss section of the executable. There will be
6203 an entry for this symbol in the .dynsym section. The dynamic
6204 object will contain position independent code, so all references
6205 from the dynamic object to this symbol will go through the global
6206 offset table. The dynamic linker will use the .dynsym entry to
6207 determine the address it must put in the global offset table, so
6208 both the dynamic object and the regular object will refer to the
6209 same memory location for the variable. */
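/* For example, an executable that refers to a variable defined in a shared
library through a direct (non-GOT) relocation gets a copy of that variable
in .dynbss together with an R_AARCH64_COPY relocation, which makes the
dynamic linker copy the initial value there at startup. */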
6210
6211 htab = elf_aarch64_hash_table (info);
6212
6213 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6214 to copy the initial value out of the dynamic object and into the
6215 runtime process image. */
6216 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6217 {
6218 htab->srelbss->size += RELOC_SIZE (htab);
6219 h->needs_copy = 1;
6220 }
6221
6222 s = htab->sdynbss;
6223
6224 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6225
6226 }
6227
6228 static bfd_boolean
6229 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6230 {
6231 struct elf_aarch64_local_symbol *locals;
6232 locals = elf_aarch64_locals (abfd);
6233 if (locals == NULL)
6234 {
6235 locals = (struct elf_aarch64_local_symbol *)
6236 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6237 if (locals == NULL)
6238 return FALSE;
6239 elf_aarch64_locals (abfd) = locals;
6240 }
6241 return TRUE;
6242 }
6243
6244 /* Create the .got section to hold the global offset table. */
6245
6246 static bfd_boolean
6247 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6248 {
6249 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6250 flagword flags;
6251 asection *s;
6252 struct elf_link_hash_entry *h;
6253 struct elf_link_hash_table *htab = elf_hash_table (info);
6254
6255 /* This function may be called more than once. */
6256 s = bfd_get_linker_section (abfd, ".got");
6257 if (s != NULL)
6258 return TRUE;
6259
6260 flags = bed->dynamic_sec_flags;
6261
6262 s = bfd_make_section_anyway_with_flags (abfd,
6263 (bed->rela_plts_and_copies_p
6264 ? ".rela.got" : ".rel.got"),
6265 (bed->dynamic_sec_flags
6266 | SEC_READONLY));
6267 if (s == NULL
6268 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6269 return FALSE;
6270 htab->srelgot = s;
6271
6272 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6273 if (s == NULL
6274 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6275 return FALSE;
6276 htab->sgot = s;
6277 htab->sgot->size += GOT_ENTRY_SIZE;
6278
6279 if (bed->want_got_sym)
6280 {
6281 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6282 (or .got.plt) section. We don't do this in the linker script
6283 because we don't want to define the symbol if we are not creating
6284 a global offset table. */
6285 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6286 "_GLOBAL_OFFSET_TABLE_");
6287 elf_hash_table (info)->hgot = h;
6288 if (h == NULL)
6289 return FALSE;
6290 }
6291
6292 if (bed->want_got_plt)
6293 {
6294 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6295 if (s == NULL
6296 || !bfd_set_section_alignment (abfd, s,
6297 bed->s->log_file_align))
6298 return FALSE;
6299 htab->sgotplt = s;
6300 }
6301
6302 /* The first bit of the global offset table is the header. */
6303 s->size += bed->got_header_size;
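/* Note that at this point S is .got.plt when the backend wants one (the
usual case), so the GOT header is accounted to that section. */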
6304
6305 return TRUE;
6306 }
6307
6308 /* Look through the relocs for a section during the first phase. */
6309
6310 static bfd_boolean
6311 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6312 asection *sec, const Elf_Internal_Rela *relocs)
6313 {
6314 Elf_Internal_Shdr *symtab_hdr;
6315 struct elf_link_hash_entry **sym_hashes;
6316 const Elf_Internal_Rela *rel;
6317 const Elf_Internal_Rela *rel_end;
6318 asection *sreloc;
6319
6320 struct elf_aarch64_link_hash_table *htab;
6321
6322 if (info->relocatable)
6323 return TRUE;
6324
6325 BFD_ASSERT (is_aarch64_elf (abfd));
6326
6327 htab = elf_aarch64_hash_table (info);
6328 sreloc = NULL;
6329
6330 symtab_hdr = &elf_symtab_hdr (abfd);
6331 sym_hashes = elf_sym_hashes (abfd);
6332
6333 rel_end = relocs + sec->reloc_count;
6334 for (rel = relocs; rel < rel_end; rel++)
6335 {
6336 struct elf_link_hash_entry *h;
6337 unsigned long r_symndx;
6338 unsigned int r_type;
6339 bfd_reloc_code_real_type bfd_r_type;
6340 Elf_Internal_Sym *isym;
6341
6342 r_symndx = ELFNN_R_SYM (rel->r_info);
6343 r_type = ELFNN_R_TYPE (rel->r_info);
6344
6345 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6346 {
6347 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6348 r_symndx);
6349 return FALSE;
6350 }
6351
6352 if (r_symndx < symtab_hdr->sh_info)
6353 {
6354 /* A local symbol. */
6355 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6356 abfd, r_symndx);
6357 if (isym == NULL)
6358 return FALSE;
6359
6360 /* Check relocation against local STT_GNU_IFUNC symbol. */
6361 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6362 {
6363 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6364 TRUE);
6365 if (h == NULL)
6366 return FALSE;
6367
6368 /* Fake a STT_GNU_IFUNC symbol. */
6369 h->type = STT_GNU_IFUNC;
6370 h->def_regular = 1;
6371 h->ref_regular = 1;
6372 h->forced_local = 1;
6373 h->root.type = bfd_link_hash_defined;
6374 }
6375 else
6376 h = NULL;
6377 }
6378 else
6379 {
6380 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6381 while (h->root.type == bfd_link_hash_indirect
6382 || h->root.type == bfd_link_hash_warning)
6383 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6384
6385 /* PR15323, ref flags aren't set for references in the same
6386 object. */
6387 h->root.non_ir_ref = 1;
6388 }
6389
6390 /* Could be done earlier, if h were already available. */
6391 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6392
6393 if (h != NULL)
6394 {
6395 /* Create the ifunc sections for static executables. If we
6396 never see an indirect function symbol nor are we building
6397 a static executable, those sections will be empty and
6398 won't appear in the output. */
6399 switch (bfd_r_type)
6400 {
6401 default:
6402 break;
6403
6404 case BFD_RELOC_AARCH64_ADD_LO12:
6405 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6406 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6407 case BFD_RELOC_AARCH64_CALL26:
6408 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6409 case BFD_RELOC_AARCH64_JUMP26:
6410 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6411 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6412 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6413 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6414 case BFD_RELOC_AARCH64_NN:
6415 if (htab->root.dynobj == NULL)
6416 htab->root.dynobj = abfd;
6417 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6418 return FALSE;
6419 break;
6420 }
6421
6422 /* It is referenced by a non-shared object. */
6423 h->ref_regular = 1;
6424 h->root.non_ir_ref = 1;
6425 }
6426
6427 switch (bfd_r_type)
6428 {
6429 case BFD_RELOC_AARCH64_NN:
6430
6431 /* We don't need to handle relocs into sections not going into
6432 the "real" output. */
6433 if ((sec->flags & SEC_ALLOC) == 0)
6434 break;
6435
6436 if (h != NULL)
6437 {
6438 if (!info->shared)
6439 h->non_got_ref = 1;
6440
6441 h->plt.refcount += 1;
6442 h->pointer_equality_needed = 1;
6443 }
6444
6445 /* No need to do anything if we're not creating a shared
6446 object. */
6447 if (! info->shared)
6448 break;
6449
6450 {
6451 struct elf_dyn_relocs *p;
6452 struct elf_dyn_relocs **head;
6453
6454 /* We must copy these reloc types into the output file.
6455 Create a reloc section in dynobj and make room for
6456 this reloc. */
6457 if (sreloc == NULL)
6458 {
6459 if (htab->root.dynobj == NULL)
6460 htab->root.dynobj = abfd;
6461
6462 sreloc = _bfd_elf_make_dynamic_reloc_section
6463 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6464
6465 if (sreloc == NULL)
6466 return FALSE;
6467 }
6468
6469 /* If this is a global symbol, we count the number of
6470 relocations we need for this symbol. */
6471 if (h != NULL)
6472 {
6473 struct elf_aarch64_link_hash_entry *eh;
6474 eh = (struct elf_aarch64_link_hash_entry *) h;
6475 head = &eh->dyn_relocs;
6476 }
6477 else
6478 {
6479 /* Track dynamic relocs needed for local syms too.
6480 We really need local syms available to do this
6481 easily. Oh well. */
6482
6483 asection *s;
6484 void **vpp;
6485
6486 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6487 abfd, r_symndx);
6488 if (isym == NULL)
6489 return FALSE;
6490
6491 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6492 if (s == NULL)
6493 s = sec;
6494
6495 /* Beware of type punned pointers vs strict aliasing
6496 rules. */
6497 vpp = &(elf_section_data (s)->local_dynrel);
6498 head = (struct elf_dyn_relocs **) vpp;
6499 }
6500
6501 p = *head;
6502 if (p == NULL || p->sec != sec)
6503 {
6504 bfd_size_type amt = sizeof *p;
6505 p = ((struct elf_dyn_relocs *)
6506 bfd_zalloc (htab->root.dynobj, amt));
6507 if (p == NULL)
6508 return FALSE;
6509 p->next = *head;
6510 *head = p;
6511 p->sec = sec;
6512 }
6513
6514 p->count += 1;
6515
6516 }
6517 break;
6518
6519 /* RR: We probably want to keep a consistency check that
6520 there are no dangling GOT_PAGE relocs. */
6521 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6522 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6523 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6524 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6525 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6526 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6527 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6528 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6529 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6530 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6531 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6532 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6533 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6534 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6535 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6536 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6537 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6538 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6539 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6540 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6541 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6542 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6543 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6544 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6545 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6546 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6547 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6548 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6549 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6550 {
6551 unsigned got_type;
6552 unsigned old_got_type;
6553
6554 got_type = aarch64_reloc_got_type (bfd_r_type);
6555
6556 if (h)
6557 {
6558 h->got.refcount += 1;
6559 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6560 }
6561 else
6562 {
6563 struct elf_aarch64_local_symbol *locals;
6564
6565 if (!elfNN_aarch64_allocate_local_symbols
6566 (abfd, symtab_hdr->sh_info))
6567 return FALSE;
6568
6569 locals = elf_aarch64_locals (abfd);
6570 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6571 locals[r_symndx].got_refcount += 1;
6572 old_got_type = locals[r_symndx].got_type;
6573 }
6574
6575 /* If a variable is accessed with both general dynamic TLS
6576 methods, two slots may be created. */
6577 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6578 got_type |= old_got_type;
6579
6580 /* We will already have issued an error message if there
6581 is a TLS/non-TLS mismatch, based on the symbol type.
6582 So just combine any TLS types needed. */
6583 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6584 && got_type != GOT_NORMAL)
6585 got_type |= old_got_type;
6586
6587 /* If the symbol is accessed by both IE and GD methods, we
6588 are able to relax. Turn off the GD flag, without
6589 messing up with any other kind of TLS types that may be
6590 involved. */
6591 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6592 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
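/* For instance, a symbol first seen through :tlsdesc: relocations
(GOT_TLSDESC_GD) and later through :gottprel: ones (GOT_TLS_IE) is left
with just GOT_TLS_IE here, so only an IE-style GOT entry needs to be
allocated for it. */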
6593
6594 if (old_got_type != got_type)
6595 {
6596 if (h != NULL)
6597 elf_aarch64_hash_entry (h)->got_type = got_type;
6598 else
6599 {
6600 struct elf_aarch64_local_symbol *locals;
6601 locals = elf_aarch64_locals (abfd);
6602 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6603 locals[r_symndx].got_type = got_type;
6604 }
6605 }
6606
6607 if (htab->root.dynobj == NULL)
6608 htab->root.dynobj = abfd;
6609 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6610 return FALSE;
6611 break;
6612 }
6613
6614 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6615 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6616 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6617 case BFD_RELOC_AARCH64_MOVW_G3:
6618 if (info->shared)
6619 {
6620 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6621 (*_bfd_error_handler)
6622 (_("%B: relocation %s against `%s' can not be used when making "
6623 "a shared object; recompile with -fPIC"),
6624 abfd, elfNN_aarch64_howto_table[howto_index].name,
6625 (h) ? h->root.root.string : "a local symbol");
6626 bfd_set_error (bfd_error_bad_value);
6627 return FALSE;
6628 }
6629 /* Fall through. */
6630 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6631 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6632 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6633 if (h != NULL && info->executable)
6634 {
6635 /* If this reloc is in a read-only section, we might
6636 need a copy reloc. We can't check reliably at this
6637 stage whether the section is read-only, as input
6638 sections have not yet been mapped to output sections.
6639 Tentatively set the flag for now, and correct in
6640 adjust_dynamic_symbol. */
6641 h->non_got_ref = 1;
6642 h->plt.refcount += 1;
6643 h->pointer_equality_needed = 1;
6644 }
6645 /* FIXME: RR need to handle these in shared libraries
6646 and essentially bomb out, as these are non-PIC
6647 relocations in shared libraries. */
6648 break;
6649
6650 case BFD_RELOC_AARCH64_CALL26:
6651 case BFD_RELOC_AARCH64_JUMP26:
6652 /* If this is a local symbol then we resolve it
6653 directly without creating a PLT entry. */
6654 if (h == NULL)
6655 continue;
6656
6657 h->needs_plt = 1;
6658 if (h->plt.refcount <= 0)
6659 h->plt.refcount = 1;
6660 else
6661 h->plt.refcount += 1;
6662 break;
6663
6664 default:
6665 break;
6666 }
6667 }
6668
6669 return TRUE;
6670 }
6671
6672 /* Treat mapping symbols as special target symbols. */
6673
6674 static bfd_boolean
6675 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6676 asymbol *sym)
6677 {
6678 return bfd_is_aarch64_special_symbol_name (sym->name,
6679 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6680 }
6681
6682 /* This is a copy of elf_find_function () from elf.c except that
6683 AArch64 mapping symbols are ignored when looking for function names. */
6684
6685 static bfd_boolean
6686 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6687 asymbol **symbols,
6688 asection *section,
6689 bfd_vma offset,
6690 const char **filename_ptr,
6691 const char **functionname_ptr)
6692 {
6693 const char *filename = NULL;
6694 asymbol *func = NULL;
6695 bfd_vma low_func = 0;
6696 asymbol **p;
6697
6698 for (p = symbols; *p != NULL; p++)
6699 {
6700 elf_symbol_type *q;
6701
6702 q = (elf_symbol_type *) *p;
6703
6704 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6705 {
6706 default:
6707 break;
6708 case STT_FILE:
6709 filename = bfd_asymbol_name (&q->symbol);
6710 break;
6711 case STT_FUNC:
6712 case STT_NOTYPE:
6713 /* Skip mapping symbols. */
6714 if ((q->symbol.flags & BSF_LOCAL)
6715 && (bfd_is_aarch64_special_symbol_name
6716 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6717 continue;
6718 /* Fall through. */
6719 if (bfd_get_section (&q->symbol) == section
6720 && q->symbol.value >= low_func && q->symbol.value <= offset)
6721 {
6722 func = (asymbol *) q;
6723 low_func = q->symbol.value;
6724 }
6725 break;
6726 }
6727 }
6728
6729 if (func == NULL)
6730 return FALSE;
6731
6732 if (filename_ptr)
6733 *filename_ptr = filename;
6734 if (functionname_ptr)
6735 *functionname_ptr = bfd_asymbol_name (func);
6736
6737 return TRUE;
6738 }
6739
6740
6741 /* Find the nearest line to a particular section and offset, for error
6742 reporting. This code is a duplicate of the code in elf.c, except
6743 that it uses aarch64_elf_find_function. */
6744
6745 static bfd_boolean
6746 elfNN_aarch64_find_nearest_line (bfd *abfd,
6747 asymbol **symbols,
6748 asection *section,
6749 bfd_vma offset,
6750 const char **filename_ptr,
6751 const char **functionname_ptr,
6752 unsigned int *line_ptr,
6753 unsigned int *discriminator_ptr)
6754 {
6755 bfd_boolean found = FALSE;
6756
6757 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6758 filename_ptr, functionname_ptr,
6759 line_ptr, discriminator_ptr,
6760 dwarf_debug_sections, 0,
6761 &elf_tdata (abfd)->dwarf2_find_line_info))
6762 {
6763 if (!*functionname_ptr)
6764 aarch64_elf_find_function (abfd, symbols, section, offset,
6765 *filename_ptr ? NULL : filename_ptr,
6766 functionname_ptr);
6767
6768 return TRUE;
6769 }
6770
6771 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6772 toolchain uses DWARF1. */
6773
6774 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6775 &found, filename_ptr,
6776 functionname_ptr, line_ptr,
6777 &elf_tdata (abfd)->line_info))
6778 return FALSE;
6779
6780 if (found && (*functionname_ptr || *line_ptr))
6781 return TRUE;
6782
6783 if (symbols == NULL)
6784 return FALSE;
6785
6786 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6787 filename_ptr, functionname_ptr))
6788 return FALSE;
6789
6790 *line_ptr = 0;
6791 return TRUE;
6792 }
6793
6794 static bfd_boolean
6795 elfNN_aarch64_find_inliner_info (bfd *abfd,
6796 const char **filename_ptr,
6797 const char **functionname_ptr,
6798 unsigned int *line_ptr)
6799 {
6800 bfd_boolean found;
6801 found = _bfd_dwarf2_find_inliner_info
6802 (abfd, filename_ptr,
6803 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6804 return found;
6805 }
6806
6807
6808 static void
6809 elfNN_aarch64_post_process_headers (bfd *abfd,
6810 struct bfd_link_info *link_info)
6811 {
6812 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6813
6814 i_ehdrp = elf_elfheader (abfd);
6815 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6816
6817 _bfd_elf_post_process_headers (abfd, link_info);
6818 }
6819
6820 static enum elf_reloc_type_class
6821 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6822 const asection *rel_sec ATTRIBUTE_UNUSED,
6823 const Elf_Internal_Rela *rela)
6824 {
6825 switch ((int) ELFNN_R_TYPE (rela->r_info))
6826 {
6827 case AARCH64_R (RELATIVE):
6828 return reloc_class_relative;
6829 case AARCH64_R (JUMP_SLOT):
6830 return reloc_class_plt;
6831 case AARCH64_R (COPY):
6832 return reloc_class_copy;
6833 default:
6834 return reloc_class_normal;
6835 }
6836 }
6837
6838 /* Handle an AArch64 specific section when reading an object file. This is
6839 called when bfd_section_from_shdr finds a section with an unknown
6840 type. */
6841
6842 static bfd_boolean
6843 elfNN_aarch64_section_from_shdr (bfd *abfd,
6844 Elf_Internal_Shdr *hdr,
6845 const char *name, int shindex)
6846 {
6847 /* There ought to be a place to keep ELF backend specific flags, but
6848 at the moment there isn't one. We just keep track of the
6849 sections by their name, instead. Fortunately, the ABI gives
6850 names for all the AArch64 specific sections, so we will probably get
6851 away with this. */
6852 switch (hdr->sh_type)
6853 {
6854 case SHT_AARCH64_ATTRIBUTES:
6855 break;
6856
6857 default:
6858 return FALSE;
6859 }
6860
6861 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6862 return FALSE;
6863
6864 return TRUE;
6865 }
6866
6867 /* A structure used to record a list of sections, independently
6868 of the next and prev fields in the asection structure. */
6869 typedef struct section_list
6870 {
6871 asection *sec;
6872 struct section_list *next;
6873 struct section_list *prev;
6874 }
6875 section_list;
6876
6877 /* Unfortunately we need to keep a list of sections for which
6878 an _aarch64_elf_section_data structure has been allocated. This
6879 is because it is possible for functions like elfNN_aarch64_write_section
6880 to be called on a section which has had an elf_data_structure
6881 allocated for it (and so the used_by_bfd field is valid) but
6882 for which the AArch64 extended version of this structure - the
6883 _aarch64_elf_section_data structure - has not been allocated. */
6884 static section_list *sections_with_aarch64_elf_section_data = NULL;
6885
6886 static void
6887 record_section_with_aarch64_elf_section_data (asection *sec)
6888 {
6889 struct section_list *entry;
6890
6891 entry = bfd_malloc (sizeof (*entry));
6892 if (entry == NULL)
6893 return;
6894 entry->sec = sec;
6895 entry->next = sections_with_aarch64_elf_section_data;
6896 entry->prev = NULL;
6897 if (entry->next != NULL)
6898 entry->next->prev = entry;
6899 sections_with_aarch64_elf_section_data = entry;
6900 }
6901
6902 static struct section_list *
6903 find_aarch64_elf_section_entry (asection *sec)
6904 {
6905 struct section_list *entry;
6906 static struct section_list *last_entry = NULL;
6907
6908 /* This is a short cut for the typical case where the sections are added
6909 to the sections_with_aarch64_elf_section_data list in forward order and
6910 then looked up here in backwards order. This makes a real difference
6911 to the ld-srec/sec64k.exp linker test. */
6912 entry = sections_with_aarch64_elf_section_data;
6913 if (last_entry != NULL)
6914 {
6915 if (last_entry->sec == sec)
6916 entry = last_entry;
6917 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6918 entry = last_entry->next;
6919 }
6920
6921 for (; entry; entry = entry->next)
6922 if (entry->sec == sec)
6923 break;
6924
6925 if (entry)
6926 /* Record the entry prior to this one - it is the entry we are
6927 most likely to want to locate next time. Also this way if we
6928 have been called from
6929 unrecord_section_with_aarch64_elf_section_data () we will not
6930 be caching a pointer that is about to be freed. */
6931 last_entry = entry->prev;
6932
6933 return entry;
6934 }
6935
6936 static void
6937 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6938 {
6939 struct section_list *entry;
6940
6941 entry = find_aarch64_elf_section_entry (sec);
6942
6943 if (entry)
6944 {
6945 if (entry->prev != NULL)
6946 entry->prev->next = entry->next;
6947 if (entry->next != NULL)
6948 entry->next->prev = entry->prev;
6949 if (entry == sections_with_aarch64_elf_section_data)
6950 sections_with_aarch64_elf_section_data = entry->next;
6951 free (entry);
6952 }
6953 }
6954
6955
6956 typedef struct
6957 {
6958 void *finfo;
6959 struct bfd_link_info *info;
6960 asection *sec;
6961 int sec_shndx;
6962 int (*func) (void *, const char *, Elf_Internal_Sym *,
6963 asection *, struct elf_link_hash_entry *);
6964 } output_arch_syminfo;
6965
6966 enum map_symbol_type
6967 {
6968 AARCH64_MAP_INSN,
6969 AARCH64_MAP_DATA
6970 };
6971
6972
6973 /* Output a single mapping symbol. */
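/* "$x" marks the start of a sequence of A64 instructions and "$d" the start
of a sequence of data items, following the mapping symbol convention of the
AArch64 ELF ABI. */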
6974
6975 static bfd_boolean
6976 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6977 enum map_symbol_type type, bfd_vma offset)
6978 {
6979 static const char *names[2] = { "$x", "$d" };
6980 Elf_Internal_Sym sym;
6981
6982 sym.st_value = (osi->sec->output_section->vma
6983 + osi->sec->output_offset + offset);
6984 sym.st_size = 0;
6985 sym.st_other = 0;
6986 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6987 sym.st_shndx = osi->sec_shndx;
6988 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6989 }
6990
6991
6992
6993 /* Output mapping symbols for PLT entries associated with H. */
6994
6995 static bfd_boolean
6996 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6997 {
6998 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6999 bfd_vma addr;
7000
7001 if (h->root.type == bfd_link_hash_indirect)
7002 return TRUE;
7003
7004 if (h->root.type == bfd_link_hash_warning)
7005 /* When warning symbols are created, they **replace** the "real"
7006 entry in the hash table, thus we never get to see the real
7007 symbol in a hash traversal. So look at it now. */
7008 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7009
7010 if (h->plt.offset == (bfd_vma) - 1)
7011 return TRUE;
7012
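/* An offset of 32 identifies the first real PLT entry, which sits just
after the (32 byte) PLT header; only that entry gets an explicit "$x"
mapping symbol. */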
7013 addr = h->plt.offset;
7014 if (addr == 32)
7015 {
7016 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7017 return FALSE;
7018 }
7019 return TRUE;
7020 }
7021
7022
7023 /* Output a single local symbol for a generated stub. */
7024
7025 static bfd_boolean
7026 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7027 bfd_vma offset, bfd_vma size)
7028 {
7029 Elf_Internal_Sym sym;
7030
7031 sym.st_value = (osi->sec->output_section->vma
7032 + osi->sec->output_offset + offset);
7033 sym.st_size = size;
7034 sym.st_other = 0;
7035 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7036 sym.st_shndx = osi->sec_shndx;
7037 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7038 }
7039
7040 static bfd_boolean
7041 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7042 {
7043 struct elf_aarch64_stub_hash_entry *stub_entry;
7044 asection *stub_sec;
7045 bfd_vma addr;
7046 char *stub_name;
7047 output_arch_syminfo *osi;
7048
7049 /* Massage our args to the form they really have. */
7050 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7051 osi = (output_arch_syminfo *) in_arg;
7052
7053 stub_sec = stub_entry->stub_sec;
7054
7055 /* Ensure this stub is attached to the current section being
7056 processed. */
7057 if (stub_sec != osi->sec)
7058 return TRUE;
7059
7060 addr = (bfd_vma) stub_entry->stub_offset;
7061
7062 stub_name = stub_entry->output_name;
7063
7064 switch (stub_entry->stub_type)
7065 {
7066 case aarch64_stub_adrp_branch:
7067 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7068 sizeof (aarch64_adrp_branch_stub)))
7069 return FALSE;
7070 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7071 return FALSE;
7072 break;
7073 case aarch64_stub_long_branch:
7074 if (!elfNN_aarch64_output_stub_sym
7075 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7076 return FALSE;
7077 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7078 return FALSE;
7079 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7080 return FALSE;
7081 break;
7082 case aarch64_stub_erratum_835769_veneer:
7083 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7084 sizeof (aarch64_erratum_835769_stub)))
7085 return FALSE;
7086 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7087 return FALSE;
7088 break;
7089 case aarch64_stub_erratum_843419_veneer:
7090 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7091 sizeof (aarch64_erratum_843419_stub)))
7092 return FALSE;
7093 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7094 return FALSE;
7095 break;
7096
7097 default:
7098 abort ();
7099 }
7100
7101 return TRUE;
7102 }
7103
7104 /* Output mapping symbols for linker generated sections. */
7105
7106 static bfd_boolean
7107 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7108 struct bfd_link_info *info,
7109 void *finfo,
7110 int (*func) (void *, const char *,
7111 Elf_Internal_Sym *,
7112 asection *,
7113 struct elf_link_hash_entry
7114 *))
7115 {
7116 output_arch_syminfo osi;
7117 struct elf_aarch64_link_hash_table *htab;
7118
7119 htab = elf_aarch64_hash_table (info);
7120
7121 osi.finfo = finfo;
7122 osi.info = info;
7123 osi.func = func;
7124
7125 /* Long call stubs. */
7126 if (htab->stub_bfd && htab->stub_bfd->sections)
7127 {
7128 asection *stub_sec;
7129
7130 for (stub_sec = htab->stub_bfd->sections;
7131 stub_sec != NULL; stub_sec = stub_sec->next)
7132 {
7133 /* Ignore non-stub sections. */
7134 if (!strstr (stub_sec->name, STUB_SUFFIX))
7135 continue;
7136
7137 osi.sec = stub_sec;
7138
7139 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7140 (output_bfd, osi.sec->output_section);
7141
7142 /* The first instruction in a stub is always a branch. */
7143 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7144 return FALSE;
7145
7146 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7147 &osi);
7148 }
7149 }
7150
7151 /* Finally, output mapping symbols for the PLT. */
7152 if (!htab->root.splt || htab->root.splt->size == 0)
7153 return TRUE;
7154
7155 /* For now live without mapping symbols for the plt. */
7156 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7157 (output_bfd, htab->root.splt->output_section);
7158 osi.sec = htab->root.splt;
7159
7160 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7161 (void *) &osi);
7162
7163 return TRUE;
7164
7165 }
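/* For example (the exact set of symbols depends on which stubs were
   created): a stub section holding one long branch stub ends up with a
   $x at offset 0 for the section, the stub's own local symbol and the
   mapping symbols emitted by aarch64_map_one_stub, while the PLT gets a
   single $x at offset 32 (see elfNN_aarch64_output_plt_map above), i.e.
   at the first entry after the PLT0 header.  Disassemblers use these
   $x/$d markers to switch between decoding instructions and dumping
   data.  */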
7166
7167 /* Allocate target specific section data. */
7168
7169 static bfd_boolean
7170 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7171 {
7172 if (!sec->used_by_bfd)
7173 {
7174 _aarch64_elf_section_data *sdata;
7175 bfd_size_type amt = sizeof (*sdata);
7176
7177 sdata = bfd_zalloc (abfd, amt);
7178 if (sdata == NULL)
7179 return FALSE;
7180 sec->used_by_bfd = sdata;
7181 }
7182
7183 record_section_with_aarch64_elf_section_data (sec);
7184
7185 return _bfd_elf_new_section_hook (abfd, sec);
7186 }
7187
7188
7189 static void
7190 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7191 asection *sec,
7192 void *ignore ATTRIBUTE_UNUSED)
7193 {
7194 unrecord_section_with_aarch64_elf_section_data (sec);
7195 }
7196
7197 static bfd_boolean
7198 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7199 {
7200 if (abfd->sections)
7201 bfd_map_over_sections (abfd,
7202 unrecord_section_via_map_over_sections, NULL);
7203
7204 return _bfd_elf_close_and_cleanup (abfd);
7205 }
7206
7207 static bfd_boolean
7208 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7209 {
7210 if (abfd->sections)
7211 bfd_map_over_sections (abfd,
7212 unrecord_section_via_map_over_sections, NULL);
7213
7214 return _bfd_free_cached_info (abfd);
7215 }
7216
7217 /* Create dynamic sections. This is different from the ARM backend in that
7218 the got, plt, gotplt and their relocation sections are all created in the
7219 standard part of the bfd elf backend. */
7220
7221 static bfd_boolean
7222 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7223 struct bfd_link_info *info)
7224 {
7225 struct elf_aarch64_link_hash_table *htab;
7226
7227 /* We need to create the .got section. */
7228 if (!aarch64_elf_create_got_section (dynobj, info))
7229 return FALSE;
7230
7231 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7232 return FALSE;
7233
7234 htab = elf_aarch64_hash_table (info);
7235 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7236 if (!info->shared)
7237 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7238
7239 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7240 abort ();
7241
7242 return TRUE;
7243 }
7244
7245
7246 /* Allocate space in .plt, .got and associated reloc sections for
7247 dynamic relocs. */
7248
7249 static bfd_boolean
7250 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7251 {
7252 struct bfd_link_info *info;
7253 struct elf_aarch64_link_hash_table *htab;
7254 struct elf_aarch64_link_hash_entry *eh;
7255 struct elf_dyn_relocs *p;
7256
7257 /* An example of a bfd_link_hash_indirect symbol is a versioned
7258 symbol, for instance __gxx_personality_v0(bfd_link_hash_indirect)
7259 -> __gxx_personality_v0(bfd_link_hash_defined)
7260
7261 There is no need to process bfd_link_hash_indirect symbols here
7262 because we will also be presented with the concrete instance of
7263 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7264 called to copy all relevant data from the generic to the concrete
7265 symbol instance.
7266 */
7267 if (h->root.type == bfd_link_hash_indirect)
7268 return TRUE;
7269
7270 if (h->root.type == bfd_link_hash_warning)
7271 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7272
7273 info = (struct bfd_link_info *) inf;
7274 htab = elf_aarch64_hash_table (info);
7275
7276 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7277 here if it is defined and referenced in a non-shared object. */
7278 if (h->type == STT_GNU_IFUNC
7279 && h->def_regular)
7280 return TRUE;
7281 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7282 {
7283 /* Make sure this symbol is output as a dynamic symbol.
7284 Undefined weak syms won't yet be marked as dynamic. */
7285 if (h->dynindx == -1 && !h->forced_local)
7286 {
7287 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7288 return FALSE;
7289 }
7290
7291 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7292 {
7293 asection *s = htab->root.splt;
7294
7295 /* If this is the first .plt entry, make room for the special
7296 first entry. */
7297 if (s->size == 0)
7298 s->size += htab->plt_header_size;
7299
7300 h->plt.offset = s->size;
7301
7302 /* If this symbol is not defined in a regular file, and we are
7303 not generating a shared library, then set the symbol to this
7304 location in the .plt. This is required to make function
7305 pointers compare as equal between the normal executable and
7306 the shared library. */
7307 if (!info->shared && !h->def_regular)
7308 {
7309 h->root.u.def.section = s;
7310 h->root.u.def.value = h->plt.offset;
7311 }
7312
7313 /* Make room for this entry. For now we only create the
7314 small model PLT entries. We later need to find a way
7315 of relaxing into these from the large model PLT entries. */
7316 s->size += PLT_SMALL_ENTRY_SIZE;
7317
7318 /* We also need to make an entry in the .got.plt section, which
7319 will be placed in the .got section by the linker script. */
7320 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7321
7322 /* We also need to make an entry in the .rela.plt section. */
7323 htab->root.srelplt->size += RELOC_SIZE (htab);
7324
7325 /* We need to ensure that all GOT entries that serve the PLT
7326 are consecutive with the special GOT slots [0] [1] and
7327 [2]. Any additional relocations, such as
7328 R_AARCH64_TLSDESC, must be placed after the PLT related
7329 entries. We abuse the reloc_count such that during
7330 sizing we adjust reloc_count to indicate the number of
7331 PLT related reserved entries. In subsequent phases when
7332 filling in the contents of the reloc entries, PLT related
7333 entries are placed by computing their PLT index (0
7334 .. reloc_count), while other non-PLT relocs are placed
7335 at the slot indicated by reloc_count, and reloc_count is
7336 updated. */
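/* A worked example of the scheme described above, assuming the ELF64
   case where GOT_ENTRY_SIZE is 8 and RELOC_SIZE (htab) is the size of
   an Elf64_Rela (24 bytes): the Nth PLT symbol handled here reserves
   one small-model PLT entry, one .got.plt slot at offset (N + 3) * 8
   (after the three reserved slots) and one R_AARCH64_JUMP_SLOT entry at
   index N of .rela.plt, which is what the reloc_count increment below
   records.  TLSDESC relocations allocated further down grow .rela.plt's
   size but deliberately not its reloc_count, so they land after all the
   JUMP_SLOT entries.  */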
7337
7338 htab->root.srelplt->reloc_count++;
7339 }
7340 else
7341 {
7342 h->plt.offset = (bfd_vma) - 1;
7343 h->needs_plt = 0;
7344 }
7345 }
7346 else
7347 {
7348 h->plt.offset = (bfd_vma) - 1;
7349 h->needs_plt = 0;
7350 }
7351
7352 eh = (struct elf_aarch64_link_hash_entry *) h;
7353 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7354
7355 if (h->got.refcount > 0)
7356 {
7357 bfd_boolean dyn;
7358 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7359
7360 h->got.offset = (bfd_vma) - 1;
7361
7362 dyn = htab->root.dynamic_sections_created;
7363
7364 /* Make sure this symbol is output as a dynamic symbol.
7365 Undefined weak syms won't yet be marked as dynamic. */
7366 if (dyn && h->dynindx == -1 && !h->forced_local)
7367 {
7368 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7369 return FALSE;
7370 }
7371
7372 if (got_type == GOT_UNKNOWN)
7373 {
7374 }
7375 else if (got_type == GOT_NORMAL)
7376 {
7377 h->got.offset = htab->root.sgot->size;
7378 htab->root.sgot->size += GOT_ENTRY_SIZE;
7379 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7380 || h->root.type != bfd_link_hash_undefweak)
7381 && (info->shared
7382 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7383 {
7384 htab->root.srelgot->size += RELOC_SIZE (htab);
7385 }
7386 }
7387 else
7388 {
7389 int indx;
7390 if (got_type & GOT_TLSDESC_GD)
7391 {
7392 eh->tlsdesc_got_jump_table_offset =
7393 (htab->root.sgotplt->size
7394 - aarch64_compute_jump_table_size (htab));
7395 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7396 h->got.offset = (bfd_vma) - 2;
7397 }
7398
7399 if (got_type & GOT_TLS_GD)
7400 {
7401 h->got.offset = htab->root.sgot->size;
7402 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7403 }
7404
7405 if (got_type & GOT_TLS_IE)
7406 {
7407 h->got.offset = htab->root.sgot->size;
7408 htab->root.sgot->size += GOT_ENTRY_SIZE;
7409 }
7410
7411 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7412 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7413 || h->root.type != bfd_link_hash_undefweak)
7414 && (info->shared
7415 || indx != 0
7416 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7417 {
7418 if (got_type & GOT_TLSDESC_GD)
7419 {
7420 htab->root.srelplt->size += RELOC_SIZE (htab);
7421 /* Note reloc_count not incremented here! We have
7422 already adjusted reloc_count for this relocation
7423 type. */
7424
7425 /* TLSDESC PLT is now needed, but not yet determined. */
7426 htab->tlsdesc_plt = (bfd_vma) - 1;
7427 }
7428
7429 if (got_type & GOT_TLS_GD)
7430 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7431
7432 if (got_type & GOT_TLS_IE)
7433 htab->root.srelgot->size += RELOC_SIZE (htab);
7434 }
7435 }
7436 }
7437 else
7438 {
7439 h->got.offset = (bfd_vma) - 1;
7440 }
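/* A sizing sketch for the TLS cases above, assuming ELF64 where
   GOT_ENTRY_SIZE is 8 and RELOC_SIZE (htab) is 24: in a shared link, a
   global whose got_type is GOT_TLS_GD | GOT_TLS_IE reserves two .got
   slots for the GD module/offset pair plus one .got slot for the IE
   offset (24 bytes of .got) and 2 + 1 = 3 RELA entries (72 bytes) in
   .rela.got, while a GOT_TLSDESC_GD symbol instead takes a pair of
   .got.plt slots and a single R_AARCH64_TLSDESC entry accounted in
   .rela.plt.  */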
7441
7442 if (eh->dyn_relocs == NULL)
7443 return TRUE;
7444
7445 /* In the shared -Bsymbolic case, discard space allocated for
7446 dynamic pc-relative relocs against symbols which turn out to be
7447 defined in regular objects. For the normal shared case, discard
7448 space for pc-relative relocs that have become local due to symbol
7449 visibility changes. */
7450
7451 if (info->shared)
7452 {
7453 /* Relocs that use pc_count are those that appear on a call
7454 insn, or certain REL relocs that can be generated via assembly.
7455 We want calls to protected symbols to resolve directly to the
7456 function rather than going via the plt. If people want
7457 function pointer comparisons to work as expected then they
7458 should avoid writing weird assembly. */
7459 if (SYMBOL_CALLS_LOCAL (info, h))
7460 {
7461 struct elf_dyn_relocs **pp;
7462
7463 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7464 {
7465 p->count -= p->pc_count;
7466 p->pc_count = 0;
7467 if (p->count == 0)
7468 *pp = p->next;
7469 else
7470 pp = &p->next;
7471 }
7472 }
7473
7474 /* Also discard relocs on undefined weak syms with non-default
7475 visibility. */
7476 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7477 {
7478 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7479 eh->dyn_relocs = NULL;
7480
7481 /* Make sure undefined weak symbols are output as dynamic
7482 symbols in PIEs. */
7483 else if (h->dynindx == -1
7484 && !h->forced_local
7485 && !bfd_elf_link_record_dynamic_symbol (info, h))
7486 return FALSE;
7487 }
7488
7489 }
7490 else if (ELIMINATE_COPY_RELOCS)
7491 {
7492 /* For the non-shared case, discard space for relocs against
7493 symbols which turn out to need copy relocs or are not
7494 dynamic. */
7495
7496 if (!h->non_got_ref
7497 && ((h->def_dynamic
7498 && !h->def_regular)
7499 || (htab->root.dynamic_sections_created
7500 && (h->root.type == bfd_link_hash_undefweak
7501 || h->root.type == bfd_link_hash_undefined))))
7502 {
7503 /* Make sure this symbol is output as a dynamic symbol.
7504 Undefined weak syms won't yet be marked as dynamic. */
7505 if (h->dynindx == -1
7506 && !h->forced_local
7507 && !bfd_elf_link_record_dynamic_symbol (info, h))
7508 return FALSE;
7509
7510 /* If that succeeded, we know we'll be keeping all the
7511 relocs. */
7512 if (h->dynindx != -1)
7513 goto keep;
7514 }
7515
7516 eh->dyn_relocs = NULL;
7517
7518 keep:;
7519 }
7520
7521 /* Finally, allocate space. */
7522 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7523 {
7524 asection *sreloc;
7525
7526 sreloc = elf_section_data (p->sec)->sreloc;
7527
7528 BFD_ASSERT (sreloc != NULL);
7529
7530 sreloc->size += p->count * RELOC_SIZE (htab);
7531 }
7532
7533 return TRUE;
7534 }
7535
7536 /* Allocate space in .plt, .got and associated reloc sections for
7537 ifunc dynamic relocs. */
7538
7539 static bfd_boolean
7540 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7541 void *inf)
7542 {
7543 struct bfd_link_info *info;
7544 struct elf_aarch64_link_hash_table *htab;
7545 struct elf_aarch64_link_hash_entry *eh;
7546
7547 /* An example of a bfd_link_hash_indirect symbol is a versioned
7548 symbol, for instance __gxx_personality_v0(bfd_link_hash_indirect)
7549 -> __gxx_personality_v0(bfd_link_hash_defined)
7550
7551 There is no need to process bfd_link_hash_indirect symbols here
7552 because we will also be presented with the concrete instance of
7553 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7554 called to copy all relevant data from the generic to the concrete
7555 symbol instance.
7556 */
7557 if (h->root.type == bfd_link_hash_indirect)
7558 return TRUE;
7559
7560 if (h->root.type == bfd_link_hash_warning)
7561 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7562
7563 info = (struct bfd_link_info *) inf;
7564 htab = elf_aarch64_hash_table (info);
7565
7566 eh = (struct elf_aarch64_link_hash_entry *) h;
7567
7568 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7569 here if it is defined and referenced in a non-shared object. */
7570 if (h->type == STT_GNU_IFUNC
7571 && h->def_regular)
7572 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7573 &eh->dyn_relocs,
7574 htab->plt_entry_size,
7575 htab->plt_header_size,
7576 GOT_ENTRY_SIZE);
7577 return TRUE;
7578 }
7579
7580 /* Allocate space in .plt, .got and associated reloc sections for
7581 local dynamic relocs. */
7582
7583 static bfd_boolean
7584 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7585 {
7586 struct elf_link_hash_entry *h
7587 = (struct elf_link_hash_entry *) *slot;
7588
7589 if (h->type != STT_GNU_IFUNC
7590 || !h->def_regular
7591 || !h->ref_regular
7592 || !h->forced_local
7593 || h->root.type != bfd_link_hash_defined)
7594 abort ();
7595
7596 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7597 }
7598
7599 /* Allocate space in .plt, .got and associated reloc sections for
7600 local ifunc dynamic relocs. */
7601
7602 static bfd_boolean
7603 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7604 {
7605 struct elf_link_hash_entry *h
7606 = (struct elf_link_hash_entry *) *slot;
7607
7608 if (h->type != STT_GNU_IFUNC
7609 || !h->def_regular
7610 || !h->ref_regular
7611 || !h->forced_local
7612 || h->root.type != bfd_link_hash_defined)
7613 abort ();
7614
7615 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7616 }
7617
7618 /* Find any dynamic relocs that apply to read-only sections. */
7619
7620 static bfd_boolean
7621 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7622 {
7623 struct elf_aarch64_link_hash_entry * eh;
7624 struct elf_dyn_relocs * p;
7625
7626 eh = (struct elf_aarch64_link_hash_entry *) h;
7627 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7628 {
7629 asection *s = p->sec;
7630
7631 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7632 {
7633 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7634
7635 info->flags |= DF_TEXTREL;
7636
7637 /* Not an error, just cut short the traversal. */
7638 return FALSE;
7639 }
7640 }
7641 return TRUE;
7642 }
7643
7644 /* This is the most important function of all.  Innocuously named
7645 though!  */
7646 static bfd_boolean
7647 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7648 struct bfd_link_info *info)
7649 {
7650 struct elf_aarch64_link_hash_table *htab;
7651 bfd *dynobj;
7652 asection *s;
7653 bfd_boolean relocs;
7654 bfd *ibfd;
7655
7656 htab = elf_aarch64_hash_table (info);
7657 dynobj = htab->root.dynobj;
7658
7659 BFD_ASSERT (dynobj != NULL);
7660
7661 if (htab->root.dynamic_sections_created)
7662 {
7663 if (info->executable)
7664 {
7665 s = bfd_get_linker_section (dynobj, ".interp");
7666 if (s == NULL)
7667 abort ();
7668 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7669 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7670 }
7671 }
7672
7673 /* Set up .got offsets for local syms, and space for local dynamic
7674 relocs. */
7675 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7676 {
7677 struct elf_aarch64_local_symbol *locals = NULL;
7678 Elf_Internal_Shdr *symtab_hdr;
7679 asection *srel;
7680 unsigned int i;
7681
7682 if (!is_aarch64_elf (ibfd))
7683 continue;
7684
7685 for (s = ibfd->sections; s != NULL; s = s->next)
7686 {
7687 struct elf_dyn_relocs *p;
7688
7689 for (p = (struct elf_dyn_relocs *)
7690 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7691 {
7692 if (!bfd_is_abs_section (p->sec)
7693 && bfd_is_abs_section (p->sec->output_section))
7694 {
7695 /* Input section has been discarded, either because
7696 it is a copy of a linkonce section or due to
7697 linker script /DISCARD/, so we'll be discarding
7698 the relocs too. */
7699 }
7700 else if (p->count != 0)
7701 {
7702 srel = elf_section_data (p->sec)->sreloc;
7703 srel->size += p->count * RELOC_SIZE (htab);
7704 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7705 info->flags |= DF_TEXTREL;
7706 }
7707 }
7708 }
7709
7710 locals = elf_aarch64_locals (ibfd);
7711 if (!locals)
7712 continue;
7713
7714 symtab_hdr = &elf_symtab_hdr (ibfd);
7715 srel = htab->root.srelgot;
7716 for (i = 0; i < symtab_hdr->sh_info; i++)
7717 {
7718 locals[i].got_offset = (bfd_vma) - 1;
7719 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7720 if (locals[i].got_refcount > 0)
7721 {
7722 unsigned got_type = locals[i].got_type;
7723 if (got_type & GOT_TLSDESC_GD)
7724 {
7725 locals[i].tlsdesc_got_jump_table_offset =
7726 (htab->root.sgotplt->size
7727 - aarch64_compute_jump_table_size (htab));
7728 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7729 locals[i].got_offset = (bfd_vma) - 2;
7730 }
7731
7732 if (got_type & GOT_TLS_GD)
7733 {
7734 locals[i].got_offset = htab->root.sgot->size;
7735 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7736 }
7737
7738 if (got_type & GOT_TLS_IE
7739 || got_type & GOT_NORMAL)
7740 {
7741 locals[i].got_offset = htab->root.sgot->size;
7742 htab->root.sgot->size += GOT_ENTRY_SIZE;
7743 }
7744
7745 if (got_type == GOT_UNKNOWN)
7746 {
7747 }
7748
7749 if (info->shared)
7750 {
7751 if (got_type & GOT_TLSDESC_GD)
7752 {
7753 htab->root.srelplt->size += RELOC_SIZE (htab);
7754 /* Note reloc_count not incremented here! */
7755 htab->tlsdesc_plt = (bfd_vma) - 1;
7756 }
7757
7758 if (got_type & GOT_TLS_GD)
7759 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7760
7761 if (got_type & GOT_TLS_IE
7762 || got_type & GOT_NORMAL)
7763 htab->root.srelgot->size += RELOC_SIZE (htab);
7764 }
7765 }
7766 else
7767 {
7768 locals[i].got_refcount = (bfd_vma) - 1;
7769 }
7770 }
7771 }
7772
7773
7774 /* Allocate global sym .plt and .got entries, and space for global
7775 sym dynamic relocs. */
7776 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7777 info);
7778
7779 /* Allocate global ifunc sym .plt and .got entries, and space for global
7780 ifunc sym dynamic relocs. */
7781 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7782 info);
7783
7784 /* Allocate .plt and .got entries, and space for local symbols. */
7785 htab_traverse (htab->loc_hash_table,
7786 elfNN_aarch64_allocate_local_dynrelocs,
7787 info);
7788
7789 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7790 htab_traverse (htab->loc_hash_table,
7791 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7792 info);
7793
7794 /* For every jump slot reserved in the sgotplt, reloc_count is
7795 incremented. However, when we reserve space for TLS descriptors,
7796 it's not incremented, so in order to compute the space reserved
7797 for the jump slots, it suffices to multiply the reloc count by the jump
7798 slot size. */
7799
7800 if (htab->root.srelplt)
7801 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
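/* For example, assuming aarch64_compute_jump_table_size simply
   multiplies .rela.plt's reloc_count by GOT_ENTRY_SIZE: with four PLT
   symbols on ELF64 the jump-slot portion of .got.plt is 4 * 8 = 32
   bytes, sitting after the three reserved slots; any .got.plt space
   above that is occupied by the TLSDESC GOT pairs allocated earlier.  */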
7802
7803 if (htab->tlsdesc_plt)
7804 {
7805 if (htab->root.splt->size == 0)
7806 htab->root.splt->size += PLT_ENTRY_SIZE;
7807
7808 htab->tlsdesc_plt = htab->root.splt->size;
7809 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7810
7811 /* If we're not using lazy TLS relocations, don't generate the
7812 GOT entry required. */
7813 if (!(info->flags & DF_BIND_NOW))
7814 {
7815 htab->dt_tlsdesc_got = htab->root.sgot->size;
7816 htab->root.sgot->size += GOT_ENTRY_SIZE;
7817 }
7818 }
7819
7820 /* Init mapping symbol information to use later to distinguish between
7821 code and data while scanning for errata. */
7822 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7823 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7824 {
7825 if (!is_aarch64_elf (ibfd))
7826 continue;
7827 bfd_elfNN_aarch64_init_maps (ibfd);
7828 }
7829
7830 /* We now have determined the sizes of the various dynamic sections.
7831 Allocate memory for them. */
7832 relocs = FALSE;
7833 for (s = dynobj->sections; s != NULL; s = s->next)
7834 {
7835 if ((s->flags & SEC_LINKER_CREATED) == 0)
7836 continue;
7837
7838 if (s == htab->root.splt
7839 || s == htab->root.sgot
7840 || s == htab->root.sgotplt
7841 || s == htab->root.iplt
7842 || s == htab->root.igotplt || s == htab->sdynbss)
7843 {
7844 /* Strip this section if we don't need it; see the
7845 comment below. */
7846 }
7847 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7848 {
7849 if (s->size != 0 && s != htab->root.srelplt)
7850 relocs = TRUE;
7851
7852 /* We use the reloc_count field as a counter if we need
7853 to copy relocs into the output file. */
7854 if (s != htab->root.srelplt)
7855 s->reloc_count = 0;
7856 }
7857 else
7858 {
7859 /* It's not one of our sections, so don't allocate space. */
7860 continue;
7861 }
7862
7863 if (s->size == 0)
7864 {
7865 /* If we don't need this section, strip it from the
7866 output file. This is mostly to handle .rela.bss and
7867 .rela.plt. We must create both sections in
7868 create_dynamic_sections, because they must be created
7869 before the linker maps input sections to output
7870 sections. The linker does that before
7871 adjust_dynamic_symbol is called, and it is that
7872 function which decides whether anything needs to go
7873 into these sections. */
7874
7875 s->flags |= SEC_EXCLUDE;
7876 continue;
7877 }
7878
7879 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7880 continue;
7881
7882 /* Allocate memory for the section contents. We use bfd_zalloc
7883 here in case unused entries are not reclaimed before the
7884 section's contents are written out. This should not happen,
7885 but this way if it does, we get a R_AARCH64_NONE reloc instead
7886 of garbage. */
7887 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7888 if (s->contents == NULL)
7889 return FALSE;
7890 }
7891
7892 if (htab->root.dynamic_sections_created)
7893 {
7894 /* Add some entries to the .dynamic section. We fill in the
7895 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7896 must add the entries now so that we get the correct size for
7897 the .dynamic section. The DT_DEBUG entry is filled in by the
7898 dynamic linker and used by the debugger. */
7899 #define add_dynamic_entry(TAG, VAL) \
7900 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7901
7902 if (info->executable)
7903 {
7904 if (!add_dynamic_entry (DT_DEBUG, 0))
7905 return FALSE;
7906 }
7907
7908 if (htab->root.splt->size != 0)
7909 {
7910 if (!add_dynamic_entry (DT_PLTGOT, 0)
7911 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7912 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7913 || !add_dynamic_entry (DT_JMPREL, 0))
7914 return FALSE;
7915
7916 if (htab->tlsdesc_plt
7917 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7918 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7919 return FALSE;
7920 }
7921
7922 if (relocs)
7923 {
7924 if (!add_dynamic_entry (DT_RELA, 0)
7925 || !add_dynamic_entry (DT_RELASZ, 0)
7926 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7927 return FALSE;
7928
7929 /* If any dynamic relocs apply to a read-only section,
7930 then we need a DT_TEXTREL entry. */
7931 if ((info->flags & DF_TEXTREL) == 0)
7932 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7933 info);
7934
7935 if ((info->flags & DF_TEXTREL) != 0)
7936 {
7937 if (!add_dynamic_entry (DT_TEXTREL, 0))
7938 return FALSE;
7939 }
7940 }
7941 }
7942 #undef add_dynamic_entry
7943
7944 return TRUE;
7945 }
7946
7947 static inline void
7948 elf_aarch64_update_plt_entry (bfd *output_bfd,
7949 bfd_reloc_code_real_type r_type,
7950 bfd_byte *plt_entry, bfd_vma value)
7951 {
7952 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7953
7954 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7955 }
7956
7957 static void
7958 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7959 struct elf_aarch64_link_hash_table
7960 *htab, bfd *output_bfd,
7961 struct bfd_link_info *info)
7962 {
7963 bfd_byte *plt_entry;
7964 bfd_vma plt_index;
7965 bfd_vma got_offset;
7966 bfd_vma gotplt_entry_address;
7967 bfd_vma plt_entry_address;
7968 Elf_Internal_Rela rela;
7969 bfd_byte *loc;
7970 asection *plt, *gotplt, *relplt;
7971
7972 /* When building a static executable, use .iplt, .igot.plt and
7973 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7974 if (htab->root.splt != NULL)
7975 {
7976 plt = htab->root.splt;
7977 gotplt = htab->root.sgotplt;
7978 relplt = htab->root.srelplt;
7979 }
7980 else
7981 {
7982 plt = htab->root.iplt;
7983 gotplt = htab->root.igotplt;
7984 relplt = htab->root.irelplt;
7985 }
7986
7987 /* Get the index in the procedure linkage table which
7988 corresponds to this symbol. This is the index of this symbol
7989 in all the symbols for which we are making plt entries. The
7990 first entry in the procedure linkage table is reserved.
7991
7992 Get the offset into the .got table of the entry that
7993 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7994 bytes. The first three are reserved for the dynamic linker.
7995
7996 For static executables, we don't reserve anything. */
7997
7998 if (plt == htab->root.splt)
7999 {
8000 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8001 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8002 }
8003 else
8004 {
8005 plt_index = h->plt.offset / htab->plt_entry_size;
8006 got_offset = plt_index * GOT_ENTRY_SIZE;
8007 }
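/* A worked example of the index arithmetic above, assuming the ELF64
   small-model sizes used in this file (32-byte PLT0 header, 16-byte PLT
   entries, 8-byte GOT entries): the second PLT symbol has
   h->plt.offset == 32 + 16 == 48, giving plt_index == 1 and
   got_offset == (1 + 3) * 8 == 32, i.e. the .got.plt slot immediately
   after the three entries reserved for the dynamic linker.  */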
8008
8009 plt_entry = plt->contents + h->plt.offset;
8010 plt_entry_address = plt->output_section->vma
8011 + plt->output_offset + h->plt.offset;
8012 gotplt_entry_address = gotplt->output_section->vma +
8013 gotplt->output_offset + got_offset;
8014
8015 /* Copy in the boiler-plate for the PLTn entry. */
8016 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8017
8018 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8019 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8020 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8021 plt_entry,
8022 PG (gotplt_entry_address) -
8023 PG (plt_entry_address));
8024
8025 /* Fill in the lo12 bits for the load from the pltgot. */
8026 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8027 plt_entry + 4,
8028 PG_OFFSET (gotplt_entry_address));
8029
8030 /* Fill in the lo12 bits for the add from the pltgot entry. */
8031 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8032 plt_entry + 8,
8033 PG_OFFSET (gotplt_entry_address));
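/* To illustrate the page arithmetic, assuming PG masks off the low 12
   bits and PG_OFFSET keeps them (as the macros are defined elsewhere in
   this file): with gotplt_entry_address == 0x411018 and
   plt_entry_address == 0x400040, the ADRP immediate covers
   PG (0x411018) - PG (0x400040) == 0x11000, i.e. 0x11 4K pages, while
   the LDR and ADD low 12 bits are PG_OFFSET (0x411018) == 0x18.  */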
8034
8035 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8036 bfd_put_NN (output_bfd,
8037 plt->output_section->vma + plt->output_offset,
8038 gotplt->contents + got_offset);
8039
8040 rela.r_offset = gotplt_entry_address;
8041
8042 if (h->dynindx == -1
8043 || ((info->executable
8044 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8045 && h->def_regular
8046 && h->type == STT_GNU_IFUNC))
8047 {
8048 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8049 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8050 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8051 rela.r_addend = (h->root.u.def.value
8052 + h->root.u.def.section->output_section->vma
8053 + h->root.u.def.section->output_offset);
8054 }
8055 else
8056 {
8057 /* Fill in the entry in the .rela.plt section. */
8058 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8059 rela.r_addend = 0;
8060 }
8061
8062 /* Compute the relocation entry to be used based on the PLT index and do
8063 not adjust reloc_count. The reloc_count has already been adjusted
8064 to account for this entry. */
8065 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8066 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8067 }
8068
8069 /* Size sections even though they're not dynamic. This hook is used to set up
8070 _TLS_MODULE_BASE_, if needed. */
8071
8072 static bfd_boolean
8073 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8074 struct bfd_link_info *info)
8075 {
8076 asection *tls_sec;
8077
8078 if (info->relocatable)
8079 return TRUE;
8080
8081 tls_sec = elf_hash_table (info)->tls_sec;
8082
8083 if (tls_sec)
8084 {
8085 struct elf_link_hash_entry *tlsbase;
8086
8087 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8088 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8089
8090 if (tlsbase)
8091 {
8092 struct bfd_link_hash_entry *h = NULL;
8093 const struct elf_backend_data *bed =
8094 get_elf_backend_data (output_bfd);
8095
8096 if (!(_bfd_generic_link_add_one_symbol
8097 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8098 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8099 return FALSE;
8100
8101 tlsbase->type = STT_TLS;
8102 tlsbase = (struct elf_link_hash_entry *) h;
8103 tlsbase->def_regular = 1;
8104 tlsbase->other = STV_HIDDEN;
8105 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8106 }
8107 }
8108
8109 return TRUE;
8110 }
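/* A brief note on the symbol created above: _TLS_MODULE_BASE_ ends up as
   a hidden, local STT_TLS symbol at offset 0 of the TLS segment, giving
   the relocation handling elsewhere in this file a single per-module
   base from which thread-local offsets can be measured.  */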
8111
8112 /* Finish up dynamic symbol handling. We set the contents of various
8113 dynamic sections here. */
8114 static bfd_boolean
8115 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8116 struct bfd_link_info *info,
8117 struct elf_link_hash_entry *h,
8118 Elf_Internal_Sym *sym)
8119 {
8120 struct elf_aarch64_link_hash_table *htab;
8121 htab = elf_aarch64_hash_table (info);
8122
8123 if (h->plt.offset != (bfd_vma) - 1)
8124 {
8125 asection *plt, *gotplt, *relplt;
8126
8127 /* This symbol has an entry in the procedure linkage table. Set
8128 it up. */
8129
8130 /* When building a static executable, use .iplt, .igot.plt and
8131 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8132 if (htab->root.splt != NULL)
8133 {
8134 plt = htab->root.splt;
8135 gotplt = htab->root.sgotplt;
8136 relplt = htab->root.srelplt;
8137 }
8138 else
8139 {
8140 plt = htab->root.iplt;
8141 gotplt = htab->root.igotplt;
8142 relplt = htab->root.irelplt;
8143 }
8144
8145 /* This symbol has an entry in the procedure linkage table. Set
8146 it up. */
8147 if ((h->dynindx == -1
8148 && !((h->forced_local || info->executable)
8149 && h->def_regular
8150 && h->type == STT_GNU_IFUNC))
8151 || plt == NULL
8152 || gotplt == NULL
8153 || relplt == NULL)
8154 abort ();
8155
8156 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8157 if (!h->def_regular)
8158 {
8159 /* Mark the symbol as undefined, rather than as defined in
8160 the .plt section. */
8161 sym->st_shndx = SHN_UNDEF;
8162 /* If the symbol is weak we need to clear the value.
8163 Otherwise, the PLT entry would provide a definition for
8164 the symbol even if the symbol wasn't defined anywhere,
8165 and so the symbol would never be NULL. Leave the value if
8166 there were any relocations where pointer equality matters
8167 (this is a clue for the dynamic linker, to make function
8168 pointer comparisons work between an application and shared
8169 library). */
8170 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8171 sym->st_value = 0;
8172 }
8173 }
8174
8175 if (h->got.offset != (bfd_vma) - 1
8176 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8177 {
8178 Elf_Internal_Rela rela;
8179 bfd_byte *loc;
8180
8181 /* This symbol has an entry in the global offset table. Set it
8182 up. */
8183 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8184 abort ();
8185
8186 rela.r_offset = (htab->root.sgot->output_section->vma
8187 + htab->root.sgot->output_offset
8188 + (h->got.offset & ~(bfd_vma) 1));
8189
8190 if (h->def_regular
8191 && h->type == STT_GNU_IFUNC)
8192 {
8193 if (info->shared)
8194 {
8195 /* Generate R_AARCH64_GLOB_DAT. */
8196 goto do_glob_dat;
8197 }
8198 else
8199 {
8200 asection *plt;
8201
8202 if (!h->pointer_equality_needed)
8203 abort ();
8204
8205 /* For a non-shared object, we can't use .got.plt, which
8206 contains the real function address if we need pointer
8207 equality. We load the GOT entry with the PLT entry. */
8208 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8209 bfd_put_NN (output_bfd, (plt->output_section->vma
8210 + plt->output_offset
8211 + h->plt.offset),
8212 htab->root.sgot->contents
8213 + (h->got.offset & ~(bfd_vma) 1));
8214 return TRUE;
8215 }
8216 }
8217 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8218 {
8219 if (!h->def_regular)
8220 return FALSE;
8221
8222 BFD_ASSERT ((h->got.offset & 1) != 0);
8223 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8224 rela.r_addend = (h->root.u.def.value
8225 + h->root.u.def.section->output_section->vma
8226 + h->root.u.def.section->output_offset);
8227 }
8228 else
8229 {
8230 do_glob_dat:
8231 BFD_ASSERT ((h->got.offset & 1) == 0);
8232 bfd_put_NN (output_bfd, (bfd_vma) 0,
8233 htab->root.sgot->contents + h->got.offset);
8234 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8235 rela.r_addend = 0;
8236 }
8237
8238 loc = htab->root.srelgot->contents;
8239 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8240 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8241 }
8242
8243 if (h->needs_copy)
8244 {
8245 Elf_Internal_Rela rela;
8246 bfd_byte *loc;
8247
8248 /* This symbol needs a copy reloc. Set it up. */
8249
8250 if (h->dynindx == -1
8251 || (h->root.type != bfd_link_hash_defined
8252 && h->root.type != bfd_link_hash_defweak)
8253 || htab->srelbss == NULL)
8254 abort ();
8255
8256 rela.r_offset = (h->root.u.def.value
8257 + h->root.u.def.section->output_section->vma
8258 + h->root.u.def.section->output_offset);
8259 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8260 rela.r_addend = 0;
8261 loc = htab->srelbss->contents;
8262 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8263 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8264 }
8265
8266 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8267 be NULL for local symbols. */
8268 if (sym != NULL
8269 && (h == elf_hash_table (info)->hdynamic
8270 || h == elf_hash_table (info)->hgot))
8271 sym->st_shndx = SHN_ABS;
8272
8273 return TRUE;
8274 }
8275
8276 /* Finish up local dynamic symbol handling. We set the contents of
8277 various dynamic sections here. */
8278
8279 static bfd_boolean
8280 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8281 {
8282 struct elf_link_hash_entry *h
8283 = (struct elf_link_hash_entry *) *slot;
8284 struct bfd_link_info *info
8285 = (struct bfd_link_info *) inf;
8286
8287 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8288 info, h, NULL);
8289 }
8290
8291 static void
8292 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8293 struct elf_aarch64_link_hash_table
8294 *htab)
8295 {
8296 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8297 small and large plts and at the minute just generates
8298 the small PLT. */
8299
8300 /* PLT0 of the small PLT looks like this in ELF64 -
8301 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8302 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8303 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8304 // symbol resolver
8305 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8306 // GOTPLT entry for this.
8307 br x17
8308 PLT0 will be slightly different in ELF32 due to different got entry
8309 size.
8310 */
8311 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8312 bfd_vma plt_base;
8313
8314
8315 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8316 PLT_ENTRY_SIZE);
8317 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8318 PLT_ENTRY_SIZE;
8319
8320 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8321 + htab->root.sgotplt->output_offset
8322 + GOT_ENTRY_SIZE * 2);
8323
8324 plt_base = htab->root.splt->output_section->vma +
8325 htab->root.splt->output_offset;
8326
8327 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8328 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8329 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8330 htab->root.splt->contents + 4,
8331 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8332
8333 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8334 htab->root.splt->contents + 8,
8335 PG_OFFSET (plt_got_2nd_ent));
8336
8337 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8338 htab->root.splt->contents + 12,
8339 PG_OFFSET (plt_got_2nd_ent));
8340 }
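/* A note on the offsets patched above: each AArch64 instruction is
   4 bytes, so contents + 4, + 8 and + 12 are the adrp, ldr and add of
   the PLT0 sequence shown in the comment, and all three are made to
   point at GOT[2].  GOT[1] and GOT[2] are the slots conventionally
   filled in at run time by the dynamic linker (its link map and
   resolver entry point), which is why PLT0 loads and branches through
   GOT[2].  */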
8341
8342 static bfd_boolean
8343 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8344 struct bfd_link_info *info)
8345 {
8346 struct elf_aarch64_link_hash_table *htab;
8347 bfd *dynobj;
8348 asection *sdyn;
8349
8350 htab = elf_aarch64_hash_table (info);
8351 dynobj = htab->root.dynobj;
8352 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8353
8354 if (htab->root.dynamic_sections_created)
8355 {
8356 ElfNN_External_Dyn *dyncon, *dynconend;
8357
8358 if (sdyn == NULL || htab->root.sgot == NULL)
8359 abort ();
8360
8361 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8362 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8363 for (; dyncon < dynconend; dyncon++)
8364 {
8365 Elf_Internal_Dyn dyn;
8366 asection *s;
8367
8368 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8369
8370 switch (dyn.d_tag)
8371 {
8372 default:
8373 continue;
8374
8375 case DT_PLTGOT:
8376 s = htab->root.sgotplt;
8377 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8378 break;
8379
8380 case DT_JMPREL:
8381 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8382 break;
8383
8384 case DT_PLTRELSZ:
8385 s = htab->root.srelplt;
8386 dyn.d_un.d_val = s->size;
8387 break;
8388
8389 case DT_RELASZ:
8390 /* The procedure linkage table relocs (DT_JMPREL) should
8391 not be included in the overall relocs (DT_RELA).
8392 Therefore, we override the DT_RELASZ entry here to
8393 make it not include the JMPREL relocs. Since the
8394 linker script arranges for .rela.plt to follow all
8395 other relocation sections, we don't have to worry
8396 about changing the DT_RELA entry. */
8397 if (htab->root.srelplt != NULL)
8398 {
8399 s = htab->root.srelplt;
8400 dyn.d_un.d_val -= s->size;
8401 }
8402 break;
8403
8404 case DT_TLSDESC_PLT:
8405 s = htab->root.splt;
8406 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8407 + htab->tlsdesc_plt;
8408 break;
8409
8410 case DT_TLSDESC_GOT:
8411 s = htab->root.sgot;
8412 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8413 + htab->dt_tlsdesc_got;
8414 break;
8415 }
8416
8417 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8418 }
8419
8420 }
8421
8422 /* Fill in the special first entry in the procedure linkage table. */
8423 if (htab->root.splt && htab->root.splt->size > 0)
8424 {
8425 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8426
8427 elf_section_data (htab->root.splt->output_section)->
8428 this_hdr.sh_entsize = htab->plt_entry_size;
8429
8430
8431 if (htab->tlsdesc_plt)
8432 {
8433 bfd_put_NN (output_bfd, (bfd_vma) 0,
8434 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8435
8436 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8437 elfNN_aarch64_tlsdesc_small_plt_entry,
8438 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8439
8440 {
8441 bfd_vma adrp1_addr =
8442 htab->root.splt->output_section->vma
8443 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8444
8445 bfd_vma adrp2_addr = adrp1_addr + 4;
8446
8447 bfd_vma got_addr =
8448 htab->root.sgot->output_section->vma
8449 + htab->root.sgot->output_offset;
8450
8451 bfd_vma pltgot_addr =
8452 htab->root.sgotplt->output_section->vma
8453 + htab->root.sgotplt->output_offset;
8454
8455 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8456
8457 bfd_byte *plt_entry =
8458 htab->root.splt->contents + htab->tlsdesc_plt;
8459
8460 /* adrp x2, DT_TLSDESC_GOT */
8461 elf_aarch64_update_plt_entry (output_bfd,
8462 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8463 plt_entry + 4,
8464 (PG (dt_tlsdesc_got)
8465 - PG (adrp1_addr)));
8466
8467 /* adrp x3, 0 */
8468 elf_aarch64_update_plt_entry (output_bfd,
8469 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8470 plt_entry + 8,
8471 (PG (pltgot_addr)
8472 - PG (adrp2_addr)));
8473
8474 /* ldr x2, [x2, #0] */
8475 elf_aarch64_update_plt_entry (output_bfd,
8476 BFD_RELOC_AARCH64_LDSTNN_LO12,
8477 plt_entry + 12,
8478 PG_OFFSET (dt_tlsdesc_got));
8479
8480 /* add x3, x3, 0 */
8481 elf_aarch64_update_plt_entry (output_bfd,
8482 BFD_RELOC_AARCH64_ADD_LO12,
8483 plt_entry + 16,
8484 PG_OFFSET (pltgot_addr));
8485 }
8486 }
8487 }
8488
8489 if (htab->root.sgotplt)
8490 {
8491 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8492 {
8493 (*_bfd_error_handler)
8494 (_("discarded output section: `%A'"), htab->root.sgotplt);
8495 return FALSE;
8496 }
8497
8498 /* Fill in the first three entries in the global offset table. */
8499 if (htab->root.sgotplt->size > 0)
8500 {
8501 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8502
8503 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8504 bfd_put_NN (output_bfd,
8505 (bfd_vma) 0,
8506 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8507 bfd_put_NN (output_bfd,
8508 (bfd_vma) 0,
8509 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8510 }
8511
8512 if (htab->root.sgot)
8513 {
8514 if (htab->root.sgot->size > 0)
8515 {
8516 bfd_vma addr =
8517 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8518 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8519 }
8520 }
8521
8522 elf_section_data (htab->root.sgotplt->output_section)->
8523 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8524 }
8525
8526 if (htab->root.sgot && htab->root.sgot->size > 0)
8527 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8528 = GOT_ENTRY_SIZE;
8529
8530 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8531 htab_traverse (htab->loc_hash_table,
8532 elfNN_aarch64_finish_local_dynamic_symbol,
8533 info);
8534
8535 return TRUE;
8536 }
8537
8538 /* Return address for Ith PLT stub in section PLT, for relocation REL
8539 or (bfd_vma) -1 if it should not be included. */
8540
8541 static bfd_vma
8542 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8543 const arelent *rel ATTRIBUTE_UNUSED)
8544 {
8545 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8546 }
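/* For example, assuming PLT_ENTRY_SIZE == 32 and PLT_SMALL_ENTRY_SIZE
   == 16 as elsewhere in this file: the synthetic symbol for the first
   PLT stub (i == 0) is reported at plt->vma + 32, just past PLT0, and
   each following stub 16 bytes further on.  */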
8547
8548
8549 /* We use this so we can override certain functions
8550 (though currently we don't). */
8551
8552 const struct elf_size_info elfNN_aarch64_size_info =
8553 {
8554 sizeof (ElfNN_External_Ehdr),
8555 sizeof (ElfNN_External_Phdr),
8556 sizeof (ElfNN_External_Shdr),
8557 sizeof (ElfNN_External_Rel),
8558 sizeof (ElfNN_External_Rela),
8559 sizeof (ElfNN_External_Sym),
8560 sizeof (ElfNN_External_Dyn),
8561 sizeof (Elf_External_Note),
8562 4, /* Hash table entry size. */
8563 1, /* Internal relocs per external relocs. */
8564 ARCH_SIZE, /* Arch size. */
8565 LOG_FILE_ALIGN, /* Log_file_align. */
8566 ELFCLASSNN, EV_CURRENT,
8567 bfd_elfNN_write_out_phdrs,
8568 bfd_elfNN_write_shdrs_and_ehdr,
8569 bfd_elfNN_checksum_contents,
8570 bfd_elfNN_write_relocs,
8571 bfd_elfNN_swap_symbol_in,
8572 bfd_elfNN_swap_symbol_out,
8573 bfd_elfNN_slurp_reloc_table,
8574 bfd_elfNN_slurp_symbol_table,
8575 bfd_elfNN_swap_dyn_in,
8576 bfd_elfNN_swap_dyn_out,
8577 bfd_elfNN_swap_reloc_in,
8578 bfd_elfNN_swap_reloc_out,
8579 bfd_elfNN_swap_reloca_in,
8580 bfd_elfNN_swap_reloca_out
8581 };
8582
8583 #define ELF_ARCH bfd_arch_aarch64
8584 #define ELF_MACHINE_CODE EM_AARCH64
8585 #define ELF_MAXPAGESIZE 0x10000
8586 #define ELF_MINPAGESIZE 0x1000
8587 #define ELF_COMMONPAGESIZE 0x1000
8588
8589 #define bfd_elfNN_close_and_cleanup \
8590 elfNN_aarch64_close_and_cleanup
8591
8592 #define bfd_elfNN_bfd_free_cached_info \
8593 elfNN_aarch64_bfd_free_cached_info
8594
8595 #define bfd_elfNN_bfd_is_target_special_symbol \
8596 elfNN_aarch64_is_target_special_symbol
8597
8598 #define bfd_elfNN_bfd_link_hash_table_create \
8599 elfNN_aarch64_link_hash_table_create
8600
8601 #define bfd_elfNN_bfd_merge_private_bfd_data \
8602 elfNN_aarch64_merge_private_bfd_data
8603
8604 #define bfd_elfNN_bfd_print_private_bfd_data \
8605 elfNN_aarch64_print_private_bfd_data
8606
8607 #define bfd_elfNN_bfd_reloc_type_lookup \
8608 elfNN_aarch64_reloc_type_lookup
8609
8610 #define bfd_elfNN_bfd_reloc_name_lookup \
8611 elfNN_aarch64_reloc_name_lookup
8612
8613 #define bfd_elfNN_bfd_set_private_flags \
8614 elfNN_aarch64_set_private_flags
8615
8616 #define bfd_elfNN_find_inliner_info \
8617 elfNN_aarch64_find_inliner_info
8618
8619 #define bfd_elfNN_find_nearest_line \
8620 elfNN_aarch64_find_nearest_line
8621
8622 #define bfd_elfNN_mkobject \
8623 elfNN_aarch64_mkobject
8624
8625 #define bfd_elfNN_new_section_hook \
8626 elfNN_aarch64_new_section_hook
8627
8628 #define elf_backend_adjust_dynamic_symbol \
8629 elfNN_aarch64_adjust_dynamic_symbol
8630
8631 #define elf_backend_always_size_sections \
8632 elfNN_aarch64_always_size_sections
8633
8634 #define elf_backend_check_relocs \
8635 elfNN_aarch64_check_relocs
8636
8637 #define elf_backend_copy_indirect_symbol \
8638 elfNN_aarch64_copy_indirect_symbol
8639
8640 /* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
8641 to them in our hash. */
8642 #define elf_backend_create_dynamic_sections \
8643 elfNN_aarch64_create_dynamic_sections
8644
8645 #define elf_backend_init_index_section \
8646 _bfd_elf_init_2_index_sections
8647
8648 #define elf_backend_finish_dynamic_sections \
8649 elfNN_aarch64_finish_dynamic_sections
8650
8651 #define elf_backend_finish_dynamic_symbol \
8652 elfNN_aarch64_finish_dynamic_symbol
8653
8654 #define elf_backend_gc_sweep_hook \
8655 elfNN_aarch64_gc_sweep_hook
8656
8657 #define elf_backend_object_p \
8658 elfNN_aarch64_object_p
8659
8660 #define elf_backend_output_arch_local_syms \
8661 elfNN_aarch64_output_arch_local_syms
8662
8663 #define elf_backend_plt_sym_val \
8664 elfNN_aarch64_plt_sym_val
8665
8666 #define elf_backend_post_process_headers \
8667 elfNN_aarch64_post_process_headers
8668
8669 #define elf_backend_relocate_section \
8670 elfNN_aarch64_relocate_section
8671
8672 #define elf_backend_reloc_type_class \
8673 elfNN_aarch64_reloc_type_class
8674
8675 #define elf_backend_section_from_shdr \
8676 elfNN_aarch64_section_from_shdr
8677
8678 #define elf_backend_size_dynamic_sections \
8679 elfNN_aarch64_size_dynamic_sections
8680
8681 #define elf_backend_size_info \
8682 elfNN_aarch64_size_info
8683
8684 #define elf_backend_write_section \
8685 elfNN_aarch64_write_section
8686
8687 #define elf_backend_can_refcount 1
8688 #define elf_backend_can_gc_sections 1
8689 #define elf_backend_plt_readonly 1
8690 #define elf_backend_want_got_plt 1
8691 #define elf_backend_want_plt_sym 0
8692 #define elf_backend_may_use_rel_p 0
8693 #define elf_backend_may_use_rela_p 1
8694 #define elf_backend_default_use_rela_p 1
8695 #define elf_backend_rela_normal 1
8696 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8697 #define elf_backend_default_execstack 0
8698 #define elf_backend_extern_protected_data 1
8699
8700 #undef elf_backend_obj_attrs_section
8701 #define elf_backend_obj_attrs_section ".ARM.attributes"
8702
8703 #include "elfNN-target.h"