1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
 23   Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
 35 add x0, x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
 51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
 58 perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
 62 global and local TLS symbols. Note that this is different from
 63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
 74 linker fixes up the offset.
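
   As a rough illustration only (the field names below follow the usual
   glibc convention and are not defined in this file), the two GOT slots
   can be pictured as a structure of the form:

     typedef struct
     {
       unsigned long ti_module;     filled in via R_AARCH64_TLS_DTPMOD
       unsigned long ti_offset;     filled in via R_AARCH64_TLS_DTPREL
     } tls_index;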
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
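
   Again purely as a sketch following the common glibc layout (not part
   of this file), the descriptor held in the two slots looks like:

     struct tlsdesc
     {
       ptrdiff_t (*entry) (struct tlsdesc *);
       void *arg;
     };

   and the loader's processing of R_AARCH64_TLSDESC fills in both words.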
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
 101 spotted. The local symbol data structures are created once, when
 102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
 109 For each global symbol with a positive reference count we allocate a
 110 double GOT slot. For a traditional TLS symbol we allocate space for two
 111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
 117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
 128 relocations are emitted the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
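/* Illustrative sketch, not part of the upstream file: the header comment
   above describes how the recorded GOT offset of a TLS symbol uses its
   least significant bit as a "GOT relocations already emitted" flag.
   The hypothetical helpers below only show how such an offset would be
   consumed; the real code masks the bit out where needed.  */

static inline bfd_vma
sketch_got_offset_value (bfd_vma off_with_flag)
{
  /* Strip the flag bit before using the GOT offset.  */
  return off_with_flag & ~(bfd_vma) 1;
}

static inline bfd_boolean
sketch_got_relocs_emitted_p (bfd_vma off_with_flag)
{
  /* The low bit records that the GOT relocations have been emitted.  */
  return (off_with_flag & 1) != 0;
}
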
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
192 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
193
194 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
195 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
207
208 #define ELIMINATE_COPY_RELOCS 0
209
210 /* Return size of a relocation entry. HTAB is the bfd's
 211 elf_aarch64_link_hash_table. */
212 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
213
214 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
215 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
216 #define PLT_ENTRY_SIZE (32)
217 #define PLT_SMALL_ENTRY_SIZE (16)
218 #define PLT_TLSDESC_ENTRY_SIZE (32)
219
 220 /* Encoding of the nop instruction. */
221 #define INSN_NOP 0xd503201f
222
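/* Size in bytes of the GOT slots that back the PLT: one GOT_ENTRY_SIZE
   slot per relocation recorded in .rela.plt, or zero when there is no
   .rela.plt.  */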
223 #define aarch64_compute_jump_table_size(htab) \
224 (((htab)->root.srelplt == NULL) ? 0 \
225 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
226
 227 /* The first entry in a procedure linkage table looks like this. If
 228 the distance between the PLTGOT and the PLT is < 4GB, use
229 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
230 in x16 and needs to work out PLTGOT[1] by using an address of
231 [x16,#-GOT_ENTRY_SIZE]. */
232 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
233 {
234 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
235 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
236 #if ARCH_SIZE == 64
237 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
238 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
239 #else
240 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
241 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
242 #endif
243 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
244 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 0x1f, 0x20, 0x03, 0xd5, /* nop */
246 0x1f, 0x20, 0x03, 0xd5, /* nop */
247 };
248
 249 /* A per-function entry in a procedure linkage table looks like this. If
 250 the distance between the PLTGOT and the PLT is < 4GB, use
251 these PLT entries. */
252 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
253 {
254 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
255 #if ARCH_SIZE == 64
256 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
257 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
258 #else
259 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
260 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
261 #endif
262 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
263 };
264
265 static const bfd_byte
266 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
267 {
268 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
269 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
270 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
271 #if ARCH_SIZE == 64
272 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
274 #else
275 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
276 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
277 #endif
278 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
279 0x1f, 0x20, 0x03, 0xd5, /* nop */
280 0x1f, 0x20, 0x03, 0xd5, /* nop */
281 };
282
283 #define elf_info_to_howto elfNN_aarch64_info_to_howto
284 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
285
286 #define AARCH64_ELF_ABI_VERSION 0
287
288 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
289 #define ALL_ONES (~ (bfd_vma) 0)
290
 291 /* Indexed by the bfd internal reloc enumerators.
292 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
293 in reloc.c. */
294
295 static reloc_howto_type elfNN_aarch64_howto_table[] =
296 {
297 EMPTY_HOWTO (0),
298
299 /* Basic data relocations. */
300
301 #if ARCH_SIZE == 64
302 HOWTO (R_AARCH64_NULL, /* type */
303 0, /* rightshift */
304 3, /* size (0 = byte, 1 = short, 2 = long) */
305 0, /* bitsize */
306 FALSE, /* pc_relative */
307 0, /* bitpos */
308 complain_overflow_dont, /* complain_on_overflow */
309 bfd_elf_generic_reloc, /* special_function */
310 "R_AARCH64_NULL", /* name */
311 FALSE, /* partial_inplace */
312 0, /* src_mask */
313 0, /* dst_mask */
314 FALSE), /* pcrel_offset */
315 #else
316 HOWTO (R_AARCH64_NONE, /* type */
317 0, /* rightshift */
318 3, /* size (0 = byte, 1 = short, 2 = long) */
319 0, /* bitsize */
320 FALSE, /* pc_relative */
321 0, /* bitpos */
322 complain_overflow_dont, /* complain_on_overflow */
323 bfd_elf_generic_reloc, /* special_function */
324 "R_AARCH64_NONE", /* name */
325 FALSE, /* partial_inplace */
326 0, /* src_mask */
327 0, /* dst_mask */
328 FALSE), /* pcrel_offset */
329 #endif
330
331 /* .xword: (S+A) */
332 HOWTO64 (AARCH64_R (ABS64), /* type */
333 0, /* rightshift */
334 4, /* size (4 = long long) */
335 64, /* bitsize */
336 FALSE, /* pc_relative */
337 0, /* bitpos */
338 complain_overflow_unsigned, /* complain_on_overflow */
339 bfd_elf_generic_reloc, /* special_function */
340 AARCH64_R_STR (ABS64), /* name */
341 FALSE, /* partial_inplace */
342 ALL_ONES, /* src_mask */
343 ALL_ONES, /* dst_mask */
344 FALSE), /* pcrel_offset */
345
346 /* .word: (S+A) */
347 HOWTO (AARCH64_R (ABS32), /* type */
348 0, /* rightshift */
349 2, /* size (0 = byte, 1 = short, 2 = long) */
350 32, /* bitsize */
351 FALSE, /* pc_relative */
352 0, /* bitpos */
353 complain_overflow_unsigned, /* complain_on_overflow */
354 bfd_elf_generic_reloc, /* special_function */
355 AARCH64_R_STR (ABS32), /* name */
356 FALSE, /* partial_inplace */
357 0xffffffff, /* src_mask */
358 0xffffffff, /* dst_mask */
359 FALSE), /* pcrel_offset */
360
361 /* .half: (S+A) */
362 HOWTO (AARCH64_R (ABS16), /* type */
363 0, /* rightshift */
364 1, /* size (0 = byte, 1 = short, 2 = long) */
365 16, /* bitsize */
366 FALSE, /* pc_relative */
367 0, /* bitpos */
368 complain_overflow_unsigned, /* complain_on_overflow */
369 bfd_elf_generic_reloc, /* special_function */
370 AARCH64_R_STR (ABS16), /* name */
371 FALSE, /* partial_inplace */
372 0xffff, /* src_mask */
373 0xffff, /* dst_mask */
374 FALSE), /* pcrel_offset */
375
376 /* .xword: (S+A-P) */
377 HOWTO64 (AARCH64_R (PREL64), /* type */
378 0, /* rightshift */
379 4, /* size (4 = long long) */
380 64, /* bitsize */
381 TRUE, /* pc_relative */
382 0, /* bitpos */
383 complain_overflow_signed, /* complain_on_overflow */
384 bfd_elf_generic_reloc, /* special_function */
385 AARCH64_R_STR (PREL64), /* name */
386 FALSE, /* partial_inplace */
387 ALL_ONES, /* src_mask */
388 ALL_ONES, /* dst_mask */
389 TRUE), /* pcrel_offset */
390
391 /* .word: (S+A-P) */
392 HOWTO (AARCH64_R (PREL32), /* type */
393 0, /* rightshift */
394 2, /* size (0 = byte, 1 = short, 2 = long) */
395 32, /* bitsize */
396 TRUE, /* pc_relative */
397 0, /* bitpos */
398 complain_overflow_signed, /* complain_on_overflow */
399 bfd_elf_generic_reloc, /* special_function */
400 AARCH64_R_STR (PREL32), /* name */
401 FALSE, /* partial_inplace */
402 0xffffffff, /* src_mask */
403 0xffffffff, /* dst_mask */
404 TRUE), /* pcrel_offset */
405
406 /* .half: (S+A-P) */
407 HOWTO (AARCH64_R (PREL16), /* type */
408 0, /* rightshift */
409 1, /* size (0 = byte, 1 = short, 2 = long) */
410 16, /* bitsize */
411 TRUE, /* pc_relative */
412 0, /* bitpos */
413 complain_overflow_signed, /* complain_on_overflow */
414 bfd_elf_generic_reloc, /* special_function */
415 AARCH64_R_STR (PREL16), /* name */
416 FALSE, /* partial_inplace */
417 0xffff, /* src_mask */
418 0xffff, /* dst_mask */
419 TRUE), /* pcrel_offset */
420
421 /* Group relocations to create a 16, 32, 48 or 64 bit
422 unsigned data or abs address inline. */
423
424 /* MOVZ: ((S+A) >> 0) & 0xffff */
425 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
426 0, /* rightshift */
427 2, /* size (0 = byte, 1 = short, 2 = long) */
428 16, /* bitsize */
429 FALSE, /* pc_relative */
430 0, /* bitpos */
431 complain_overflow_unsigned, /* complain_on_overflow */
432 bfd_elf_generic_reloc, /* special_function */
433 AARCH64_R_STR (MOVW_UABS_G0), /* name */
434 FALSE, /* partial_inplace */
435 0xffff, /* src_mask */
436 0xffff, /* dst_mask */
437 FALSE), /* pcrel_offset */
438
439 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
440 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 16, /* bitsize */
444 FALSE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_dont, /* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
449 FALSE, /* partial_inplace */
450 0xffff, /* src_mask */
451 0xffff, /* dst_mask */
452 FALSE), /* pcrel_offset */
453
454 /* MOVZ: ((S+A) >> 16) & 0xffff */
455 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
456 16, /* rightshift */
457 2, /* size (0 = byte, 1 = short, 2 = long) */
458 16, /* bitsize */
459 FALSE, /* pc_relative */
460 0, /* bitpos */
461 complain_overflow_unsigned, /* complain_on_overflow */
462 bfd_elf_generic_reloc, /* special_function */
463 AARCH64_R_STR (MOVW_UABS_G1), /* name */
464 FALSE, /* partial_inplace */
465 0xffff, /* src_mask */
466 0xffff, /* dst_mask */
467 FALSE), /* pcrel_offset */
468
469 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
470 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
471 16, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 16, /* bitsize */
474 FALSE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_dont, /* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
479 FALSE, /* partial_inplace */
480 0xffff, /* src_mask */
481 0xffff, /* dst_mask */
482 FALSE), /* pcrel_offset */
483
484 /* MOVZ: ((S+A) >> 32) & 0xffff */
485 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
486 32, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 16, /* bitsize */
489 FALSE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_unsigned, /* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 AARCH64_R_STR (MOVW_UABS_G2), /* name */
494 FALSE, /* partial_inplace */
495 0xffff, /* src_mask */
496 0xffff, /* dst_mask */
497 FALSE), /* pcrel_offset */
498
499 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
500 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
501 32, /* rightshift */
502 2, /* size (0 = byte, 1 = short, 2 = long) */
503 16, /* bitsize */
504 FALSE, /* pc_relative */
505 0, /* bitpos */
506 complain_overflow_dont, /* complain_on_overflow */
507 bfd_elf_generic_reloc, /* special_function */
508 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
509 FALSE, /* partial_inplace */
510 0xffff, /* src_mask */
511 0xffff, /* dst_mask */
512 FALSE), /* pcrel_offset */
513
514 /* MOVZ: ((S+A) >> 48) & 0xffff */
515 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
516 48, /* rightshift */
517 2, /* size (0 = byte, 1 = short, 2 = long) */
518 16, /* bitsize */
519 FALSE, /* pc_relative */
520 0, /* bitpos */
521 complain_overflow_unsigned, /* complain_on_overflow */
522 bfd_elf_generic_reloc, /* special_function */
523 AARCH64_R_STR (MOVW_UABS_G3), /* name */
524 FALSE, /* partial_inplace */
525 0xffff, /* src_mask */
526 0xffff, /* dst_mask */
527 FALSE), /* pcrel_offset */
528
529 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
530 signed data or abs address inline. Will change instruction
531 to MOVN or MOVZ depending on sign of calculated value. */
532
533 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
534 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
535 0, /* rightshift */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
537 16, /* bitsize */
538 FALSE, /* pc_relative */
539 0, /* bitpos */
540 complain_overflow_signed, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 AARCH64_R_STR (MOVW_SABS_G0), /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
547
548 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
549 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
550 16, /* rightshift */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
552 16, /* bitsize */
553 FALSE, /* pc_relative */
554 0, /* bitpos */
555 complain_overflow_signed, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 AARCH64_R_STR (MOVW_SABS_G1), /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
562
563 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
564 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
565 32, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 16, /* bitsize */
568 FALSE, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_signed, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_SABS_G2), /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
577
578 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
579 addresses: PG(x) is (x & ~0xfff). */
580
581 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
582 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
583 2, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 19, /* bitsize */
586 TRUE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_signed, /* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 AARCH64_R_STR (LD_PREL_LO19), /* name */
591 FALSE, /* partial_inplace */
592 0x7ffff, /* src_mask */
593 0x7ffff, /* dst_mask */
594 TRUE), /* pcrel_offset */
595
596 /* ADR: (S+A-P) & 0x1fffff */
597 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 21, /* bitsize */
601 TRUE, /* pc_relative */
602 0, /* bitpos */
603 complain_overflow_signed, /* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 AARCH64_R_STR (ADR_PREL_LO21), /* name */
606 FALSE, /* partial_inplace */
607 0x1fffff, /* src_mask */
608 0x1fffff, /* dst_mask */
609 TRUE), /* pcrel_offset */
610
611 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
612 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
613 12, /* rightshift */
614 2, /* size (0 = byte, 1 = short, 2 = long) */
615 21, /* bitsize */
616 TRUE, /* pc_relative */
617 0, /* bitpos */
618 complain_overflow_signed, /* complain_on_overflow */
619 bfd_elf_generic_reloc, /* special_function */
620 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
621 FALSE, /* partial_inplace */
622 0x1fffff, /* src_mask */
623 0x1fffff, /* dst_mask */
624 TRUE), /* pcrel_offset */
625
626 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
627 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
628 12, /* rightshift */
629 2, /* size (0 = byte, 1 = short, 2 = long) */
630 21, /* bitsize */
631 TRUE, /* pc_relative */
632 0, /* bitpos */
633 complain_overflow_dont, /* complain_on_overflow */
634 bfd_elf_generic_reloc, /* special_function */
635 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
636 FALSE, /* partial_inplace */
637 0x1fffff, /* src_mask */
638 0x1fffff, /* dst_mask */
639 TRUE), /* pcrel_offset */
640
641 /* ADD: (S+A) & 0xfff [no overflow check] */
642 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
643 0, /* rightshift */
644 2, /* size (0 = byte, 1 = short, 2 = long) */
645 12, /* bitsize */
646 FALSE, /* pc_relative */
647 10, /* bitpos */
648 complain_overflow_dont, /* complain_on_overflow */
649 bfd_elf_generic_reloc, /* special_function */
650 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
651 FALSE, /* partial_inplace */
652 0x3ffc00, /* src_mask */
653 0x3ffc00, /* dst_mask */
654 FALSE), /* pcrel_offset */
655
656 /* LD/ST8: (S+A) & 0xfff */
657 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
658 0, /* rightshift */
659 2, /* size (0 = byte, 1 = short, 2 = long) */
660 12, /* bitsize */
661 FALSE, /* pc_relative */
662 0, /* bitpos */
663 complain_overflow_dont, /* complain_on_overflow */
664 bfd_elf_generic_reloc, /* special_function */
665 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
666 FALSE, /* partial_inplace */
667 0xfff, /* src_mask */
668 0xfff, /* dst_mask */
669 FALSE), /* pcrel_offset */
670
671 /* Relocations for control-flow instructions. */
672
673 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
674 HOWTO (AARCH64_R (TSTBR14), /* type */
675 2, /* rightshift */
676 2, /* size (0 = byte, 1 = short, 2 = long) */
677 14, /* bitsize */
678 TRUE, /* pc_relative */
679 0, /* bitpos */
680 complain_overflow_signed, /* complain_on_overflow */
681 bfd_elf_generic_reloc, /* special_function */
682 AARCH64_R_STR (TSTBR14), /* name */
683 FALSE, /* partial_inplace */
684 0x3fff, /* src_mask */
685 0x3fff, /* dst_mask */
686 TRUE), /* pcrel_offset */
687
688 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
689 HOWTO (AARCH64_R (CONDBR19), /* type */
690 2, /* rightshift */
691 2, /* size (0 = byte, 1 = short, 2 = long) */
692 19, /* bitsize */
693 TRUE, /* pc_relative */
694 0, /* bitpos */
695 complain_overflow_signed, /* complain_on_overflow */
696 bfd_elf_generic_reloc, /* special_function */
697 AARCH64_R_STR (CONDBR19), /* name */
698 FALSE, /* partial_inplace */
699 0x7ffff, /* src_mask */
700 0x7ffff, /* dst_mask */
701 TRUE), /* pcrel_offset */
702
703 /* B: ((S+A-P) >> 2) & 0x3ffffff */
704 HOWTO (AARCH64_R (JUMP26), /* type */
705 2, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 26, /* bitsize */
708 TRUE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_signed, /* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 AARCH64_R_STR (JUMP26), /* name */
713 FALSE, /* partial_inplace */
714 0x3ffffff, /* src_mask */
715 0x3ffffff, /* dst_mask */
716 TRUE), /* pcrel_offset */
717
718 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
719 HOWTO (AARCH64_R (CALL26), /* type */
720 2, /* rightshift */
721 2, /* size (0 = byte, 1 = short, 2 = long) */
722 26, /* bitsize */
723 TRUE, /* pc_relative */
724 0, /* bitpos */
725 complain_overflow_signed, /* complain_on_overflow */
726 bfd_elf_generic_reloc, /* special_function */
727 AARCH64_R_STR (CALL26), /* name */
728 FALSE, /* partial_inplace */
729 0x3ffffff, /* src_mask */
730 0x3ffffff, /* dst_mask */
731 TRUE), /* pcrel_offset */
732
733 /* LD/ST16: (S+A) & 0xffe */
734 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
735 1, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 12, /* bitsize */
738 FALSE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_dont, /* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
743 FALSE, /* partial_inplace */
744 0xffe, /* src_mask */
745 0xffe, /* dst_mask */
746 FALSE), /* pcrel_offset */
747
748 /* LD/ST32: (S+A) & 0xffc */
749 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
750 2, /* rightshift */
751 2, /* size (0 = byte, 1 = short, 2 = long) */
752 12, /* bitsize */
753 FALSE, /* pc_relative */
754 0, /* bitpos */
755 complain_overflow_dont, /* complain_on_overflow */
756 bfd_elf_generic_reloc, /* special_function */
757 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
758 FALSE, /* partial_inplace */
759 0xffc, /* src_mask */
760 0xffc, /* dst_mask */
761 FALSE), /* pcrel_offset */
762
763 /* LD/ST64: (S+A) & 0xff8 */
764 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
765 3, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 12, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_dont, /* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
773 FALSE, /* partial_inplace */
774 0xff8, /* src_mask */
775 0xff8, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 /* LD/ST128: (S+A) & 0xff0 */
779 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
780 4, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 12, /* bitsize */
783 FALSE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont, /* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
788 FALSE, /* partial_inplace */
789 0xff0, /* src_mask */
790 0xff0, /* dst_mask */
791 FALSE), /* pcrel_offset */
792
793 /* Set a load-literal immediate field to bits
794 0x1FFFFC of G(S)-P */
795 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
796 2, /* rightshift */
797 2, /* size (0 = byte,1 = short,2 = long) */
798 19, /* bitsize */
799 TRUE, /* pc_relative */
800 0, /* bitpos */
801 complain_overflow_signed, /* complain_on_overflow */
802 bfd_elf_generic_reloc, /* special_function */
803 AARCH64_R_STR (GOT_LD_PREL19), /* name */
804 FALSE, /* partial_inplace */
805 0xffffe0, /* src_mask */
806 0xffffe0, /* dst_mask */
807 TRUE), /* pcrel_offset */
808
809 /* Get to the page for the GOT entry for the symbol
810 (G(S) - P) using an ADRP instruction. */
811 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
812 12, /* rightshift */
813 2, /* size (0 = byte, 1 = short, 2 = long) */
814 21, /* bitsize */
815 TRUE, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
820 FALSE, /* partial_inplace */
821 0x1fffff, /* src_mask */
822 0x1fffff, /* dst_mask */
823 TRUE), /* pcrel_offset */
824
825 /* LD64: GOT offset G(S) & 0xff8 */
826 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
827 3, /* rightshift */
828 2, /* size (0 = byte, 1 = short, 2 = long) */
829 12, /* bitsize */
830 FALSE, /* pc_relative */
831 0, /* bitpos */
832 complain_overflow_dont, /* complain_on_overflow */
833 bfd_elf_generic_reloc, /* special_function */
834 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
835 FALSE, /* partial_inplace */
836 0xff8, /* src_mask */
837 0xff8, /* dst_mask */
838 FALSE), /* pcrel_offset */
839
840 /* LD32: GOT offset G(S) & 0xffc */
841 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
842 2, /* rightshift */
843 2, /* size (0 = byte, 1 = short, 2 = long) */
844 12, /* bitsize */
845 FALSE, /* pc_relative */
846 0, /* bitpos */
847 complain_overflow_dont, /* complain_on_overflow */
848 bfd_elf_generic_reloc, /* special_function */
849 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
850 FALSE, /* partial_inplace */
851 0xffc, /* src_mask */
852 0xffc, /* dst_mask */
853 FALSE), /* pcrel_offset */
854
855 /* LD64: GOT offset for the symbol. */
856 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
857 3, /* rightshift */
858 2, /* size (0 = byte, 1 = short, 2 = long) */
859 12, /* bitsize */
860 FALSE, /* pc_relative */
861 0, /* bitpos */
862 complain_overflow_unsigned, /* complain_on_overflow */
863 bfd_elf_generic_reloc, /* special_function */
864 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
865 FALSE, /* partial_inplace */
866 0x7ff8, /* src_mask */
867 0x7ff8, /* dst_mask */
868 FALSE), /* pcrel_offset */
869
870 /* LD32: GOT offset to the page address of GOT table.
871 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
872 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
873 2, /* rightshift */
874 2, /* size (0 = byte, 1 = short, 2 = long) */
875 12, /* bitsize */
876 FALSE, /* pc_relative */
877 0, /* bitpos */
878 complain_overflow_unsigned, /* complain_on_overflow */
879 bfd_elf_generic_reloc, /* special_function */
880 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
881 FALSE, /* partial_inplace */
882 0x5ffc, /* src_mask */
883 0x5ffc, /* dst_mask */
884 FALSE), /* pcrel_offset */
885
886 /* LD64: GOT offset to the page address of GOT table.
887 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
888 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
889 3, /* rightshift */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
891 12, /* bitsize */
892 FALSE, /* pc_relative */
893 0, /* bitpos */
894 complain_overflow_unsigned, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
897 FALSE, /* partial_inplace */
898 0x7ff8, /* src_mask */
899 0x7ff8, /* dst_mask */
900 FALSE), /* pcrel_offset */
901
902 /* Get to the page for the GOT entry for the symbol
903 (G(S) - P) using an ADRP instruction. */
904 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
905 12, /* rightshift */
906 2, /* size (0 = byte, 1 = short, 2 = long) */
907 21, /* bitsize */
908 TRUE, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
913 FALSE, /* partial_inplace */
914 0x1fffff, /* src_mask */
915 0x1fffff, /* dst_mask */
916 TRUE), /* pcrel_offset */
917
918 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
919 0, /* rightshift */
920 2, /* size (0 = byte, 1 = short, 2 = long) */
921 21, /* bitsize */
922 TRUE, /* pc_relative */
923 0, /* bitpos */
924 complain_overflow_dont, /* complain_on_overflow */
925 bfd_elf_generic_reloc, /* special_function */
926 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
927 FALSE, /* partial_inplace */
928 0x1fffff, /* src_mask */
929 0x1fffff, /* dst_mask */
930 TRUE), /* pcrel_offset */
931
932 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
933 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 12, /* bitsize */
937 FALSE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont, /* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
942 FALSE, /* partial_inplace */
943 0xfff, /* src_mask */
944 0xfff, /* dst_mask */
945 FALSE), /* pcrel_offset */
946
947 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
948 16, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 16, /* bitsize */
951 FALSE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
956 FALSE, /* partial_inplace */
957 0xffff, /* src_mask */
958 0xffff, /* dst_mask */
959 FALSE), /* pcrel_offset */
960
961 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 16, /* bitsize */
965 FALSE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont, /* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
970 FALSE, /* partial_inplace */
971 0xffff, /* src_mask */
972 0xffff, /* dst_mask */
973 FALSE), /* pcrel_offset */
974
975 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
976 12, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 21, /* bitsize */
979 FALSE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont, /* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
984 FALSE, /* partial_inplace */
985 0x1fffff, /* src_mask */
986 0x1fffff, /* dst_mask */
987 FALSE), /* pcrel_offset */
988
989 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
990 3, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 12, /* bitsize */
993 FALSE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont, /* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
998 FALSE, /* partial_inplace */
999 0xff8, /* src_mask */
1000 0xff8, /* dst_mask */
1001 FALSE), /* pcrel_offset */
1002
1003 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1004 2, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 12, /* bitsize */
1007 FALSE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont, /* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1012 FALSE, /* partial_inplace */
1013 0xffc, /* src_mask */
1014 0xffc, /* dst_mask */
1015 FALSE), /* pcrel_offset */
1016
1017 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1018 2, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 19, /* bitsize */
1021 FALSE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont, /* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1026 FALSE, /* partial_inplace */
1027 0x1ffffc, /* src_mask */
1028 0x1ffffc, /* dst_mask */
1029 FALSE), /* pcrel_offset */
1030
1031 /* Unsigned 12 bit byte offset to module TLS base address. */
1032 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1033 0, /* rightshift */
1034 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 12, /* bitsize */
1036 FALSE, /* pc_relative */
1037 0, /* bitpos */
1038 complain_overflow_unsigned, /* complain_on_overflow */
1039 bfd_elf_generic_reloc, /* special_function */
1040 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1041 FALSE, /* partial_inplace */
1042 0xfff, /* src_mask */
1043 0xfff, /* dst_mask */
1044 FALSE), /* pcrel_offset */
1045
1046 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1047 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 12, /* bitsize */
1051 FALSE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont, /* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1056 FALSE, /* partial_inplace */
1057 0xfff, /* src_mask */
1058 0xfff, /* dst_mask */
1059 FALSE), /* pcrel_offset */
1060
1061 /* Get to the page for the GOT entry for the symbol
1062 (G(S) - P) using an ADRP instruction. */
1063 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1064 12, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 21, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_signed, /* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1072 FALSE, /* partial_inplace */
1073 0x1fffff, /* src_mask */
1074 0x1fffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 21, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_signed, /* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1086 FALSE, /* partial_inplace */
1087 0x1fffff, /* src_mask */
1088 0x1fffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1092 32, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 16, /* bitsize */
1095 FALSE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_unsigned, /* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1100 FALSE, /* partial_inplace */
1101 0xffff, /* src_mask */
1102 0xffff, /* dst_mask */
1103 FALSE), /* pcrel_offset */
1104
1105 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1106 16, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 16, /* bitsize */
1109 FALSE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont, /* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1114 FALSE, /* partial_inplace */
1115 0xffff, /* src_mask */
1116 0xffff, /* dst_mask */
1117 FALSE), /* pcrel_offset */
1118
1119 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1120 16, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 16, /* bitsize */
1123 FALSE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont, /* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1128 FALSE, /* partial_inplace */
1129 0xffff, /* src_mask */
1130 0xffff, /* dst_mask */
1131 FALSE), /* pcrel_offset */
1132
1133 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 16, /* bitsize */
1137 FALSE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1142 FALSE, /* partial_inplace */
1143 0xffff, /* src_mask */
1144 0xffff, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1146
1147 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 16, /* bitsize */
1151 FALSE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont, /* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1156 FALSE, /* partial_inplace */
1157 0xffff, /* src_mask */
1158 0xffff, /* dst_mask */
1159 FALSE), /* pcrel_offset */
1160
1161 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1162 12, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 12, /* bitsize */
1165 FALSE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_unsigned, /* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1170 FALSE, /* partial_inplace */
1171 0xfff, /* src_mask */
1172 0xfff, /* dst_mask */
1173 FALSE), /* pcrel_offset */
1174
1175 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 12, /* bitsize */
1179 FALSE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_unsigned, /* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1184 FALSE, /* partial_inplace */
1185 0xfff, /* src_mask */
1186 0xfff, /* dst_mask */
1187 FALSE), /* pcrel_offset */
1188
1189 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 12, /* bitsize */
1193 FALSE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont, /* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1198 FALSE, /* partial_inplace */
1199 0xfff, /* src_mask */
1200 0xfff, /* dst_mask */
1201 FALSE), /* pcrel_offset */
1202
1203 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1204 2, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 19, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont, /* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1212 FALSE, /* partial_inplace */
1213 0x0ffffe0, /* src_mask */
1214 0x0ffffe0, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 21, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont, /* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1226 FALSE, /* partial_inplace */
1227 0x1fffff, /* src_mask */
1228 0x1fffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 /* Get to the page for the GOT entry for the symbol
1232 (G(S) - P) using an ADRP instruction. */
1233 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1234 12, /* rightshift */
1235 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 21, /* bitsize */
1237 TRUE, /* pc_relative */
1238 0, /* bitpos */
1239 complain_overflow_dont, /* complain_on_overflow */
1240 bfd_elf_generic_reloc, /* special_function */
1241 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1242 FALSE, /* partial_inplace */
1243 0x1fffff, /* src_mask */
1244 0x1fffff, /* dst_mask */
1245 TRUE), /* pcrel_offset */
1246
1247 /* LD64: GOT offset G(S) & 0xff8. */
1248 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1249 3, /* rightshift */
1250 2, /* size (0 = byte, 1 = short, 2 = long) */
1251 12, /* bitsize */
1252 FALSE, /* pc_relative */
1253 0, /* bitpos */
1254 complain_overflow_dont, /* complain_on_overflow */
1255 bfd_elf_generic_reloc, /* special_function */
1256 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1257 FALSE, /* partial_inplace */
1258 0xff8, /* src_mask */
1259 0xff8, /* dst_mask */
1260 FALSE), /* pcrel_offset */
1261
1262 /* LD32: GOT offset G(S) & 0xffc. */
1263 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1264 2, /* rightshift */
1265 2, /* size (0 = byte, 1 = short, 2 = long) */
1266 12, /* bitsize */
1267 FALSE, /* pc_relative */
1268 0, /* bitpos */
1269 complain_overflow_dont, /* complain_on_overflow */
1270 bfd_elf_generic_reloc, /* special_function */
1271 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1272 FALSE, /* partial_inplace */
1273 0xffc, /* src_mask */
1274 0xffc, /* dst_mask */
1275 FALSE), /* pcrel_offset */
1276
1277 /* ADD: GOT offset G(S) & 0xfff. */
1278 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1279 0, /* rightshift */
1280 2, /* size (0 = byte, 1 = short, 2 = long) */
1281 12, /* bitsize */
1282 FALSE, /* pc_relative */
1283 0, /* bitpos */
1284 complain_overflow_dont, /* complain_on_overflow */
1285 bfd_elf_generic_reloc, /* special_function */
1286 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1287 FALSE, /* partial_inplace */
1288 0xfff, /* src_mask */
1289 0xfff, /* dst_mask */
1290 FALSE), /* pcrel_offset */
1291
1292 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1293 16, /* rightshift */
1294 2, /* size (0 = byte, 1 = short, 2 = long) */
1295 12, /* bitsize */
1296 FALSE, /* pc_relative */
1297 0, /* bitpos */
1298 complain_overflow_dont, /* complain_on_overflow */
1299 bfd_elf_generic_reloc, /* special_function */
1300 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1301 FALSE, /* partial_inplace */
1302 0xffff, /* src_mask */
1303 0xffff, /* dst_mask */
1304 FALSE), /* pcrel_offset */
1305
1306 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1307 0, /* rightshift */
1308 2, /* size (0 = byte, 1 = short, 2 = long) */
1309 12, /* bitsize */
1310 FALSE, /* pc_relative */
1311 0, /* bitpos */
1312 complain_overflow_dont, /* complain_on_overflow */
1313 bfd_elf_generic_reloc, /* special_function */
1314 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1315 FALSE, /* partial_inplace */
1316 0xffff, /* src_mask */
1317 0xffff, /* dst_mask */
1318 FALSE), /* pcrel_offset */
1319
1320 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1321 0, /* rightshift */
1322 2, /* size (0 = byte, 1 = short, 2 = long) */
1323 12, /* bitsize */
1324 FALSE, /* pc_relative */
1325 0, /* bitpos */
1326 complain_overflow_dont, /* complain_on_overflow */
1327 bfd_elf_generic_reloc, /* special_function */
1328 AARCH64_R_STR (TLSDESC_LDR), /* name */
1329 FALSE, /* partial_inplace */
1330 0x0, /* src_mask */
1331 0x0, /* dst_mask */
1332 FALSE), /* pcrel_offset */
1333
1334 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1335 0, /* rightshift */
1336 2, /* size (0 = byte, 1 = short, 2 = long) */
1337 12, /* bitsize */
1338 FALSE, /* pc_relative */
1339 0, /* bitpos */
1340 complain_overflow_dont, /* complain_on_overflow */
1341 bfd_elf_generic_reloc, /* special_function */
1342 AARCH64_R_STR (TLSDESC_ADD), /* name */
1343 FALSE, /* partial_inplace */
1344 0x0, /* src_mask */
1345 0x0, /* dst_mask */
1346 FALSE), /* pcrel_offset */
1347
1348 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1349 0, /* rightshift */
1350 2, /* size (0 = byte, 1 = short, 2 = long) */
1351 0, /* bitsize */
1352 FALSE, /* pc_relative */
1353 0, /* bitpos */
1354 complain_overflow_dont, /* complain_on_overflow */
1355 bfd_elf_generic_reloc, /* special_function */
1356 AARCH64_R_STR (TLSDESC_CALL), /* name */
1357 FALSE, /* partial_inplace */
1358 0x0, /* src_mask */
1359 0x0, /* dst_mask */
1360 FALSE), /* pcrel_offset */
1361
1362 HOWTO (AARCH64_R (COPY), /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 64, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_bitfield, /* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 AARCH64_R_STR (COPY), /* name */
1371 TRUE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 64, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_bitfield, /* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 AARCH64_R_STR (GLOB_DAT), /* name */
1385 TRUE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 64, /* bitsize */
1394 FALSE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_bitfield, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 AARCH64_R_STR (JUMP_SLOT), /* name */
1399 TRUE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 FALSE), /* pcrel_offset */
1403
1404 HOWTO (AARCH64_R (RELATIVE), /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 64, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield, /* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 AARCH64_R_STR (RELATIVE), /* name */
1413 TRUE, /* partial_inplace */
1414 ALL_ONES, /* src_mask */
1415 ALL_ONES, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 64, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_dont, /* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 #if ARCH_SIZE == 64
1427 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1428 #else
1429 AARCH64_R_STR (TLS_DTPMOD), /* name */
1430 #endif
1431 FALSE, /* partial_inplace */
1432 0, /* src_mask */
1433 ALL_ONES, /* dst_mask */
1434 FALSE), /* pcrel_offset */
1435
1436 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1437 0, /* rightshift */
1438 2, /* size (0 = byte, 1 = short, 2 = long) */
1439 64, /* bitsize */
1440 FALSE, /* pc_relative */
1441 0, /* bitpos */
1442 complain_overflow_dont, /* complain_on_overflow */
1443 bfd_elf_generic_reloc, /* special_function */
1444 #if ARCH_SIZE == 64
1445 AARCH64_R_STR (TLS_DTPREL64), /* name */
1446 #else
1447 AARCH64_R_STR (TLS_DTPREL), /* name */
1448 #endif
1449 FALSE, /* partial_inplace */
1450 0, /* src_mask */
1451 ALL_ONES, /* dst_mask */
1452 FALSE), /* pcrel_offset */
1453
1454 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1455 0, /* rightshift */
1456 2, /* size (0 = byte, 1 = short, 2 = long) */
1457 64, /* bitsize */
1458 FALSE, /* pc_relative */
1459 0, /* bitpos */
1460 complain_overflow_dont, /* complain_on_overflow */
1461 bfd_elf_generic_reloc, /* special_function */
1462 #if ARCH_SIZE == 64
1463 AARCH64_R_STR (TLS_TPREL64), /* name */
1464 #else
1465 AARCH64_R_STR (TLS_TPREL), /* name */
1466 #endif
1467 FALSE, /* partial_inplace */
1468 0, /* src_mask */
1469 ALL_ONES, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (AARCH64_R (TLSDESC), /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 64, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_dont, /* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 AARCH64_R_STR (TLSDESC), /* name */
1481 FALSE, /* partial_inplace */
1482 0, /* src_mask */
1483 ALL_ONES, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 HOWTO (AARCH64_R (IRELATIVE), /* type */
1487 0, /* rightshift */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1489 64, /* bitsize */
1490 FALSE, /* pc_relative */
1491 0, /* bitpos */
1492 complain_overflow_bitfield, /* complain_on_overflow */
1493 bfd_elf_generic_reloc, /* special_function */
1494 AARCH64_R_STR (IRELATIVE), /* name */
1495 FALSE, /* partial_inplace */
1496 0, /* src_mask */
1497 ALL_ONES, /* dst_mask */
1498 FALSE), /* pcrel_offset */
1499
1500 EMPTY_HOWTO (0),
1501 };
1502
1503 static reloc_howto_type elfNN_aarch64_howto_none =
1504 HOWTO (R_AARCH64_NONE, /* type */
1505 0, /* rightshift */
1506 3, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont,/* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 "R_AARCH64_NONE", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE); /* pcrel_offset */
1517
1518 /* Given HOWTO, return the bfd internal relocation enumerator. */
1519
1520 static bfd_reloc_code_real_type
1521 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1522 {
1523 const int size
1524 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1525 const ptrdiff_t offset
1526 = howto - elfNN_aarch64_howto_table;
1527
1528 if (offset > 0 && offset < size - 1)
1529 return BFD_RELOC_AARCH64_RELOC_START + offset;
1530
1531 if (howto == &elfNN_aarch64_howto_none)
1532 return BFD_RELOC_AARCH64_NONE;
1533
1534 return BFD_RELOC_AARCH64_RELOC_START;
1535 }
1536
1537 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1538
1539 static bfd_reloc_code_real_type
1540 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1541 {
1542 static bfd_boolean initialized_p = FALSE;
1543 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1544 static unsigned int offsets[R_AARCH64_end];
1545
1546 if (initialized_p == FALSE)
1547 {
1548 unsigned int i;
1549
1550 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1551 if (elfNN_aarch64_howto_table[i].type != 0)
1552 offsets[elfNN_aarch64_howto_table[i].type] = i;
1553
1554 initialized_p = TRUE;
1555 }
1556
1557 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1558 return BFD_RELOC_AARCH64_NONE;
1559
1560 /* PR 17512: file: b371e70a. */
1561 if (r_type >= R_AARCH64_end)
1562 {
1563 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1564 bfd_set_error (bfd_error_bad_value);
1565 return BFD_RELOC_AARCH64_NONE;
1566 }
1567
1568 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1569 }
1570
1571 struct elf_aarch64_reloc_map
1572 {
1573 bfd_reloc_code_real_type from;
1574 bfd_reloc_code_real_type to;
1575 };
1576
1577 /* Map bfd generic reloc to AArch64-specific reloc. */
1578 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1579 {
1580 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1581
1582 /* Basic data relocations. */
1583 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1584 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1585 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1586 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1587 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1588 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1589 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1590 };
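
/* For example, a request for the generic BFD_RELOC_64 is first rewritten
   to BFD_RELOC_AARCH64_64 via the table above and then resolved against
   elfNN_aarch64_howto_table by elfNN_aarch64_howto_from_bfd_reloc below.  */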
1591
1592 /* Given the bfd internal relocation enumerator in CODE, return the
1593 corresponding howto entry. */
1594
1595 static reloc_howto_type *
1596 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1597 {
1598 unsigned int i;
1599
1600 /* Convert bfd generic reloc to AArch64-specific reloc. */
1601 if (code < BFD_RELOC_AARCH64_RELOC_START
1602 || code > BFD_RELOC_AARCH64_RELOC_END)
1603 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1604 if (elf_aarch64_reloc_map[i].from == code)
1605 {
1606 code = elf_aarch64_reloc_map[i].to;
1607 break;
1608 }
1609
1610 if (code > BFD_RELOC_AARCH64_RELOC_START
1611 && code < BFD_RELOC_AARCH64_RELOC_END)
1612 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1613 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1614
1615 if (code == BFD_RELOC_AARCH64_NONE)
1616 return &elfNN_aarch64_howto_none;
1617
1618 return NULL;
1619 }
1620
1621 static reloc_howto_type *
1622 elfNN_aarch64_howto_from_type (unsigned int r_type)
1623 {
1624 bfd_reloc_code_real_type val;
1625 reloc_howto_type *howto;
1626
1627 #if ARCH_SIZE == 32
1628 if (r_type > 256)
1629 {
1630 bfd_set_error (bfd_error_bad_value);
1631 return NULL;
1632 }
1633 #endif
1634
1635 if (r_type == R_AARCH64_NONE)
1636 return &elfNN_aarch64_howto_none;
1637
1638 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1639 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1640
1641 if (howto != NULL)
1642 return howto;
1643
1644 bfd_set_error (bfd_error_bad_value);
1645 return NULL;
1646 }
1647
1648 static void
1649 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1650 Elf_Internal_Rela *elf_reloc)
1651 {
1652 unsigned int r_type;
1653
1654 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1655 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1656 }
1657
1658 static reloc_howto_type *
1659 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1660 bfd_reloc_code_real_type code)
1661 {
1662 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1663
1664 if (howto != NULL)
1665 return howto;
1666
1667 bfd_set_error (bfd_error_bad_value);
1668 return NULL;
1669 }
1670
1671 static reloc_howto_type *
1672 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1673 const char *r_name)
1674 {
1675 unsigned int i;
1676
1677 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1678 if (elfNN_aarch64_howto_table[i].name != NULL
1679 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1680 return &elfNN_aarch64_howto_table[i];
1681
1682 return NULL;
1683 }
1684
1685 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1686 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1687 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1688 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1689
1690 /* The linker script knows the section names for placement.
1691 The entry_names are used to do simple name mangling on the stubs.
1692 Given a function name, and its type, the stub can be found. The
1693 name can be changed. The only requirement is that the %s be present. */
1694 #define STUB_ENTRY_NAME "__%s_veneer"
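/* Illustrative note, not part of the upstream source: with the format
   string above, the stub built for a call to a function `foo' is
   labelled by a local symbol named `__foo_veneer'.  */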
1695
1696 /* The name of the dynamic interpreter. This is put in the .interp
1697 section. */
1698 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1699
1700 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1701 (((1 << 25) - 1) << 2)
1702 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1703 (-((1 << 25) << 2))
1704
1705 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1706 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1707
1708 static int
1709 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1710 {
1711 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1712 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1713 }
1714
1715 static int
1716 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1717 {
1718 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1719 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1720 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1721 }
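/* Illustrative note, not part of the upstream source: the limits above
   work out to the architectural +/-128MiB reach of B/BL:

     AARCH64_MAX_FWD_BRANCH_OFFSET = ((1 << 25) - 1) << 2 = 0x7fffffc
     AARCH64_MAX_BWD_BRANCH_OFFSET = -((1 << 25) << 2)    = -0x8000000

   while aarch64_valid_for_adrp_p checks the reach of ADRP, a signed
   21-bit count of 4KiB pages, i.e. roughly +/-4GiB from the page
   containing PLACE.  */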
1722
1723 static const uint32_t aarch64_adrp_branch_stub [] =
1724 {
1725 0x90000010, /* adrp ip0, X */
1726 /* R_AARCH64_ADR_HI21_PCREL(X) */
1727 0x91000210, /* add ip0, ip0, :lo12:X */
1728 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1729 0xd61f0200, /* br ip0 */
1730 };
1731
1732 static const uint32_t aarch64_long_branch_stub[] =
1733 {
1734 #if ARCH_SIZE == 64
1735 0x58000090, /* ldr ip0, 1f */
1736 #else
1737 0x18000090, /* ldr wip0, 1f */
1738 #endif
1739 0x10000011, /* adr ip1, #0 */
1740 0x8b110210, /* add ip0, ip0, ip1 */
1741 0xd61f0200, /* br ip0 */
1742 0x00000000, /* 1: .xword or .word
1743 R_AARCH64_PRELNN(X) + 12
1744 */
1745 0x00000000,
1746 };
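/* Illustrative note, not part of the upstream source: in the long
   branch stub above, the literal at offset 16 is resolved with
   R_AARCH64_PRELNN against X + 12, so at run time it holds

     (X + 12) - (stub_address + 16) = X - (stub_address + 4)

   The ADR at offset 4 puts stub_address + 4 in ip1, so the
   ADD ip0, ip0, ip1 reconstructs the absolute address X before the
   final BR ip0.  */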
1747
1748 static const uint32_t aarch64_erratum_835769_stub[] =
1749 {
1750 0x00000000, /* Placeholder for multiply accumulate. */
1751 0x14000000, /* b <label> */
1752 };
1753
1754 static const uint32_t aarch64_erratum_843419_stub[] =
1755 {
1756 0x00000000, /* Placeholder for LDR instruction. */
1757 0x14000000, /* b <label> */
1758 };
1759
1760 /* Section name for stubs is the associated section name plus this
1761 string. */
1762 #define STUB_SUFFIX ".stub"
1763
1764 enum elf_aarch64_stub_type
1765 {
1766 aarch64_stub_none,
1767 aarch64_stub_adrp_branch,
1768 aarch64_stub_long_branch,
1769 aarch64_stub_erratum_835769_veneer,
1770 aarch64_stub_erratum_843419_veneer,
1771 };
1772
1773 struct elf_aarch64_stub_hash_entry
1774 {
1775 /* Base hash table entry structure. */
1776 struct bfd_hash_entry root;
1777
1778 /* The stub section. */
1779 asection *stub_sec;
1780
1781 /* Offset within stub_sec of the beginning of this stub. */
1782 bfd_vma stub_offset;
1783
1784 /* Given the symbol's value and its section we can determine its final
1785 value when building the stubs (so the stub knows where to jump). */
1786 bfd_vma target_value;
1787 asection *target_section;
1788
1789 enum elf_aarch64_stub_type stub_type;
1790
1791 /* The symbol table entry, if any, that this was derived from. */
1792 struct elf_aarch64_link_hash_entry *h;
1793
1794 /* Destination symbol type */
1795 unsigned char st_type;
1796
1797 /* Where this stub is being called from, or, in the case of combined
1798 stub sections, the first input section in the group. */
1799 asection *id_sec;
1800
1801 /* The name for the local symbol at the start of this stub. The
1802 stub name in the hash table has to be unique; this does not, so
1803 it can be friendlier. */
1804 char *output_name;
1805
1806 /* The instruction which caused this stub to be generated (only valid for
1807 erratum 835769 workaround stubs at present). */
1808 uint32_t veneered_insn;
1809
1810 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1811 bfd_vma adrp_offset;
1812 };
1813
1814 /* Used to build a map of a section. This is required for mixed-endian
1815 code/data. */
1816
1817 typedef struct elf_elf_section_map
1818 {
1819 bfd_vma vma;
1820 char type;
1821 }
1822 elf_aarch64_section_map;
1823
1824
1825 typedef struct _aarch64_elf_section_data
1826 {
1827 struct bfd_elf_section_data elf;
1828 unsigned int mapcount;
1829 unsigned int mapsize;
1830 elf_aarch64_section_map *map;
1831 }
1832 _aarch64_elf_section_data;
1833
1834 #define elf_aarch64_section_data(sec) \
1835 ((_aarch64_elf_section_data *) elf_section_data (sec))
1836
1837 /* The size of the thread control block which is defined to be two pointers. */
1838 #define TCB_SIZE (ARCH_SIZE/8)*2
1839
1840 struct elf_aarch64_local_symbol
1841 {
1842 unsigned int got_type;
1843 bfd_signed_vma got_refcount;
1844 bfd_vma got_offset;
1845
1846 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1847 offset is from the end of the jump table and reserved entries
1848 within the PLTGOT.
1849
1850 The magic value (bfd_vma) -1 indicates that an offset has not been
1851 allocated. */
1852 bfd_vma tlsdesc_got_jump_table_offset;
1853 };
1854
1855 struct elf_aarch64_obj_tdata
1856 {
1857 struct elf_obj_tdata root;
1858
1859 /* local symbol descriptors */
1860 struct elf_aarch64_local_symbol *locals;
1861
1862 /* Zero to warn when linking objects with incompatible enum sizes. */
1863 int no_enum_size_warning;
1864
1865 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1866 int no_wchar_size_warning;
1867 };
1868
1869 #define elf_aarch64_tdata(bfd) \
1870 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1871
1872 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1873
1874 #define is_aarch64_elf(bfd) \
1875 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1876 && elf_tdata (bfd) != NULL \
1877 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1878
1879 static bfd_boolean
1880 elfNN_aarch64_mkobject (bfd *abfd)
1881 {
1882 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1883 AARCH64_ELF_DATA);
1884 }
1885
1886 #define elf_aarch64_hash_entry(ent) \
1887 ((struct elf_aarch64_link_hash_entry *)(ent))
1888
1889 #define GOT_UNKNOWN 0
1890 #define GOT_NORMAL 1
1891 #define GOT_TLS_GD 2
1892 #define GOT_TLS_IE 4
1893 #define GOT_TLSDESC_GD 8
1894
1895 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1896
1897 /* AArch64 ELF linker hash entry. */
1898 struct elf_aarch64_link_hash_entry
1899 {
1900 struct elf_link_hash_entry root;
1901
1902 /* Track dynamic relocs copied for this symbol. */
1903 struct elf_dyn_relocs *dyn_relocs;
1904
1905 /* Since PLT entries have variable size, we need to record the
1906 index into .got.plt instead of recomputing it from the PLT
1907 offset. */
1908 bfd_signed_vma plt_got_offset;
1909
1910 /* Bit mask representing the type of GOT entry(s) if any required by
1911 this symbol. */
1912 unsigned int got_type;
1913
1914 /* A pointer to the most recently used stub hash entry against this
1915 symbol. */
1916 struct elf_aarch64_stub_hash_entry *stub_cache;
1917
1918 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1919 is from the end of the jump table and reserved entries within the PLTGOT.
1920
1921 The magic value (bfd_vma) -1 indicates that an offset has not
1922 been allocated. */
1923 bfd_vma tlsdesc_got_jump_table_offset;
1924 };
1925
1926 static unsigned int
1927 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1928 bfd *abfd,
1929 unsigned long r_symndx)
1930 {
1931 if (h)
1932 return elf_aarch64_hash_entry (h)->got_type;
1933
1934 if (! elf_aarch64_locals (abfd))
1935 return GOT_UNKNOWN;
1936
1937 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1938 }
1939
1940 /* Get the AArch64 elf linker hash table from a link_info structure. */
1941 #define elf_aarch64_hash_table(info) \
1942 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1943
1944 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1945 ((struct elf_aarch64_stub_hash_entry *) \
1946 bfd_hash_lookup ((table), (string), (create), (copy)))
1947
1948 /* AArch64 ELF linker hash table. */
1949 struct elf_aarch64_link_hash_table
1950 {
1951 /* The main hash table. */
1952 struct elf_link_hash_table root;
1953
1954 /* Nonzero to force PIC branch veneers. */
1955 int pic_veneer;
1956
1957 /* Fix erratum 835769. */
1958 int fix_erratum_835769;
1959
1960 /* Fix erratum 843419. */
1961 int fix_erratum_843419;
1962
1963 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1964 int fix_erratum_843419_adr;
1965
1966 /* The number of bytes in the initial entry in the PLT. */
1967 bfd_size_type plt_header_size;
1968
1969 /* The number of bytes in the subsequent PLT entries. */
1970 bfd_size_type plt_entry_size;
1971
1972 /* Short-cuts to get to dynamic linker sections. */
1973 asection *sdynbss;
1974 asection *srelbss;
1975
1976 /* Small local sym cache. */
1977 struct sym_cache sym_cache;
1978
1979 /* For convenience in allocate_dynrelocs. */
1980 bfd *obfd;
1981
1982 /* The amount of space used by the reserved portion of the sgotplt
1983 section, plus whatever space is used by the jump slots. */
1984 bfd_vma sgotplt_jump_table_size;
1985
1986 /* The stub hash table. */
1987 struct bfd_hash_table stub_hash_table;
1988
1989 /* Linker stub bfd. */
1990 bfd *stub_bfd;
1991
1992 /* Linker call-backs. */
1993 asection *(*add_stub_section) (const char *, asection *);
1994 void (*layout_sections_again) (void);
1995
1996 /* Array to keep track of which stub sections have been created, and
1997 information on stub grouping. */
1998 struct map_stub
1999 {
2000 /* This is the section to which stubs in the group will be
2001 attached. */
2002 asection *link_sec;
2003 /* The stub section. */
2004 asection *stub_sec;
2005 } *stub_group;
2006
2007 /* Assorted information used by elfNN_aarch64_size_stubs. */
2008 unsigned int bfd_count;
2009 int top_index;
2010 asection **input_list;
2011
2012 /* The offset into splt of the PLT entry for the TLS descriptor
2013 resolver. Special values are 0, if not necessary (or not found
2014 to be necessary yet), and -1 if needed but not determined
2015 yet. */
2016 bfd_vma tlsdesc_plt;
2017
2018 /* The GOT offset for the lazy trampoline. Communicated to the
2019 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2020 indicates an offset is not allocated. */
2021 bfd_vma dt_tlsdesc_got;
2022
2023 /* Used by local STT_GNU_IFUNC symbols. */
2024 htab_t loc_hash_table;
2025 void * loc_hash_memory;
2026 };
2027
2028 /* Create an entry in an AArch64 ELF linker hash table. */
2029
2030 static struct bfd_hash_entry *
2031 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2032 struct bfd_hash_table *table,
2033 const char *string)
2034 {
2035 struct elf_aarch64_link_hash_entry *ret =
2036 (struct elf_aarch64_link_hash_entry *) entry;
2037
2038 /* Allocate the structure if it has not already been allocated by a
2039 subclass. */
2040 if (ret == NULL)
2041 ret = bfd_hash_allocate (table,
2042 sizeof (struct elf_aarch64_link_hash_entry));
2043 if (ret == NULL)
2044 return (struct bfd_hash_entry *) ret;
2045
2046 /* Call the allocation method of the superclass. */
2047 ret = ((struct elf_aarch64_link_hash_entry *)
2048 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2049 table, string));
2050 if (ret != NULL)
2051 {
2052 ret->dyn_relocs = NULL;
2053 ret->got_type = GOT_UNKNOWN;
2054 ret->plt_got_offset = (bfd_vma) - 1;
2055 ret->stub_cache = NULL;
2056 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2057 }
2058
2059 return (struct bfd_hash_entry *) ret;
2060 }
2061
2062 /* Initialize an entry in the stub hash table. */
2063
2064 static struct bfd_hash_entry *
2065 stub_hash_newfunc (struct bfd_hash_entry *entry,
2066 struct bfd_hash_table *table, const char *string)
2067 {
2068 /* Allocate the structure if it has not already been allocated by a
2069 subclass. */
2070 if (entry == NULL)
2071 {
2072 entry = bfd_hash_allocate (table,
2073 sizeof (struct
2074 elf_aarch64_stub_hash_entry));
2075 if (entry == NULL)
2076 return entry;
2077 }
2078
2079 /* Call the allocation method of the superclass. */
2080 entry = bfd_hash_newfunc (entry, table, string);
2081 if (entry != NULL)
2082 {
2083 struct elf_aarch64_stub_hash_entry *eh;
2084
2085 /* Initialize the local fields. */
2086 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2087 eh->adrp_offset = 0;
2088 eh->stub_sec = NULL;
2089 eh->stub_offset = 0;
2090 eh->target_value = 0;
2091 eh->target_section = NULL;
2092 eh->stub_type = aarch64_stub_none;
2093 eh->h = NULL;
2094 eh->id_sec = NULL;
2095 }
2096
2097 return entry;
2098 }
2099
2100 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2101 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2102 as global symbols. We reuse indx and dynstr_index for the local
2103 symbol hash since they aren't used by global symbols in this backend. */
2104
2105 static hashval_t
2106 elfNN_aarch64_local_htab_hash (const void *ptr)
2107 {
2108 struct elf_link_hash_entry *h
2109 = (struct elf_link_hash_entry *) ptr;
2110 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2111 }
2112
2113 /* Compare local hash entries. */
2114
2115 static int
2116 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2117 {
2118 struct elf_link_hash_entry *h1
2119 = (struct elf_link_hash_entry *) ptr1;
2120 struct elf_link_hash_entry *h2
2121 = (struct elf_link_hash_entry *) ptr2;
2122
2123 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2124 }
2125
2126 /* Find and/or create a hash entry for a local symbol. */
2127
2128 static struct elf_link_hash_entry *
2129 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2130 bfd *abfd, const Elf_Internal_Rela *rel,
2131 bfd_boolean create)
2132 {
2133 struct elf_aarch64_link_hash_entry e, *ret;
2134 asection *sec = abfd->sections;
2135 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2136 ELFNN_R_SYM (rel->r_info));
2137 void **slot;
2138
2139 e.root.indx = sec->id;
2140 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2141 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2142 create ? INSERT : NO_INSERT);
2143
2144 if (!slot)
2145 return NULL;
2146
2147 if (*slot)
2148 {
2149 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2150 return &ret->root;
2151 }
2152
2153 ret = (struct elf_aarch64_link_hash_entry *)
2154 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2155 sizeof (struct elf_aarch64_link_hash_entry));
2156 if (ret)
2157 {
2158 memset (ret, 0, sizeof (*ret));
2159 ret->root.indx = sec->id;
2160 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2161 ret->root.dynindx = -1;
2162 *slot = ret;
2163 }
2164 return &ret->root;
2165 }
2166
2167 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2168
2169 static void
2170 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2171 struct elf_link_hash_entry *dir,
2172 struct elf_link_hash_entry *ind)
2173 {
2174 struct elf_aarch64_link_hash_entry *edir, *eind;
2175
2176 edir = (struct elf_aarch64_link_hash_entry *) dir;
2177 eind = (struct elf_aarch64_link_hash_entry *) ind;
2178
2179 if (eind->dyn_relocs != NULL)
2180 {
2181 if (edir->dyn_relocs != NULL)
2182 {
2183 struct elf_dyn_relocs **pp;
2184 struct elf_dyn_relocs *p;
2185
2186 /* Add reloc counts against the indirect sym to the direct sym
2187 list. Merge any entries against the same section. */
2188 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2189 {
2190 struct elf_dyn_relocs *q;
2191
2192 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2193 if (q->sec == p->sec)
2194 {
2195 q->pc_count += p->pc_count;
2196 q->count += p->count;
2197 *pp = p->next;
2198 break;
2199 }
2200 if (q == NULL)
2201 pp = &p->next;
2202 }
2203 *pp = edir->dyn_relocs;
2204 }
2205
2206 edir->dyn_relocs = eind->dyn_relocs;
2207 eind->dyn_relocs = NULL;
2208 }
2209
2210 if (ind->root.type == bfd_link_hash_indirect)
2211 {
2212 /* Copy over PLT info. */
2213 if (dir->got.refcount <= 0)
2214 {
2215 edir->got_type = eind->got_type;
2216 eind->got_type = GOT_UNKNOWN;
2217 }
2218 }
2219
2220 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2221 }
2222
2223 /* Destroy an AArch64 elf linker hash table. */
2224
2225 static void
2226 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2227 {
2228 struct elf_aarch64_link_hash_table *ret
2229 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2230
2231 if (ret->loc_hash_table)
2232 htab_delete (ret->loc_hash_table);
2233 if (ret->loc_hash_memory)
2234 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2235
2236 bfd_hash_table_free (&ret->stub_hash_table);
2237 _bfd_elf_link_hash_table_free (obfd);
2238 }
2239
2240 /* Create an AArch64 elf linker hash table. */
2241
2242 static struct bfd_link_hash_table *
2243 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2244 {
2245 struct elf_aarch64_link_hash_table *ret;
2246 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2247
2248 ret = bfd_zmalloc (amt);
2249 if (ret == NULL)
2250 return NULL;
2251
2252 if (!_bfd_elf_link_hash_table_init
2253 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2254 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2255 {
2256 free (ret);
2257 return NULL;
2258 }
2259
2260 ret->plt_header_size = PLT_ENTRY_SIZE;
2261 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2262 ret->obfd = abfd;
2263 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2264
2265 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2266 sizeof (struct elf_aarch64_stub_hash_entry)))
2267 {
2268 _bfd_elf_link_hash_table_free (abfd);
2269 return NULL;
2270 }
2271
2272 ret->loc_hash_table = htab_try_create (1024,
2273 elfNN_aarch64_local_htab_hash,
2274 elfNN_aarch64_local_htab_eq,
2275 NULL);
2276 ret->loc_hash_memory = objalloc_create ();
2277 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2278 {
2279 elfNN_aarch64_link_hash_table_free (abfd);
2280 return NULL;
2281 }
2282 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2283
2284 return &ret->root.root;
2285 }
2286
2287 static bfd_boolean
2288 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2289 bfd_vma offset, bfd_vma value)
2290 {
2291 reloc_howto_type *howto;
2292 bfd_vma place;
2293
2294 howto = elfNN_aarch64_howto_from_type (r_type);
2295 place = (input_section->output_section->vma + input_section->output_offset
2296 + offset);
2297
2298 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2299 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2300 return _bfd_aarch64_elf_put_addend (input_bfd,
2301 input_section->contents + offset, r_type,
2302 howto, value);
2303 }
2304
2305 static enum elf_aarch64_stub_type
2306 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2307 {
2308 if (aarch64_valid_for_adrp_p (value, place))
2309 return aarch64_stub_adrp_branch;
2310 return aarch64_stub_long_branch;
2311 }
2312
2313 /* Determine the type of stub needed, if any, for a call. */
2314
2315 static enum elf_aarch64_stub_type
2316 aarch64_type_of_stub (struct bfd_link_info *info,
2317 asection *input_sec,
2318 const Elf_Internal_Rela *rel,
2319 unsigned char st_type,
2320 struct elf_aarch64_link_hash_entry *hash,
2321 bfd_vma destination)
2322 {
2323 bfd_vma location;
2324 bfd_signed_vma branch_offset;
2325 unsigned int r_type;
2326 struct elf_aarch64_link_hash_table *globals;
2327 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2328 bfd_boolean via_plt_p;
2329
2330 if (st_type != STT_FUNC)
2331 return stub_type;
2332
2333 globals = elf_aarch64_hash_table (info);
2334 via_plt_p = (globals->root.splt != NULL && hash != NULL
2335 && hash->root.plt.offset != (bfd_vma) - 1);
2336 /* Make sure the call to the PLT stub fits into the branch range. */
2337 if (via_plt_p)
2338 destination = (globals->root.splt->output_section->vma
2339 + globals->root.splt->output_offset
2340 + hash->root.plt.offset);
2341
2342 /* Determine where the call point is. */
2343 location = (input_sec->output_offset
2344 + input_sec->output_section->vma + rel->r_offset);
2345
2346 branch_offset = (bfd_signed_vma) (destination - location);
2347
2348 r_type = ELFNN_R_TYPE (rel->r_info);
2349
2350 /* We don't want to redirect any old unconditional jump in this way,
2351 only one which is being used for a sibcall, where it is
2352 acceptable for the IP0 and IP1 registers to be clobbered. */
2353 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2354 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2355 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2356 {
2357 stub_type = aarch64_stub_long_branch;
2358 }
2359
2360 return stub_type;
2361 }
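/* Illustrative example, not part of the upstream source (the addresses
   are made up): for a BL (AARCH64_R (CALL26)) at address 0x00400000
   whose (possibly PLT-redirected) destination is 0x10400000,
   branch_offset is 0x10000000, which exceeds
   AARCH64_MAX_FWD_BRANCH_OFFSET (0x7fffffc), so
   aarch64_stub_long_branch is returned; a destination within
   +/-128MiB would need no stub at all.  */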
2362
2363 /* Build a name for an entry in the stub hash table. */
2364
2365 static char *
2366 elfNN_aarch64_stub_name (const asection *input_section,
2367 const asection *sym_sec,
2368 const struct elf_aarch64_link_hash_entry *hash,
2369 const Elf_Internal_Rela *rel)
2370 {
2371 char *stub_name;
2372 bfd_size_type len;
2373
2374 if (hash)
2375 {
2376 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2377 stub_name = bfd_malloc (len);
2378 if (stub_name != NULL)
2379 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2380 (unsigned int) input_section->id,
2381 hash->root.root.root.string,
2382 rel->r_addend);
2383 }
2384 else
2385 {
2386 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2387 stub_name = bfd_malloc (len);
2388 if (stub_name != NULL)
2389 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2390 (unsigned int) input_section->id,
2391 (unsigned int) sym_sec->id,
2392 (unsigned int) ELFNN_R_SYM (rel->r_info),
2393 rel->r_addend);
2394 }
2395
2396 return stub_name;
2397 }
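/* Illustrative example, not part of the upstream source (the ids are
   made up): for a call against a global symbol `bar' from the input
   section with id 0x1a and addend 0, the hash key built above is
   "0000001a_bar+0"; for a local symbol the key instead encodes the
   symbol section id and symbol index, e.g. "0000001a_2f:15+0".  */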
2398
2399 /* Look up an entry in the stub hash. Stub entries are cached because
2400 creating the stub name takes a bit of time. */
2401
2402 static struct elf_aarch64_stub_hash_entry *
2403 elfNN_aarch64_get_stub_entry (const asection *input_section,
2404 const asection *sym_sec,
2405 struct elf_link_hash_entry *hash,
2406 const Elf_Internal_Rela *rel,
2407 struct elf_aarch64_link_hash_table *htab)
2408 {
2409 struct elf_aarch64_stub_hash_entry *stub_entry;
2410 struct elf_aarch64_link_hash_entry *h =
2411 (struct elf_aarch64_link_hash_entry *) hash;
2412 const asection *id_sec;
2413
2414 if ((input_section->flags & SEC_CODE) == 0)
2415 return NULL;
2416
2417 /* If this input section is part of a group of sections sharing one
2418 stub section, then use the id of the first section in the group.
2419 Stub names need to include a section id, as there may well be
2420 more than one stub used to reach say, printf, and we need to
2421 distinguish between them. */
2422 id_sec = htab->stub_group[input_section->id].link_sec;
2423
2424 if (h != NULL && h->stub_cache != NULL
2425 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2426 {
2427 stub_entry = h->stub_cache;
2428 }
2429 else
2430 {
2431 char *stub_name;
2432
2433 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2434 if (stub_name == NULL)
2435 return NULL;
2436
2437 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2438 stub_name, FALSE, FALSE);
2439 if (h != NULL)
2440 h->stub_cache = stub_entry;
2441
2442 free (stub_name);
2443 }
2444
2445 return stub_entry;
2446 }
2447
2448
2449 /* Create a stub section. */
2450
2451 static asection *
2452 _bfd_aarch64_create_stub_section (asection *section,
2453 struct elf_aarch64_link_hash_table *htab)
2454 {
2455 size_t namelen;
2456 bfd_size_type len;
2457 char *s_name;
2458
2459 namelen = strlen (section->name);
2460 len = namelen + sizeof (STUB_SUFFIX);
2461 s_name = bfd_alloc (htab->stub_bfd, len);
2462 if (s_name == NULL)
2463 return NULL;
2464
2465 memcpy (s_name, section->name, namelen);
2466 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2467 return (*htab->add_stub_section) (s_name, section);
2468 }
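/* Illustrative note, not part of the upstream source: with STUB_SUFFIX
   defined as ".stub", stubs grouped with an input section named
   ".text" are collected in a section named ".text.stub", created via
   the add_stub_section linker call-back.  */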
2469
2470
2471 /* Find or create a stub section for a link section.
2472
2473 Fix or create the stub section used to collect stubs attached to
2474 the specified link section. */
2475
2476 static asection *
2477 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2478 struct elf_aarch64_link_hash_table *htab)
2479 {
2480 if (htab->stub_group[link_section->id].stub_sec == NULL)
2481 htab->stub_group[link_section->id].stub_sec
2482 = _bfd_aarch64_create_stub_section (link_section, htab);
2483 return htab->stub_group[link_section->id].stub_sec;
2484 }
2485
2486
2487 /* Find or create a stub section in the stub group for an input
2488 section. */
2489
2490 static asection *
2491 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2492 struct elf_aarch64_link_hash_table *htab)
2493 {
2494 asection *link_sec = htab->stub_group[section->id].link_sec;
2495 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2496 }
2497
2498
2499 /* Add a new stub entry in the stub group associated with an input
2500 section to the stub hash. Not all fields of the new stub entry are
2501 initialised. */
2502
2503 static struct elf_aarch64_stub_hash_entry *
2504 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2505 asection *section,
2506 struct elf_aarch64_link_hash_table *htab)
2507 {
2508 asection *link_sec;
2509 asection *stub_sec;
2510 struct elf_aarch64_stub_hash_entry *stub_entry;
2511
2512 link_sec = htab->stub_group[section->id].link_sec;
2513 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2514
2515 /* Enter this entry into the linker stub hash table. */
2516 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2517 TRUE, FALSE);
2518 if (stub_entry == NULL)
2519 {
2520 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2521 section->owner, stub_name);
2522 return NULL;
2523 }
2524
2525 stub_entry->stub_sec = stub_sec;
2526 stub_entry->stub_offset = 0;
2527 stub_entry->id_sec = link_sec;
2528
2529 return stub_entry;
2530 }
2531
2532 /* Add a new stub entry in the final stub section to the stub hash.
2533 Not all fields of the new stub entry are initialised. */
2534
2535 static struct elf_aarch64_stub_hash_entry *
2536 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2537 asection *link_section,
2538 struct elf_aarch64_link_hash_table *htab)
2539 {
2540 asection *stub_sec;
2541 struct elf_aarch64_stub_hash_entry *stub_entry;
2542
2543 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2544 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2545 TRUE, FALSE);
2546 if (stub_entry == NULL)
2547 {
2548 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2549 return NULL;
2550 }
2551
2552 stub_entry->stub_sec = stub_sec;
2553 stub_entry->stub_offset = 0;
2554 stub_entry->id_sec = link_section;
2555
2556 return stub_entry;
2557 }
2558
2559
2560 static bfd_boolean
2561 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2562 void *in_arg ATTRIBUTE_UNUSED)
2563 {
2564 struct elf_aarch64_stub_hash_entry *stub_entry;
2565 asection *stub_sec;
2566 bfd *stub_bfd;
2567 bfd_byte *loc;
2568 bfd_vma sym_value;
2569 bfd_vma veneered_insn_loc;
2570 bfd_vma veneer_entry_loc;
2571 bfd_signed_vma branch_offset = 0;
2572 unsigned int template_size;
2573 const uint32_t *template;
2574 unsigned int i;
2575
2576 /* Massage our args to the form they really have. */
2577 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2578
2579 stub_sec = stub_entry->stub_sec;
2580
2581 /* Make a note of the offset within the stubs for this entry. */
2582 stub_entry->stub_offset = stub_sec->size;
2583 loc = stub_sec->contents + stub_entry->stub_offset;
2584
2585 stub_bfd = stub_sec->owner;
2586
2587 /* This is the address of the stub destination. */
2588 sym_value = (stub_entry->target_value
2589 + stub_entry->target_section->output_offset
2590 + stub_entry->target_section->output_section->vma);
2591
2592 if (stub_entry->stub_type == aarch64_stub_long_branch)
2593 {
2594 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2595 + stub_sec->output_offset);
2596
2597 /* See if we can relax the stub. */
2598 if (aarch64_valid_for_adrp_p (sym_value, place))
2599 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2600 }
2601
2602 switch (stub_entry->stub_type)
2603 {
2604 case aarch64_stub_adrp_branch:
2605 template = aarch64_adrp_branch_stub;
2606 template_size = sizeof (aarch64_adrp_branch_stub);
2607 break;
2608 case aarch64_stub_long_branch:
2609 template = aarch64_long_branch_stub;
2610 template_size = sizeof (aarch64_long_branch_stub);
2611 break;
2612 case aarch64_stub_erratum_835769_veneer:
2613 template = aarch64_erratum_835769_stub;
2614 template_size = sizeof (aarch64_erratum_835769_stub);
2615 break;
2616 case aarch64_stub_erratum_843419_veneer:
2617 template = aarch64_erratum_843419_stub;
2618 template_size = sizeof (aarch64_erratum_843419_stub);
2619 break;
2620 default:
2621 abort ();
2622 }
2623
2624 for (i = 0; i < (template_size / sizeof template[0]); i++)
2625 {
2626 bfd_putl32 (template[i], loc);
2627 loc += 4;
2628 }
2629
2630 template_size = (template_size + 7) & ~7;
2631 stub_sec->size += template_size;
2632
2633 switch (stub_entry->stub_type)
2634 {
2635 case aarch64_stub_adrp_branch:
2636 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2637 stub_entry->stub_offset, sym_value))
2638 /* The stub would not have been relaxed if the offset was out
2639 of range. */
2640 BFD_FAIL ();
2641
2642 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2643 stub_entry->stub_offset + 4, sym_value))
2644 BFD_FAIL ();
2645 break;
2646
2647 case aarch64_stub_long_branch:
2648 /* We want the value relative to the address 12 bytes back from the
2649 value itself. */
2650 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2651 stub_entry->stub_offset + 16, sym_value + 12))
2652 BFD_FAIL ();
2653 break;
2654
2655 case aarch64_stub_erratum_835769_veneer:
2656 veneered_insn_loc = stub_entry->target_section->output_section->vma
2657 + stub_entry->target_section->output_offset
2658 + stub_entry->target_value;
2659 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2660 + stub_entry->stub_sec->output_offset
2661 + stub_entry->stub_offset;
2662 branch_offset = veneered_insn_loc - veneer_entry_loc;
2663 branch_offset >>= 2;
2664 branch_offset &= 0x3ffffff;
2665 bfd_putl32 (stub_entry->veneered_insn,
2666 stub_sec->contents + stub_entry->stub_offset);
2667 bfd_putl32 (template[1] | branch_offset,
2668 stub_sec->contents + stub_entry->stub_offset + 4);
2669 break;
2670
2671 case aarch64_stub_erratum_843419_veneer:
2672 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2673 stub_entry->stub_offset + 4, sym_value + 4))
2674 BFD_FAIL ();
2675 break;
2676
2677 default:
2678 abort ();
2679 }
2680
2681 return TRUE;
2682 }
2683
2684 /* As above, but don't actually build the stub. Just bump offset so
2685 we know stub section sizes. */
2686
2687 static bfd_boolean
2688 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2689 void *in_arg ATTRIBUTE_UNUSED)
2690 {
2691 struct elf_aarch64_stub_hash_entry *stub_entry;
2692 int size;
2693
2694 /* Massage our args to the form they really have. */
2695 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2696
2697 switch (stub_entry->stub_type)
2698 {
2699 case aarch64_stub_adrp_branch:
2700 size = sizeof (aarch64_adrp_branch_stub);
2701 break;
2702 case aarch64_stub_long_branch:
2703 size = sizeof (aarch64_long_branch_stub);
2704 break;
2705 case aarch64_stub_erratum_835769_veneer:
2706 size = sizeof (aarch64_erratum_835769_stub);
2707 break;
2708 case aarch64_stub_erratum_843419_veneer:
2709 size = sizeof (aarch64_erratum_843419_stub);
2710 break;
2711 default:
2712 abort ();
2713 }
2714
2715 size = (size + 7) & ~7;
2716 stub_entry->stub_sec->size += size;
2717 return TRUE;
2718 }
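/* Illustrative note, not part of the upstream source: the rounding
   `(size + 7) & ~7' pads each stub to a multiple of 8 bytes, e.g. the
   12-byte adrp branch stub is accounted as 16 bytes while the 24-byte
   long branch stub stays at 24, which keeps successive stub offsets
   8-byte aligned.  */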
2719
2720 /* External entry points for sizing and building linker stubs. */
2721
2722 /* Set up various things so that we can make a list of input sections
2723 for each output section included in the link. Returns -1 on error,
2724 0 when no stubs will be needed, and 1 on success. */
2725
2726 int
2727 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2728 struct bfd_link_info *info)
2729 {
2730 bfd *input_bfd;
2731 unsigned int bfd_count;
2732 int top_id, top_index;
2733 asection *section;
2734 asection **input_list, **list;
2735 bfd_size_type amt;
2736 struct elf_aarch64_link_hash_table *htab =
2737 elf_aarch64_hash_table (info);
2738
2739 if (!is_elf_hash_table (htab))
2740 return 0;
2741
2742 /* Count the number of input BFDs and find the top input section id. */
2743 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2744 input_bfd != NULL; input_bfd = input_bfd->link.next)
2745 {
2746 bfd_count += 1;
2747 for (section = input_bfd->sections;
2748 section != NULL; section = section->next)
2749 {
2750 if (top_id < section->id)
2751 top_id = section->id;
2752 }
2753 }
2754 htab->bfd_count = bfd_count;
2755
2756 amt = sizeof (struct map_stub) * (top_id + 1);
2757 htab->stub_group = bfd_zmalloc (amt);
2758 if (htab->stub_group == NULL)
2759 return -1;
2760
2761 /* We can't use output_bfd->section_count here to find the top output
2762 section index as some sections may have been removed, and
2763 _bfd_strip_section_from_output doesn't renumber the indices. */
2764 for (section = output_bfd->sections, top_index = 0;
2765 section != NULL; section = section->next)
2766 {
2767 if (top_index < section->index)
2768 top_index = section->index;
2769 }
2770
2771 htab->top_index = top_index;
2772 amt = sizeof (asection *) * (top_index + 1);
2773 input_list = bfd_malloc (amt);
2774 htab->input_list = input_list;
2775 if (input_list == NULL)
2776 return -1;
2777
2778 /* For sections we aren't interested in, mark their entries with a
2779 value we can check later. */
2780 list = input_list + top_index;
2781 do
2782 *list = bfd_abs_section_ptr;
2783 while (list-- != input_list);
2784
2785 for (section = output_bfd->sections;
2786 section != NULL; section = section->next)
2787 {
2788 if ((section->flags & SEC_CODE) != 0)
2789 input_list[section->index] = NULL;
2790 }
2791
2792 return 1;
2793 }
2794
2795 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2796 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2797
2798 /* The linker repeatedly calls this function for each input section,
2799 in the order that input sections are linked into output sections.
2800 Build lists of input sections to determine groupings between which
2801 we may insert linker stubs. */
2802
2803 void
2804 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2805 {
2806 struct elf_aarch64_link_hash_table *htab =
2807 elf_aarch64_hash_table (info);
2808
2809 if (isec->output_section->index <= htab->top_index)
2810 {
2811 asection **list = htab->input_list + isec->output_section->index;
2812
2813 if (*list != bfd_abs_section_ptr)
2814 {
2815 /* Steal the link_sec pointer for our list. */
2816 /* This happens to make the list in reverse order,
2817 which is what we want. */
2818 PREV_SEC (isec) = *list;
2819 *list = isec;
2820 }
2821 }
2822 }
2823
2824 /* See whether we can group stub sections together. Grouping stub
2825 sections may result in fewer stubs. More importantly, we need to
2826 put all .init* and .fini* stubs at the beginning of the .init or
2827 .fini output sections respectively, because glibc splits the
2828 _init and _fini functions into multiple parts. Putting a stub in
2829 the middle of a function is not a good idea. */
2830
2831 static void
2832 group_sections (struct elf_aarch64_link_hash_table *htab,
2833 bfd_size_type stub_group_size,
2834 bfd_boolean stubs_always_before_branch)
2835 {
2836 asection **list = htab->input_list + htab->top_index;
2837
2838 do
2839 {
2840 asection *tail = *list;
2841
2842 if (tail == bfd_abs_section_ptr)
2843 continue;
2844
2845 while (tail != NULL)
2846 {
2847 asection *curr;
2848 asection *prev;
2849 bfd_size_type total;
2850
2851 curr = tail;
2852 total = tail->size;
2853 while ((prev = PREV_SEC (curr)) != NULL
2854 && ((total += curr->output_offset - prev->output_offset)
2855 < stub_group_size))
2856 curr = prev;
2857
2858 /* OK, the size from the start of CURR to the end is less
2859 than stub_group_size and thus can be handled by one stub
2860 section. (Or the tail section is itself larger than
2861 stub_group_size, in which case we may be toast.)
2862 We should really be keeping track of the total size of
2863 stubs added here, as stubs contribute to the final output
2864 section size. */
2865 do
2866 {
2867 prev = PREV_SEC (tail);
2868 /* Set up this stub group. */
2869 htab->stub_group[tail->id].link_sec = curr;
2870 }
2871 while (tail != curr && (tail = prev) != NULL);
2872
2873 /* But wait, there's more! Input sections up to stub_group_size
2874 bytes before the stub section can be handled by it too. */
2875 if (!stubs_always_before_branch)
2876 {
2877 total = 0;
2878 while (prev != NULL
2879 && ((total += tail->output_offset - prev->output_offset)
2880 < stub_group_size))
2881 {
2882 tail = prev;
2883 prev = PREV_SEC (tail);
2884 htab->stub_group[tail->id].link_sec = curr;
2885 }
2886 }
2887 tail = prev;
2888 }
2889 }
2890 while (list-- != htab->input_list);
2891
2892 free (htab->input_list);
2893 }
2894
2895 #undef PREV_SEC
2896
2897 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2898
2899 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2900 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2901 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2902 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2903 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2904 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2905
2906 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2907 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2908 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2909 #define AARCH64_ZR 0x1f
2910
2911 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2912 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2913
2914 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2915 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2916 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2917 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2918 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2919 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2920 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2921 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2922 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2923 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2924 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2925 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2926 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2927 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2928 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2929 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2930 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2931 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
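/* Illustrative example, not part of the upstream source: the 64-bit
   load `ldr x0, [x1, #8]' encodes as 0xf9400420.  It satisfies
   AARCH64_LDST and AARCH64_LDST_UIMM, AARCH64_LD reports it as a load
   (bit 22 is set), and AARCH64_RT/AARCH64_RN extract x0 and x1
   respectively; this is how aarch64_mem_op_p below classifies it.  */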
2932
2933 /* Classify INSN as a load/store if it is indeed one.
2934
2935 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2936
2937 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2938 is set equal to RT.
2939
2940 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
2943
2944 static bfd_boolean
2945 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2946 bfd_boolean *pair, bfd_boolean *load)
2947 {
2948 uint32_t opcode;
2949 unsigned int r;
2950 uint32_t opc = 0;
2951 uint32_t v = 0;
2952 uint32_t opc_v = 0;
2953
2954 /* Bail out quickly if INSN doesn't fall into the load-store
2955 encoding space. */
2956 if (!AARCH64_LDST (insn))
2957 return FALSE;
2958
2959 *pair = FALSE;
2960 *load = FALSE;
2961 if (AARCH64_LDST_EX (insn))
2962 {
2963 *rt = AARCH64_RT (insn);
2964 *rt2 = *rt;
2965 if (AARCH64_BIT (insn, 21) == 1)
2966 {
2967 *pair = TRUE;
2968 *rt2 = AARCH64_RT2 (insn);
2969 }
2970 *load = AARCH64_LD (insn);
2971 return TRUE;
2972 }
2973 else if (AARCH64_LDST_NAP (insn)
2974 || AARCH64_LDSTP_PI (insn)
2975 || AARCH64_LDSTP_O (insn)
2976 || AARCH64_LDSTP_PRE (insn))
2977 {
2978 *pair = TRUE;
2979 *rt = AARCH64_RT (insn);
2980 *rt2 = AARCH64_RT2 (insn);
2981 *load = AARCH64_LD (insn);
2982 return TRUE;
2983 }
2984 else if (AARCH64_LDST_PCREL (insn)
2985 || AARCH64_LDST_UI (insn)
2986 || AARCH64_LDST_PIIMM (insn)
2987 || AARCH64_LDST_U (insn)
2988 || AARCH64_LDST_PREIMM (insn)
2989 || AARCH64_LDST_RO (insn)
2990 || AARCH64_LDST_UIMM (insn))
2991 {
2992 *rt = AARCH64_RT (insn);
2993 *rt2 = *rt;
2994 if (AARCH64_LDST_PCREL (insn))
2995 *load = TRUE;
2996 opc = AARCH64_BITS (insn, 22, 2);
2997 v = AARCH64_BIT (insn, 26);
2998 opc_v = opc | (v << 2);
2999 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3000 || opc_v == 5 || opc_v == 7);
3001 return TRUE;
3002 }
3003 else if (AARCH64_LDST_SIMD_M (insn)
3004 || AARCH64_LDST_SIMD_M_PI (insn))
3005 {
3006 *rt = AARCH64_RT (insn);
3007 *load = AARCH64_BIT (insn, 22);
3008 opcode = (insn >> 12) & 0xf;
3009 switch (opcode)
3010 {
3011 case 0:
3012 case 2:
3013 *rt2 = *rt + 3;
3014 break;
3015
3016 case 4:
3017 case 6:
3018 *rt2 = *rt + 2;
3019 break;
3020
3021 case 7:
3022 *rt2 = *rt;
3023 break;
3024
3025 case 8:
3026 case 10:
3027 *rt2 = *rt + 1;
3028 break;
3029
3030 default:
3031 return FALSE;
3032 }
3033 return TRUE;
3034 }
3035 else if (AARCH64_LDST_SIMD_S (insn)
3036 || AARCH64_LDST_SIMD_S_PI (insn))
3037 {
3038 *rt = AARCH64_RT (insn);
3039 r = (insn >> 21) & 1;
3040 *load = AARCH64_BIT (insn, 22);
3041 opcode = (insn >> 13) & 0x7;
3042 switch (opcode)
3043 {
3044 case 0:
3045 case 2:
3046 case 4:
3047 *rt2 = *rt + r;
3048 break;
3049
3050 case 1:
3051 case 3:
3052 case 5:
3053 *rt2 = *rt + (r == 0 ? 2 : 3);
3054 break;
3055
3056 case 6:
3057 *rt2 = *rt + r;
3058 break;
3059
3060 case 7:
3061 *rt2 = *rt + (r == 0 ? 2 : 3);
3062 break;
3063
3064 default:
3065 return FALSE;
3066 }
3067 return TRUE;
3068 }
3069
3070 return FALSE;
3071 }
3072
3073 /* Return TRUE if INSN is multiply-accumulate. */
3074
3075 static bfd_boolean
3076 aarch64_mlxl_p (uint32_t insn)
3077 {
3078 uint32_t op31 = AARCH64_OP31 (insn);
3079
3080 if (AARCH64_MAC (insn)
3081 && (op31 == 0 || op31 == 1 || op31 == 5)
3082 /* Exclude MUL instructions which are encoded as a multiple accumulate
3083 with RA = XZR. */
3084 && AARCH64_RA (insn) != AARCH64_ZR)
3085 return TRUE;
3086
3087 return FALSE;
3088 }
3089
3090 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3091 it is possible for a 64-bit multiply-accumulate instruction to generate an
3092 incorrect result. The details are quite complex and hard to
3093 determine statically, since branches in the code may exist in some
3094 circumstances, but all cases end with a memory (load, store, or
3095 prefetch) instruction followed immediately by the multiply-accumulate
3096 operation. We employ a linker patching technique, by moving the potentially
3097 affected multiply-accumulate instruction into a patch region and replacing
3098 the original instruction with a branch to the patch. This function checks
3099 if INSN_1 is the memory operation followed by a multiply-accumulate
3100 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3101 if INSN_1 and INSN_2 are safe. */
3102
3103 static bfd_boolean
3104 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3105 {
3106 uint32_t rt;
3107 uint32_t rt2;
3108 uint32_t rn;
3109 uint32_t rm;
3110 uint32_t ra;
3111 bfd_boolean pair;
3112 bfd_boolean load;
3113
3114 if (aarch64_mlxl_p (insn_2)
3115 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3116 {
3117 /* Any SIMD memory op is independent of the subsequent MLA
3118 by definition of the erratum. */
3119 if (AARCH64_BIT (insn_1, 26))
3120 return TRUE;
3121
3122 /* If not SIMD, check for integer memory ops and MLA relationship. */
3123 rn = AARCH64_RN (insn_2);
3124 ra = AARCH64_RA (insn_2);
3125 rm = AARCH64_RM (insn_2);
3126
3127 /* If this is a load and there's a true (RAW) dependency, we are safe
3128 and this is not an erratum sequence. */
3129 if (load &&
3130 (rt == rn || rt == rm || rt == ra
3131 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3132 return FALSE;
3133
3134 /* We conservatively put out stubs for all other cases (including
3135 writebacks). */
3136 return TRUE;
3137 }
3138
3139 return FALSE;
3140 }
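/* Illustrative example, not part of the upstream source: the pair

     ldr  x10, [x2, #8]      (memory operation, insn_1)
     madd x0, x1, x3, x4     (64-bit multiply-accumulate, insn_2)

   is flagged by aarch64_erratum_sequence: the MADD has a non-zero
   accumulator (RA != XZR, so aarch64_mlxl_p accepts it) and the loaded
   register x10 feeds none of x1, x3 or x4, so there is no RAW
   dependency to rule the sequence out.  A plain `mul x0, x1, x3'
   (MADD with RA == XZR) would not be flagged.  */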
3141
3142 /* Used to order a list of mapping symbols by address. */
3143
3144 static int
3145 elf_aarch64_compare_mapping (const void *a, const void *b)
3146 {
3147 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3148 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3149
3150 if (amap->vma > bmap->vma)
3151 return 1;
3152 else if (amap->vma < bmap->vma)
3153 return -1;
3154 else if (amap->type > bmap->type)
3155 /* Ensure results do not depend on the host qsort for objects with
3156 multiple mapping symbols at the same address by sorting on type
3157 after vma. */
3158 return 1;
3159 else if (amap->type < bmap->type)
3160 return -1;
3161 else
3162 return 0;
3163 }
3164
3165
3166 static char *
3167 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3168 {
3169 char *stub_name = (char *) bfd_malloc
3170 (strlen ("__erratum_835769_veneer_") + 16);
3171 if (stub_name != NULL)
       sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3172 return stub_name;
3173 }
3174
3175 /* Scan for Cortex-A53 erratum 835769 sequence.
3176
3177 Return TRUE on success, FALSE on abnormal termination. */
3178
3179 static bfd_boolean
3180 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3181 struct bfd_link_info *info,
3182 unsigned int *num_fixes_p)
3183 {
3184 asection *section;
3185 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3186 unsigned int num_fixes = *num_fixes_p;
3187
3188 if (htab == NULL)
3189 return TRUE;
3190
3191 for (section = input_bfd->sections;
3192 section != NULL;
3193 section = section->next)
3194 {
3195 bfd_byte *contents = NULL;
3196 struct _aarch64_elf_section_data *sec_data;
3197 unsigned int span;
3198
3199 if (elf_section_type (section) != SHT_PROGBITS
3200 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3201 || (section->flags & SEC_EXCLUDE) != 0
3202 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3203 || (section->output_section == bfd_abs_section_ptr))
3204 continue;
3205
3206 if (elf_section_data (section)->this_hdr.contents != NULL)
3207 contents = elf_section_data (section)->this_hdr.contents;
3208 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3209 return FALSE;
3210
3211 sec_data = elf_aarch64_section_data (section);
3212
3213 qsort (sec_data->map, sec_data->mapcount,
3214 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3215
3216 for (span = 0; span < sec_data->mapcount; span++)
3217 {
3218 unsigned int span_start = sec_data->map[span].vma;
3219 unsigned int span_end = ((span == sec_data->mapcount - 1)
3220 ? sec_data->map[0].vma + section->size
3221 : sec_data->map[span + 1].vma);
3222 unsigned int i;
3223 char span_type = sec_data->map[span].type;
3224
3225 if (span_type == 'd')
3226 continue;
3227
3228 for (i = span_start; i + 4 < span_end; i += 4)
3229 {
3230 uint32_t insn_1 = bfd_getl32 (contents + i);
3231 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3232
3233 if (aarch64_erratum_sequence (insn_1, insn_2))
3234 {
3235 struct elf_aarch64_stub_hash_entry *stub_entry;
3236 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3237 if (! stub_name)
3238 return FALSE;
3239
3240 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3241 section,
3242 htab);
3243 if (! stub_entry)
3244 return FALSE;
3245
3246 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3247 stub_entry->target_section = section;
3248 stub_entry->target_value = i + 4;
3249 stub_entry->veneered_insn = insn_2;
3250 stub_entry->output_name = stub_name;
3251 num_fixes++;
3252 }
3253 }
3254 }
3255 if (elf_section_data (section)->this_hdr.contents == NULL)
3256 free (contents);
3257 }
3258
3259 *num_fixes_p = num_fixes;
3260
3261 return TRUE;
3262 }
3263
3264
3265 /* Test if instruction INSN is ADRP. */
3266
3267 static bfd_boolean
3268 _bfd_aarch64_adrp_p (uint32_t insn)
3269 {
3270 return ((insn & 0x9f000000) == 0x90000000);
3271 }
3272
3273
3274 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3275
3276 static bfd_boolean
3277 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3278 uint32_t insn_3)
3279 {
3280 uint32_t rt;
3281 uint32_t rt2;
3282 bfd_boolean pair;
3283 bfd_boolean load;
3284
3285 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3286 && (!pair
3287 || (pair && !load))
3288 && AARCH64_LDST_UIMM (insn_3)
3289 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3290 }
3291
3292
3293 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3294
3295 Return TRUE if section CONTENTS at offset I contains one of the
3296 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3297 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3298 instruction in the sequence. */
3300
3301 static bfd_boolean
3302 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3303 bfd_vma i, bfd_vma span_end,
3304 bfd_vma *p_veneer_i)
3305 {
3306 uint32_t insn_1 = bfd_getl32 (contents + i);
3307
3308 if (!_bfd_aarch64_adrp_p (insn_1))
3309 return FALSE;
3310
3311 if (span_end < i + 12)
3312 return FALSE;
3313
3314 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3315 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3316
3317 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3318 return FALSE;
3319
3320 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3321 {
3322 *p_veneer_i = i + 8;
3323 return TRUE;
3324 }
3325
3326 if (span_end < i + 16)
3327 return FALSE;
3328
3329 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3330
3331 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3332 {
3333 *p_veneer_i = i + 12;
3334 return TRUE;
3335 }
3336
3337 return FALSE;
3338 }
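/* Illustrative example, not part of the upstream source: a sequence
   matching the predicate above is

     0x...ffc:  adrp x0, sym               (insn_1, in the last 8 bytes
                                            of a 4KiB region)
                ldr  x1, [x3, #8]          (insn_2, not a load pair)
                ldr  x4, [x0, #:lo12:sym]  (insn_3, unsigned-immediate
                                            load/store whose base is the
                                            ADRP destination register)

   The four-instruction form allows one unrelated instruction between
   insn_2 and the final load/store.  When a match is found, the final
   load/store is redirected through an erratum 843419 veneer (or the
   ADRP may be rewritten to ADR when that workaround variant is
   enabled).  */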
3339
3340
3341 /* Resize all stub sections. */
3342
3343 static void
3344 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3345 {
3346 asection *section;
3347
3348 /* OK, we've added some stubs. Find out the new size of the
3349 stub sections. */
3350 for (section = htab->stub_bfd->sections;
3351 section != NULL; section = section->next)
3352 {
3353 /* Ignore non-stub sections. */
3354 if (!strstr (section->name, STUB_SUFFIX))
3355 continue;
3356 section->size = 0;
3357 }
3358
3359 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3360
3361 for (section = htab->stub_bfd->sections;
3362 section != NULL; section = section->next)
3363 {
3364 if (!strstr (section->name, STUB_SUFFIX))
3365 continue;
3366
3367 if (section->size)
3368 section->size += 4;
3369
3370 /* Ensure all stub sections have a size which is a multiple of
3371 4096. This is important in order to ensure that the insertion
3372 of stub sections does not in itself move existing code around
3373 in such a way that new errata sequences are created. */
3374 if (htab->fix_erratum_843419)
3375 if (section->size)
3376 section->size = BFD_ALIGN (section->size, 0x1000);
3377 }
3378 }
3379
3380
3381 /* Construct an erratum 843419 workaround stub name. */
3383
3384 static char *
3385 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3386 bfd_vma offset)
3387 {
3388 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3389 char *stub_name = bfd_malloc (len);
3390
3391 if (stub_name != NULL)
3392 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3393 input_section->owner->id,
3394 input_section->id,
3395 offset);
3396 return stub_name;
3397 }
3398
3399 /* Build a stub_entry structure describing an 843419 fixup.
3400
3401 The stub_entry constructed is populated with the bit pattern INSN
3402 of the instruction located at OFFSET within input SECTION.
3403
3404 Returns TRUE on success. */
3405
3406 static bfd_boolean
3407 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3408 bfd_vma adrp_offset,
3409 bfd_vma ldst_offset,
3410 asection *section,
3411 struct bfd_link_info *info)
3412 {
3413 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3414 char *stub_name;
3415 struct elf_aarch64_stub_hash_entry *stub_entry;
3416
3417 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3418 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3419 FALSE, FALSE);
3420 if (stub_entry)
3421 {
3422 free (stub_name);
3423 return TRUE;
3424 }
3425
3426 /* We always place an 843419 workaround veneer in the stub section
3427 attached to the input section in which an erratum sequence has
3428 been found. This ensures that later in the link process (in
3429 elfNN_aarch64_write_section) when we copy the veneered
3430 instruction from the input section into the stub section the
3431 copied instruction will have had any relocations applied to it.
3432 If we placed workaround veneers in any other stub section then we
3433 could not assume that all relocations have been processed on the
3434 corresponding input section at the point we output the stub
3435 section. */
3437
3438 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3439 if (stub_entry == NULL)
3440 {
3441 free (stub_name);
3442 return FALSE;
3443 }
3444
3445 stub_entry->adrp_offset = adrp_offset;
3446 stub_entry->target_value = ldst_offset;
3447 stub_entry->target_section = section;
3448 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3449 stub_entry->veneered_insn = insn;
3450 stub_entry->output_name = stub_name;
3451
3452 return TRUE;
3453 }
3454
3455
3456 /* Scan an input section looking for the signature of erratum 843419.
3457
3458 Scans input SECTION in INPUT_BFD looking for erratum 843419
3459 signatures; for each signature found a stub_entry is created
3460 describing the location of the erratum for subsequent fixup.
3461
3462 Return TRUE on successful scan, FALSE on failure to scan.
3463 */
3464
3465 static bfd_boolean
3466 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3467 struct bfd_link_info *info)
3468 {
3469 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3470
3471 if (htab == NULL)
3472 return TRUE;
3473
3474 if (elf_section_type (section) != SHT_PROGBITS
3475 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3476 || (section->flags & SEC_EXCLUDE) != 0
3477 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3478 || (section->output_section == bfd_abs_section_ptr))
3479 return TRUE;
3480
3481 do
3482 {
3483 bfd_byte *contents = NULL;
3484 struct _aarch64_elf_section_data *sec_data;
3485 unsigned int span;
3486
3487 if (elf_section_data (section)->this_hdr.contents != NULL)
3488 contents = elf_section_data (section)->this_hdr.contents;
3489 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3490 return FALSE;
3491
3492 sec_data = elf_aarch64_section_data (section);
3493
3494 qsort (sec_data->map, sec_data->mapcount,
3495 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3496
3497 for (span = 0; span < sec_data->mapcount; span++)
3498 {
3499 unsigned int span_start = sec_data->map[span].vma;
3500 unsigned int span_end = ((span == sec_data->mapcount - 1)
3501 ? sec_data->map[0].vma + section->size
3502 : sec_data->map[span + 1].vma);
3503 unsigned int i;
3504 char span_type = sec_data->map[span].type;
3505
3506 if (span_type == 'd')
3507 continue;
3508
3509 for (i = span_start; i + 8 < span_end; i += 4)
3510 {
3511 bfd_vma vma = (section->output_section->vma
3512 + section->output_offset
3513 + i);
3514 bfd_vma veneer_i;
3515
3516 if (_bfd_aarch64_erratum_843419_p
3517 (contents, vma, i, span_end, &veneer_i))
3518 {
3519 uint32_t insn = bfd_getl32 (contents + veneer_i);
3520
3521 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3522 section, info))
3523 return FALSE;
3524 }
3525 }
3526 }
3527
3528 if (elf_section_data (section)->this_hdr.contents == NULL)
3529 free (contents);
3530 }
3531 while (0);
3532
3533 return TRUE;
3534 }
3535
3536
3537 /* Determine and set the size of the stub section for a final link.
3538
3539 The basic idea here is to examine all the relocations looking for
3540 PC-relative calls to a target that is unreachable with a "bl"
3541 instruction. */
3542
3543 bfd_boolean
3544 elfNN_aarch64_size_stubs (bfd *output_bfd,
3545 bfd *stub_bfd,
3546 struct bfd_link_info *info,
3547 bfd_signed_vma group_size,
3548 asection * (*add_stub_section) (const char *,
3549 asection *),
3550 void (*layout_sections_again) (void))
3551 {
3552 bfd_size_type stub_group_size;
3553 bfd_boolean stubs_always_before_branch;
3554 bfd_boolean stub_changed = FALSE;
3555 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3556 unsigned int num_erratum_835769_fixes = 0;
3557
3558 /* Propagate mach to stub bfd, because it may not have been
3559 finalized when we created stub_bfd. */
3560 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3561 bfd_get_mach (output_bfd));
3562
3563 /* Stash our params away. */
3564 htab->stub_bfd = stub_bfd;
3565 htab->add_stub_section = add_stub_section;
3566 htab->layout_sections_again = layout_sections_again;
3567 stubs_always_before_branch = group_size < 0;
3568 if (group_size < 0)
3569 stub_group_size = -group_size;
3570 else
3571 stub_group_size = group_size;
3572
3573 if (stub_group_size == 1)
3574 {
3575 /* Default values. */
3576 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3577 stub_group_size = 127 * 1024 * 1024;
3578 }
3579
3580 group_sections (htab, stub_group_size, stubs_always_before_branch);
3581
3582 (*htab->layout_sections_again) ();
3583
3584 if (htab->fix_erratum_835769)
3585 {
3586 bfd *input_bfd;
3587
3588 for (input_bfd = info->input_bfds;
3589 input_bfd != NULL; input_bfd = input_bfd->link.next)
3590 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3591 &num_erratum_835769_fixes))
3592 return FALSE;
3593
3594 _bfd_aarch64_resize_stubs (htab);
3595 (*htab->layout_sections_again) ();
3596 }
3597
3598 if (htab->fix_erratum_843419)
3599 {
3600 bfd *input_bfd;
3601
3602 for (input_bfd = info->input_bfds;
3603 input_bfd != NULL;
3604 input_bfd = input_bfd->link.next)
3605 {
3606 asection *section;
3607
3608 for (section = input_bfd->sections;
3609 section != NULL;
3610 section = section->next)
3611 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3612 return FALSE;
3613 }
3614
3615 _bfd_aarch64_resize_stubs (htab);
3616 (*htab->layout_sections_again) ();
3617 }
3618
3619 while (1)
3620 {
3621 bfd *input_bfd;
3622
3623 for (input_bfd = info->input_bfds;
3624 input_bfd != NULL; input_bfd = input_bfd->link.next)
3625 {
3626 Elf_Internal_Shdr *symtab_hdr;
3627 asection *section;
3628 Elf_Internal_Sym *local_syms = NULL;
3629
3630 /* We'll need the symbol table in a second. */
3631 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3632 if (symtab_hdr->sh_info == 0)
3633 continue;
3634
3635 /* Walk over each section attached to the input bfd. */
3636 for (section = input_bfd->sections;
3637 section != NULL; section = section->next)
3638 {
3639 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3640
3641 /* If there aren't any relocs, then there's nothing more
3642 to do. */
3643 if ((section->flags & SEC_RELOC) == 0
3644 || section->reloc_count == 0
3645 || (section->flags & SEC_CODE) == 0)
3646 continue;
3647
3648 /* If this section is a link-once section that will be
3649 discarded, then don't create any stubs. */
3650 if (section->output_section == NULL
3651 || section->output_section->owner != output_bfd)
3652 continue;
3653
3654 /* Get the relocs. */
3655 internal_relocs
3656 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3657 NULL, info->keep_memory);
3658 if (internal_relocs == NULL)
3659 goto error_ret_free_local;
3660
3661 /* Now examine each relocation. */
3662 irela = internal_relocs;
3663 irelaend = irela + section->reloc_count;
3664 for (; irela < irelaend; irela++)
3665 {
3666 unsigned int r_type, r_indx;
3667 enum elf_aarch64_stub_type stub_type;
3668 struct elf_aarch64_stub_hash_entry *stub_entry;
3669 asection *sym_sec;
3670 bfd_vma sym_value;
3671 bfd_vma destination;
3672 struct elf_aarch64_link_hash_entry *hash;
3673 const char *sym_name;
3674 char *stub_name;
3675 const asection *id_sec;
3676 unsigned char st_type;
3677 bfd_size_type len;
3678
3679 r_type = ELFNN_R_TYPE (irela->r_info);
3680 r_indx = ELFNN_R_SYM (irela->r_info);
3681
3682 if (r_type >= (unsigned int) R_AARCH64_end)
3683 {
3684 bfd_set_error (bfd_error_bad_value);
3685 error_ret_free_internal:
3686 if (elf_section_data (section)->relocs == NULL)
3687 free (internal_relocs);
3688 goto error_ret_free_local;
3689 }
3690
3691 /* Only look for stubs on unconditional branch and
3692 branch and link instructions. */
3693 if (r_type != (unsigned int) AARCH64_R (CALL26)
3694 && r_type != (unsigned int) AARCH64_R (JUMP26))
3695 continue;
3696
3697 /* Now determine the call target, its name, value,
3698 section. */
3699 sym_sec = NULL;
3700 sym_value = 0;
3701 destination = 0;
3702 hash = NULL;
3703 sym_name = NULL;
3704 if (r_indx < symtab_hdr->sh_info)
3705 {
3706 /* It's a local symbol. */
3707 Elf_Internal_Sym *sym;
3708 Elf_Internal_Shdr *hdr;
3709
3710 if (local_syms == NULL)
3711 {
3712 local_syms
3713 = (Elf_Internal_Sym *) symtab_hdr->contents;
3714 if (local_syms == NULL)
3715 local_syms
3716 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3717 symtab_hdr->sh_info, 0,
3718 NULL, NULL, NULL);
3719 if (local_syms == NULL)
3720 goto error_ret_free_internal;
3721 }
3722
3723 sym = local_syms + r_indx;
3724 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3725 sym_sec = hdr->bfd_section;
3726 if (!sym_sec)
3727 /* This is an undefined symbol. It can never
3728 be resolved. */
3729 continue;
3730
3731 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3732 sym_value = sym->st_value;
3733 destination = (sym_value + irela->r_addend
3734 + sym_sec->output_offset
3735 + sym_sec->output_section->vma);
3736 st_type = ELF_ST_TYPE (sym->st_info);
3737 sym_name
3738 = bfd_elf_string_from_elf_section (input_bfd,
3739 symtab_hdr->sh_link,
3740 sym->st_name);
3741 }
3742 else
3743 {
3744 int e_indx;
3745
3746 e_indx = r_indx - symtab_hdr->sh_info;
3747 hash = ((struct elf_aarch64_link_hash_entry *)
3748 elf_sym_hashes (input_bfd)[e_indx]);
3749
3750 while (hash->root.root.type == bfd_link_hash_indirect
3751 || hash->root.root.type == bfd_link_hash_warning)
3752 hash = ((struct elf_aarch64_link_hash_entry *)
3753 hash->root.root.u.i.link);
3754
3755 if (hash->root.root.type == bfd_link_hash_defined
3756 || hash->root.root.type == bfd_link_hash_defweak)
3757 {
3758 struct elf_aarch64_link_hash_table *globals =
3759 elf_aarch64_hash_table (info);
3760 sym_sec = hash->root.root.u.def.section;
3761 sym_value = hash->root.root.u.def.value;
3762 /* For a destination in a shared library,
3763 use the PLT stub as target address to
3764 decide whether a branch stub is
3765 needed. */
3766 if (globals->root.splt != NULL && hash != NULL
3767 && hash->root.plt.offset != (bfd_vma) - 1)
3768 {
3769 sym_sec = globals->root.splt;
3770 sym_value = hash->root.plt.offset;
3771 if (sym_sec->output_section != NULL)
3772 destination = (sym_value
3773 + sym_sec->output_offset
3774 +
3775 sym_sec->output_section->vma);
3776 }
3777 else if (sym_sec->output_section != NULL)
3778 destination = (sym_value + irela->r_addend
3779 + sym_sec->output_offset
3780 + sym_sec->output_section->vma);
3781 }
3782 else if (hash->root.root.type == bfd_link_hash_undefined
3783 || (hash->root.root.type
3784 == bfd_link_hash_undefweak))
3785 {
3786 /* For a shared library, use the PLT stub as
3787 target address to decide whether a long
3788 branch stub is needed.
3789 For absolute code, undefined symbols cannot be handled. */
3790 struct elf_aarch64_link_hash_table *globals =
3791 elf_aarch64_hash_table (info);
3792
3793 if (globals->root.splt != NULL && hash != NULL
3794 && hash->root.plt.offset != (bfd_vma) - 1)
3795 {
3796 sym_sec = globals->root.splt;
3797 sym_value = hash->root.plt.offset;
3798 if (sym_sec->output_section != NULL)
3799 destination = (sym_value
3800 + sym_sec->output_offset
3801 +
3802 sym_sec->output_section->vma);
3803 }
3804 else
3805 continue;
3806 }
3807 else
3808 {
3809 bfd_set_error (bfd_error_bad_value);
3810 goto error_ret_free_internal;
3811 }
3812 st_type = ELF_ST_TYPE (hash->root.type);
3813 sym_name = hash->root.root.root.string;
3814 }
3815
3816 /* Determine what (if any) linker stub is needed. */
3817 stub_type = aarch64_type_of_stub
3818 (info, section, irela, st_type, hash, destination);
3819 if (stub_type == aarch64_stub_none)
3820 continue;
3821
3822 /* Support for grouping stub sections. */
3823 id_sec = htab->stub_group[section->id].link_sec;
3824
3825 /* Get the name of this stub. */
3826 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3827 irela);
3828 if (!stub_name)
3829 goto error_ret_free_internal;
3830
3831 stub_entry =
3832 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3833 stub_name, FALSE, FALSE);
3834 if (stub_entry != NULL)
3835 {
3836 /* The proper stub has already been created. */
3837 free (stub_name);
3838 continue;
3839 }
3840
3841 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3842 (stub_name, section, htab);
3843 if (stub_entry == NULL)
3844 {
3845 free (stub_name);
3846 goto error_ret_free_internal;
3847 }
3848
3849 stub_entry->target_value = sym_value;
3850 stub_entry->target_section = sym_sec;
3851 stub_entry->stub_type = stub_type;
3852 stub_entry->h = hash;
3853 stub_entry->st_type = st_type;
3854
3855 if (sym_name == NULL)
3856 sym_name = "unnamed";
3857 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3858 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3859 if (stub_entry->output_name == NULL)
3860 {
3861 free (stub_name);
3862 goto error_ret_free_internal;
3863 }
3864
3865 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3866 sym_name);
3867
3868 stub_changed = TRUE;
3869 }
3870
3871 /* We're done with the internal relocs, free them. */
3872 if (elf_section_data (section)->relocs == NULL)
3873 free (internal_relocs);
3874 }
3875 }
3876
3877 if (!stub_changed)
3878 break;
3879
3880 _bfd_aarch64_resize_stubs (htab);
3881
3882 /* Ask the linker to do its stuff. */
3883 (*htab->layout_sections_again) ();
3884 stub_changed = FALSE;
3885 }
3886
3887 return TRUE;
3888
3889 error_ret_free_local:
3890 return FALSE;
3891 }
3892
3893 /* Build all the stubs associated with the current output file. The
3894 stubs are kept in a hash table attached to the main linker hash
3895 table. We also set up the .plt entries for statically linked PIC
3896 functions here. This function is called via aarch64_elf_finish in the
3897 linker. */
3898
3899 bfd_boolean
3900 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3901 {
3902 asection *stub_sec;
3903 struct bfd_hash_table *table;
3904 struct elf_aarch64_link_hash_table *htab;
3905
3906 htab = elf_aarch64_hash_table (info);
3907
3908 for (stub_sec = htab->stub_bfd->sections;
3909 stub_sec != NULL; stub_sec = stub_sec->next)
3910 {
3911 bfd_size_type size;
3912
3913 /* Ignore non-stub sections. */
3914 if (!strstr (stub_sec->name, STUB_SUFFIX))
3915 continue;
3916
3917 /* Allocate memory to hold the linker stubs. */
3918 size = stub_sec->size;
3919 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3920 if (stub_sec->contents == NULL && size != 0)
3921 return FALSE;
3922 stub_sec->size = 0;
3923
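/* Emit an unconditional branch around the stubs in this section:
0x14000000 is the A64 "B" opcode and SIZE >> 2 is its imm26 word offset,
so execution reaching the start of the stub section jumps to just past
its end instead of falling into stub code. The 4 bytes for this branch
were reserved in _bfd_aarch64_resize_stubs. */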
3924 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3925 stub_sec->size += 4;
3926 }
3927
3928 /* Build the stubs as directed by the stub hash table. */
3929 table = &htab->stub_hash_table;
3930 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3931
3932 return TRUE;
3933 }
3934
3935
3936 /* Add an entry to the code/data map for section SEC. */
3937
3938 static void
3939 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3940 {
3941 struct _aarch64_elf_section_data *sec_data =
3942 elf_aarch64_section_data (sec);
3943 unsigned int newidx;
3944
3945 if (sec_data->map == NULL)
3946 {
3947 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3948 sec_data->mapcount = 0;
3949 sec_data->mapsize = 1;
3950 }
3951
3952 newidx = sec_data->mapcount++;
3953
3954 if (sec_data->mapcount > sec_data->mapsize)
3955 {
3956 sec_data->mapsize *= 2;
3957 sec_data->map = bfd_realloc_or_free
3958 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3959 }
3960
3961 if (sec_data->map)
3962 {
3963 sec_data->map[newidx].vma = vma;
3964 sec_data->map[newidx].type = type;
3965 }
3966 }
3967
3968
3969 /* Initialise maps of insn/data for input BFDs. */
3970 void
3971 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3972 {
3973 Elf_Internal_Sym *isymbuf;
3974 Elf_Internal_Shdr *hdr;
3975 unsigned int i, localsyms;
3976
3977 /* Make sure that we are dealing with an AArch64 elf binary. */
3978 if (!is_aarch64_elf (abfd))
3979 return;
3980
3981 if ((abfd->flags & DYNAMIC) != 0)
3982 return;
3983
3984 hdr = &elf_symtab_hdr (abfd);
3985 localsyms = hdr->sh_info;
3986
3987 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3988 should contain the number of local symbols, which should come before any
3989 global symbols. Mapping symbols are always local. */
3990 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3991
3992 /* No internal symbols read? Skip this BFD. */
3993 if (isymbuf == NULL)
3994 return;
3995
3996 for (i = 0; i < localsyms; i++)
3997 {
3998 Elf_Internal_Sym *isym = &isymbuf[i];
3999 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4000 const char *name;
4001
4002 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4003 {
4004 name = bfd_elf_string_from_elf_section (abfd,
4005 hdr->sh_link,
4006 isym->st_name);
4007
4008 if (bfd_is_aarch64_special_symbol_name
4009 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4010 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4011 }
4012 }
4013 }
4014
4015 /* Set option values needed during linking. */
4016 void
4017 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4018 struct bfd_link_info *link_info,
4019 int no_enum_warn,
4020 int no_wchar_warn, int pic_veneer,
4021 int fix_erratum_835769,
4022 int fix_erratum_843419)
4023 {
4024 struct elf_aarch64_link_hash_table *globals;
4025
4026 globals = elf_aarch64_hash_table (link_info);
4027 globals->pic_veneer = pic_veneer;
4028 globals->fix_erratum_835769 = fix_erratum_835769;
4029 globals->fix_erratum_843419 = fix_erratum_843419;
4030 globals->fix_erratum_843419_adr = TRUE;
4031
4032 BFD_ASSERT (is_aarch64_elf (output_bfd));
4033 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4034 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4035 }
4036
4037 static bfd_vma
4038 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4039 struct elf_aarch64_link_hash_table
4040 *globals, struct bfd_link_info *info,
4041 bfd_vma value, bfd *output_bfd,
4042 bfd_boolean *unresolved_reloc_p)
4043 {
4044 bfd_vma off = (bfd_vma) - 1;
4045 asection *basegot = globals->root.sgot;
4046 bfd_boolean dyn = globals->root.dynamic_sections_created;
4047
4048 if (h != NULL)
4049 {
4050 BFD_ASSERT (basegot != NULL);
4051 off = h->got.offset;
4052 BFD_ASSERT (off != (bfd_vma) - 1);
4053 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4054 || (info->shared
4055 && SYMBOL_REFERENCES_LOCAL (info, h))
4056 || (ELF_ST_VISIBILITY (h->other)
4057 && h->root.type == bfd_link_hash_undefweak))
4058 {
4059 /* This is actually a static link, or it is a -Bsymbolic link
4060 and the symbol is defined locally. We must initialize this
4061 entry in the global offset table. Since the offset must
4062 always be a multiple of 8 (4 in the case of ILP32), we use
4063 the least significant bit to record whether we have
4064 initialized it already.
4065 When doing a dynamic link, we create a .rel(a).got relocation
4066 entry to initialize the value. This is done in the
4067 finish_dynamic_symbol routine. */
4068 if ((off & 1) != 0)
4069 off &= ~1;
4070 else
4071 {
4072 bfd_put_NN (output_bfd, value, basegot->contents + off);
4073 h->got.offset |= 1;
4074 }
4075 }
4076 else
4077 *unresolved_reloc_p = FALSE;
4078
4079 off = off + basegot->output_section->vma + basegot->output_offset;
4080 }
4081
4082 return off;
4083 }
4084
4085 /* Change R_TYPE to a more efficient access model where possible,
4086 and return the new reloc type. */
4087
4088 static bfd_reloc_code_real_type
4089 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4090 struct elf_link_hash_entry *h)
4091 {
4092 bfd_boolean is_local = h == NULL;
4093
4094 switch (r_type)
4095 {
4096 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4097 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4098 return (is_local
4099 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4100 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4101
4102 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4103 return (is_local
4104 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4105 : r_type);
4106
4107 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4108 return (is_local
4109 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4110 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4111
4112 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4113 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4114 return (is_local
4115 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4116 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4117
4118 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4119 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4120
4121 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4122 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4123
4124 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4125 return r_type;
4126
4127 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4128 return (is_local
4129 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4130 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4131
4132 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4133 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4134 /* Instructions with these relocations will become NOPs. */
4135 return BFD_RELOC_AARCH64_NONE;
4136
4137 default:
4138 break;
4139 }
4140
4141 return r_type;
4142 }
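
/* For example, with a locally resolved symbol the general dynamic
sequence adrp/add/bl carrying TLSGD_ADR_PAGE21, TLSGD_ADD_LO12_NC and
CALL26 is retyped above to TLSLE_MOVW_TPREL_G1 and
TLSLE_MOVW_TPREL_G0_NC, while elfNN_aarch64_tls_relax rewrites the
instructions themselves (movz/movk of the TP-relative offset, plus
mrs tpidr_el0/add in place of the __tls_get_addr call). */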
4143
4144 static unsigned int
4145 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4146 {
4147 switch (r_type)
4148 {
4149 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4150 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4151 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4152 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4153 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4154 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4155 return GOT_NORMAL;
4156
4157 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4158 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4159 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4160 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4161 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4162 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4163 return GOT_TLS_GD;
4164
4165 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4166 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4167 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4168 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4169 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4170 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4171 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4172 return GOT_TLSDESC_GD;
4173
4174 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4175 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4176 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4177 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4178 return GOT_TLS_IE;
4179
4180 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
4181 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4182 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4183 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4184 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4185 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4186 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4187 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4188 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4189 return GOT_UNKNOWN;
4190
4191 default:
4192 break;
4193 }
4194 return GOT_UNKNOWN;
4195 }
4196
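/* Return TRUE if the TLS access described by relocation R_TYPE against the
symbol given by H (or the local symbol R_SYMNDX in INPUT_BFD) may be
relaxed: GD/TLSDESC accesses may be relaxed to IE whenever the symbol
already has an IE-style GOT entry, while further relaxation towards LE
is only performed for a non-shared link and never for undefined weak
symbols. */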
4197 static bfd_boolean
4198 aarch64_can_relax_tls (bfd *input_bfd,
4199 struct bfd_link_info *info,
4200 bfd_reloc_code_real_type r_type,
4201 struct elf_link_hash_entry *h,
4202 unsigned long r_symndx)
4203 {
4204 unsigned int symbol_got_type;
4205 unsigned int reloc_got_type;
4206
4207 if (! IS_AARCH64_TLS_RELOC (r_type))
4208 return FALSE;
4209
4210 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4211 reloc_got_type = aarch64_reloc_got_type (r_type);
4212
4213 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4214 return TRUE;
4215
4216 if (info->shared)
4217 return FALSE;
4218
4219 if (h && h->root.type == bfd_link_hash_undefweak)
4220 return FALSE;
4221
4222 return TRUE;
4223 }
4224
4225 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4226 enumerator. */
4227
4228 static bfd_reloc_code_real_type
4229 aarch64_tls_transition (bfd *input_bfd,
4230 struct bfd_link_info *info,
4231 unsigned int r_type,
4232 struct elf_link_hash_entry *h,
4233 unsigned long r_symndx)
4234 {
4235 bfd_reloc_code_real_type bfd_r_type
4236 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4237
4238 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4239 return bfd_r_type;
4240
4241 return aarch64_tls_transition_without_check (bfd_r_type, h);
4242 }
4243
4244 /* Return the base VMA address which should be subtracted from real addresses
4245 when resolving R_AARCH64_TLS_DTPREL relocation. */
4246
4247 static bfd_vma
4248 dtpoff_base (struct bfd_link_info *info)
4249 {
4250 /* If tls_sec is NULL, we should have signalled an error already. */
4251 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4252 return elf_hash_table (info)->tls_sec->vma;
4253 }
4254
4255 /* Return the base VMA address which should be subtracted from real addresses
4256 when resolving TPREL (local-exec) TLS relocations. */
4257
4258 static bfd_vma
4259 tpoff_base (struct bfd_link_info *info)
4260 {
4261 struct elf_link_hash_table *htab = elf_hash_table (info);
4262
4263 /* If tls_sec is NULL, we should have signalled an error already. */
4264 BFD_ASSERT (htab->tls_sec != NULL);
4265
4266 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4267 htab->tls_sec->alignment_power);
4268 return htab->tls_sec->vma - base;
4269 }
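
/* AArch64 uses TLS variant I: the thread pointer addresses a TCB_SIZE
(16 byte) thread control block, and the first TLS block follows it at
the next multiple of the TLS segment's alignment. VALUE minus the
result of tpoff_base above therefore yields
align (TCB_SIZE, align) + (VALUE - tls_sec->vma); with illustrative
numbers, a variable 0x10 bytes into an 8-byte-aligned TLS segment gets
TP-relative offset 16 + 0x10 = 0x20, while with 64-byte alignment it
would be 64 + 0x10 = 0x50. DTPREL offsets (dtpoff_base above) are
simply relative to the start of the module's TLS segment. */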
4270
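/* The GOT offsets recorded in h->got.offset and in the local symbol
arrays use their least significant bit as an "entry already initialised
(and any dynamic reloc emitted)" flag; real offsets are always a
multiple of the GOT entry size, so the bit is otherwise unused. The
*_mark functions set the flag, *_mark_p tests it, and symbol_got_offset
and symbol_tlsdesc_got_offset return the offset with the flag stripped. */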
4271 static bfd_vma *
4272 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4273 unsigned long r_symndx)
4274 {
4275 /* Return a pointer to the GOT offset recorded for the symbol referred
4276 to by H, or for the local symbol R_SYMNDX if H is NULL. */
4277 if (h != NULL)
4278 return &h->got.offset;
4279 else
4280 {
4281 /* local symbol */
4282 struct elf_aarch64_local_symbol *l;
4283
4284 l = elf_aarch64_locals (input_bfd);
4285 return &l[r_symndx].got_offset;
4286 }
4287 }
4288
4289 static void
4290 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4291 unsigned long r_symndx)
4292 {
4293 bfd_vma *p;
4294 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4295 *p |= 1;
4296 }
4297
4298 static int
4299 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4300 unsigned long r_symndx)
4301 {
4302 bfd_vma value;
4303 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4304 return value & 1;
4305 }
4306
4307 static bfd_vma
4308 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4309 unsigned long r_symndx)
4310 {
4311 bfd_vma value;
4312 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4313 value &= ~1;
4314 return value;
4315 }
4316
4317 static bfd_vma *
4318 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4319 unsigned long r_symndx)
4320 {
4321 /* Return a pointer to the TLSDESC GOT jump table offset recorded for the
4322 symbol referred to by H, or for the local symbol R_SYMNDX if H is NULL. */
4323 if (h != NULL)
4324 {
4325 struct elf_aarch64_link_hash_entry *eh;
4326 eh = (struct elf_aarch64_link_hash_entry *) h;
4327 return &eh->tlsdesc_got_jump_table_offset;
4328 }
4329 else
4330 {
4331 /* local symbol */
4332 struct elf_aarch64_local_symbol *l;
4333
4334 l = elf_aarch64_locals (input_bfd);
4335 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4336 }
4337 }
4338
4339 static void
4340 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4341 unsigned long r_symndx)
4342 {
4343 bfd_vma *p;
4344 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4345 *p |= 1;
4346 }
4347
4348 static int
4349 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4350 struct elf_link_hash_entry *h,
4351 unsigned long r_symndx)
4352 {
4353 bfd_vma value;
4354 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4355 return value & 1;
4356 }
4357
4358 static bfd_vma
4359 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4360 unsigned long r_symndx)
4361 {
4362 bfd_vma value;
4363 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4364 value &= ~1;
4365 return value;
4366 }
4367
4368 /* Data for make_branch_to_erratum_835769_stub(). */
4369
4370 struct erratum_835769_branch_to_stub_data
4371 {
4372 struct bfd_link_info *info;
4373 asection *output_section;
4374 bfd_byte *contents;
4375 };
4376
4377 /* Helper to insert branches to erratum 835769 stubs in the right
4378 places for a particular section. */
4379
4380 static bfd_boolean
4381 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4382 void *in_arg)
4383 {
4384 struct elf_aarch64_stub_hash_entry *stub_entry;
4385 struct erratum_835769_branch_to_stub_data *data;
4386 bfd_byte *contents;
4387 unsigned long branch_insn = 0;
4388 bfd_vma veneered_insn_loc, veneer_entry_loc;
4389 bfd_signed_vma branch_offset;
4390 unsigned int target;
4391 bfd *abfd;
4392
4393 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4394 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4395
4396 if (stub_entry->target_section != data->output_section
4397 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4398 return TRUE;
4399
4400 contents = data->contents;
4401 veneered_insn_loc = stub_entry->target_section->output_section->vma
4402 + stub_entry->target_section->output_offset
4403 + stub_entry->target_value;
4404 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4405 + stub_entry->stub_sec->output_offset
4406 + stub_entry->stub_offset;
4407 branch_offset = veneer_entry_loc - veneered_insn_loc;
4408
4409 abfd = stub_entry->target_section->owner;
4410 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4411 (*_bfd_error_handler)
4412 (_("%B: error: Erratum 835769 stub out "
4413 "of range (input file too large)"), abfd);
4414
4415 target = stub_entry->target_value;
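/* Replace the veneered instruction with an unconditional branch to the
veneer: "B" is opcode 0x14000000 with a signed 26-bit word offset in
bits [25:0], hence the shift by 2 and the 0x3ffffff mask below. */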
4416 branch_insn = 0x14000000;
4417 branch_offset >>= 2;
4418 branch_offset &= 0x3ffffff;
4419 branch_insn |= branch_offset;
4420 bfd_putl32 (branch_insn, &contents[target]);
4421
4422 return TRUE;
4423 }
4424
4425
4426 static bfd_boolean
4427 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4428 void *in_arg)
4429 {
4430 struct elf_aarch64_stub_hash_entry *stub_entry
4431 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4432 struct erratum_835769_branch_to_stub_data *data
4433 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4434 struct bfd_link_info *info;
4435 struct elf_aarch64_link_hash_table *htab;
4436 bfd_byte *contents;
4437 asection *section;
4438 bfd *abfd;
4439 bfd_vma place;
4440 uint32_t insn;
4441
4442 info = data->info;
4443 contents = data->contents;
4444 section = data->output_section;
4445
4446 htab = elf_aarch64_hash_table (info);
4447
4448 if (stub_entry->target_section != section
4449 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4450 return TRUE;
4451
4452 insn = bfd_getl32 (contents + stub_entry->target_value);
4453 bfd_putl32 (insn,
4454 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4455
4456 place = (section->output_section->vma + section->output_offset
4457 + stub_entry->adrp_offset);
4458 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4459
4460 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4461 abort ();
4462
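/* An ADRP at PLACE computes (PLACE & ~0xfff) + (imm << 12), while an ADR
computes PLACE + imm, so the equivalent ADR immediate is the ADRP's
scaled immediate minus the low 12 bits of PLACE. If that fits in ADR's
+/-1MB range, the ADRP can simply be rewritten as an ADR with the same
destination register; otherwise fall back to branching to the
out-of-line veneer below. */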
4463 bfd_signed_vma imm =
4464 (_bfd_aarch64_sign_extend
4465 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4466 - (place & 0xfff));
4467
4468 if (htab->fix_erratum_843419_adr
4469 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4470 {
4471 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4472 | AARCH64_RT (insn));
4473 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4474 }
4475 else
4476 {
4477 bfd_vma veneered_insn_loc;
4478 bfd_vma veneer_entry_loc;
4479 bfd_signed_vma branch_offset;
4480 uint32_t branch_insn;
4481
4482 veneered_insn_loc = stub_entry->target_section->output_section->vma
4483 + stub_entry->target_section->output_offset
4484 + stub_entry->target_value;
4485 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4486 + stub_entry->stub_sec->output_offset
4487 + stub_entry->stub_offset;
4488 branch_offset = veneer_entry_loc - veneered_insn_loc;
4489
4490 abfd = stub_entry->target_section->owner;
4491 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4492 (*_bfd_error_handler)
4493 (_("%B: error: Erratum 843419 stub out "
4494 "of range (input file too large)"), abfd);
4495
4496 branch_insn = 0x14000000;
4497 branch_offset >>= 2;
4498 branch_offset &= 0x3ffffff;
4499 branch_insn |= branch_offset;
4500 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4501 }
4502 return TRUE;
4503 }
4504
4505
4506 static bfd_boolean
4507 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4508 struct bfd_link_info *link_info,
4509 asection *sec,
4510 bfd_byte *contents)
4511
4512 {
4513 struct elf_aarch64_link_hash_table *globals =
4514 elf_aarch64_hash_table (link_info);
4515
4516 if (globals == NULL)
4517 return FALSE;
4518
4519 /* Fix code to point to erratum 835769 stubs. */
4520 if (globals->fix_erratum_835769)
4521 {
4522 struct erratum_835769_branch_to_stub_data data;
4523
4524 data.info = link_info;
4525 data.output_section = sec;
4526 data.contents = contents;
4527 bfd_hash_traverse (&globals->stub_hash_table,
4528 make_branch_to_erratum_835769_stub, &data);
4529 }
4530
4531 if (globals->fix_erratum_843419)
4532 {
4533 struct erratum_835769_branch_to_stub_data data;
4534
4535 data.info = link_info;
4536 data.output_section = sec;
4537 data.contents = contents;
4538 bfd_hash_traverse (&globals->stub_hash_table,
4539 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4540 }
4541
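/* Return FALSE so that the generic ELF code still writes out the
(possibly patched) CONTENTS through its normal path; this hook only
edits the section in place. */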
4542 return FALSE;
4543 }
4544
4545 /* Perform a relocation as part of a final link. */
4546 static bfd_reloc_status_type
4547 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4548 bfd *input_bfd,
4549 bfd *output_bfd,
4550 asection *input_section,
4551 bfd_byte *contents,
4552 Elf_Internal_Rela *rel,
4553 bfd_vma value,
4554 struct bfd_link_info *info,
4555 asection *sym_sec,
4556 struct elf_link_hash_entry *h,
4557 bfd_boolean *unresolved_reloc_p,
4558 bfd_boolean save_addend,
4559 bfd_vma *saved_addend,
4560 Elf_Internal_Sym *sym)
4561 {
4562 Elf_Internal_Shdr *symtab_hdr;
4563 unsigned int r_type = howto->type;
4564 bfd_reloc_code_real_type bfd_r_type
4565 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4566 bfd_reloc_code_real_type new_bfd_r_type;
4567 unsigned long r_symndx;
4568 bfd_byte *hit_data = contents + rel->r_offset;
4569 bfd_vma place, off;
4570 bfd_signed_vma signed_addend;
4571 struct elf_aarch64_link_hash_table *globals;
4572 bfd_boolean weak_undef_p;
4573 asection *base_got;
4574
4575 globals = elf_aarch64_hash_table (info);
4576
4577 symtab_hdr = &elf_symtab_hdr (input_bfd);
4578
4579 BFD_ASSERT (is_aarch64_elf (input_bfd));
4580
4581 r_symndx = ELFNN_R_SYM (rel->r_info);
4582
4583 /* It is possible to have linker relaxations on some TLS access
4584 models. Update our information here. */
4585 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4586 if (new_bfd_r_type != bfd_r_type)
4587 {
4588 bfd_r_type = new_bfd_r_type;
4589 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4590 BFD_ASSERT (howto != NULL);
4591 r_type = howto->type;
4592 }
4593
4594 place = input_section->output_section->vma
4595 + input_section->output_offset + rel->r_offset;
4596
4597 /* Get addend, accumulating the addend for consecutive relocs
4598 which refer to the same offset. */
4599 signed_addend = saved_addend ? *saved_addend : 0;
4600 signed_addend += rel->r_addend;
4601
4602 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4603 : bfd_is_und_section (sym_sec));
4604
4605 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4606 it here if it is defined in a non-shared object. */
4607 if (h != NULL
4608 && h->type == STT_GNU_IFUNC
4609 && h->def_regular)
4610 {
4611 asection *plt;
4612 const char *name;
4613 bfd_vma addend = 0;
4614
4615 if ((input_section->flags & SEC_ALLOC) == 0
4616 || h->plt.offset == (bfd_vma) -1)
4617 abort ();
4618
4619 /* STT_GNU_IFUNC symbol must go through PLT. */
4620 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4621 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4622
4623 switch (bfd_r_type)
4624 {
4625 default:
4626 if (h->root.root.string)
4627 name = h->root.root.string;
4628 else
4629 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4630 NULL);
4631 (*_bfd_error_handler)
4632 (_("%B: relocation %s against STT_GNU_IFUNC "
4633 "symbol `%s' isn't handled by %s"), input_bfd,
4634 howto->name, name, __FUNCTION__);
4635 bfd_set_error (bfd_error_bad_value);
4636 return FALSE;
4637
4638 case BFD_RELOC_AARCH64_NN:
4639 if (rel->r_addend != 0)
4640 {
4641 if (h->root.root.string)
4642 name = h->root.root.string;
4643 else
4644 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4645 sym, NULL);
4646 (*_bfd_error_handler)
4647 (_("%B: relocation %s against STT_GNU_IFUNC "
4648 "symbol `%s' has non-zero addend: %d"),
4649 input_bfd, howto->name, name, rel->r_addend);
4650 bfd_set_error (bfd_error_bad_value);
4651 return FALSE;
4652 }
4653
4654 /* Generate dynamic relocation only when there is a
4655 non-GOT reference in a shared object. */
4656 if (info->shared && h->non_got_ref)
4657 {
4658 Elf_Internal_Rela outrel;
4659 asection *sreloc;
4660
4661 /* Need a dynamic relocation to get the real function
4662 address. */
4663 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4664 info,
4665 input_section,
4666 rel->r_offset);
4667 if (outrel.r_offset == (bfd_vma) -1
4668 || outrel.r_offset == (bfd_vma) -2)
4669 abort ();
4670
4671 outrel.r_offset += (input_section->output_section->vma
4672 + input_section->output_offset);
4673
4674 if (h->dynindx == -1
4675 || h->forced_local
4676 || info->executable)
4677 {
4678 /* This symbol is resolved locally. */
4679 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4680 outrel.r_addend = (h->root.u.def.value
4681 + h->root.u.def.section->output_section->vma
4682 + h->root.u.def.section->output_offset);
4683 }
4684 else
4685 {
4686 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4687 outrel.r_addend = 0;
4688 }
4689
4690 sreloc = globals->root.irelifunc;
4691 elf_append_rela (output_bfd, sreloc, &outrel);
4692
4693 /* If this reloc is against an external symbol, we
4694 do not want to fiddle with the addend. Otherwise,
4695 we need to include the symbol value so that it
4696 becomes an addend for the dynamic reloc. For an
4697 internal symbol, we have updated addend. */
4698 return bfd_reloc_ok;
4699 }
4700 /* FALLTHROUGH */
4701 case BFD_RELOC_AARCH64_CALL26:
4702 case BFD_RELOC_AARCH64_JUMP26:
4703 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4704 signed_addend,
4705 weak_undef_p);
4706 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4707 howto, value);
4708 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4709 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4710 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4711 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4712 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4713 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4714 base_got = globals->root.sgot;
4715 off = h->got.offset;
4716
4717 if (base_got == NULL)
4718 abort ();
4719
4720 if (off == (bfd_vma) -1)
4721 {
4722 bfd_vma plt_index;
4723
4724 /* We can't use h->got.offset here to save state, or
4725 even just remember the offset, as finish_dynamic_symbol
4726 would use that as offset into .got. */
4727
4728 if (globals->root.splt != NULL)
4729 {
4730 plt_index = ((h->plt.offset - globals->plt_header_size) /
4731 globals->plt_entry_size);
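/* The first three .got.plt slots are reserved (the address of _DYNAMIC
plus two words filled in by the dynamic linker), hence the "+ 3" when
mapping a PLT index to its .got.plt entry. */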
4732 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4733 base_got = globals->root.sgotplt;
4734 }
4735 else
4736 {
4737 plt_index = h->plt.offset / globals->plt_entry_size;
4738 off = plt_index * GOT_ENTRY_SIZE;
4739 base_got = globals->root.igotplt;
4740 }
4741
4742 if (h->dynindx == -1
4743 || h->forced_local
4744 || info->symbolic)
4745 {
4746 /* This references the local definition. We must
4747 initialize this entry in the global offset table.
4748 Since the offset must always be a multiple of 8,
4749 we use the least significant bit to record
4750 whether we have initialized it already.
4751
4752 When doing a dynamic link, we create a .rela.got
4753 relocation entry to initialize the value. This
4754 is done in the finish_dynamic_symbol routine. */
4755 if ((off & 1) != 0)
4756 off &= ~1;
4757 else
4758 {
4759 bfd_put_NN (output_bfd, value,
4760 base_got->contents + off);
4761 /* Note that this is harmless as -1 | 1 still is -1. */
4762 h->got.offset |= 1;
4763 }
4764 }
4765 value = (base_got->output_section->vma
4766 + base_got->output_offset + off);
4767 }
4768 else
4769 value = aarch64_calculate_got_entry_vma (h, globals, info,
4770 value, output_bfd,
4771 unresolved_reloc_p);
4772 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4773 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4774 addend = (globals->root.sgot->output_section->vma
4775 + globals->root.sgot->output_offset);
4776 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4777 addend, weak_undef_p);
4778 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4779 case BFD_RELOC_AARCH64_ADD_LO12:
4780 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4781 break;
4782 }
4783 }
4784
4785 switch (bfd_r_type)
4786 {
4787 case BFD_RELOC_AARCH64_NONE:
4788 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4789 *unresolved_reloc_p = FALSE;
4790 return bfd_reloc_ok;
4791
4792 case BFD_RELOC_AARCH64_NN:
4793
4794 /* When generating a shared object or relocatable executable, these
4795 relocations are copied into the output file to be resolved at
4796 run time. */
4797 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4798 && (input_section->flags & SEC_ALLOC)
4799 && (h == NULL
4800 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4801 || h->root.type != bfd_link_hash_undefweak))
4802 {
4803 Elf_Internal_Rela outrel;
4804 bfd_byte *loc;
4805 bfd_boolean skip, relocate;
4806 asection *sreloc;
4807
4808 *unresolved_reloc_p = FALSE;
4809
4810 skip = FALSE;
4811 relocate = FALSE;
4812
4813 outrel.r_addend = signed_addend;
4814 outrel.r_offset =
4815 _bfd_elf_section_offset (output_bfd, info, input_section,
4816 rel->r_offset);
4817 if (outrel.r_offset == (bfd_vma) - 1)
4818 skip = TRUE;
4819 else if (outrel.r_offset == (bfd_vma) - 2)
4820 {
4821 skip = TRUE;
4822 relocate = TRUE;
4823 }
4824
4825 outrel.r_offset += (input_section->output_section->vma
4826 + input_section->output_offset);
4827
4828 if (skip)
4829 memset (&outrel, 0, sizeof outrel);
4830 else if (h != NULL
4831 && h->dynindx != -1
4832 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4833 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4834 else
4835 {
4836 int symbol;
4837
4838 /* On SVR4-ish systems, the dynamic loader cannot
4839 relocate the text and data segments independently,
4840 so the symbol does not matter. */
4841 symbol = 0;
4842 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4843 outrel.r_addend += value;
4844 }
4845
4846 sreloc = elf_section_data (input_section)->sreloc;
4847 if (sreloc == NULL || sreloc->contents == NULL)
4848 return bfd_reloc_notsupported;
4849
4850 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4851 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4852
4853 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4854 {
4855 /* Sanity check that we have previously allocated
4856 sufficient space in the relocation section for the
4857 number of relocations we actually want to emit. */
4858 abort ();
4859 }
4860
4861 /* If this reloc is against an external symbol, we do not want to
4862 fiddle with the addend. Otherwise, we need to include the symbol
4863 value so that it becomes an addend for the dynamic reloc. */
4864 if (!relocate)
4865 return bfd_reloc_ok;
4866
4867 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4868 contents, rel->r_offset, value,
4869 signed_addend);
4870 }
4871 else
4872 value += signed_addend;
4873 break;
4874
4875 case BFD_RELOC_AARCH64_CALL26:
4876 case BFD_RELOC_AARCH64_JUMP26:
4877 {
4878 asection *splt = globals->root.splt;
4879 bfd_boolean via_plt_p =
4880 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4881
4882 /* A call to an undefined weak symbol is converted to a jump to
4883 the next instruction unless a PLT entry will be created.
4884 The jump to the next instruction is optimized as a NOP.
4885 Do the same for local undefined symbols. */
4886 if (weak_undef_p && ! via_plt_p)
4887 {
4888 bfd_putl32 (INSN_NOP, hit_data);
4889 return bfd_reloc_ok;
4890 }
4891
4892 /* If the call goes through a PLT entry, make sure to
4893 check distance to the right destination address. */
4894 if (via_plt_p)
4895 value = (splt->output_section->vma
4896 + splt->output_offset + h->plt.offset);
4897
4898 /* Check if a stub has to be inserted because the destination
4899 is too far away. */
4900 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
4901 if (! aarch64_valid_branch_p (value, place))
4902 /* The target is out of reach, so redirect the branch to
4903 the local stub for this function. */
4904 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
4905 rel, globals);
4906 if (stub_entry != NULL)
4907 value = (stub_entry->stub_offset
4908 + stub_entry->stub_sec->output_offset
4909 + stub_entry->stub_sec->output_section->vma);
4910 }
4911 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4912 signed_addend, weak_undef_p);
4913 *unresolved_reloc_p = FALSE;
4914 break;
4915
4916 case BFD_RELOC_AARCH64_16_PCREL:
4917 case BFD_RELOC_AARCH64_32_PCREL:
4918 case BFD_RELOC_AARCH64_64_PCREL:
4919 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4920 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4921 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4922 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4923 if (info->shared
4924 && (input_section->flags & SEC_ALLOC) != 0
4925 && (input_section->flags & SEC_READONLY) != 0
4926 && h != NULL
4927 && !h->def_regular)
4928 {
4929 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4930
4931 (*_bfd_error_handler)
4932 (_("%B: relocation %s against external symbol `%s' can not be used"
4933 " when making a shared object; recompile with -fPIC"),
4934 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4935 h->root.root.string);
4936 bfd_set_error (bfd_error_bad_value);
4937 return FALSE;
4938 }
4939
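/* FALLTHROUGH */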
4940 case BFD_RELOC_AARCH64_16:
4941 #if ARCH_SIZE == 64
4942 case BFD_RELOC_AARCH64_32:
4943 #endif
4944 case BFD_RELOC_AARCH64_ADD_LO12:
4945 case BFD_RELOC_AARCH64_BRANCH19:
4946 case BFD_RELOC_AARCH64_LDST128_LO12:
4947 case BFD_RELOC_AARCH64_LDST16_LO12:
4948 case BFD_RELOC_AARCH64_LDST32_LO12:
4949 case BFD_RELOC_AARCH64_LDST64_LO12:
4950 case BFD_RELOC_AARCH64_LDST8_LO12:
4951 case BFD_RELOC_AARCH64_MOVW_G0:
4952 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4953 case BFD_RELOC_AARCH64_MOVW_G0_S:
4954 case BFD_RELOC_AARCH64_MOVW_G1:
4955 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4956 case BFD_RELOC_AARCH64_MOVW_G1_S:
4957 case BFD_RELOC_AARCH64_MOVW_G2:
4958 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4959 case BFD_RELOC_AARCH64_MOVW_G2_S:
4960 case BFD_RELOC_AARCH64_MOVW_G3:
4961 case BFD_RELOC_AARCH64_TSTBR14:
4962 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4963 signed_addend, weak_undef_p);
4964 break;
4965
4966 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4967 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4968 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4969 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4970 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4971 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4972 if (globals->root.sgot == NULL)
4973 BFD_ASSERT (h != NULL);
4974
4975 if (h != NULL)
4976 {
4977 bfd_vma addend = 0;
4978 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4979 output_bfd,
4980 unresolved_reloc_p);
4981 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4982 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4983 addend = (globals->root.sgot->output_section->vma
4984 + globals->root.sgot->output_offset);
4985 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4986 addend, weak_undef_p);
4987 }
4988 else
4989 {
4990 bfd_vma addend = 0;
4991 struct elf_aarch64_local_symbol *locals
4992 = elf_aarch64_locals (input_bfd);
4993
4994 if (locals == NULL)
4995 {
4996 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4997 (*_bfd_error_handler)
4998 (_("%B: Local symbol descriptor table be NULL when applying "
4999 "relocation %s against local symbol"),
5000 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5001 abort ();
5002 }
5003
5004 off = symbol_got_offset (input_bfd, h, r_symndx);
5005 base_got = globals->root.sgot;
5006 bfd_vma got_entry_addr = (base_got->output_section->vma
5007 + base_got->output_offset + off);
5008
5009 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5010 {
5011 bfd_put_64 (output_bfd, value, base_got->contents + off);
5012
5013 if (info->shared)
5014 {
5015 asection *s;
5016 Elf_Internal_Rela outrel;
5017
5018 /* For a local symbol, the absolute relocation has already been done
5019 at static link time.  For a shared library, however, the GOT entry
5020 content must be updated according to the shared object's load base
5021 address, so we generate an R_AARCH64_RELATIVE reloc for the dynamic
5022 linker. */
5023 s = globals->root.srelgot;
5024 if (s == NULL)
5025 abort ();
5026
5027 outrel.r_offset = got_entry_addr;
5028 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5029 outrel.r_addend = value;
5030 elf_append_rela (output_bfd, s, &outrel);
5031 }
5032
5033 symbol_got_offset_mark (input_bfd, h, r_symndx);
5034 }
5035
5036 /* Update the relocation value to the GOT entry address, as we have
5037 transformed the direct data access into an indirect access through the GOT. */
5038 value = got_entry_addr;
5039
5040 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5041 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5042 addend = base_got->output_section->vma + base_got->output_offset;
5043
5044 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5045 addend, weak_undef_p);
5046 }
5047
5048 break;
5049
5050 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5051 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5052 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5053 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5054 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5055 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5056 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5057 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5058 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5059 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5060 if (globals->root.sgot == NULL)
5061 return bfd_reloc_notsupported;
5062
5063 value = (symbol_got_offset (input_bfd, h, r_symndx)
5064 + globals->root.sgot->output_section->vma
5065 + globals->root.sgot->output_offset);
5066
5067 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5068 0, weak_undef_p);
5069 *unresolved_reloc_p = FALSE;
5070 break;
5071
5072 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5073 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5074 signed_addend - dtpoff_base (info),
5075 weak_undef_p);
5076 break;
5077
5078 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5079 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5080 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5081 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5082 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5083 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5084 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5085 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5086 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5087 signed_addend - tpoff_base (info),
5088 weak_undef_p);
5089 *unresolved_reloc_p = FALSE;
5090 break;
5091
5092 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5093 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5094 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5095 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5096 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5097 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5098 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5099 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5100 if (globals->root.sgot == NULL)
5101 return bfd_reloc_notsupported;
5102 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5103 + globals->root.sgotplt->output_section->vma
5104 + globals->root.sgotplt->output_offset
5105 + globals->sgotplt_jump_table_size);
5106
5107 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5108 0, weak_undef_p);
5109 *unresolved_reloc_p = FALSE;
5110 break;
5111
5112 default:
5113 return bfd_reloc_notsupported;
5114 }
5115
5116 if (saved_addend)
5117 *saved_addend = value;
5118
5119 /* Only apply the final relocation in a sequence. */
5120 if (save_addend)
5121 return bfd_reloc_continue;
5122
5123 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5124 howto, value);
5125 }
5126
5127 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5128 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5129 link.
5130
5131 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5132 is to then call final_link_relocate. Return other values in the
5133 case of error. */
5134
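/* The instruction patterns written below decode as follows:

0xd2a00000  movz x0, #0x0, lsl #16
0xf2800000  movk x0, #0x0
0x91000000  add  x0, x0, #0x0
0x91400020  add  x0, x1, #0x0, lsl #12
0xd53bd041  mrs  x1, tpidr_el0
0x8b000020  add  x0, x1, x0
0x58000000  ldr  x0, <literal>
0xf9400000  ldr  x0, [x0]

In the IE->LE cases the low five bits of the original instruction are
OR'd back in to preserve the destination register. The zero immediate
and offset fields are filled in afterwards when the retyped relocation
is applied, which is why most cases return bfd_reloc_continue rather
than bfd_reloc_ok. */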
5135 static bfd_reloc_status_type
5136 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5137 bfd *input_bfd, bfd_byte *contents,
5138 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5139 {
5140 bfd_boolean is_local = h == NULL;
5141 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5142 unsigned long insn;
5143
5144 BFD_ASSERT (globals && input_bfd && contents && rel);
5145
5146 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5147 {
5148 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5149 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5150 if (is_local)
5151 {
5152 /* GD->LE relaxation:
5153 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5154 or
5155 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5156 */
5157 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5158 return bfd_reloc_continue;
5159 }
5160 else
5161 {
5162 /* GD->IE relaxation:
5163 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5164 or
5165 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5166 */
5167 return bfd_reloc_continue;
5168 }
5169
5170 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5171 BFD_ASSERT (0);
5172 break;
5173
5174 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5175 if (is_local)
5176 {
5177 /* Tiny TLSDESC->LE relaxation:
5178 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5179 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5180 .tlsdesccall var
5181 blr x1 => nop
5182 */
5183 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5184 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5185
5186 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5187 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5188 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5189
5190 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5191 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5192 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5193 return bfd_reloc_continue;
5194 }
5195 else
5196 {
5197 /* Tiny TLSDESC->IE relaxation:
5198 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5199 adr x0, :tlsdesc:var => nop
5200 .tlsdesccall var
5201 blr x1 => nop
5202 */
5203 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5204 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5205
5206 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5207 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5208
5209 bfd_putl32 (0x58000000, contents + rel->r_offset);
5210 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5211 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5212 return bfd_reloc_continue;
5213 }
5214
5215 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5216 if (is_local)
5217 {
5218 /* Tiny GD->LE relaxation:
5219 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5220 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5221 nop => add x0, x0, #:tprel_lo12_nc:x
5222 */
5223
5224 /* First kill the tls_get_addr reloc on the bl instruction. */
5225 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5226
5227 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5228 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5229 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5230
5231 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5232 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5233 rel[1].r_offset = rel->r_offset + 8;
5234
5235 /* Move the current relocation to the second instruction in
5236 the sequence. */
5237 rel->r_offset += 4;
5238 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5239 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5240 return bfd_reloc_continue;
5241 }
5242 else
5243 {
5244 /* Tiny GD->IE relaxation:
5245 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5246 bl __tls_get_addr => mrs x1, tpidr_el0
5247 nop => add x0, x0, x1
5248 */
5249
5250 /* First kill the tls_get_addr reloc on the bl instruction. */
5251 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5252 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5253
5254 bfd_putl32 (0x58000000, contents + rel->r_offset);
5255 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5256 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5257 return bfd_reloc_continue;
5258 }
5259
5260 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5261 return bfd_reloc_continue;
5262
5263 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5264 if (is_local)
5265 {
5266 /* GD->LE relaxation:
5267 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5268 */
5269 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5270 return bfd_reloc_continue;
5271 }
5272 else
5273 {
5274 /* GD->IE relaxation:
5275 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5276 */
5277 insn = bfd_getl32 (contents + rel->r_offset);
5278 insn &= 0xffffffe0;
5279 bfd_putl32 (insn, contents + rel->r_offset);
5280 return bfd_reloc_continue;
5281 }
5282
5283 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5284 if (is_local)
5285 {
5286 /* GD->LE relaxation
5287 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5288 bl __tls_get_addr => mrs x1, tpidr_el0
5289 nop => add x0, x1, x0
5290 */
5291
5292 /* First kill the tls_get_addr reloc on the bl instruction. */
5293 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5294 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5295
5296 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5297 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5298 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5299 return bfd_reloc_continue;
5300 }
5301 else
5302 {
5303 /* GD->IE relaxation
5304 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5305 BL __tls_get_addr => mrs x1, tpidr_el0
5306 R_AARCH64_CALL26
5307 NOP => add x0, x1, x0
5308 */
5309
5310 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5311
5312 /* Remove the relocation on the BL instruction. */
5313 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5314
5315 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5316
5317 /* We choose to fixup the BL and NOP instructions using the
5318 offset from the second relocation to allow flexibility in
5319 scheduling instructions between the ADD and BL. */
5320 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5321 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5322 return bfd_reloc_continue;
5323 }
5324
5325 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5326 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5327 /* GD->IE/LE relaxation:
5328 add x0, x0, #:tlsdesc_lo12:var => nop
5329 blr xd => nop
5330 */
5331 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5332 return bfd_reloc_ok;
5333
5334 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5335 /* IE->LE relaxation:
5336 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5337 */
5338 if (is_local)
5339 {
5340 insn = bfd_getl32 (contents + rel->r_offset);
5341 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5342 }
5343 return bfd_reloc_continue;
5344
5345 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5346 /* IE->LE relaxation:
5347 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5348 */
5349 if (is_local)
5350 {
5351 insn = bfd_getl32 (contents + rel->r_offset);
5352 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5353 }
5354 return bfd_reloc_continue;
5355
5356 default:
5357 return bfd_reloc_continue;
5358 }
5359
5360 return bfd_reloc_ok;
5361 }
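
/* [Editorial note, not part of elfnn-aarch64.c] The relaxations above
 patch in literal opcode templates such as 0xd2a00000 (movz x0, #0,
 lsl #16) and 0xf2800000 (movk x0, #0); the replacement TPREL
 relocations later fill in the immediate fields. The following
 stand-alone sketch shows how those templates decompose, assuming the
 standard A64 MOVZ/MOVK wide-immediate encoding. */

#include <assert.h>
#include <stdint.h>

/* MOVZ Xd, #imm16, LSL #(hw * 16): sf=1, opc=10, 100101, hw, imm16, Rd. */
static uint32_t
sketch_movz (unsigned rd, unsigned imm16, unsigned hw)
{
  return 0xd2800000u | (hw << 21) | ((imm16 & 0xffffu) << 5) | (rd & 0x1fu);
}

/* MOVK Xd, #imm16, LSL #(hw * 16): as above but with opc=11. */
static uint32_t
sketch_movk (unsigned rd, unsigned imm16, unsigned hw)
{
  return 0xf2800000u | (hw << 21) | ((imm16 & 0xffffu) << 5) | (rd & 0x1fu);
}

int
main (void)
{
  /* With register x0 and a zero immediate these reproduce the templates
     stored by the GD->LE and IE->LE cases above. */
  assert (sketch_movz (0, 0, 1) == 0xd2a00000u);  /* movz x0, #:tprel_g1:var */
  assert (sketch_movk (0, 0, 0) == 0xf2800000u);  /* movk x0, #:tprel_g0_nc:var */
  return 0;
}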
5362
5363 /* Relocate an AArch64 ELF section. */
5364
5365 static bfd_boolean
5366 elfNN_aarch64_relocate_section (bfd *output_bfd,
5367 struct bfd_link_info *info,
5368 bfd *input_bfd,
5369 asection *input_section,
5370 bfd_byte *contents,
5371 Elf_Internal_Rela *relocs,
5372 Elf_Internal_Sym *local_syms,
5373 asection **local_sections)
5374 {
5375 Elf_Internal_Shdr *symtab_hdr;
5376 struct elf_link_hash_entry **sym_hashes;
5377 Elf_Internal_Rela *rel;
5378 Elf_Internal_Rela *relend;
5379 const char *name;
5380 struct elf_aarch64_link_hash_table *globals;
5381 bfd_boolean save_addend = FALSE;
5382 bfd_vma addend = 0;
5383
5384 globals = elf_aarch64_hash_table (info);
5385
5386 symtab_hdr = &elf_symtab_hdr (input_bfd);
5387 sym_hashes = elf_sym_hashes (input_bfd);
5388
5389 rel = relocs;
5390 relend = relocs + input_section->reloc_count;
5391 for (; rel < relend; rel++)
5392 {
5393 unsigned int r_type;
5394 bfd_reloc_code_real_type bfd_r_type;
5395 bfd_reloc_code_real_type relaxed_bfd_r_type;
5396 reloc_howto_type *howto;
5397 unsigned long r_symndx;
5398 Elf_Internal_Sym *sym;
5399 asection *sec;
5400 struct elf_link_hash_entry *h;
5401 bfd_vma relocation;
5402 bfd_reloc_status_type r;
5403 arelent bfd_reloc;
5404 char sym_type;
5405 bfd_boolean unresolved_reloc = FALSE;
5406 char *error_message = NULL;
5407
5408 r_symndx = ELFNN_R_SYM (rel->r_info);
5409 r_type = ELFNN_R_TYPE (rel->r_info);
5410
5411 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5412 howto = bfd_reloc.howto;
5413
5414 if (howto == NULL)
5415 {
5416 (*_bfd_error_handler)
5417 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5418 input_bfd, input_section, r_type);
5419 return FALSE;
5420 }
5421 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5422
5423 h = NULL;
5424 sym = NULL;
5425 sec = NULL;
5426
5427 if (r_symndx < symtab_hdr->sh_info)
5428 {
5429 sym = local_syms + r_symndx;
5430 sym_type = ELFNN_ST_TYPE (sym->st_info);
5431 sec = local_sections[r_symndx];
5432
5433 /* An object file might have a reference to a local
5434 undefined symbol. This is a daft object file, but we
5435 should at least do something about it. */
5436 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5437 && bfd_is_und_section (sec)
5438 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5439 {
5440 if (!info->callbacks->undefined_symbol
5441 (info, bfd_elf_string_from_elf_section
5442 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5443 input_bfd, input_section, rel->r_offset, TRUE))
5444 return FALSE;
5445 }
5446
5447 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5448
5449 /* Relocate against local STT_GNU_IFUNC symbol. */
5450 if (!info->relocatable
5451 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5452 {
5453 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5454 rel, FALSE);
5455 if (h == NULL)
5456 abort ();
5457
5458 /* Set STT_GNU_IFUNC symbol value. */
5459 h->root.u.def.value = sym->st_value;
5460 h->root.u.def.section = sec;
5461 }
5462 }
5463 else
5464 {
5465 bfd_boolean warned, ignored;
5466
5467 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5468 r_symndx, symtab_hdr, sym_hashes,
5469 h, sec, relocation,
5470 unresolved_reloc, warned, ignored);
5471
5472 sym_type = h->type;
5473 }
5474
5475 if (sec != NULL && discarded_section (sec))
5476 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5477 rel, 1, relend, howto, 0, contents);
5478
5479 if (info->relocatable)
5480 continue;
5481
5482 if (h != NULL)
5483 name = h->root.root.string;
5484 else
5485 {
5486 name = (bfd_elf_string_from_elf_section
5487 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5488 if (name == NULL || *name == '\0')
5489 name = bfd_section_name (input_bfd, sec);
5490 }
5491
5492 if (r_symndx != 0
5493 && r_type != R_AARCH64_NONE
5494 && r_type != R_AARCH64_NULL
5495 && (h == NULL
5496 || h->root.type == bfd_link_hash_defined
5497 || h->root.type == bfd_link_hash_defweak)
5498 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5499 {
5500 (*_bfd_error_handler)
5501 ((sym_type == STT_TLS
5502 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5503 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5504 input_bfd,
5505 input_section, (long) rel->r_offset, howto->name, name);
5506 }
5507
5508 /* We relax only if we can see that there can be a valid transition
5509 from a reloc type to another.
5510 We call elfNN_aarch64_final_link_relocate unless we're completely
5511 done, i.e., the relaxation produced the final output we want. */
5512
5513 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5514 h, r_symndx);
5515 if (relaxed_bfd_r_type != bfd_r_type)
5516 {
5517 bfd_r_type = relaxed_bfd_r_type;
5518 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5519 BFD_ASSERT (howto != NULL);
5520 r_type = howto->type;
5521 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5522 unresolved_reloc = 0;
5523 }
5524 else
5525 r = bfd_reloc_continue;
5526
5527 /* There may be multiple consecutive relocations for the
5528 same offset. In that case we are supposed to treat the
5529 output of each relocation as the addend for the next. */
5530 if (rel + 1 < relend
5531 && rel->r_offset == rel[1].r_offset
5532 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5533 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5534 save_addend = TRUE;
5535 else
5536 save_addend = FALSE;
5537
5538 if (r == bfd_reloc_continue)
5539 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5540 input_section, contents, rel,
5541 relocation, info, sec,
5542 h, &unresolved_reloc,
5543 save_addend, &addend, sym);
5544
5545 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5546 {
5547 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5548 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5549 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5550 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5551 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5552 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5553 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5554 {
5555 bfd_boolean need_relocs = FALSE;
5556 bfd_byte *loc;
5557 int indx;
5558 bfd_vma off;
5559
5560 off = symbol_got_offset (input_bfd, h, r_symndx);
5561 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5562
5563 need_relocs =
5564 (info->shared || indx != 0) &&
5565 (h == NULL
5566 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5567 || h->root.type != bfd_link_hash_undefweak);
5568
5569 BFD_ASSERT (globals->root.srelgot != NULL);
5570
5571 if (need_relocs)
5572 {
5573 Elf_Internal_Rela rela;
5574 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5575 rela.r_addend = 0;
5576 rela.r_offset = globals->root.sgot->output_section->vma +
5577 globals->root.sgot->output_offset + off;
5578
5579
5580 loc = globals->root.srelgot->contents;
5581 loc += globals->root.srelgot->reloc_count++
5582 * RELOC_SIZE (htab);
5583 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5584
5585 bfd_reloc_code_real_type real_type =
5586 elfNN_aarch64_bfd_reloc_from_type (r_type);
5587
5588 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5589 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
5590 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
5591 {
5592 /* For local dynamic, don't generate DTPREL in any case.
5593 Initialize the DTPREL slot to zero, so we get the module
5594 base address when invoking the runtime TLS resolver. */
5595 bfd_put_NN (output_bfd, 0,
5596 globals->root.sgot->contents + off
5597 + GOT_ENTRY_SIZE);
5598 }
5599 else if (indx == 0)
5600 {
5601 bfd_put_NN (output_bfd,
5602 relocation - dtpoff_base (info),
5603 globals->root.sgot->contents + off
5604 + GOT_ENTRY_SIZE);
5605 }
5606 else
5607 {
5608 /* This TLS symbol is global. We emit a
5609 relocation to fixup the tls offset at load
5610 time. */
5611 rela.r_info =
5612 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5613 rela.r_addend = 0;
5614 rela.r_offset =
5615 (globals->root.sgot->output_section->vma
5616 + globals->root.sgot->output_offset + off
5617 + GOT_ENTRY_SIZE);
5618
5619 loc = globals->root.srelgot->contents;
5620 loc += globals->root.srelgot->reloc_count++
5621 * RELOC_SIZE (globals);
5622 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5623 bfd_put_NN (output_bfd, (bfd_vma) 0,
5624 globals->root.sgot->contents + off
5625 + GOT_ENTRY_SIZE);
5626 }
5627 }
5628 else
5629 {
5630 bfd_put_NN (output_bfd, (bfd_vma) 1,
5631 globals->root.sgot->contents + off);
5632 bfd_put_NN (output_bfd,
5633 relocation - dtpoff_base (info),
5634 globals->root.sgot->contents + off
5635 + GOT_ENTRY_SIZE);
5636 }
5637
5638 symbol_got_offset_mark (input_bfd, h, r_symndx);
5639 }
5640 break;
5641
5642 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5643 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5644 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5645 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5646 {
5647 bfd_boolean need_relocs = FALSE;
5648 bfd_byte *loc;
5649 int indx;
5650 bfd_vma off;
5651
5652 off = symbol_got_offset (input_bfd, h, r_symndx);
5653
5654 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5655
5656 need_relocs =
5657 (info->shared || indx != 0) &&
5658 (h == NULL
5659 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5660 || h->root.type != bfd_link_hash_undefweak);
5661
5662 BFD_ASSERT (globals->root.srelgot != NULL);
5663
5664 if (need_relocs)
5665 {
5666 Elf_Internal_Rela rela;
5667
5668 if (indx == 0)
5669 rela.r_addend = relocation - dtpoff_base (info);
5670 else
5671 rela.r_addend = 0;
5672
5673 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5674 rela.r_offset = globals->root.sgot->output_section->vma +
5675 globals->root.sgot->output_offset + off;
5676
5677 loc = globals->root.srelgot->contents;
5678 loc += globals->root.srelgot->reloc_count++
5679 * RELOC_SIZE (htab);
5680
5681 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5682
5683 bfd_put_NN (output_bfd, rela.r_addend,
5684 globals->root.sgot->contents + off);
5685 }
5686 else
5687 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5688 globals->root.sgot->contents + off);
5689
5690 symbol_got_offset_mark (input_bfd, h, r_symndx);
5691 }
5692 break;
5693
5694 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5695 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5696 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5697 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5698 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5699 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5700 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5701 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5702 break;
5703
5704 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5705 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5706 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5707 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5708 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5709 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5710 {
5711 bfd_boolean need_relocs = FALSE;
5712 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5713 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5714
5715 need_relocs = (h == NULL
5716 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5717 || h->root.type != bfd_link_hash_undefweak);
5718
5719 BFD_ASSERT (globals->root.srelgot != NULL);
5720 BFD_ASSERT (globals->root.sgot != NULL);
5721
5722 if (need_relocs)
5723 {
5724 bfd_byte *loc;
5725 Elf_Internal_Rela rela;
5726 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5727
5728 rela.r_addend = 0;
5729 rela.r_offset = (globals->root.sgotplt->output_section->vma
5730 + globals->root.sgotplt->output_offset
5731 + off + globals->sgotplt_jump_table_size);
5732
5733 if (indx == 0)
5734 rela.r_addend = relocation - dtpoff_base (info);
5735
5736 /* Allocate the next available slot in the PLT reloc
5737 section to hold our R_AARCH64_TLSDESC; the next
5738 available slot is determined from reloc_count,
5739 which we increment here. Note that reloc_count was
5740 artificially moved down while allocating slots for
5741 real PLT relocs, so that all of the PLT relocs fit
5742 above the initial reloc_count and the extra entries
5743 fit below. */
5744 loc = globals->root.srelplt->contents;
5745 loc += globals->root.srelplt->reloc_count++
5746 * RELOC_SIZE (globals);
5747
5748 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5749
5750 bfd_put_NN (output_bfd, (bfd_vma) 0,
5751 globals->root.sgotplt->contents + off +
5752 globals->sgotplt_jump_table_size);
5753 bfd_put_NN (output_bfd, (bfd_vma) 0,
5754 globals->root.sgotplt->contents + off +
5755 globals->sgotplt_jump_table_size +
5756 GOT_ENTRY_SIZE);
5757 }
5758
5759 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5760 }
5761 break;
5762 default:
5763 break;
5764 }
5765
5766 if (!save_addend)
5767 addend = 0;
5768
5769
5770 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5771 because such sections are not SEC_ALLOC and thus ld.so will
5772 not process them. */
5773 if (unresolved_reloc
5774 && !((input_section->flags & SEC_DEBUGGING) != 0
5775 && h->def_dynamic)
5776 && _bfd_elf_section_offset (output_bfd, info, input_section,
5777 +rel->r_offset) != (bfd_vma) - 1)
5778 {
5779 (*_bfd_error_handler)
5780 (_
5781 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5782 input_bfd, input_section, (long) rel->r_offset, howto->name,
5783 h->root.root.string);
5784 return FALSE;
5785 }
5786
5787 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5788 {
5789 switch (r)
5790 {
5791 case bfd_reloc_overflow:
5792 if (!(*info->callbacks->reloc_overflow)
5793 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5794 input_bfd, input_section, rel->r_offset))
5795 return FALSE;
5796 break;
5797
5798 case bfd_reloc_undefined:
5799 if (!((*info->callbacks->undefined_symbol)
5800 (info, name, input_bfd, input_section,
5801 rel->r_offset, TRUE)))
5802 return FALSE;
5803 break;
5804
5805 case bfd_reloc_outofrange:
5806 error_message = _("out of range");
5807 goto common_error;
5808
5809 case bfd_reloc_notsupported:
5810 error_message = _("unsupported relocation");
5811 goto common_error;
5812
5813 case bfd_reloc_dangerous:
5814 /* error_message should already be set. */
5815 goto common_error;
5816
5817 default:
5818 error_message = _("unknown error");
5819 /* Fall through. */
5820
5821 common_error:
5822 BFD_ASSERT (error_message != NULL);
5823 if (!((*info->callbacks->reloc_dangerous)
5824 (info, error_message, input_bfd, input_section,
5825 rel->r_offset)))
5826 return FALSE;
5827 break;
5828 }
5829 }
5830 }
5831
5832 return TRUE;
5833 }
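
/* [Editorial note, not part of elfnn-aarch64.c] For the TLSGD/TLSLD cases
 handled in the switch above, the GOT holds a pair of slots per symbol:
 the first receives R_AARCH64_TLS_DTPMOD (or the constant 1 when no
 dynamic relocation is needed), the second receives R_AARCH64_TLS_DTPREL,
 the value "relocation - dtpoff_base (info)", or zero for local-dynamic
 accesses. The IE cases use a single slot carrying R_AARCH64_TLS_TPREL or
 "relocation - tpoff_base (info)". The struct below is a purely
 illustrative picture of the double slot, assuming 64-bit (LP64) GOT
 entries. */

#include <stdint.h>

struct sketch_gd_got_entry              /* lives at sgot->contents + off */
{
  uint64_t module;      /* slot 0: DTPMOD reloc, or 1 for a static link.  */
  uint64_t dtp_offset;  /* slot 1 (off + GOT_ENTRY_SIZE): DTPREL reloc,
                           the local symbol's DTP offset, or 0 for LD.  */
};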
5834
5835 /* Set the right machine number. */
5836
5837 static bfd_boolean
5838 elfNN_aarch64_object_p (bfd *abfd)
5839 {
5840 #if ARCH_SIZE == 32
5841 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5842 #else
5843 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5844 #endif
5845 return TRUE;
5846 }
5847
5848 /* Function to keep AArch64 specific flags in the ELF header. */
5849
5850 static bfd_boolean
5851 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5852 {
5853 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5854 {
5855 }
5856 else
5857 {
5858 elf_elfheader (abfd)->e_flags = flags;
5859 elf_flags_init (abfd) = TRUE;
5860 }
5861
5862 return TRUE;
5863 }
5864
5865 /* Merge backend specific data from an object file to the output
5866 object file when linking. */
5867
5868 static bfd_boolean
5869 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5870 {
5871 flagword out_flags;
5872 flagword in_flags;
5873 bfd_boolean flags_compatible = TRUE;
5874 asection *sec;
5875
5876 /* Check if we have the same endianness. */
5877 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5878 return FALSE;
5879
5880 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5881 return TRUE;
5882
5883 /* The input BFD must have had its flags initialised. */
5884 /* The following seems bogus to me -- The flags are initialized in
5885 the assembler but I don't think an elf_flags_init field is
5886 written into the object. */
5887 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5888
5889 in_flags = elf_elfheader (ibfd)->e_flags;
5890 out_flags = elf_elfheader (obfd)->e_flags;
5891
5892 if (!elf_flags_init (obfd))
5893 {
5894 /* If the input is the default architecture and had the default
5895 flags then do not bother setting the flags for the output
5896 architecture, instead allow future merges to do this. If no
5897 future merges ever set these flags then they will retain their
5898 uninitialised values which, surprise surprise, correspond
5899 to the default values. */
5900 if (bfd_get_arch_info (ibfd)->the_default
5901 && elf_elfheader (ibfd)->e_flags == 0)
5902 return TRUE;
5903
5904 elf_flags_init (obfd) = TRUE;
5905 elf_elfheader (obfd)->e_flags = in_flags;
5906
5907 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5908 && bfd_get_arch_info (obfd)->the_default)
5909 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5910 bfd_get_mach (ibfd));
5911
5912 return TRUE;
5913 }
5914
5915 /* Identical flags must be compatible. */
5916 if (in_flags == out_flags)
5917 return TRUE;
5918
5919 /* Check to see if the input BFD actually contains any sections. If
5920 not, its flags may not have been initialised either, but it
5921 cannot actually cause any incompatibility. Do not short-circuit
5922 dynamic objects; their section list may be emptied by
5923 elf_link_add_object_symbols.
5924
5925 Also check to see if there are no code sections in the input.
5926 In this case there is no need to check for code specific flags.
5927 XXX - do we need to worry about floating-point format compatibility
5928 in data sections? */
5929 if (!(ibfd->flags & DYNAMIC))
5930 {
5931 bfd_boolean null_input_bfd = TRUE;
5932 bfd_boolean only_data_sections = TRUE;
5933
5934 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5935 {
5936 if ((bfd_get_section_flags (ibfd, sec)
5937 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5938 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5939 only_data_sections = FALSE;
5940
5941 null_input_bfd = FALSE;
5942 break;
5943 }
5944
5945 if (null_input_bfd || only_data_sections)
5946 return TRUE;
5947 }
5948
5949 return flags_compatible;
5950 }
5951
5952 /* Display the flags field. */
5953
5954 static bfd_boolean
5955 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5956 {
5957 FILE *file = (FILE *) ptr;
5958 unsigned long flags;
5959
5960 BFD_ASSERT (abfd != NULL && ptr != NULL);
5961
5962 /* Print normal ELF private data. */
5963 _bfd_elf_print_private_bfd_data (abfd, ptr);
5964
5965 flags = elf_elfheader (abfd)->e_flags;
5966 /* Ignore init flag - it may not be set, despite the flags field
5967 containing valid data. */
5968
5969 /* xgettext:c-format */
5970 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5971
5972 if (flags)
5973 fprintf (file, _("<Unrecognised flag bits set>"));
5974
5975 fputc ('\n', file);
5976
5977 return TRUE;
5978 }
5979
5980 /* Update the got entry reference counts for the section being removed. */
5981
5982 static bfd_boolean
5983 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5984 struct bfd_link_info *info,
5985 asection *sec,
5986 const Elf_Internal_Rela * relocs)
5987 {
5988 struct elf_aarch64_link_hash_table *htab;
5989 Elf_Internal_Shdr *symtab_hdr;
5990 struct elf_link_hash_entry **sym_hashes;
5991 struct elf_aarch64_local_symbol *locals;
5992 const Elf_Internal_Rela *rel, *relend;
5993
5994 if (info->relocatable)
5995 return TRUE;
5996
5997 htab = elf_aarch64_hash_table (info);
5998
5999 if (htab == NULL)
6000 return FALSE;
6001
6002 elf_section_data (sec)->local_dynrel = NULL;
6003
6004 symtab_hdr = &elf_symtab_hdr (abfd);
6005 sym_hashes = elf_sym_hashes (abfd);
6006
6007 locals = elf_aarch64_locals (abfd);
6008
6009 relend = relocs + sec->reloc_count;
6010 for (rel = relocs; rel < relend; rel++)
6011 {
6012 unsigned long r_symndx;
6013 unsigned int r_type;
6014 struct elf_link_hash_entry *h = NULL;
6015
6016 r_symndx = ELFNN_R_SYM (rel->r_info);
6017
6018 if (r_symndx >= symtab_hdr->sh_info)
6019 {
6020
6021 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6022 while (h->root.type == bfd_link_hash_indirect
6023 || h->root.type == bfd_link_hash_warning)
6024 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6025 }
6026 else
6027 {
6028 Elf_Internal_Sym *isym;
6029
6030 /* A local symbol. */
6031 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6032 abfd, r_symndx);
6033
6034 /* Check relocation against local STT_GNU_IFUNC symbol. */
6035 if (isym != NULL
6036 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6037 {
6038 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6039 if (h == NULL)
6040 abort ();
6041 }
6042 }
6043
6044 if (h)
6045 {
6046 struct elf_aarch64_link_hash_entry *eh;
6047 struct elf_dyn_relocs **pp;
6048 struct elf_dyn_relocs *p;
6049
6050 eh = (struct elf_aarch64_link_hash_entry *) h;
6051
6052 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6053 if (p->sec == sec)
6054 {
6055 /* Everything must go for SEC. */
6056 *pp = p->next;
6057 break;
6058 }
6059 }
6060
6061 r_type = ELFNN_R_TYPE (rel->r_info);
6062 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6063 {
6064 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6065 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6066 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6067 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6068 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6069 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6070 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6071 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6072 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6073 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6074 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6075 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6076 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6077 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6078 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6079 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6080 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6081 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6082 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6083 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6084 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6085 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6086 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6087 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6088 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6089 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6090 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6091 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6092 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6093 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6094 if (h != NULL)
6095 {
6096 if (h->got.refcount > 0)
6097 h->got.refcount -= 1;
6098
6099 if (h->type == STT_GNU_IFUNC)
6100 {
6101 if (h->plt.refcount > 0)
6102 h->plt.refcount -= 1;
6103 }
6104 }
6105 else if (locals != NULL)
6106 {
6107 if (locals[r_symndx].got_refcount > 0)
6108 locals[r_symndx].got_refcount -= 1;
6109 }
6110 break;
6111
6112 case BFD_RELOC_AARCH64_CALL26:
6113 case BFD_RELOC_AARCH64_JUMP26:
6114 /* If this is a local symbol then we resolve it
6115 directly without creating a PLT entry. */
6116 if (h == NULL)
6117 continue;
6118
6119 if (h->plt.refcount > 0)
6120 h->plt.refcount -= 1;
6121 break;
6122
6123 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6124 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6125 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6126 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6127 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6128 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6129 case BFD_RELOC_AARCH64_MOVW_G3:
6130 case BFD_RELOC_AARCH64_NN:
6131 if (h != NULL && info->executable)
6132 {
6133 if (h->plt.refcount > 0)
6134 h->plt.refcount -= 1;
6135 }
6136 break;
6137
6138 default:
6139 break;
6140 }
6141 }
6142
6143 return TRUE;
6144 }
6145
6146 /* Adjust a symbol defined by a dynamic object and referenced by a
6147 regular object. The current definition is in some section of the
6148 dynamic object, but we're not including those sections. We have to
6149 change the definition to something the rest of the link can
6150 understand. */
6151
6152 static bfd_boolean
6153 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6154 struct elf_link_hash_entry *h)
6155 {
6156 struct elf_aarch64_link_hash_table *htab;
6157 asection *s;
6158
6159 /* If this is a function, put it in the procedure linkage table. We
6160 will fill in the contents of the procedure linkage table later,
6161 when we know the address of the .got section. */
6162 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6163 {
6164 if (h->plt.refcount <= 0
6165 || (h->type != STT_GNU_IFUNC
6166 && (SYMBOL_CALLS_LOCAL (info, h)
6167 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6168 && h->root.type == bfd_link_hash_undefweak))))
6169 {
6170 /* This case can occur if we saw a CALL26 reloc in
6171 an input file, but the symbol wasn't referred to
6172 by a dynamic object, or all references were
6173 garbage collected. In that case we can resolve
6174 the call directly and no PLT entry is needed. */
6175 h->plt.offset = (bfd_vma) - 1;
6176 h->needs_plt = 0;
6177 }
6178
6179 return TRUE;
6180 }
6181 else
6182 /* Otherwise, reset to -1. */
6183 h->plt.offset = (bfd_vma) - 1;
6184
6185
6186 /* If this is a weak symbol, and there is a real definition, the
6187 processor independent code will have arranged for us to see the
6188 real definition first, and we can just use the same value. */
6189 if (h->u.weakdef != NULL)
6190 {
6191 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6192 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6193 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6194 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6195 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6196 h->non_got_ref = h->u.weakdef->non_got_ref;
6197 return TRUE;
6198 }
6199
6200 /* If we are creating a shared library, we must presume that the
6201 only references to the symbol are via the global offset table.
6202 For such cases we need not do anything here; the relocations will
6203 be handled correctly by relocate_section. */
6204 if (info->shared)
6205 return TRUE;
6206
6207 /* If there are no references to this symbol that do not use the
6208 GOT, we don't need to generate a copy reloc. */
6209 if (!h->non_got_ref)
6210 return TRUE;
6211
6212 /* If -z nocopyreloc was given, we won't generate them either. */
6213 if (info->nocopyreloc)
6214 {
6215 h->non_got_ref = 0;
6216 return TRUE;
6217 }
6218
6219 /* We must allocate the symbol in our .dynbss section, which will
6220 become part of the .bss section of the executable. There will be
6221 an entry for this symbol in the .dynsym section. The dynamic
6222 object will contain position independent code, so all references
6223 from the dynamic object to this symbol will go through the global
6224 offset table. The dynamic linker will use the .dynsym entry to
6225 determine the address it must put in the global offset table, so
6226 both the dynamic object and the regular object will refer to the
6227 same memory location for the variable. */
6228
6229 htab = elf_aarch64_hash_table (info);
6230
6231 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6232 to copy the initial value out of the dynamic object and into the
6233 runtime process image. */
6234 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6235 {
6236 htab->srelbss->size += RELOC_SIZE (htab);
6237 h->needs_copy = 1;
6238 }
6239
6240 s = htab->sdynbss;
6241
6242 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6243
6244 }
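
/* [Editorial note, not part of elfnn-aarch64.c] The non-PLT half of the
 hook above reduces to the decision chain sketched here. The parameter
 names mirror the bfd_link_info and elf_link_hash_entry fields it tests;
 the helper itself is hypothetical and only for illustration. */

#include <stdbool.h>

enum sketch_copy_decision { SKETCH_NO_COPY, SKETCH_COPY_INTO_DYNBSS };

enum sketch_copy_decision
sketch_wants_copy_reloc (bool shared, bool non_got_ref, bool nocopyreloc)
{
  if (shared)           /* Shared library: references go via the GOT.  */
    return SKETCH_NO_COPY;
  if (!non_got_ref)     /* All references already use the GOT.  */
    return SKETCH_NO_COPY;
  if (nocopyreloc)      /* -z nocopyreloc was given.  */
    return SKETCH_NO_COPY;
  /* Otherwise reserve space in .dynbss and emit an R_AARCH64_COPY.  */
  return SKETCH_COPY_INTO_DYNBSS;
}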
6245
6246 static bfd_boolean
6247 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6248 {
6249 struct elf_aarch64_local_symbol *locals;
6250 locals = elf_aarch64_locals (abfd);
6251 if (locals == NULL)
6252 {
6253 locals = (struct elf_aarch64_local_symbol *)
6254 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6255 if (locals == NULL)
6256 return FALSE;
6257 elf_aarch64_locals (abfd) = locals;
6258 }
6259 return TRUE;
6260 }
6261
6262 /* Create the .got section to hold the global offset table. */
6263
6264 static bfd_boolean
6265 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6266 {
6267 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6268 flagword flags;
6269 asection *s;
6270 struct elf_link_hash_entry *h;
6271 struct elf_link_hash_table *htab = elf_hash_table (info);
6272
6273 /* This function may be called more than once. */
6274 s = bfd_get_linker_section (abfd, ".got");
6275 if (s != NULL)
6276 return TRUE;
6277
6278 flags = bed->dynamic_sec_flags;
6279
6280 s = bfd_make_section_anyway_with_flags (abfd,
6281 (bed->rela_plts_and_copies_p
6282 ? ".rela.got" : ".rel.got"),
6283 (bed->dynamic_sec_flags
6284 | SEC_READONLY));
6285 if (s == NULL
6286 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6287 return FALSE;
6288 htab->srelgot = s;
6289
6290 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6291 if (s == NULL
6292 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6293 return FALSE;
6294 htab->sgot = s;
6295 htab->sgot->size += GOT_ENTRY_SIZE;
6296
6297 if (bed->want_got_sym)
6298 {
6299 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6300 (or .got.plt) section. We don't do this in the linker script
6301 because we don't want to define the symbol if we are not creating
6302 a global offset table. */
6303 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6304 "_GLOBAL_OFFSET_TABLE_");
6305 elf_hash_table (info)->hgot = h;
6306 if (h == NULL)
6307 return FALSE;
6308 }
6309
6310 if (bed->want_got_plt)
6311 {
6312 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6313 if (s == NULL
6314 || !bfd_set_section_alignment (abfd, s,
6315 bed->s->log_file_align))
6316 return FALSE;
6317 htab->sgotplt = s;
6318 }
6319
6320 /* The first bit of the global offset table is the header. */
6321 s->size += bed->got_header_size;
6322
6323 return TRUE;
6324 }
6325
6326 /* Look through the relocs for a section during the first phase. */
6327
6328 static bfd_boolean
6329 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6330 asection *sec, const Elf_Internal_Rela *relocs)
6331 {
6332 Elf_Internal_Shdr *symtab_hdr;
6333 struct elf_link_hash_entry **sym_hashes;
6334 const Elf_Internal_Rela *rel;
6335 const Elf_Internal_Rela *rel_end;
6336 asection *sreloc;
6337
6338 struct elf_aarch64_link_hash_table *htab;
6339
6340 if (info->relocatable)
6341 return TRUE;
6342
6343 BFD_ASSERT (is_aarch64_elf (abfd));
6344
6345 htab = elf_aarch64_hash_table (info);
6346 sreloc = NULL;
6347
6348 symtab_hdr = &elf_symtab_hdr (abfd);
6349 sym_hashes = elf_sym_hashes (abfd);
6350
6351 rel_end = relocs + sec->reloc_count;
6352 for (rel = relocs; rel < rel_end; rel++)
6353 {
6354 struct elf_link_hash_entry *h;
6355 unsigned long r_symndx;
6356 unsigned int r_type;
6357 bfd_reloc_code_real_type bfd_r_type;
6358 Elf_Internal_Sym *isym;
6359
6360 r_symndx = ELFNN_R_SYM (rel->r_info);
6361 r_type = ELFNN_R_TYPE (rel->r_info);
6362
6363 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6364 {
6365 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6366 r_symndx);
6367 return FALSE;
6368 }
6369
6370 if (r_symndx < symtab_hdr->sh_info)
6371 {
6372 /* A local symbol. */
6373 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6374 abfd, r_symndx);
6375 if (isym == NULL)
6376 return FALSE;
6377
6378 /* Check relocation against local STT_GNU_IFUNC symbol. */
6379 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6380 {
6381 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6382 TRUE);
6383 if (h == NULL)
6384 return FALSE;
6385
6386 /* Fake a STT_GNU_IFUNC symbol. */
6387 h->type = STT_GNU_IFUNC;
6388 h->def_regular = 1;
6389 h->ref_regular = 1;
6390 h->forced_local = 1;
6391 h->root.type = bfd_link_hash_defined;
6392 }
6393 else
6394 h = NULL;
6395 }
6396 else
6397 {
6398 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6399 while (h->root.type == bfd_link_hash_indirect
6400 || h->root.type == bfd_link_hash_warning)
6401 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6402
6403 /* PR15323, ref flags aren't set for references in the same
6404 object. */
6405 h->root.non_ir_ref = 1;
6406 }
6407
6408 /* Could be done earlier, if h were already available. */
6409 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6410
6411 if (h != NULL)
6412 {
6413 /* Create the ifunc sections for static executables. If we
6414 never see an indirect function symbol nor are we building
6415 a static executable, those sections will be empty and
6416 won't appear in output. */
6417 switch (bfd_r_type)
6418 {
6419 default:
6420 break;
6421
6422 case BFD_RELOC_AARCH64_ADD_LO12:
6423 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6424 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6425 case BFD_RELOC_AARCH64_CALL26:
6426 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6427 case BFD_RELOC_AARCH64_JUMP26:
6428 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6429 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6430 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6431 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6432 case BFD_RELOC_AARCH64_NN:
6433 if (htab->root.dynobj == NULL)
6434 htab->root.dynobj = abfd;
6435 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6436 return FALSE;
6437 break;
6438 }
6439
6440 /* It is referenced by a non-shared object. */
6441 h->ref_regular = 1;
6442 h->root.non_ir_ref = 1;
6443 }
6444
6445 switch (bfd_r_type)
6446 {
6447 case BFD_RELOC_AARCH64_NN:
6448
6449 /* We don't need to handle relocs into sections not going into
6450 the "real" output. */
6451 if ((sec->flags & SEC_ALLOC) == 0)
6452 break;
6453
6454 if (h != NULL)
6455 {
6456 if (!info->shared)
6457 h->non_got_ref = 1;
6458
6459 h->plt.refcount += 1;
6460 h->pointer_equality_needed = 1;
6461 }
6462
6463 /* No need to do anything if we're not creating a shared
6464 object. */
6465 if (! info->shared)
6466 break;
6467
6468 {
6469 struct elf_dyn_relocs *p;
6470 struct elf_dyn_relocs **head;
6471
6472 /* We must copy these reloc types into the output file.
6473 Create a reloc section in dynobj and make room for
6474 this reloc. */
6475 if (sreloc == NULL)
6476 {
6477 if (htab->root.dynobj == NULL)
6478 htab->root.dynobj = abfd;
6479
6480 sreloc = _bfd_elf_make_dynamic_reloc_section
6481 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6482
6483 if (sreloc == NULL)
6484 return FALSE;
6485 }
6486
6487 /* If this is a global symbol, we count the number of
6488 relocations we need for this symbol. */
6489 if (h != NULL)
6490 {
6491 struct elf_aarch64_link_hash_entry *eh;
6492 eh = (struct elf_aarch64_link_hash_entry *) h;
6493 head = &eh->dyn_relocs;
6494 }
6495 else
6496 {
6497 /* Track dynamic relocs needed for local syms too.
6498 We really need local syms available to do this
6499 easily. Oh well. */
6500
6501 asection *s;
6502 void **vpp;
6503
6504 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6505 abfd, r_symndx);
6506 if (isym == NULL)
6507 return FALSE;
6508
6509 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6510 if (s == NULL)
6511 s = sec;
6512
6513 /* Beware of type punned pointers vs strict aliasing
6514 rules. */
6515 vpp = &(elf_section_data (s)->local_dynrel);
6516 head = (struct elf_dyn_relocs **) vpp;
6517 }
6518
6519 p = *head;
6520 if (p == NULL || p->sec != sec)
6521 {
6522 bfd_size_type amt = sizeof *p;
6523 p = ((struct elf_dyn_relocs *)
6524 bfd_zalloc (htab->root.dynobj, amt));
6525 if (p == NULL)
6526 return FALSE;
6527 p->next = *head;
6528 *head = p;
6529 p->sec = sec;
6530 }
6531
6532 p->count += 1;
6533
6534 }
6535 break;
6536
6537 /* RR: We probably want to keep a consistency check that
6538 there are no dangling GOT_PAGE relocs. */
6539 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6540 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6541 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6542 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6543 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6544 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6545 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6546 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6547 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6548 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6549 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6550 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6551 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6552 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6553 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6554 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6555 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6556 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6557 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6558 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6560 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6561 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6562 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6563 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6564 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6565 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6566 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6567 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6568 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6569 {
6570 unsigned got_type;
6571 unsigned old_got_type;
6572
6573 got_type = aarch64_reloc_got_type (bfd_r_type);
6574
6575 if (h)
6576 {
6577 h->got.refcount += 1;
6578 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6579 }
6580 else
6581 {
6582 struct elf_aarch64_local_symbol *locals;
6583
6584 if (!elfNN_aarch64_allocate_local_symbols
6585 (abfd, symtab_hdr->sh_info))
6586 return FALSE;
6587
6588 locals = elf_aarch64_locals (abfd);
6589 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6590 locals[r_symndx].got_refcount += 1;
6591 old_got_type = locals[r_symndx].got_type;
6592 }
6593
6594 /* If a variable is accessed with both general dynamic TLS
6595 methods, two slots may be created. */
6596 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6597 got_type |= old_got_type;
6598
6599 /* We will already have issued an error message if there
6600 is a TLS/non-TLS mismatch, based on the symbol type.
6601 So just combine any TLS types needed. */
6602 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6603 && got_type != GOT_NORMAL)
6604 got_type |= old_got_type;
6605
6606 /* If the symbol is accessed by both IE and GD methods, we
6607 are able to relax. Turn off the GD flags without
6608 disturbing any other TLS types that may be
6609 involved. */
6610 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6611 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6612
6613 if (old_got_type != got_type)
6614 {
6615 if (h != NULL)
6616 elf_aarch64_hash_entry (h)->got_type = got_type;
6617 else
6618 {
6619 struct elf_aarch64_local_symbol *locals;
6620 locals = elf_aarch64_locals (abfd);
6621 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6622 locals[r_symndx].got_type = got_type;
6623 }
6624 }
6625
6626 if (htab->root.dynobj == NULL)
6627 htab->root.dynobj = abfd;
6628 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6629 return FALSE;
6630 break;
6631 }
6632
6633 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6634 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6635 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6636 case BFD_RELOC_AARCH64_MOVW_G3:
6637 if (info->shared)
6638 {
6639 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6640 (*_bfd_error_handler)
6641 (_("%B: relocation %s against `%s' can not be used when making "
6642 "a shared object; recompile with -fPIC"),
6643 abfd, elfNN_aarch64_howto_table[howto_index].name,
6644 (h) ? h->root.root.string : "a local symbol");
6645 bfd_set_error (bfd_error_bad_value);
6646 return FALSE;
6647 }
6648
6649 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6650 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6651 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6652 if (h != NULL && info->executable)
6653 {
6654 /* If this reloc is in a read-only section, we might
6655 need a copy reloc. We can't check reliably at this
6656 stage whether the section is read-only, as input
6657 sections have not yet been mapped to output sections.
6658 Tentatively set the flag for now, and correct in
6659 adjust_dynamic_symbol. */
6660 h->non_got_ref = 1;
6661 h->plt.refcount += 1;
6662 h->pointer_equality_needed = 1;
6663 }
6664 /* FIXME: RR: these also need to be handled in shared
6665 libraries; we should essentially bomb out, as they are
6666 non-PIC relocations in shared libraries. */
6667 break;
6668
6669 case BFD_RELOC_AARCH64_CALL26:
6670 case BFD_RELOC_AARCH64_JUMP26:
6671 /* If this is a local symbol then we resolve it
6672 directly without creating a PLT entry. */
6673 if (h == NULL)
6674 continue;
6675
6676 h->needs_plt = 1;
6677 if (h->plt.refcount <= 0)
6678 h->plt.refcount = 1;
6679 else
6680 h->plt.refcount += 1;
6681 break;
6682
6683 default:
6684 break;
6685 }
6686 }
6687
6688 return TRUE;
6689 }
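
/* [Editorial note, not part of elfnn-aarch64.c] The GOT type bookkeeping
 above ORs together the access kinds seen for a symbol and then drops the
 general-dynamic bits when an initial-exec access is also present. The
 sketch below reproduces that bit logic with hypothetical flag values; the
 real GOT_* definitions appear earlier in this file. */

#include <assert.h>

#define SKETCH_GOT_TLS_GD       2
#define SKETCH_GOT_TLS_IE       4
#define SKETCH_GOT_TLSDESC_GD   8
#define SKETCH_GD_ANY_P(t) (((t) & SKETCH_GOT_TLS_GD) \
                            || ((t) & SKETCH_GOT_TLSDESC_GD))

int
main (void)
{
  /* A symbol first seen through a TLSDESC (GD) access and later through
     an IE access: the types combine, then GD is relaxed away so the IE
     form wins. */
  unsigned got_type = SKETCH_GOT_TLSDESC_GD | SKETCH_GOT_TLS_IE;

  if ((got_type & SKETCH_GOT_TLS_IE) && SKETCH_GD_ANY_P (got_type))
    got_type &= ~(SKETCH_GOT_TLSDESC_GD | SKETCH_GOT_TLS_GD);

  assert (got_type == SKETCH_GOT_TLS_IE);
  return 0;
}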
6690
6691 /* Treat mapping symbols as special target symbols. */
6692
6693 static bfd_boolean
6694 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6695 asymbol *sym)
6696 {
6697 return bfd_is_aarch64_special_symbol_name (sym->name,
6698 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6699 }
6700
6701 /* This is a copy of elf_find_function () from elf.c except that
6702 AArch64 mapping symbols are ignored when looking for function names. */
6703
6704 static bfd_boolean
6705 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6706 asymbol **symbols,
6707 asection *section,
6708 bfd_vma offset,
6709 const char **filename_ptr,
6710 const char **functionname_ptr)
6711 {
6712 const char *filename = NULL;
6713 asymbol *func = NULL;
6714 bfd_vma low_func = 0;
6715 asymbol **p;
6716
6717 for (p = symbols; *p != NULL; p++)
6718 {
6719 elf_symbol_type *q;
6720
6721 q = (elf_symbol_type *) * p;
6722
6723 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6724 {
6725 default:
6726 break;
6727 case STT_FILE:
6728 filename = bfd_asymbol_name (&q->symbol);
6729 break;
6730 case STT_FUNC:
6731 case STT_NOTYPE:
6732 /* Skip mapping symbols. */
6733 if ((q->symbol.flags & BSF_LOCAL)
6734 && (bfd_is_aarch64_special_symbol_name
6735 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6736 continue;
6737 /* Fall through. */
6738 if (bfd_get_section (&q->symbol) == section
6739 && q->symbol.value >= low_func && q->symbol.value <= offset)
6740 {
6741 func = (asymbol *) q;
6742 low_func = q->symbol.value;
6743 }
6744 break;
6745 }
6746 }
6747
6748 if (func == NULL)
6749 return FALSE;
6750
6751 if (filename_ptr)
6752 *filename_ptr = filename;
6753 if (functionname_ptr)
6754 *functionname_ptr = bfd_asymbol_name (func);
6755
6756 return TRUE;
6757 }
6758
6759
6760 /* Find the nearest line to a particular section and offset, for error
6761 reporting. This code is a duplicate of the code in elf.c, except
6762 that it uses aarch64_elf_find_function. */
6763
6764 static bfd_boolean
6765 elfNN_aarch64_find_nearest_line (bfd *abfd,
6766 asymbol **symbols,
6767 asection *section,
6768 bfd_vma offset,
6769 const char **filename_ptr,
6770 const char **functionname_ptr,
6771 unsigned int *line_ptr,
6772 unsigned int *discriminator_ptr)
6773 {
6774 bfd_boolean found = FALSE;
6775
6776 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6777 filename_ptr, functionname_ptr,
6778 line_ptr, discriminator_ptr,
6779 dwarf_debug_sections, 0,
6780 &elf_tdata (abfd)->dwarf2_find_line_info))
6781 {
6782 if (!*functionname_ptr)
6783 aarch64_elf_find_function (abfd, symbols, section, offset,
6784 *filename_ptr ? NULL : filename_ptr,
6785 functionname_ptr);
6786
6787 return TRUE;
6788 }
6789
6790 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6791 toolchain uses DWARF1. */
6792
6793 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6794 &found, filename_ptr,
6795 functionname_ptr, line_ptr,
6796 &elf_tdata (abfd)->line_info))
6797 return FALSE;
6798
6799 if (found && (*functionname_ptr || *line_ptr))
6800 return TRUE;
6801
6802 if (symbols == NULL)
6803 return FALSE;
6804
6805 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6806 filename_ptr, functionname_ptr))
6807 return FALSE;
6808
6809 *line_ptr = 0;
6810 return TRUE;
6811 }
6812
6813 static bfd_boolean
6814 elfNN_aarch64_find_inliner_info (bfd *abfd,
6815 const char **filename_ptr,
6816 const char **functionname_ptr,
6817 unsigned int *line_ptr)
6818 {
6819 bfd_boolean found;
6820 found = _bfd_dwarf2_find_inliner_info
6821 (abfd, filename_ptr,
6822 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6823 return found;
6824 }
6825
6826
6827 static void
6828 elfNN_aarch64_post_process_headers (bfd *abfd,
6829 struct bfd_link_info *link_info)
6830 {
6831 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6832
6833 i_ehdrp = elf_elfheader (abfd);
6834 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6835
6836 _bfd_elf_post_process_headers (abfd, link_info);
6837 }
6838
6839 static enum elf_reloc_type_class
6840 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6841 const asection *rel_sec ATTRIBUTE_UNUSED,
6842 const Elf_Internal_Rela *rela)
6843 {
6844 switch ((int) ELFNN_R_TYPE (rela->r_info))
6845 {
6846 case AARCH64_R (RELATIVE):
6847 return reloc_class_relative;
6848 case AARCH64_R (JUMP_SLOT):
6849 return reloc_class_plt;
6850 case AARCH64_R (COPY):
6851 return reloc_class_copy;
6852 default:
6853 return reloc_class_normal;
6854 }
6855 }
6856
6857 /* Handle an AArch64 specific section when reading an object file. This is
6858 called when bfd_section_from_shdr finds a section with an unknown
6859 type. */
6860
6861 static bfd_boolean
6862 elfNN_aarch64_section_from_shdr (bfd *abfd,
6863 Elf_Internal_Shdr *hdr,
6864 const char *name, int shindex)
6865 {
6866 /* There ought to be a place to keep ELF backend specific flags, but
6867 at the moment there isn't one. We just keep track of the
6868 sections by their name, instead. Fortunately, the ABI gives
6869 names for all the AArch64 specific sections, so we will probably get
6870 away with this. */
6871 switch (hdr->sh_type)
6872 {
6873 case SHT_AARCH64_ATTRIBUTES:
6874 break;
6875
6876 default:
6877 return FALSE;
6878 }
6879
6880 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6881 return FALSE;
6882
6883 return TRUE;
6884 }
6885
6886 /* A structure used to record a list of sections, independently
6887 of the next and prev fields in the asection structure. */
6888 typedef struct section_list
6889 {
6890 asection *sec;
6891 struct section_list *next;
6892 struct section_list *prev;
6893 }
6894 section_list;
6895
6896 /* Unfortunately we need to keep a list of sections for which
6897 an _aarch64_elf_section_data structure has been allocated. This
6898 is because it is possible for functions like elfNN_aarch64_write_section
6899 to be called on a section which has had an elf_data_structure
6900 allocated for it (and so the used_by_bfd field is valid) but
6901 for which the AArch64 extended version of this structure - the
6902 _aarch64_elf_section_data structure - has not been allocated. */
6903 static section_list *sections_with_aarch64_elf_section_data = NULL;
6904
6905 static void
6906 record_section_with_aarch64_elf_section_data (asection *sec)
6907 {
6908 struct section_list *entry;
6909
6910 entry = bfd_malloc (sizeof (*entry));
6911 if (entry == NULL)
6912 return;
6913 entry->sec = sec;
6914 entry->next = sections_with_aarch64_elf_section_data;
6915 entry->prev = NULL;
6916 if (entry->next != NULL)
6917 entry->next->prev = entry;
6918 sections_with_aarch64_elf_section_data = entry;
6919 }
6920
6921 static struct section_list *
6922 find_aarch64_elf_section_entry (asection *sec)
6923 {
6924 struct section_list *entry;
6925 static struct section_list *last_entry = NULL;
6926
6927 /* This is a short cut for the typical case where the sections are added
6928 to the sections_with_aarch64_elf_section_data list in forward order and
6929 then looked up here in backwards order. This makes a real difference
6930 to the ld-srec/sec64k.exp linker test. */
6931 entry = sections_with_aarch64_elf_section_data;
6932 if (last_entry != NULL)
6933 {
6934 if (last_entry->sec == sec)
6935 entry = last_entry;
6936 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6937 entry = last_entry->next;
6938 }
6939
6940 for (; entry; entry = entry->next)
6941 if (entry->sec == sec)
6942 break;
6943
6944 if (entry)
6945 /* Record the entry prior to this one - it is the entry we are
6946 most likely to want to locate next time. Also this way if we
6947 have been called from
6948 unrecord_section_with_aarch64_elf_section_data () we will not
6949 be caching a pointer that is about to be freed. */
6950 last_entry = entry->prev;
6951
6952 return entry;
6953 }
6954
6955 static void
6956 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6957 {
6958 struct section_list *entry;
6959
6960 entry = find_aarch64_elf_section_entry (sec);
6961
6962 if (entry)
6963 {
6964 if (entry->prev != NULL)
6965 entry->prev->next = entry->next;
6966 if (entry->next != NULL)
6967 entry->next->prev = entry->prev;
6968 if (entry == sections_with_aarch64_elf_section_data)
6969 sections_with_aarch64_elf_section_data = entry->next;
6970 free (entry);
6971 }
6972 }
6973
6974
6975 typedef struct
6976 {
6977 void *finfo;
6978 struct bfd_link_info *info;
6979 asection *sec;
6980 int sec_shndx;
6981 int (*func) (void *, const char *, Elf_Internal_Sym *,
6982 asection *, struct elf_link_hash_entry *);
6983 } output_arch_syminfo;
6984
6985 enum map_symbol_type
6986 {
6987 AARCH64_MAP_INSN,
6988 AARCH64_MAP_DATA
6989 };
6990
6991
6992 /* Output a single mapping symbol. */
6993
6994 static bfd_boolean
6995 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6996 enum map_symbol_type type, bfd_vma offset)
6997 {
6998 static const char *names[2] = { "$x", "$d" };
6999 Elf_Internal_Sym sym;
7000
7001 sym.st_value = (osi->sec->output_section->vma
7002 + osi->sec->output_offset + offset);
7003 sym.st_size = 0;
7004 sym.st_other = 0;
7005 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7006 sym.st_shndx = osi->sec_shndx;
7007 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7008 }
7009
7010
7011
7012 /* Output mapping symbols for PLT entries associated with H. */
7013
7014 static bfd_boolean
7015 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
7016 {
7017 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
7018 bfd_vma addr;
7019
7020 if (h->root.type == bfd_link_hash_indirect)
7021 return TRUE;
7022
7023 if (h->root.type == bfd_link_hash_warning)
7024 /* When warning symbols are created, they **replace** the "real"
7025 entry in the hash table, thus we never get to see the real
7026 symbol in a hash traversal. So look at it now. */
7027 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7028
7029 if (h->plt.offset == (bfd_vma) - 1)
7030 return TRUE;
7031
7032 addr = h->plt.offset;
7033 if (addr == 32)
7034 {
7035 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7036 return FALSE;
7037 }
7038 return TRUE;
7039 }
7040
7041
7042 /* Output a single local symbol for a generated stub. */
7043
7044 static bfd_boolean
7045 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7046 bfd_vma offset, bfd_vma size)
7047 {
7048 Elf_Internal_Sym sym;
7049
7050 sym.st_value = (osi->sec->output_section->vma
7051 + osi->sec->output_offset + offset);
7052 sym.st_size = size;
7053 sym.st_other = 0;
7054 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7055 sym.st_shndx = osi->sec_shndx;
7056 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7057 }
7058
7059 static bfd_boolean
7060 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7061 {
7062 struct elf_aarch64_stub_hash_entry *stub_entry;
7063 asection *stub_sec;
7064 bfd_vma addr;
7065 char *stub_name;
7066 output_arch_syminfo *osi;
7067
7068 /* Massage our args to the form they really have. */
7069 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7070 osi = (output_arch_syminfo *) in_arg;
7071
7072 stub_sec = stub_entry->stub_sec;
7073
7074 /* Ensure this stub is attached to the current section being
7075 processed. */
7076 if (stub_sec != osi->sec)
7077 return TRUE;
7078
7079 addr = (bfd_vma) stub_entry->stub_offset;
7080
7081 stub_name = stub_entry->output_name;
7082
7083 switch (stub_entry->stub_type)
7084 {
7085 case aarch64_stub_adrp_branch:
7086 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7087 sizeof (aarch64_adrp_branch_stub)))
7088 return FALSE;
7089 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7090 return FALSE;
7091 break;
7092 case aarch64_stub_long_branch:
7093 if (!elfNN_aarch64_output_stub_sym
7094 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7095 return FALSE;
7096 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7097 return FALSE;
7098 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7099 return FALSE;
7100 break;
7101 case aarch64_stub_erratum_835769_veneer:
7102 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7103 sizeof (aarch64_erratum_835769_stub)))
7104 return FALSE;
7105 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7106 return FALSE;
7107 break;
7108 case aarch64_stub_erratum_843419_veneer:
7109 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7110 sizeof (aarch64_erratum_843419_stub)))
7111 return FALSE;
7112 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7113 return FALSE;
7114 break;
7115
7116 default:
7117 abort ();
7118 }
7119
7120 return TRUE;
7121 }
7122
7123 /* Output mapping symbols for linker generated sections. */
7124
7125 static bfd_boolean
7126 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7127 struct bfd_link_info *info,
7128 void *finfo,
7129 int (*func) (void *, const char *,
7130 Elf_Internal_Sym *,
7131 asection *,
7132 struct elf_link_hash_entry
7133 *))
7134 {
7135 output_arch_syminfo osi;
7136 struct elf_aarch64_link_hash_table *htab;
7137
7138 htab = elf_aarch64_hash_table (info);
7139
7140 osi.finfo = finfo;
7141 osi.info = info;
7142 osi.func = func;
7143
7144 /* Long call stubs. */
7145 if (htab->stub_bfd && htab->stub_bfd->sections)
7146 {
7147 asection *stub_sec;
7148
7149 for (stub_sec = htab->stub_bfd->sections;
7150 stub_sec != NULL; stub_sec = stub_sec->next)
7151 {
7152 /* Ignore non-stub sections. */
7153 if (!strstr (stub_sec->name, STUB_SUFFIX))
7154 continue;
7155
7156 osi.sec = stub_sec;
7157
7158 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7159 (output_bfd, osi.sec->output_section);
7160
7161 /* The first word of a stub is always an instruction. */
7162 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7163 return FALSE;
7164
7165 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7166 &osi);
7167 }
7168 }
7169
7170 /* Finally, output mapping symbols for the PLT. */
7171 if (!htab->root.splt || htab->root.splt->size == 0)
7172 return TRUE;
7173
7174 /* For now we live without full mapping symbol coverage for the PLT. */
7175 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7176 (output_bfd, htab->root.splt->output_section);
7177 osi.sec = htab->root.splt;
7178
7179 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7180 (void *) &osi);
7181
7182 return TRUE;
7183
7184 }
7185
7186 /* Allocate target specific section data. */
7187
7188 static bfd_boolean
7189 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7190 {
7191 if (!sec->used_by_bfd)
7192 {
7193 _aarch64_elf_section_data *sdata;
7194 bfd_size_type amt = sizeof (*sdata);
7195
7196 sdata = bfd_zalloc (abfd, amt);
7197 if (sdata == NULL)
7198 return FALSE;
7199 sec->used_by_bfd = sdata;
7200 }
7201
7202 record_section_with_aarch64_elf_section_data (sec);
7203
7204 return _bfd_elf_new_section_hook (abfd, sec);
7205 }
7206
7207
7208 static void
7209 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7210 asection *sec,
7211 void *ignore ATTRIBUTE_UNUSED)
7212 {
7213 unrecord_section_with_aarch64_elf_section_data (sec);
7214 }
7215
7216 static bfd_boolean
7217 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7218 {
7219 if (abfd->sections)
7220 bfd_map_over_sections (abfd,
7221 unrecord_section_via_map_over_sections, NULL);
7222
7223 return _bfd_elf_close_and_cleanup (abfd);
7224 }
7225
7226 static bfd_boolean
7227 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7228 {
7229 if (abfd->sections)
7230 bfd_map_over_sections (abfd,
7231 unrecord_section_via_map_over_sections, NULL);
7232
7233 return _bfd_free_cached_info (abfd);
7234 }
7235
7236 /* Create dynamic sections. This is different from the ARM backend in that
7237 the got, plt, gotplt and their relocation sections are all created in the
7238 standard part of the bfd elf backend. */
7239
7240 static bfd_boolean
7241 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7242 struct bfd_link_info *info)
7243 {
7244 struct elf_aarch64_link_hash_table *htab;
7245
7246 /* We need to create .got section. */
7247 if (!aarch64_elf_create_got_section (dynobj, info))
7248 return FALSE;
7249
7250 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7251 return FALSE;
7252
7253 htab = elf_aarch64_hash_table (info);
7254 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7255 if (!info->shared)
7256 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7257
7258 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7259 abort ();
7260
7261 return TRUE;
7262 }
7263
7264
7265 /* Allocate space in .plt, .got and associated reloc sections for
7266 dynamic relocs. */
7267
7268 static bfd_boolean
7269 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7270 {
7271 struct bfd_link_info *info;
7272 struct elf_aarch64_link_hash_table *htab;
7273 struct elf_aarch64_link_hash_entry *eh;
7274 struct elf_dyn_relocs *p;
7275
7276 /* An example of a bfd_link_hash_indirect symbol is a versioned
7277 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7278 -> __gxx_personality_v0(bfd_link_hash_defined)
7279
7280 There is no need to process bfd_link_hash_indirect symbols here
7281 because we will also be presented with the concrete instance of
7282 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7283 called to copy all relevant data from the generic to the concrete
7284 symbol instance.
7285 */
7286 if (h->root.type == bfd_link_hash_indirect)
7287 return TRUE;
7288
7289 if (h->root.type == bfd_link_hash_warning)
7290 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7291
7292 info = (struct bfd_link_info *) inf;
7293 htab = elf_aarch64_hash_table (info);
7294
7295 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7296 here if it is defined and referenced in a non-shared object. */
7297 if (h->type == STT_GNU_IFUNC
7298 && h->def_regular)
7299 return TRUE;
7300 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7301 {
7302 /* Make sure this symbol is output as a dynamic symbol.
7303 Undefined weak syms won't yet be marked as dynamic. */
7304 if (h->dynindx == -1 && !h->forced_local)
7305 {
7306 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7307 return FALSE;
7308 }
7309
7310 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7311 {
7312 asection *s = htab->root.splt;
7313
7314 /* If this is the first .plt entry, make room for the special
7315 first entry. */
7316 if (s->size == 0)
7317 s->size += htab->plt_header_size;
7318
7319 h->plt.offset = s->size;
7320
7321 /* If this symbol is not defined in a regular file, and we are
7322 not generating a shared library, then set the symbol to this
7323 location in the .plt. This is required to make function
7324 pointers compare as equal between the normal executable and
7325 the shared library. */
7326 if (!info->shared && !h->def_regular)
7327 {
7328 h->root.u.def.section = s;
7329 h->root.u.def.value = h->plt.offset;
7330 }
7331
7332 /* Make room for this entry. For now we only create the
7333 small model PLT entries. We later need to find a way
7334 of relaxing into these from the large model PLT entries. */
7335 s->size += PLT_SMALL_ENTRY_SIZE;
7336
7337 /* We also need to make an entry in the .got.plt section, which
7338 will be placed in the .got section by the linker script. */
7339 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7340
7341 /* We also need to make an entry in the .rela.plt section. */
7342 htab->root.srelplt->size += RELOC_SIZE (htab);
7343
7344 /* We need to ensure that all GOT entries that serve the PLT
7345 are consecutive with the special GOT slots [0] [1] and
7346 [2]. Any additional relocations, such as
7347 R_AARCH64_TLSDESC, must be placed after the PLT related
7348 entries. We abuse the reloc_count such that during
7349 sizing we adjust reloc_count to indicate the number of
7350 PLT related reserved entries. In subsequent phases when
7351 filling in the contents of the reloc entries, PLT related
7352 entries are placed by computing their PLT index (0
7353 .. reloc_count), while other non-PLT relocs are placed
7354 at the slot indicated by reloc_count, and reloc_count is
7355 then updated. */
7356
7357 htab->root.srelplt->reloc_count++;
7358 }
7359 else
7360 {
7361 h->plt.offset = (bfd_vma) - 1;
7362 h->needs_plt = 0;
7363 }
7364 }
7365 else
7366 {
7367 h->plt.offset = (bfd_vma) - 1;
7368 h->needs_plt = 0;
7369 }
7370
7371 eh = (struct elf_aarch64_link_hash_entry *) h;
7372 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7373
7374 if (h->got.refcount > 0)
7375 {
7376 bfd_boolean dyn;
7377 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7378
7379 h->got.offset = (bfd_vma) - 1;
7380
7381 dyn = htab->root.dynamic_sections_created;
7382
7383 /* Make sure this symbol is output as a dynamic symbol.
7384 Undefined weak syms won't yet be marked as dynamic. */
7385 if (dyn && h->dynindx == -1 && !h->forced_local)
7386 {
7387 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7388 return FALSE;
7389 }
7390
7391 if (got_type == GOT_UNKNOWN)
7392 {
7393 }
7394 else if (got_type == GOT_NORMAL)
7395 {
7396 h->got.offset = htab->root.sgot->size;
7397 htab->root.sgot->size += GOT_ENTRY_SIZE;
7398 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7399 || h->root.type != bfd_link_hash_undefweak)
7400 && (info->shared
7401 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7402 {
7403 htab->root.srelgot->size += RELOC_SIZE (htab);
7404 }
7405 }
7406 else
7407 {
7408 int indx;
7409 if (got_type & GOT_TLSDESC_GD)
7410 {
7411 eh->tlsdesc_got_jump_table_offset =
7412 (htab->root.sgotplt->size
7413 - aarch64_compute_jump_table_size (htab));
7414 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7415 h->got.offset = (bfd_vma) - 2;
7416 }
7417
7418 if (got_type & GOT_TLS_GD)
7419 {
7420 h->got.offset = htab->root.sgot->size;
7421 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7422 }
7423
7424 if (got_type & GOT_TLS_IE)
7425 {
7426 h->got.offset = htab->root.sgot->size;
7427 htab->root.sgot->size += GOT_ENTRY_SIZE;
7428 }
7429
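/* A rough summary of the sizing below (not normative): the TLSDESC
relocation is accounted for in .rela.plt, next to the jump slot relocs
whose .got.plt space was reserved above, while a traditional GD access
needs two .rela.got relocations (module and offset) and an IE access
needs one. */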
7430 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7431 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7432 || h->root.type != bfd_link_hash_undefweak)
7433 && (info->shared
7434 || indx != 0
7435 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7436 {
7437 if (got_type & GOT_TLSDESC_GD)
7438 {
7439 htab->root.srelplt->size += RELOC_SIZE (htab);
7440 /* Note reloc_count not incremented here! We have
7441 already adjusted reloc_count for this relocation
7442 type. */
7443
7444 /* TLSDESC PLT is now needed, but not yet determined. */
7445 htab->tlsdesc_plt = (bfd_vma) - 1;
7446 }
7447
7448 if (got_type & GOT_TLS_GD)
7449 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7450
7451 if (got_type & GOT_TLS_IE)
7452 htab->root.srelgot->size += RELOC_SIZE (htab);
7453 }
7454 }
7455 }
7456 else
7457 {
7458 h->got.offset = (bfd_vma) - 1;
7459 }
7460
7461 if (eh->dyn_relocs == NULL)
7462 return TRUE;
7463
7464 /* In the shared -Bsymbolic case, discard space allocated for
7465 dynamic pc-relative relocs against symbols which turn out to be
7466 defined in regular objects. For the normal shared case, discard
7467 space for pc-relative relocs that have become local due to symbol
7468 visibility changes. */
7469
7470 if (info->shared)
7471 {
7472 /* Relocs that use pc_count are those that appear on a call
7473 insn, or certain REL relocs that can be generated via assembly.
7474 We want calls to protected symbols to resolve directly to the
7475 function rather than going via the plt. If people want
7476 function pointer comparisons to work as expected then they
7477 should avoid writing weird assembly. */
7478 if (SYMBOL_CALLS_LOCAL (info, h))
7479 {
7480 struct elf_dyn_relocs **pp;
7481
7482 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7483 {
7484 p->count -= p->pc_count;
7485 p->pc_count = 0;
7486 if (p->count == 0)
7487 *pp = p->next;
7488 else
7489 pp = &p->next;
7490 }
7491 }
7492
7493 /* Also discard relocs on undefined weak syms with non-default
7494 visibility. */
7495 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7496 {
7497 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7498 eh->dyn_relocs = NULL;
7499
7500 /* Make sure undefined weak symbols are output as dynamic
7501 symbols in PIEs. */
7502 else if (h->dynindx == -1
7503 && !h->forced_local
7504 && !bfd_elf_link_record_dynamic_symbol (info, h))
7505 return FALSE;
7506 }
7507
7508 }
7509 else if (ELIMINATE_COPY_RELOCS)
7510 {
7511 /* For the non-shared case, discard space for relocs against
7512 symbols which turn out to need copy relocs or are not
7513 dynamic. */
7514
7515 if (!h->non_got_ref
7516 && ((h->def_dynamic
7517 && !h->def_regular)
7518 || (htab->root.dynamic_sections_created
7519 && (h->root.type == bfd_link_hash_undefweak
7520 || h->root.type == bfd_link_hash_undefined))))
7521 {
7522 /* Make sure this symbol is output as a dynamic symbol.
7523 Undefined weak syms won't yet be marked as dynamic. */
7524 if (h->dynindx == -1
7525 && !h->forced_local
7526 && !bfd_elf_link_record_dynamic_symbol (info, h))
7527 return FALSE;
7528
7529 /* If that succeeded, we know we'll be keeping all the
7530 relocs. */
7531 if (h->dynindx != -1)
7532 goto keep;
7533 }
7534
7535 eh->dyn_relocs = NULL;
7536
7537 keep:;
7538 }
7539
7540 /* Finally, allocate space. */
7541 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7542 {
7543 asection *sreloc;
7544
7545 sreloc = elf_section_data (p->sec)->sreloc;
7546
7547 BFD_ASSERT (sreloc != NULL);
7548
7549 sreloc->size += p->count * RELOC_SIZE (htab);
7550 }
7551
7552 return TRUE;
7553 }
7554
7555 /* Allocate space in .plt, .got and associated reloc sections for
7556 ifunc dynamic relocs. */
7557
7558 static bfd_boolean
7559 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7560 void *inf)
7561 {
7562 struct bfd_link_info *info;
7563 struct elf_aarch64_link_hash_table *htab;
7564 struct elf_aarch64_link_hash_entry *eh;
7565
7566 /* An example of a bfd_link_hash_indirect symbol is a versioned
7567 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7568 -> __gxx_personality_v0(bfd_link_hash_defined)
7569
7570 There is no need to process bfd_link_hash_indirect symbols here
7571 because we will also be presented with the concrete instance of
7572 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7573 called to copy all relevant data from the generic to the concrete
7574 symbol instance.
7575 */
7576 if (h->root.type == bfd_link_hash_indirect)
7577 return TRUE;
7578
7579 if (h->root.type == bfd_link_hash_warning)
7580 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7581
7582 info = (struct bfd_link_info *) inf;
7583 htab = elf_aarch64_hash_table (info);
7584
7585 eh = (struct elf_aarch64_link_hash_entry *) h;
7586
7587 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7588 here if it is defined and referenced in a non-shared object. */
7589 if (h->type == STT_GNU_IFUNC
7590 && h->def_regular)
7591 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7592 &eh->dyn_relocs,
7593 htab->plt_entry_size,
7594 htab->plt_header_size,
7595 GOT_ENTRY_SIZE);
7596 return TRUE;
7597 }
7598
7599 /* Allocate space in .plt, .got and associated reloc sections for
7600 local dynamic relocs. */
7601
7602 static bfd_boolean
7603 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7604 {
7605 struct elf_link_hash_entry *h
7606 = (struct elf_link_hash_entry *) *slot;
7607
7608 if (h->type != STT_GNU_IFUNC
7609 || !h->def_regular
7610 || !h->ref_regular
7611 || !h->forced_local
7612 || h->root.type != bfd_link_hash_defined)
7613 abort ();
7614
7615 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7616 }
7617
7618 /* Allocate space in .plt, .got and associated reloc sections for
7619 local ifunc dynamic relocs. */
7620
7621 static bfd_boolean
7622 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7623 {
7624 struct elf_link_hash_entry *h
7625 = (struct elf_link_hash_entry *) *slot;
7626
7627 if (h->type != STT_GNU_IFUNC
7628 || !h->def_regular
7629 || !h->ref_regular
7630 || !h->forced_local
7631 || h->root.type != bfd_link_hash_defined)
7632 abort ();
7633
7634 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7635 }
7636
7637 /* Find any dynamic relocs that apply to read-only sections. */
7638
7639 static bfd_boolean
7640 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7641 {
7642 struct elf_aarch64_link_hash_entry * eh;
7643 struct elf_dyn_relocs * p;
7644
7645 eh = (struct elf_aarch64_link_hash_entry *) h;
7646 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7647 {
7648 asection *s = p->sec;
7649
7650 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7651 {
7652 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7653
7654 info->flags |= DF_TEXTREL;
7655
7656 /* Not an error, just cut short the traversal. */
7657 return FALSE;
7658 }
7659 }
7660 return TRUE;
7661 }
7662
7663 /* This is the most important function of all. Innocuously named
7664 though! */
7665 static bfd_boolean
7666 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7667 struct bfd_link_info *info)
7668 {
7669 struct elf_aarch64_link_hash_table *htab;
7670 bfd *dynobj;
7671 asection *s;
7672 bfd_boolean relocs;
7673 bfd *ibfd;
7674
7675 htab = elf_aarch64_hash_table ((info));
7676 dynobj = htab->root.dynobj;
7677
7678 BFD_ASSERT (dynobj != NULL);
7679
7680 if (htab->root.dynamic_sections_created)
7681 {
7682 if (info->executable)
7683 {
7684 s = bfd_get_linker_section (dynobj, ".interp");
7685 if (s == NULL)
7686 abort ();
7687 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7688 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7689 }
7690 }
7691
7692 /* Set up .got offsets for local syms, and space for local dynamic
7693 relocs. */
7694 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7695 {
7696 struct elf_aarch64_local_symbol *locals = NULL;
7697 Elf_Internal_Shdr *symtab_hdr;
7698 asection *srel;
7699 unsigned int i;
7700
7701 if (!is_aarch64_elf (ibfd))
7702 continue;
7703
7704 for (s = ibfd->sections; s != NULL; s = s->next)
7705 {
7706 struct elf_dyn_relocs *p;
7707
7708 for (p = (struct elf_dyn_relocs *)
7709 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7710 {
7711 if (!bfd_is_abs_section (p->sec)
7712 && bfd_is_abs_section (p->sec->output_section))
7713 {
7714 /* Input section has been discarded, either because
7715 it is a copy of a linkonce section or due to
7716 linker script /DISCARD/, so we'll be discarding
7717 the relocs too. */
7718 }
7719 else if (p->count != 0)
7720 {
7721 srel = elf_section_data (p->sec)->sreloc;
7722 srel->size += p->count * RELOC_SIZE (htab);
7723 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7724 info->flags |= DF_TEXTREL;
7725 }
7726 }
7727 }
7728
7729 locals = elf_aarch64_locals (ibfd);
7730 if (!locals)
7731 continue;
7732
7733 symtab_hdr = &elf_symtab_hdr (ibfd);
7734 srel = htab->root.srelgot;
7735 for (i = 0; i < symtab_hdr->sh_info; i++)
7736 {
7737 locals[i].got_offset = (bfd_vma) - 1;
7738 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7739 if (locals[i].got_refcount > 0)
7740 {
7741 unsigned got_type = locals[i].got_type;
7742 if (got_type & GOT_TLSDESC_GD)
7743 {
7744 locals[i].tlsdesc_got_jump_table_offset =
7745 (htab->root.sgotplt->size
7746 - aarch64_compute_jump_table_size (htab));
7747 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7748 locals[i].got_offset = (bfd_vma) - 2;
7749 }
7750
7751 if (got_type & GOT_TLS_GD)
7752 {
7753 locals[i].got_offset = htab->root.sgot->size;
7754 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7755 }
7756
7757 if (got_type & GOT_TLS_IE
7758 || got_type & GOT_NORMAL)
7759 {
7760 locals[i].got_offset = htab->root.sgot->size;
7761 htab->root.sgot->size += GOT_ENTRY_SIZE;
7762 }
7763
7764 if (got_type == GOT_UNKNOWN)
7765 {
7766 }
7767
7768 if (info->shared)
7769 {
7770 if (got_type & GOT_TLSDESC_GD)
7771 {
7772 htab->root.srelplt->size += RELOC_SIZE (htab);
7773 /* Note RELOC_COUNT not incremented here! */
7774 htab->tlsdesc_plt = (bfd_vma) - 1;
7775 }
7776
7777 if (got_type & GOT_TLS_GD)
7778 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7779
7780 if (got_type & GOT_TLS_IE
7781 || got_type & GOT_NORMAL)
7782 htab->root.srelgot->size += RELOC_SIZE (htab);
7783 }
7784 }
7785 else
7786 {
7787 locals[i].got_refcount = (bfd_vma) - 1;
7788 }
7789 }
7790 }
7791
7792
7793 /* Allocate global sym .plt and .got entries, and space for global
7794 sym dynamic relocs. */
7795 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7796 info);
7797
7798 /* Allocate global ifunc sym .plt and .got entries, and space for global
7799 ifunc sym dynamic relocs. */
7800 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7801 info);
7802
7803 /* Allocate .plt and .got entries, and space for local symbols. */
7804 htab_traverse (htab->loc_hash_table,
7805 elfNN_aarch64_allocate_local_dynrelocs,
7806 info);
7807
7808 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7809 htab_traverse (htab->loc_hash_table,
7810 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7811 info);
7812
7813 /* For every jump slot reserved in the sgotplt, reloc_count is
7814 incremented. However, when we reserve space for TLS descriptors,
7815 it's not incremented, so in order to compute the space reserved
7816 for them, it suffices to multiply the reloc count by the jump
7817 slot size. */
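/* Illustrative example only: if four jump slots have been reserved,
reloc_count is 4 and the jump table occupies 4 * GOT_ENTRY_SIZE bytes of
.got.plt; TLSDESC double slots reserved earlier do not contribute to
this product. */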
7818
7819 if (htab->root.srelplt)
7820 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7821
7822 if (htab->tlsdesc_plt)
7823 {
7824 if (htab->root.splt->size == 0)
7825 htab->root.splt->size += PLT_ENTRY_SIZE;
7826
7827 htab->tlsdesc_plt = htab->root.splt->size;
7828 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7829
7830 /* If we're not using lazy TLS relocations, don't generate the
7831 GOT entry required. */
7832 if (!(info->flags & DF_BIND_NOW))
7833 {
7834 htab->dt_tlsdesc_got = htab->root.sgot->size;
7835 htab->root.sgot->size += GOT_ENTRY_SIZE;
7836 }
7837 }
7838
7839 /* Initialize mapping symbol information for later use in distinguishing
7840 between code and data while scanning for errata. */
7841 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7842 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7843 {
7844 if (!is_aarch64_elf (ibfd))
7845 continue;
7846 bfd_elfNN_aarch64_init_maps (ibfd);
7847 }
7848
7849 /* We now have determined the sizes of the various dynamic sections.
7850 Allocate memory for them. */
7851 relocs = FALSE;
7852 for (s = dynobj->sections; s != NULL; s = s->next)
7853 {
7854 if ((s->flags & SEC_LINKER_CREATED) == 0)
7855 continue;
7856
7857 if (s == htab->root.splt
7858 || s == htab->root.sgot
7859 || s == htab->root.sgotplt
7860 || s == htab->root.iplt
7861 || s == htab->root.igotplt || s == htab->sdynbss)
7862 {
7863 /* Strip this section if we don't need it; see the
7864 comment below. */
7865 }
7866 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7867 {
7868 if (s->size != 0 && s != htab->root.srelplt)
7869 relocs = TRUE;
7870
7871 /* We use the reloc_count field as a counter if we need
7872 to copy relocs into the output file. */
7873 if (s != htab->root.srelplt)
7874 s->reloc_count = 0;
7875 }
7876 else
7877 {
7878 /* It's not one of our sections, so don't allocate space. */
7879 continue;
7880 }
7881
7882 if (s->size == 0)
7883 {
7884 /* If we don't need this section, strip it from the
7885 output file. This is mostly to handle .rela.bss and
7886 .rela.plt. We must create both sections in
7887 create_dynamic_sections, because they must be created
7888 before the linker maps input sections to output
7889 sections. The linker does that before
7890 adjust_dynamic_symbol is called, and it is that
7891 function which decides whether anything needs to go
7892 into these sections. */
7893
7894 s->flags |= SEC_EXCLUDE;
7895 continue;
7896 }
7897
7898 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7899 continue;
7900
7901 /* Allocate memory for the section contents. We use bfd_zalloc
7902 here in case unused entries are not reclaimed before the
7903 section's contents are written out. This should not happen,
7904 but this way if it does, we get an R_AARCH64_NONE reloc instead
7905 of garbage. */
7906 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7907 if (s->contents == NULL)
7908 return FALSE;
7909 }
7910
7911 if (htab->root.dynamic_sections_created)
7912 {
7913 /* Add some entries to the .dynamic section. We fill in the
7914 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7915 must add the entries now so that we get the correct size for
7916 the .dynamic section. The DT_DEBUG entry is filled in by the
7917 dynamic linker and used by the debugger. */
7918 #define add_dynamic_entry(TAG, VAL) \
7919 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7920
7921 if (info->executable)
7922 {
7923 if (!add_dynamic_entry (DT_DEBUG, 0))
7924 return FALSE;
7925 }
7926
7927 if (htab->root.splt->size != 0)
7928 {
7929 if (!add_dynamic_entry (DT_PLTGOT, 0)
7930 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7931 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7932 || !add_dynamic_entry (DT_JMPREL, 0))
7933 return FALSE;
7934
7935 if (htab->tlsdesc_plt
7936 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7937 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7938 return FALSE;
7939 }
7940
7941 if (relocs)
7942 {
7943 if (!add_dynamic_entry (DT_RELA, 0)
7944 || !add_dynamic_entry (DT_RELASZ, 0)
7945 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7946 return FALSE;
7947
7948 /* If any dynamic relocs apply to a read-only section,
7949 then we need a DT_TEXTREL entry. */
7950 if ((info->flags & DF_TEXTREL) == 0)
7951 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7952 info);
7953
7954 if ((info->flags & DF_TEXTREL) != 0)
7955 {
7956 if (!add_dynamic_entry (DT_TEXTREL, 0))
7957 return FALSE;
7958 }
7959 }
7960 }
7961 #undef add_dynamic_entry
7962
7963 return TRUE;
7964 }
7965
7966 static inline void
7967 elf_aarch64_update_plt_entry (bfd *output_bfd,
7968 bfd_reloc_code_real_type r_type,
7969 bfd_byte *plt_entry, bfd_vma value)
7970 {
7971 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7972
7973 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7974 }
7975
7976 static void
7977 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7978 struct elf_aarch64_link_hash_table
7979 *htab, bfd *output_bfd,
7980 struct bfd_link_info *info)
7981 {
7982 bfd_byte *plt_entry;
7983 bfd_vma plt_index;
7984 bfd_vma got_offset;
7985 bfd_vma gotplt_entry_address;
7986 bfd_vma plt_entry_address;
7987 Elf_Internal_Rela rela;
7988 bfd_byte *loc;
7989 asection *plt, *gotplt, *relplt;
7990
7991 /* When building a static executable, use .iplt, .igot.plt and
7992 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7993 if (htab->root.splt != NULL)
7994 {
7995 plt = htab->root.splt;
7996 gotplt = htab->root.sgotplt;
7997 relplt = htab->root.srelplt;
7998 }
7999 else
8000 {
8001 plt = htab->root.iplt;
8002 gotplt = htab->root.igotplt;
8003 relplt = htab->root.irelplt;
8004 }
8005
8006 /* Get the index in the procedure linkage table which
8007 corresponds to this symbol. This is the index of this symbol
8008 in all the symbols for which we are making plt entries. The
8009 first entry in the procedure linkage table is reserved.
8010
8011 Get the offset into the .got table of the entry that
8012 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8013 bytes. The first three are reserved for the dynamic linker.
8014
8015 For static executables, we don't reserve anything. */
8016
8017 if (plt == htab->root.splt)
8018 {
8019 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8020 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8021 }
8022 else
8023 {
8024 plt_index = h->plt.offset / htab->plt_entry_size;
8025 got_offset = plt_index * GOT_ENTRY_SIZE;
8026 }
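/* Worked example (assuming ELF64, GOT_ENTRY_SIZE == 8): in the normal
.plt case the first PLTn entry has plt_index 0, so its .got.plt slot is
at offset (0 + 3) * 8 == 24, just past the three reserved slots; in the
static .iplt case nothing is reserved, so entry 0 uses offset 0. */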
8027
8028 plt_entry = plt->contents + h->plt.offset;
8029 plt_entry_address = plt->output_section->vma
8030 + plt->output_offset + h->plt.offset;
8031 gotplt_entry_address = gotplt->output_section->vma +
8032 gotplt->output_offset + got_offset;
8033
8034 /* Copy in the boiler-plate for the PLTn entry. */
8035 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
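/* The template copied above is expected to be the four-instruction
sequence below, with the GOT offsets still zero until patched by the
calls that follow:
adrp x16, PLT_GOT + n * 8
ldr x17, [x16, #:lo12:PLT_GOT + n * 8]
add x16, x16, #:lo12:PLT_GOT + n * 8
br x17
PG () yields the 4 KiB page base of an address and PG_OFFSET () its low
12 bits, matching the ADRP/LO12 split used here. */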
8036
8037 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8038 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8039 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8040 plt_entry,
8041 PG (gotplt_entry_address) -
8042 PG (plt_entry_address));
8043
8044 /* Fill in the lo12 bits for the load from the pltgot. */
8045 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8046 plt_entry + 4,
8047 PG_OFFSET (gotplt_entry_address));
8048
8049 /* Fill in the lo12 bits for the add from the pltgot entry. */
8050 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8051 plt_entry + 8,
8052 PG_OFFSET (gotplt_entry_address));
8053
8054 /* All the GOTPLT entries are essentially initialized to PLT0. */
8055 bfd_put_NN (output_bfd,
8056 plt->output_section->vma + plt->output_offset,
8057 gotplt->contents + got_offset);
8058
8059 rela.r_offset = gotplt_entry_address;
8060
8061 if (h->dynindx == -1
8062 || ((info->executable
8063 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8064 && h->def_regular
8065 && h->type == STT_GNU_IFUNC))
8066 {
8067 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8068 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8069 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8070 rela.r_addend = (h->root.u.def.value
8071 + h->root.u.def.section->output_section->vma
8072 + h->root.u.def.section->output_offset);
8073 }
8074 else
8075 {
8076 /* Fill in the entry in the .rela.plt section. */
8077 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8078 rela.r_addend = 0;
8079 }
8080
8081 /* Compute the relocation entry to use based on the PLT index and do
8082 not adjust reloc_count. The reloc_count has already been adjusted
8083 to account for this entry. */
8084 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8085 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8086 }
8087
8088 /* Size sections even though they're not dynamic. We use this hook to
8089 set up _TLS_MODULE_BASE_, if needed. */
8090
8091 static bfd_boolean
8092 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8093 struct bfd_link_info *info)
8094 {
8095 asection *tls_sec;
8096
8097 if (info->relocatable)
8098 return TRUE;
8099
8100 tls_sec = elf_hash_table (info)->tls_sec;
8101
8102 if (tls_sec)
8103 {
8104 struct elf_link_hash_entry *tlsbase;
8105
8106 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8107 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8108
8109 if (tlsbase)
8110 {
8111 struct bfd_link_hash_entry *h = NULL;
8112 const struct elf_backend_data *bed =
8113 get_elf_backend_data (output_bfd);
8114
8115 if (!(_bfd_generic_link_add_one_symbol
8116 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8117 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8118 return FALSE;
8119
8120 tlsbase->type = STT_TLS;
8121 tlsbase = (struct elf_link_hash_entry *) h;
8122 tlsbase->def_regular = 1;
8123 tlsbase->other = STV_HIDDEN;
8124 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8125 }
8126 }
8127
8128 return TRUE;
8129 }
8130
8131 /* Finish up dynamic symbol handling. We set the contents of various
8132 dynamic sections here. */
8133 static bfd_boolean
8134 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8135 struct bfd_link_info *info,
8136 struct elf_link_hash_entry *h,
8137 Elf_Internal_Sym *sym)
8138 {
8139 struct elf_aarch64_link_hash_table *htab;
8140 htab = elf_aarch64_hash_table (info);
8141
8142 if (h->plt.offset != (bfd_vma) - 1)
8143 {
8144 asection *plt, *gotplt, *relplt;
8145
8146 /* This symbol has an entry in the procedure linkage table. Set
8147 it up. */
8148
8149 /* When building a static executable, use .iplt, .igot.plt and
8150 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8151 if (htab->root.splt != NULL)
8152 {
8153 plt = htab->root.splt;
8154 gotplt = htab->root.sgotplt;
8155 relplt = htab->root.srelplt;
8156 }
8157 else
8158 {
8159 plt = htab->root.iplt;
8160 gotplt = htab->root.igotplt;
8161 relplt = htab->root.irelplt;
8162 }
8163
8164 /* This symbol has an entry in the procedure linkage table. Set
8165 it up. */
8166 if ((h->dynindx == -1
8167 && !((h->forced_local || info->executable)
8168 && h->def_regular
8169 && h->type == STT_GNU_IFUNC))
8170 || plt == NULL
8171 || gotplt == NULL
8172 || relplt == NULL)
8173 abort ();
8174
8175 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8176 if (!h->def_regular)
8177 {
8178 /* Mark the symbol as undefined, rather than as defined in
8179 the .plt section. */
8180 sym->st_shndx = SHN_UNDEF;
8181 /* If the symbol is weak we need to clear the value.
8182 Otherwise, the PLT entry would provide a definition for
8183 the symbol even if the symbol wasn't defined anywhere,
8184 and so the symbol would never be NULL. Leave the value if
8185 there were any relocations where pointer equality matters
8186 (this is a clue for the dynamic linker, to make function
8187 pointer comparisons work between an application and shared
8188 library). */
8189 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8190 sym->st_value = 0;
8191 }
8192 }
8193
8194 if (h->got.offset != (bfd_vma) - 1
8195 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8196 {
8197 Elf_Internal_Rela rela;
8198 bfd_byte *loc;
8199
8200 /* This symbol has an entry in the global offset table. Set it
8201 up. */
8202 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8203 abort ();
8204
8205 rela.r_offset = (htab->root.sgot->output_section->vma
8206 + htab->root.sgot->output_offset
8207 + (h->got.offset & ~(bfd_vma) 1));
8208
8209 if (h->def_regular
8210 && h->type == STT_GNU_IFUNC)
8211 {
8212 if (info->shared)
8213 {
8214 /* Generate R_AARCH64_GLOB_DAT. */
8215 goto do_glob_dat;
8216 }
8217 else
8218 {
8219 asection *plt;
8220
8221 if (!h->pointer_equality_needed)
8222 abort ();
8223
8224 /* For a non-shared object we can't use .got.plt, which
8225 contains the real function address, if we need pointer
8226 equality. We load the GOT entry with the PLT entry address. */
8227 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8228 bfd_put_NN (output_bfd, (plt->output_section->vma
8229 + plt->output_offset
8230 + h->plt.offset),
8231 htab->root.sgot->contents
8232 + (h->got.offset & ~(bfd_vma) 1));
8233 return TRUE;
8234 }
8235 }
8236 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8237 {
8238 if (!h->def_regular)
8239 return FALSE;
8240
8241 BFD_ASSERT ((h->got.offset & 1) != 0);
8242 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8243 rela.r_addend = (h->root.u.def.value
8244 + h->root.u.def.section->output_section->vma
8245 + h->root.u.def.section->output_offset);
8246 }
8247 else
8248 {
8249 do_glob_dat:
8250 BFD_ASSERT ((h->got.offset & 1) == 0);
8251 bfd_put_NN (output_bfd, (bfd_vma) 0,
8252 htab->root.sgot->contents + h->got.offset);
8253 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8254 rela.r_addend = 0;
8255 }
8256
8257 loc = htab->root.srelgot->contents;
8258 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8259 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8260 }
8261
8262 if (h->needs_copy)
8263 {
8264 Elf_Internal_Rela rela;
8265 bfd_byte *loc;
8266
8267 /* This symbol needs a copy reloc. Set it up. */
8268
8269 if (h->dynindx == -1
8270 || (h->root.type != bfd_link_hash_defined
8271 && h->root.type != bfd_link_hash_defweak)
8272 || htab->srelbss == NULL)
8273 abort ();
8274
8275 rela.r_offset = (h->root.u.def.value
8276 + h->root.u.def.section->output_section->vma
8277 + h->root.u.def.section->output_offset);
8278 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8279 rela.r_addend = 0;
8280 loc = htab->srelbss->contents;
8281 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8282 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8283 }
8284
8285 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8286 be NULL for local symbols. */
8287 if (sym != NULL
8288 && (h == elf_hash_table (info)->hdynamic
8289 || h == elf_hash_table (info)->hgot))
8290 sym->st_shndx = SHN_ABS;
8291
8292 return TRUE;
8293 }
8294
8295 /* Finish up local dynamic symbol handling. We set the contents of
8296 various dynamic sections here. */
8297
8298 static bfd_boolean
8299 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8300 {
8301 struct elf_link_hash_entry *h
8302 = (struct elf_link_hash_entry *) *slot;
8303 struct bfd_link_info *info
8304 = (struct bfd_link_info *) inf;
8305
8306 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8307 info, h, NULL);
8308 }
8309
8310 static void
8311 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8312 struct elf_aarch64_link_hash_table
8313 *htab)
8314 {
8315 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
8316 small and large PLTs and at the moment just generates
8317 the small PLT. */
8318
8319 /* PLT0 of the small PLT looks like this in ELF64 -
8320 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8321 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8322 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8323 // symbol resolver
8324 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8325 // GOTPLT entry for this.
8326 br x17
8327 PLT0 will be slightly different in ELF32 due to the different GOT entry
8328 size.
8329 */
8330 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8331 bfd_vma plt_base;
8332
8333
8334 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8335 PLT_ENTRY_SIZE);
8336 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8337 PLT_ENTRY_SIZE;
8338
8339 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8340 + htab->root.sgotplt->output_offset
8341 + GOT_ENTRY_SIZE * 2);
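/* Conventionally GOT[1] and GOT[2] of .got.plt are filled in by the
dynamic linker with its link map and the address of its lazy resolver;
PLT0 below is wired to load and branch to GOT[2]. */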
8342
8343 plt_base = htab->root.splt->output_section->vma +
8344 htab->root.splt->output_offset;
8345
8346 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8347 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8348 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8349 htab->root.splt->contents + 4,
8350 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8351
8352 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8353 htab->root.splt->contents + 8,
8354 PG_OFFSET (plt_got_2nd_ent));
8355
8356 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8357 htab->root.splt->contents + 12,
8358 PG_OFFSET (plt_got_2nd_ent));
8359 }
8360
8361 static bfd_boolean
8362 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8363 struct bfd_link_info *info)
8364 {
8365 struct elf_aarch64_link_hash_table *htab;
8366 bfd *dynobj;
8367 asection *sdyn;
8368
8369 htab = elf_aarch64_hash_table (info);
8370 dynobj = htab->root.dynobj;
8371 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8372
8373 if (htab->root.dynamic_sections_created)
8374 {
8375 ElfNN_External_Dyn *dyncon, *dynconend;
8376
8377 if (sdyn == NULL || htab->root.sgot == NULL)
8378 abort ();
8379
8380 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8381 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8382 for (; dyncon < dynconend; dyncon++)
8383 {
8384 Elf_Internal_Dyn dyn;
8385 asection *s;
8386
8387 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8388
8389 switch (dyn.d_tag)
8390 {
8391 default:
8392 continue;
8393
8394 case DT_PLTGOT:
8395 s = htab->root.sgotplt;
8396 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8397 break;
8398
8399 case DT_JMPREL:
8400 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8401 break;
8402
8403 case DT_PLTRELSZ:
8404 s = htab->root.srelplt;
8405 dyn.d_un.d_val = s->size;
8406 break;
8407
8408 case DT_RELASZ:
8409 /* The procedure linkage table relocs (DT_JMPREL) should
8410 not be included in the overall relocs (DT_RELA).
8411 Therefore, we override the DT_RELASZ entry here to
8412 make it not include the JMPREL relocs. Since the
8413 linker script arranges for .rela.plt to follow all
8414 other relocation sections, we don't have to worry
8415 about changing the DT_RELA entry. */
8416 if (htab->root.srelplt != NULL)
8417 {
8418 s = htab->root.srelplt;
8419 dyn.d_un.d_val -= s->size;
8420 }
8421 break;
8422
8423 case DT_TLSDESC_PLT:
8424 s = htab->root.splt;
8425 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8426 + htab->tlsdesc_plt;
8427 break;
8428
8429 case DT_TLSDESC_GOT:
8430 s = htab->root.sgot;
8431 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8432 + htab->dt_tlsdesc_got;
8433 break;
8434 }
8435
8436 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8437 }
8438
8439 }
8440
8441 /* Fill in the special first entry in the procedure linkage table. */
8442 if (htab->root.splt && htab->root.splt->size > 0)
8443 {
8444 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8445
8446 elf_section_data (htab->root.splt->output_section)->
8447 this_hdr.sh_entsize = htab->plt_entry_size;
8448
8449
8450 if (htab->tlsdesc_plt)
8451 {
8452 bfd_put_NN (output_bfd, (bfd_vma) 0,
8453 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8454
8455 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8456 elfNN_aarch64_tlsdesc_small_plt_entry,
8457 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
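/* Sketch of the stub patched below, as assumed from the relocation
offsets used: it saves x2/x3, loads into x2 the resolver address that
the dynamic linker stores at DT_TLSDESC_GOT, computes the .got.plt base
in x3, and branches to x2; the adrp/ldr/add pairs patched at offsets 4,
8, 12 and 16 supply those addresses. */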
8458
8459 {
8460 bfd_vma adrp1_addr =
8461 htab->root.splt->output_section->vma
8462 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8463
8464 bfd_vma adrp2_addr = adrp1_addr + 4;
8465
8466 bfd_vma got_addr =
8467 htab->root.sgot->output_section->vma
8468 + htab->root.sgot->output_offset;
8469
8470 bfd_vma pltgot_addr =
8471 htab->root.sgotplt->output_section->vma
8472 + htab->root.sgotplt->output_offset;
8473
8474 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8475
8476 bfd_byte *plt_entry =
8477 htab->root.splt->contents + htab->tlsdesc_plt;
8478
8479 /* adrp x2, DT_TLSDESC_GOT */
8480 elf_aarch64_update_plt_entry (output_bfd,
8481 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8482 plt_entry + 4,
8483 (PG (dt_tlsdesc_got)
8484 - PG (adrp1_addr)));
8485
8486 /* adrp x3, 0 */
8487 elf_aarch64_update_plt_entry (output_bfd,
8488 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8489 plt_entry + 8,
8490 (PG (pltgot_addr)
8491 - PG (adrp2_addr)));
8492
8493 /* ldr x2, [x2, #0] */
8494 elf_aarch64_update_plt_entry (output_bfd,
8495 BFD_RELOC_AARCH64_LDSTNN_LO12,
8496 plt_entry + 12,
8497 PG_OFFSET (dt_tlsdesc_got));
8498
8499 /* add x3, x3, 0 */
8500 elf_aarch64_update_plt_entry (output_bfd,
8501 BFD_RELOC_AARCH64_ADD_LO12,
8502 plt_entry + 16,
8503 PG_OFFSET (pltgot_addr));
8504 }
8505 }
8506 }
8507
8508 if (htab->root.sgotplt)
8509 {
8510 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8511 {
8512 (*_bfd_error_handler)
8513 (_("discarded output section: `%A'"), htab->root.sgotplt);
8514 return FALSE;
8515 }
8516
8517 /* Fill in the first three entries in the global offset table. */
8518 if (htab->root.sgotplt->size > 0)
8519 {
8520 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8521
8522 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8523 bfd_put_NN (output_bfd,
8524 (bfd_vma) 0,
8525 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8526 bfd_put_NN (output_bfd,
8527 (bfd_vma) 0,
8528 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8529 }
8530
8531 if (htab->root.sgot)
8532 {
8533 if (htab->root.sgot->size > 0)
8534 {
8535 bfd_vma addr =
8536 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8537 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8538 }
8539 }
8540
8541 elf_section_data (htab->root.sgotplt->output_section)->
8542 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8543 }
8544
8545 if (htab->root.sgot && htab->root.sgot->size > 0)
8546 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8547 = GOT_ENTRY_SIZE;
8548
8549 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8550 htab_traverse (htab->loc_hash_table,
8551 elfNN_aarch64_finish_local_dynamic_symbol,
8552 info);
8553
8554 return TRUE;
8555 }
8556
8557 /* Return address for Ith PLT stub in section PLT, for relocation REL
8558 or (bfd_vma) -1 if it should not be included. */
8559
8560 static bfd_vma
8561 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8562 const arelent *rel ATTRIBUTE_UNUSED)
8563 {
8564 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8565 }
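/* For instance, with the 32-byte PLT header and 16-byte small entries
this backend presently uses, stub 0 resolves to plt->vma + 32 and stub 1
to plt->vma + 48. */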
8566
8567
8568 /* We use this so we can override certain functions
8569 (though currently we don't). */
8570
8571 const struct elf_size_info elfNN_aarch64_size_info =
8572 {
8573 sizeof (ElfNN_External_Ehdr),
8574 sizeof (ElfNN_External_Phdr),
8575 sizeof (ElfNN_External_Shdr),
8576 sizeof (ElfNN_External_Rel),
8577 sizeof (ElfNN_External_Rela),
8578 sizeof (ElfNN_External_Sym),
8579 sizeof (ElfNN_External_Dyn),
8580 sizeof (Elf_External_Note),
8581 4, /* Hash table entry size. */
8582 1, /* Internal relocs per external relocs. */
8583 ARCH_SIZE, /* Arch size. */
8584 LOG_FILE_ALIGN, /* Log_file_align. */
8585 ELFCLASSNN, EV_CURRENT,
8586 bfd_elfNN_write_out_phdrs,
8587 bfd_elfNN_write_shdrs_and_ehdr,
8588 bfd_elfNN_checksum_contents,
8589 bfd_elfNN_write_relocs,
8590 bfd_elfNN_swap_symbol_in,
8591 bfd_elfNN_swap_symbol_out,
8592 bfd_elfNN_slurp_reloc_table,
8593 bfd_elfNN_slurp_symbol_table,
8594 bfd_elfNN_swap_dyn_in,
8595 bfd_elfNN_swap_dyn_out,
8596 bfd_elfNN_swap_reloc_in,
8597 bfd_elfNN_swap_reloc_out,
8598 bfd_elfNN_swap_reloca_in,
8599 bfd_elfNN_swap_reloca_out
8600 };
8601
8602 #define ELF_ARCH bfd_arch_aarch64
8603 #define ELF_MACHINE_CODE EM_AARCH64
8604 #define ELF_MAXPAGESIZE 0x10000
8605 #define ELF_MINPAGESIZE 0x1000
8606 #define ELF_COMMONPAGESIZE 0x1000
8607
8608 #define bfd_elfNN_close_and_cleanup \
8609 elfNN_aarch64_close_and_cleanup
8610
8611 #define bfd_elfNN_bfd_free_cached_info \
8612 elfNN_aarch64_bfd_free_cached_info
8613
8614 #define bfd_elfNN_bfd_is_target_special_symbol \
8615 elfNN_aarch64_is_target_special_symbol
8616
8617 #define bfd_elfNN_bfd_link_hash_table_create \
8618 elfNN_aarch64_link_hash_table_create
8619
8620 #define bfd_elfNN_bfd_merge_private_bfd_data \
8621 elfNN_aarch64_merge_private_bfd_data
8622
8623 #define bfd_elfNN_bfd_print_private_bfd_data \
8624 elfNN_aarch64_print_private_bfd_data
8625
8626 #define bfd_elfNN_bfd_reloc_type_lookup \
8627 elfNN_aarch64_reloc_type_lookup
8628
8629 #define bfd_elfNN_bfd_reloc_name_lookup \
8630 elfNN_aarch64_reloc_name_lookup
8631
8632 #define bfd_elfNN_bfd_set_private_flags \
8633 elfNN_aarch64_set_private_flags
8634
8635 #define bfd_elfNN_find_inliner_info \
8636 elfNN_aarch64_find_inliner_info
8637
8638 #define bfd_elfNN_find_nearest_line \
8639 elfNN_aarch64_find_nearest_line
8640
8641 #define bfd_elfNN_mkobject \
8642 elfNN_aarch64_mkobject
8643
8644 #define bfd_elfNN_new_section_hook \
8645 elfNN_aarch64_new_section_hook
8646
8647 #define elf_backend_adjust_dynamic_symbol \
8648 elfNN_aarch64_adjust_dynamic_symbol
8649
8650 #define elf_backend_always_size_sections \
8651 elfNN_aarch64_always_size_sections
8652
8653 #define elf_backend_check_relocs \
8654 elfNN_aarch64_check_relocs
8655
8656 #define elf_backend_copy_indirect_symbol \
8657 elfNN_aarch64_copy_indirect_symbol
8658
8659 /* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
8660 to them in our hash. */
8661 #define elf_backend_create_dynamic_sections \
8662 elfNN_aarch64_create_dynamic_sections
8663
8664 #define elf_backend_init_index_section \
8665 _bfd_elf_init_2_index_sections
8666
8667 #define elf_backend_finish_dynamic_sections \
8668 elfNN_aarch64_finish_dynamic_sections
8669
8670 #define elf_backend_finish_dynamic_symbol \
8671 elfNN_aarch64_finish_dynamic_symbol
8672
8673 #define elf_backend_gc_sweep_hook \
8674 elfNN_aarch64_gc_sweep_hook
8675
8676 #define elf_backend_object_p \
8677 elfNN_aarch64_object_p
8678
8679 #define elf_backend_output_arch_local_syms \
8680 elfNN_aarch64_output_arch_local_syms
8681
8682 #define elf_backend_plt_sym_val \
8683 elfNN_aarch64_plt_sym_val
8684
8685 #define elf_backend_post_process_headers \
8686 elfNN_aarch64_post_process_headers
8687
8688 #define elf_backend_relocate_section \
8689 elfNN_aarch64_relocate_section
8690
8691 #define elf_backend_reloc_type_class \
8692 elfNN_aarch64_reloc_type_class
8693
8694 #define elf_backend_section_from_shdr \
8695 elfNN_aarch64_section_from_shdr
8696
8697 #define elf_backend_size_dynamic_sections \
8698 elfNN_aarch64_size_dynamic_sections
8699
8700 #define elf_backend_size_info \
8701 elfNN_aarch64_size_info
8702
8703 #define elf_backend_write_section \
8704 elfNN_aarch64_write_section
8705
8706 #define elf_backend_can_refcount 1
8707 #define elf_backend_can_gc_sections 1
8708 #define elf_backend_plt_readonly 1
8709 #define elf_backend_want_got_plt 1
8710 #define elf_backend_want_plt_sym 0
8711 #define elf_backend_may_use_rel_p 0
8712 #define elf_backend_may_use_rela_p 1
8713 #define elf_backend_default_use_rela_p 1
8714 #define elf_backend_rela_normal 1
8715 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8716 #define elf_backend_default_execstack 0
8717 #define elf_backend_extern_protected_data 1
8718
8719 #undef elf_backend_obj_attrs_section
8720 #define elf_backend_obj_attrs_section ".ARM.attributes"
8721
8722 #include "elfNN-target.h"