/* AArch64-specific support for ELF.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "elfxx-aarch64.h"
#include <stdarg.h>
#include <string.h>

#define MASK(n) ((1u << (n)) - 1)

/* Sign-extend VALUE, which has the indicated number of BITS.  */

bfd_signed_vma
_bfd_aarch64_sign_extend (bfd_vma value, int bits)
{
  if (value & ((bfd_vma) 1 << (bits - 1)))
    /* VALUE is negative.  */
    value |= ((bfd_vma) - 1) << bits;

  return value;
}
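
/* For instance, a 21-bit value with its top bit set widens to a negative
   bfd_signed_vma: _bfd_aarch64_sign_extend (0x1ffffc, 21) yields -4
   (illustrative operands rather than values taken from any particular
   relocation).  */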

/* Decode the IMM field of ADRP.  */

uint32_t
_bfd_aarch64_decode_adrp_imm (uint32_t insn)
{
  return (((insn >> 5) & MASK (19)) << 2) | ((insn >> 29) & MASK (2));
}
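
/* In the ADRP/ADR encoding the low two immediate bits (immlo) sit in
   bits [30:29] of the instruction and the high nineteen bits (immhi) in
   bits [23:5]; the decoder above reassembles the 21-bit immediate as
   immhi:immlo.  */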

/* Reencode the imm field of add immediate.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Reencode the IMM field of ADR.  */

uint32_t
_bfd_aarch64_reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
    | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
}
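
/* As a minimal sketch of the reverse direction, assuming INSN already
   holds an ADRP or ADR instruction:

     insn = _bfd_aarch64_reencode_adr_imm (insn, imm & MASK (21));

   places bits [1:0] of the immediate in insn[30:29] and bits [20:2] in
   insn[23:5], i.e. the inverse of _bfd_aarch64_decode_adrp_imm.  */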

/* Reencode the imm field of ld/st pos immediate.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~MASK (26)) | (ofs & MASK (26));
}

/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Reencode the 19-bit offset of load literal.  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
}

/* Reencode the imm field of move wide.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
}

/* Reencode mov[zn] to movz.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}

/* Reencode mov[zn] to movn.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}
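
/* In the MOVN/MOVZ/MOVK encoding the opc field occupies bits [30:29]
   (MOVN = 00, MOVZ = 10, MOVK = 11), so toggling bit 30 switches between
   MOVN and MOVZ while leaving sf, hw, imm16 and Rd untouched.  */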

/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
   BITS.  */

static bfd_reloc_status_type
aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
{
  bfd_vma lim;
  if (bits >= sizeof (bfd_vma) * 8)
    return bfd_reloc_ok;
  lim = (bfd_vma) 1 << bits;
  if (value >= lim)
    return bfd_reloc_overflow;
  return bfd_reloc_ok;
}

/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS.  */

static bfd_reloc_status_type
aarch64_signed_overflow (bfd_vma value, unsigned int bits)
{
  bfd_signed_vma svalue = (bfd_signed_vma) value;
  bfd_signed_vma lim;

  if (bits >= sizeof (bfd_vma) * 8)
    return bfd_reloc_ok;
  lim = (bfd_signed_vma) 1 << (bits - 1);
  if (svalue < -lim || svalue >= lim)
    return bfd_reloc_overflow;
  return bfd_reloc_ok;
}
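
/* For a 21-bit field, for example, the signed check accepts values in
   [-0x100000, 0xfffff] and the unsigned check accepts [0, 0x1fffff];
   anything outside that range yields bfd_reloc_overflow.  */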

/* Insert the addend/value into the instruction or data object being
   relocated.  */
bfd_reloc_status_type
_bfd_aarch64_elf_put_addend (bfd *abfd,
			     bfd_byte *address, bfd_reloc_code_real_type r_type,
			     reloc_howto_type *howto, bfd_signed_vma addend)
{
  bfd_reloc_status_type status = bfd_reloc_ok;
  bfd_signed_vma old_addend = addend;
  bfd_vma contents;
  int size;

  size = bfd_get_reloc_size (howto);
  switch (size)
    {
    case 0:
      return status;
    case 2:
      contents = bfd_get_16 (abfd, address);
      break;
    case 4:
      if (howto->src_mask != 0xffffffff)
	/* Must be 32-bit instruction, always little-endian.  */
	contents = bfd_getl32 (address);
      else
	/* Must be 32-bit data (endianness dependent).  */
	contents = bfd_get_32 (abfd, address);
      break;
    case 8:
      contents = bfd_get_64 (abfd, address);
      break;
    default:
      abort ();
    }

  switch (howto->complain_on_overflow)
    {
    case complain_overflow_dont:
      break;
    case complain_overflow_signed:
      status = aarch64_signed_overflow (addend,
					howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_unsigned:
      status = aarch64_unsigned_overflow (addend,
					  howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_bitfield:
    default:
      abort ();
    }

  addend >>= howto->rightshift;

  switch (r_type)
    {
    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      contents = reencode_branch_ofs_26 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      contents = reencode_cond_branch_ofs_19 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      contents = reencode_tst_branch_ofs_14 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      contents = reencode_ld_lit_ofs_19 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      break;

    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
      contents = _bfd_aarch64_reencode_adr_imm (contents, addend);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      /* Corresponds to: add rd, rn, #uimm12 to provide the low order
	 12 bits of the page offset following
	 BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
	 (pc-relative) page base.  */
      contents = reencode_add_imm (contents, addend);
      break;

    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
      if (old_addend & ((1 << howto->rightshift) - 1))
	return bfd_reloc_overflow;
      /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
	 12 bits of the address offset.  */
      contents = reencode_ldst_pos_imm (contents, addend);
      break;

      /* Group relocations to create high bits of a 16, 32, 48 or 64
	 bit signed data or abs address inline.  Will change
	 instruction to MOVN or MOVZ depending on sign of calculated
	 value.  */

    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* NOTE: We can only come here with movz or movn.  */
      if (addend < 0)
	{
	  /* Force use of MOVN.  */
	  addend = ~addend;
	  contents = reencode_movzn_to_movn (contents);
	}
      else
	{
	  /* Force use of MOVZ.  */
	  contents = reencode_movzn_to_movz (contents);
	}
      /* Fall through.  */

      /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
	 data or abs address inline.  */

    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      contents = reencode_movw_imm (contents, addend);
      break;

    default:
      /* Repack simple data.  */
      if (howto->dst_mask & (howto->dst_mask + 1))
	return bfd_reloc_notsupported;

      contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
      break;
    }

  switch (size)
    {
    case 2:
      bfd_put_16 (abfd, contents, address);
      break;
    case 4:
      if (howto->dst_mask != 0xffffffff)
	/* Must be 32-bit instruction, always little-endian.  */
	bfd_putl32 (contents, address);
      else
	/* Must be 32-bit data (endianness dependent).  */
	bfd_put_32 (abfd, contents, address);
      break;
    case 8:
      bfd_put_64 (abfd, contents, address);
      break;
    default:
      abort ();
    }

  return status;
}
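
/* As a worked example (assuming the usual howto for this relocation,
   with a zero right shift and a 12-bit field), applying
   BFD_RELOC_AARCH64_ADD_LO12 with an addend of 0x9a4 places the value
   in bits [21:10] of the ADD immediate, turning 0x91000042
   (add x2, x2, #0x0) into 0x91269042 (add x2, x2, #0x9a4).  */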

bfd_vma
_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type,
				     bfd_vma place, bfd_vma value,
				     bfd_vma addend, bfd_boolean weak_undef_p)
{
  switch (r_type)
    {
    case BFD_RELOC_AARCH64_NONE:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      break;

    case BFD_RELOC_AARCH64_16_PCREL:
    case BFD_RELOC_AARCH64_32_PCREL:
    case BFD_RELOC_AARCH64_64_PCREL:
    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_AARCH64_BRANCH19:
    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TSTBR14:
      if (weak_undef_p)
	value = place;
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_16:
    case BFD_RELOC_AARCH64_32:
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
      value = value + addend;
      break;

    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
      if (weak_undef_p)
	value = PG (place);
      value = PG (value + addend) - PG (place);
      break;

    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
      value = PG (value + addend) - PG (place);
      break;

      /* Caller must make sure addend is the base address of .got section.  */
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
      addend = PG (addend);
      /* Fall through.  */
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
      value = value - addend;
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
      value = PG_OFFSET (value + addend);
      break;

    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
      value = value + addend;
      break;

    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      value = (value + addend) & (bfd_vma) 0xffff0000;
      break;

    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
      /* Mask off the low 12 bits and keep all the other high bits, so
	 that the later generic code can check whether there is an
	 overflow.  */
      value = (value + addend) & ~(bfd_vma) 0xfff;
      break;

    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      value = (value + addend) & (bfd_vma) 0xffff;
      break;

    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      value = (value + addend) & ~(bfd_vma) 0xffffffff;
      value -= place & ~(bfd_vma) 0xffffffff;
      break;

    default:
      break;
    }

  return value;
}
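
/* PG and PG_OFFSET (defined in elfxx-aarch64.h) split an address into
   its 4KiB page base and page offset.  For BFD_RELOC_AARCH64_ADR_HI21_PCREL
   with value + addend == 0x412345 and place == 0x400010, for example, the
   result is PG (0x412345) - PG (0x400010) == 0x12000, the page delta that
   the corresponding howto later right-shifts by 12 before it is packed
   into the ADRP immediate.  */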

/* Support for core dump NOTE sections.  */

bfd_boolean
_bfd_aarch64_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 392:		/* sizeof(struct elf_prstatus) on Linux/arm64.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 272;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
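
/* The offsets used above follow the Linux/arm64 core file layout:
   pr_cursig lives at byte 12 of struct elf_prstatus, pr_pid at byte 32,
   and the 272-byte pr_reg area (struct user_pt_regs: 31 general
   registers, sp, pc and pstate, 8 bytes each) starts at byte 112.  */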

bfd_boolean
_bfd_aarch64_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
    default:
      return FALSE;

    case 136:		/* This is sizeof(struct elf_prpsinfo) on Linux/aarch64.  */
      elf_tdata (abfd)->core->pid = bfd_get_32 (abfd, note->descdata + 24);
      elf_tdata (abfd)->core->program
	= _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
      elf_tdata (abfd)->core->command
	= _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
    }

  /* Note that for some reason, a spurious space is tacked onto the end
     of the args in some implementations (at least one, anyway), so
     strip it off if it exists.  */

  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}

char *
_bfd_aarch64_elf_write_core_note (bfd *abfd, char *buf, int *bufsiz, int note_type,
				  ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[136] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 40, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* With -Wstringop-truncation, GCC 8.1 warns that the copy length
	   of 80 equals the destination size:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643  */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 56, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz, "CORE",
				   note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[392];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 32);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 112, greg, 272);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz, "CORE",
				   note_type, data, sizeof (data));
      }
    }
}
683 }