/* 32-bit ELF support for ARM
   Copyright (C) 1998-2022 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
25 #include "libiberty.h"
29 #include "elf-vxworks.h"
31 #include "elf32-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  Uses compile-time string literal
   concatenation, so NAME must be a string literal.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  REL targets carry the addend in the
   section contents, so their entries are one word smaller than RELA.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  Selects the REL or RELA byte-swapping
   helper to match the target's relocation format.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  Mirror of SWAP_RELOC_IN for the output
   direction.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* This target uses REL relocations, so only the _rel hook is real;
   the generic (RELA) hook is explicitly disabled.  */
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place rounded down to
   a 4-byte (word) boundary.  */
#define Pa(X)  ((X) & 0xfffffffc)
69 static bool elf32_arm_write_section (bfd
*output_bfd
,
70 struct bfd_link_info
*link_info
,
/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
78 static reloc_howto_type elf32_arm_howto_table_1
[] =
81 HOWTO (R_ARM_NONE
, /* type */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
85 false, /* pc_relative */
87 complain_overflow_dont
,/* complain_on_overflow */
88 bfd_elf_generic_reloc
, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
93 false), /* pcrel_offset */
95 HOWTO (R_ARM_PC24
, /* type */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
99 true, /* pc_relative */
101 complain_overflow_signed
,/* complain_on_overflow */
102 bfd_elf_generic_reloc
, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32
, /* type */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
114 false, /* pc_relative */
116 complain_overflow_bitfield
,/* complain_on_overflow */
117 bfd_elf_generic_reloc
, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32
, /* type */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
129 true, /* pc_relative */
131 complain_overflow_bitfield
,/* complain_on_overflow */
132 bfd_elf_generic_reloc
, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0
, /* type */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
144 true, /* pc_relative */
146 complain_overflow_dont
,/* complain_on_overflow */
147 bfd_elf_generic_reloc
, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16
, /* type */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
159 false, /* pc_relative */
161 complain_overflow_bitfield
,/* complain_on_overflow */
162 bfd_elf_generic_reloc
, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12
, /* type */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
174 false, /* pc_relative */
176 complain_overflow_bitfield
,/* complain_on_overflow */
177 bfd_elf_generic_reloc
, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5
, /* type */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
188 false, /* pc_relative */
190 complain_overflow_bitfield
,/* complain_on_overflow */
191 bfd_elf_generic_reloc
, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
199 HOWTO (R_ARM_ABS8
, /* type */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
203 false, /* pc_relative */
205 complain_overflow_bitfield
,/* complain_on_overflow */
206 bfd_elf_generic_reloc
, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32
, /* type */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
217 false, /* pc_relative */
219 complain_overflow_dont
,/* complain_on_overflow */
220 bfd_elf_generic_reloc
, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL
, /* type */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
231 true, /* pc_relative */
233 complain_overflow_signed
,/* complain_on_overflow */
234 bfd_elf_generic_reloc
, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8
, /* type */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
245 true, /* pc_relative */
247 complain_overflow_signed
,/* complain_on_overflow */
248 bfd_elf_generic_reloc
, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ
, /* type */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
259 false, /* pc_relative */
261 complain_overflow_signed
,/* complain_on_overflow */
262 bfd_elf_generic_reloc
, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC
, /* type */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
273 false, /* pc_relative */
275 complain_overflow_bitfield
,/* complain_on_overflow */
276 bfd_elf_generic_reloc
, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8
, /* type */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
287 false, /* pc_relative */
289 complain_overflow_signed
,/* complain_on_overflow */
290 bfd_elf_generic_reloc
, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25
, /* type */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
302 true, /* pc_relative */
304 complain_overflow_signed
,/* complain_on_overflow */
305 bfd_elf_generic_reloc
, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22
, /* type */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
317 true, /* pc_relative */
319 complain_overflow_signed
,/* complain_on_overflow */
320 bfd_elf_generic_reloc
, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
333 false, /* pc_relative */
335 complain_overflow_bitfield
,/* complain_on_overflow */
336 bfd_elf_generic_reloc
, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 false, /* pc_relative */
349 complain_overflow_bitfield
,/* complain_on_overflow */
350 bfd_elf_generic_reloc
, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
361 false, /* pc_relative */
363 complain_overflow_bitfield
,/* complain_on_overflow */
364 bfd_elf_generic_reloc
, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY
, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 false, /* pc_relative */
379 complain_overflow_bitfield
,/* complain_on_overflow */
380 bfd_elf_generic_reloc
, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT
, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 false, /* pc_relative */
393 complain_overflow_bitfield
,/* complain_on_overflow */
394 bfd_elf_generic_reloc
, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT
, /* type */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
405 false, /* pc_relative */
407 complain_overflow_bitfield
,/* complain_on_overflow */
408 bfd_elf_generic_reloc
, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE
, /* type */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
419 false, /* pc_relative */
421 complain_overflow_bitfield
,/* complain_on_overflow */
422 bfd_elf_generic_reloc
, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32
, /* type */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
433 false, /* pc_relative */
435 complain_overflow_bitfield
,/* complain_on_overflow */
436 bfd_elf_generic_reloc
, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC
, /* type */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
447 true, /* pc_relative */
449 complain_overflow_bitfield
,/* complain_on_overflow */
450 bfd_elf_generic_reloc
, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32
, /* type */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
461 false, /* pc_relative */
463 complain_overflow_bitfield
,/* complain_on_overflow */
464 bfd_elf_generic_reloc
, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32
, /* type */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
475 true, /* pc_relative */
477 complain_overflow_bitfield
,/* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL
, /* type */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
489 true, /* pc_relative */
491 complain_overflow_signed
,/* complain_on_overflow */
492 bfd_elf_generic_reloc
, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24
, /* type */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
503 true, /* pc_relative */
505 complain_overflow_signed
,/* complain_on_overflow */
506 bfd_elf_generic_reloc
, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24
, /* type */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
517 true, /* pc_relative */
519 complain_overflow_signed
,/* complain_on_overflow */
520 bfd_elf_generic_reloc
, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS
, /* type */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
531 false, /* pc_relative */
533 complain_overflow_dont
,/* complain_on_overflow */
534 bfd_elf_generic_reloc
, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
545 true, /* pc_relative */
547 complain_overflow_dont
,/* complain_on_overflow */
548 bfd_elf_generic_reloc
, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
559 true, /* pc_relative */
561 complain_overflow_dont
,/* complain_on_overflow */
562 bfd_elf_generic_reloc
, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
573 true, /* pc_relative */
575 complain_overflow_dont
,/* complain_on_overflow */
576 bfd_elf_generic_reloc
, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
587 false, /* pc_relative */
589 complain_overflow_dont
,/* complain_on_overflow */
590 bfd_elf_generic_reloc
, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
601 false, /* pc_relative */
603 complain_overflow_dont
,/* complain_on_overflow */
604 bfd_elf_generic_reloc
, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
615 false, /* pc_relative */
617 complain_overflow_dont
,/* complain_on_overflow */
618 bfd_elf_generic_reloc
, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1
, /* type */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
629 false, /* pc_relative */
631 complain_overflow_dont
,/* complain_on_overflow */
632 bfd_elf_generic_reloc
, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32
, /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 false, /* pc_relative */
645 complain_overflow_dont
,/* complain_on_overflow */
646 bfd_elf_generic_reloc
, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX
, /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 false, /* pc_relative */
659 complain_overflow_dont
,/* complain_on_overflow */
660 bfd_elf_generic_reloc
, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2
, /* type */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
671 false, /* pc_relative */
673 complain_overflow_signed
,/* complain_on_overflow */
674 bfd_elf_generic_reloc
, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31
, /* type */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
685 true, /* pc_relative */
687 complain_overflow_signed
,/* complain_on_overflow */
688 bfd_elf_generic_reloc
, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
699 false, /* pc_relative */
701 complain_overflow_dont
,/* complain_on_overflow */
702 bfd_elf_generic_reloc
, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS
, /* type */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
713 false, /* pc_relative */
715 complain_overflow_bitfield
,/* complain_on_overflow */
716 bfd_elf_generic_reloc
, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
727 true, /* pc_relative */
729 complain_overflow_dont
,/* complain_on_overflow */
730 bfd_elf_generic_reloc
, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL
, /* type */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
741 true, /* pc_relative */
743 complain_overflow_bitfield
,/* complain_on_overflow */
744 bfd_elf_generic_reloc
, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 false, /* pc_relative */
757 complain_overflow_dont
,/* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
769 false, /* pc_relative */
771 complain_overflow_bitfield
,/* complain_on_overflow */
772 bfd_elf_generic_reloc
, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
783 true, /* pc_relative */
785 complain_overflow_dont
,/* complain_on_overflow */
786 bfd_elf_generic_reloc
, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
797 true, /* pc_relative */
799 complain_overflow_bitfield
,/* complain_on_overflow */
800 bfd_elf_generic_reloc
, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19
, /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 true, /* pc_relative */
813 complain_overflow_signed
,/* complain_on_overflow */
814 bfd_elf_generic_reloc
, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6
, /* type */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
825 true, /* pc_relative */
827 complain_overflow_unsigned
,/* complain_on_overflow */
828 bfd_elf_generic_reloc
, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
838 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 true, /* pc_relative */
844 complain_overflow_dont
,/* complain_on_overflow */
845 bfd_elf_generic_reloc
, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12
, /* type */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
856 true, /* pc_relative */
858 complain_overflow_dont
,/* complain_on_overflow */
859 bfd_elf_generic_reloc
, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI
, /* type */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
870 false, /* pc_relative */
872 complain_overflow_dont
,/* complain_on_overflow */
873 bfd_elf_generic_reloc
, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI
, /* type */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
884 true, /* pc_relative */
886 complain_overflow_dont
,/* complain_on_overflow */
887 bfd_elf_generic_reloc
, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 true, /* pc_relative */
902 complain_overflow_dont
,/* complain_on_overflow */
903 bfd_elf_generic_reloc
, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0
, /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 true, /* pc_relative */
916 complain_overflow_dont
,/* complain_on_overflow */
917 bfd_elf_generic_reloc
, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 true, /* pc_relative */
930 complain_overflow_dont
,/* complain_on_overflow */
931 bfd_elf_generic_reloc
, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1
, /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 true, /* pc_relative */
944 complain_overflow_dont
,/* complain_on_overflow */
945 bfd_elf_generic_reloc
, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2
, /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 true, /* pc_relative */
958 complain_overflow_dont
,/* complain_on_overflow */
959 bfd_elf_generic_reloc
, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1
, /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 true, /* pc_relative */
972 complain_overflow_dont
,/* complain_on_overflow */
973 bfd_elf_generic_reloc
, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2
, /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 true, /* pc_relative */
986 complain_overflow_dont
,/* complain_on_overflow */
987 bfd_elf_generic_reloc
, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 true, /* pc_relative */
1000 complain_overflow_dont
,/* complain_on_overflow */
1001 bfd_elf_generic_reloc
, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 true, /* pc_relative */
1014 complain_overflow_dont
,/* complain_on_overflow */
1015 bfd_elf_generic_reloc
, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 true, /* pc_relative */
1028 complain_overflow_dont
,/* complain_on_overflow */
1029 bfd_elf_generic_reloc
, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 true, /* pc_relative */
1042 complain_overflow_dont
,/* complain_on_overflow */
1043 bfd_elf_generic_reloc
, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 true, /* pc_relative */
1056 complain_overflow_dont
,/* complain_on_overflow */
1057 bfd_elf_generic_reloc
, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 true, /* pc_relative */
1070 complain_overflow_dont
,/* complain_on_overflow */
1071 bfd_elf_generic_reloc
, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 true, /* pc_relative */
1084 complain_overflow_dont
,/* complain_on_overflow */
1085 bfd_elf_generic_reloc
, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 true, /* pc_relative */
1098 complain_overflow_dont
,/* complain_on_overflow */
1099 bfd_elf_generic_reloc
, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 true, /* pc_relative */
1112 complain_overflow_dont
,/* complain_on_overflow */
1113 bfd_elf_generic_reloc
, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 true, /* pc_relative */
1126 complain_overflow_dont
,/* complain_on_overflow */
1127 bfd_elf_generic_reloc
, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 true, /* pc_relative */
1140 complain_overflow_dont
,/* complain_on_overflow */
1141 bfd_elf_generic_reloc
, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 true, /* pc_relative */
1154 complain_overflow_dont
,/* complain_on_overflow */
1155 bfd_elf_generic_reloc
, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 true, /* pc_relative */
1168 complain_overflow_dont
,/* complain_on_overflow */
1169 bfd_elf_generic_reloc
, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 true, /* pc_relative */
1182 complain_overflow_dont
,/* complain_on_overflow */
1183 bfd_elf_generic_reloc
, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 true, /* pc_relative */
1196 complain_overflow_dont
,/* complain_on_overflow */
1197 bfd_elf_generic_reloc
, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 true, /* pc_relative */
1210 complain_overflow_dont
,/* complain_on_overflow */
1211 bfd_elf_generic_reloc
, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1222 true, /* pc_relative */
1224 complain_overflow_dont
,/* complain_on_overflow */
1225 bfd_elf_generic_reloc
, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 true, /* pc_relative */
1238 complain_overflow_dont
,/* complain_on_overflow */
1239 bfd_elf_generic_reloc
, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1250 true, /* pc_relative */
1252 complain_overflow_dont
,/* complain_on_overflow */
1253 bfd_elf_generic_reloc
, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1264 true, /* pc_relative */
1266 complain_overflow_dont
,/* complain_on_overflow */
1267 bfd_elf_generic_reloc
, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1280 false, /* pc_relative */
1282 complain_overflow_dont
,/* complain_on_overflow */
1283 bfd_elf_generic_reloc
, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL
, /* type */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1294 false, /* pc_relative */
1296 complain_overflow_bitfield
,/* complain_on_overflow */
1297 bfd_elf_generic_reloc
, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL
, /* type */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1308 false, /* pc_relative */
1310 complain_overflow_dont
,/* complain_on_overflow */
1311 bfd_elf_generic_reloc
, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1322 false, /* pc_relative */
1324 complain_overflow_dont
,/* complain_on_overflow */
1325 bfd_elf_generic_reloc
, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1336 false, /* pc_relative */
1338 complain_overflow_bitfield
,/* complain_on_overflow */
1339 bfd_elf_generic_reloc
, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1350 false, /* pc_relative */
1352 complain_overflow_dont
,/* complain_on_overflow */
1353 bfd_elf_generic_reloc
, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 false, /* pc_relative */
1366 complain_overflow_bitfield
,/* complain_on_overflow */
1367 NULL
, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL
, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 false, /* pc_relative */
1380 complain_overflow_dont
,/* complain_on_overflow */
1381 bfd_elf_generic_reloc
, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 false, /* pc_relative */
1394 complain_overflow_dont
,/* complain_on_overflow */
1395 bfd_elf_generic_reloc
, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 false, /* pc_relative */
1408 complain_overflow_dont
,/* complain_on_overflow */
1409 bfd_elf_generic_reloc
, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS
, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 false, /* pc_relative */
1422 complain_overflow_dont
,/* complain_on_overflow */
1423 bfd_elf_generic_reloc
, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS
, /* type */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1434 false, /* pc_relative */
1436 complain_overflow_dont
,/* complain_on_overflow */
1437 bfd_elf_generic_reloc
, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL
, /* type */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1448 true, /* pc_relative */
1450 complain_overflow_dont
, /* complain_on_overflow */
1451 bfd_elf_generic_reloc
, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12
, /* type */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 false, /* pc_relative */
1464 complain_overflow_bitfield
,/* complain_on_overflow */
1465 bfd_elf_generic_reloc
, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12
, /* type */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 false, /* pc_relative */
1478 complain_overflow_bitfield
,/* complain_on_overflow */
1479 bfd_elf_generic_reloc
, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1493 false, /* pc_relative */
1495 complain_overflow_dont
, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1501 false), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 false, /* pc_relative */
1510 complain_overflow_dont
, /* complain_on_overflow */
1511 NULL
, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11
, /* type */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1522 true, /* pc_relative */
1524 complain_overflow_signed
, /* complain_on_overflow */
1525 bfd_elf_generic_reloc
, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8
, /* type */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1536 true, /* pc_relative */
1538 complain_overflow_signed
, /* complain_on_overflow */
1539 bfd_elf_generic_reloc
, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32
, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 false, /* pc_relative */
1553 complain_overflow_bitfield
,/* complain_on_overflow */
1554 NULL
, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32
, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 false, /* pc_relative */
1567 complain_overflow_bitfield
,/* complain_on_overflow */
1568 bfd_elf_generic_reloc
, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32
, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 false, /* pc_relative */
1581 complain_overflow_bitfield
,/* complain_on_overflow */
1582 bfd_elf_generic_reloc
, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32
, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 false, /* pc_relative */
1595 complain_overflow_bitfield
,/* complain_on_overflow */
1596 NULL
, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32
, /* type */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1607 false, /* pc_relative */
1609 complain_overflow_bitfield
,/* complain_on_overflow */
1610 NULL
, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12
, /* type */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1621 false, /* pc_relative */
1623 complain_overflow_bitfield
,/* complain_on_overflow */
1624 bfd_elf_generic_reloc
, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12
, /* type */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1635 false, /* pc_relative */
1637 complain_overflow_bitfield
,/* complain_on_overflow */
1638 bfd_elf_generic_reloc
, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1649 false, /* pc_relative */
1651 complain_overflow_bitfield
,/* complain_on_overflow */
1652 bfd_elf_generic_reloc
, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1677 /* R_ARM_ME_TOO, obsolete. */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1684 false, /* pc_relative */
1686 complain_overflow_dont
,/* complain_on_overflow */
1687 bfd_elf_generic_reloc
, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1699 false, /* pc_relative. */
1701 complain_overflow_bitfield
,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc
, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1712 false, /* pc_relative. */
1714 complain_overflow_bitfield
,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc
, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1725 false, /* pc_relative. */
1727 complain_overflow_bitfield
,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc
, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1738 false, /* pc_relative. */
1740 complain_overflow_bitfield
,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc
, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16
, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1752 true, /* pc_relative. */
1754 complain_overflow_dont
,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc
, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12
, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1765 true, /* pc_relative. */
1767 complain_overflow_dont
,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc
, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18
, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1778 true, /* pc_relative. */
1780 complain_overflow_dont
,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc
, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
1790 static reloc_howto_type elf32_arm_howto_table_2
[8] =
1792 HOWTO (R_ARM_IRELATIVE
, /* type */
1794 2, /* size (0 = byte, 1 = short, 2 = long) */
1796 false, /* pc_relative */
1798 complain_overflow_bitfield
,/* complain_on_overflow */
1799 bfd_elf_generic_reloc
, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 true, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 false), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC
, /* type */
1807 2, /* size (0 = byte, 1 = short, 2 = long) */
1809 false, /* pc_relative */
1811 complain_overflow_bitfield
,/* complain_on_overflow */
1812 bfd_elf_generic_reloc
, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 false, /* partial_inplace */
1816 0xffffffff, /* dst_mask */
1817 false), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC
, /* type */
1820 2, /* size (0 = byte, 1 = short, 2 = long) */
1822 false, /* pc_relative */
1824 complain_overflow_bitfield
,/* complain_on_overflow */
1825 bfd_elf_generic_reloc
, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 false, /* partial_inplace */
1829 0xffffffff, /* dst_mask */
1830 false), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC
, /* type */
1833 2, /* size (0 = byte, 1 = short, 2 = long) */
1835 false, /* pc_relative */
1837 complain_overflow_bitfield
,/* complain_on_overflow */
1838 bfd_elf_generic_reloc
, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 false, /* partial_inplace */
1842 0xffffffff, /* dst_mask */
1843 false), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE
, /* type */
1846 2, /* size (0 = byte, 1 = short, 2 = long) */
1848 false, /* pc_relative */
1850 complain_overflow_bitfield
,/* complain_on_overflow */
1851 bfd_elf_generic_reloc
, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 false, /* partial_inplace */
1855 0xffffffff, /* dst_mask */
1856 false), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC
, /* type */
1859 2, /* size (0 = byte, 1 = short, 2 = long) */
1861 false, /* pc_relative */
1863 complain_overflow_bitfield
,/* complain_on_overflow */
1864 bfd_elf_generic_reloc
, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 false, /* partial_inplace */
1868 0xffffffff, /* dst_mask */
1869 false), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC
, /* type */
1872 2, /* size (0 = byte, 1 = short, 2 = long) */
1874 false, /* pc_relative */
1876 complain_overflow_bitfield
,/* complain_on_overflow */
1877 bfd_elf_generic_reloc
, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 false, /* partial_inplace */
1881 0xffffffff, /* dst_mask */
1882 false), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC
, /* type */
1885 2, /* size (0 = byte, 1 = short, 2 = long) */
1887 false, /* pc_relative */
1889 complain_overflow_bitfield
,/* complain_on_overflow */
1890 bfd_elf_generic_reloc
, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 false, /* partial_inplace */
1894 0xffffffff, /* dst_mask */
1895 false), /* pcrel_offset */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1901 HOWTO (R_ARM_RREL32
, /* type */
1903 0, /* size (0 = byte, 1 = short, 2 = long) */
1905 false, /* pc_relative */
1907 complain_overflow_dont
,/* complain_on_overflow */
1908 bfd_elf_generic_reloc
, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 false, /* partial_inplace */
1913 false), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32
, /* type */
1917 0, /* size (0 = byte, 1 = short, 2 = long) */
1919 false, /* pc_relative */
1921 complain_overflow_dont
,/* complain_on_overflow */
1922 bfd_elf_generic_reloc
, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 false, /* partial_inplace */
1927 false), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24
, /* type */
1931 0, /* size (0 = byte, 1 = short, 2 = long) */
1933 false, /* pc_relative */
1935 complain_overflow_dont
,/* complain_on_overflow */
1936 bfd_elf_generic_reloc
, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 false, /* partial_inplace */
1941 false), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE
, /* type */
1945 0, /* size (0 = byte, 1 = short, 2 = long) */
1947 false, /* pc_relative */
1949 complain_overflow_dont
,/* complain_on_overflow */
1950 bfd_elf_generic_reloc
, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 false, /* partial_inplace */
1955 false) /* pcrel_offset */
1958 static reloc_howto_type
*
1959 elf32_arm_howto_from_type (unsigned int r_type
)
1961 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1962 return &elf32_arm_howto_table_1
[r_type
];
1964 if (r_type
>= R_ARM_IRELATIVE
1965 && r_type
< R_ARM_IRELATIVE
+ ARRAY_SIZE (elf32_arm_howto_table_2
))
1966 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1968 if (r_type
>= R_ARM_RREL32
1969 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1970 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1976 elf32_arm_info_to_howto (bfd
* abfd
, arelent
* bfd_reloc
,
1977 Elf_Internal_Rela
* elf_reloc
)
1979 unsigned int r_type
;
1981 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1982 if ((bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
)) == NULL
)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1987 bfd_set_error (bfd_error_bad_value
);
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val
;
1996 unsigned char elf_reloc_val
;
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
2002 {BFD_RELOC_NONE
, R_ARM_NONE
},
2003 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
2004 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
2005 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
2006 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
2007 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
2008 {BFD_RELOC_32
, R_ARM_ABS32
},
2009 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
2010 {BFD_RELOC_8
, R_ARM_ABS8
},
2011 {BFD_RELOC_16
, R_ARM_ABS16
},
2012 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
2013 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
2020 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
2021 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
2022 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
2023 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
2024 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
2025 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
2026 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
2027 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
2028 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
2029 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
2030 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
2031 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
2032 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
2033 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
2034 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
2035 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
2036 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
2039 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
2040 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
2041 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
2042 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
2045 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
2046 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
2047 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
2048 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
2049 {BFD_RELOC_ARM_GOTFUNCDESC
, R_ARM_GOTFUNCDESC
},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC
, R_ARM_GOTOFFFUNCDESC
},
2051 {BFD_RELOC_ARM_FUNCDESC
, R_ARM_FUNCDESC
},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE
, R_ARM_FUNCDESC_VALUE
},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC
, R_ARM_TLS_GD32_FDPIC
},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC
, R_ARM_TLS_LDM32_FDPIC
},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC
, R_ARM_TLS_IE32_FDPIC
},
2056 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
2057 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
2058 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
2059 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
2060 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
2061 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
2062 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
2063 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
2067 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
2069 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
2070 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
2071 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
2072 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
2073 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
2074 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
2075 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
2076 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
2077 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
2078 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
2079 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
2081 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
2083 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
2084 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
2085 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
2086 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
2087 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
2088 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
2089 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
2090 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
2091 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
2092 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
2093 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
2094 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
},
2099 {BFD_RELOC_ARM_THUMB_BF17
, R_ARM_THM_BF16
},
2100 {BFD_RELOC_ARM_THUMB_BF13
, R_ARM_THM_BF12
},
2101 {BFD_RELOC_ARM_THUMB_BF19
, R_ARM_THM_BF18
}
2104 static reloc_howto_type
*
2105 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2106 bfd_reloc_code_real_type code
)
2110 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
2111 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
2117 static reloc_howto_type
*
2118 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2123 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
2124 if (elf32_arm_howto_table_1
[i
].name
!= NULL
2125 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
2126 return &elf32_arm_howto_table_1
[i
];
2128 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
2129 if (elf32_arm_howto_table_2
[i
].name
!= NULL
2130 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
2131 return &elf32_arm_howto_table_2
[i
];
2133 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
2134 if (elf32_arm_howto_table_3
[i
].name
!= NULL
2135 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
2136 return &elf32_arm_howto_table_3
[i
];
2141 /* Support for core dump NOTE sections. */
2144 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
2149 switch (note
->descsz
)
2154 case 148: /* Linux/ARM 32-bit. */
2156 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2159 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2170 size
, note
->descpos
+ offset
);
2174 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2176 switch (note
->descsz
)
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd
)->core
->pid
2183 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2184 elf_tdata (abfd
)->core
->program
2185 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2186 elf_tdata (abfd
)->core
->command
2187 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2194 char *command
= elf_tdata (abfd
)->core
->command
;
2195 int n
= strlen (command
);
2197 if (0 < n
&& command
[n
- 1] == ' ')
2198 command
[n
- 1] = '\0';
2205 elf32_arm_nabi_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
,
2215 char data
[124] ATTRIBUTE_NONSTRING
;
2218 va_start (ap
, note_type
);
2219 memset (data
, 0, sizeof (data
));
2220 strncpy (data
+ 28, va_arg (ap
, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2223 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION
;
2229 strncpy (data
+ 44, va_arg (ap
, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2235 return elfcore_write_note (abfd
, buf
, bufsiz
,
2236 "CORE", note_type
, data
, sizeof (data
));
2247 va_start (ap
, note_type
);
2248 memset (data
, 0, sizeof (data
));
2249 pid
= va_arg (ap
, long);
2250 bfd_put_32 (abfd
, pid
, data
+ 24);
2251 cursig
= va_arg (ap
, int);
2252 bfd_put_16 (abfd
, cursig
, data
+ 12);
2253 greg
= va_arg (ap
, const void *);
2254 memcpy (data
+ 72, greg
, 72);
2257 return elfcore_write_note (abfd
, buf
, bufsiz
,
2258 "CORE", note_type
, data
, sizeof (data
));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32
;
2273 typedef unsigned short int insn16
;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name, and its type, the stub can be found. The
2285 name can be changed. The only requirement is the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter. This is put in the .interp
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
/* ARM-mode TLS trampoline: compute the entry address relative to lr,
   load the target from its second word and branch to it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0  */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1          */
};
/* Lazy TLS-descriptor trampoline: loads the GOT-relative words that
   follow the code and tail-calls the lazy resolver through r2.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004,		/*	push {r2}			*/
  0xe59f200c,		/*      ldr  r2, [pc, #3f - . - 8]	*/
  0xe59f100c,		/*      ldr  r1, [pc, #4f - . - 8]	*/
  0xe79f2002,		/* 1:   ldr  r2, [pc, r2]		*/
  0xe081100f,		/* 2:   add  r1, pc			*/
  0xe12fff12,		/*      bx   r2				*/
  0x00000014,		/* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
				       + dl_tlsdesc_lazy_resolver(GOT)  */
  0x00000018,		/* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8	*/
};
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry
[] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry
[] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2377 static const bfd_vma elf32_arm_plt0_entry
[] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2387 static const bfd_vma elf32_arm_plt_entry
[] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2401 static const bfd_vma elf32_arm_plt0_entry
[] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short
[] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long
[] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2429 static bool elf32_arm_use_long_plt_entry
= false;
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction maybe encoded to one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for thumb only target
2449 static const bfd_vma elf32_thumb2_plt_entry
[] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction maybe encoded to one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub
[] =
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
2504 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2539 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2540 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2541 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2542 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2543 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2544 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2545 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2546 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2556 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2557 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2558 is inserted in arm_build_one_stub(). */
2559 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2560 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2561 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2562 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2563 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2564 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2565 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2566 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2571 enum stub_insn_type type
;
2572 unsigned int r_type
;
2576 /* See note [Thumb nop sequence] when adding a veneer. */
2578 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2579 to reach the stub if necessary. */
2580 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2582 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2583 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2586 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2588 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2590 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2591 ARM_INSN (0xe12fff1c), /* bx ip */
2592 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2595 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2596 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2598 THUMB16_INSN (0xb401), /* push {r0} */
2599 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2600 THUMB16_INSN (0x4684), /* mov ip, r0 */
2601 THUMB16_INSN (0xbc01), /* pop {r0} */
2602 THUMB16_INSN (0x4760), /* bx ip */
2603 THUMB16_INSN (0xbf00), /* nop */
2604 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only
[] =
2610 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2611 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(x) */
2614 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2615 M-profile architectures. */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure
[] =
2618 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2619 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2620 THUMB16_INSN (0x4760), /* bx ip */
2623 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2625 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2627 THUMB16_INSN (0x4778), /* bx pc */
2628 THUMB16_INSN (0xe7fd), /* b .-2 */
2629 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2630 ARM_INSN (0xe12fff1c), /* bx ip */
2631 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2634 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2636 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2638 THUMB16_INSN (0x4778), /* bx pc */
2639 THUMB16_INSN (0xe7fd), /* b .-2 */
2640 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2641 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2644 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2645 one, when the destination is close enough. */
2646 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2648 THUMB16_INSN (0x4778), /* bx pc */
2649 THUMB16_INSN (0xe7fd), /* b .-2 */
2650 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2653 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2654 blx to reach the stub if necessary. */
2655 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2657 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2658 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2659 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2662 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2663 blx to reach the stub if necessary. We can not add into pc;
2664 it is not guaranteed to mode switch (different in ARMv6 and
2666 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2668 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2669 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2670 ARM_INSN (0xe12fff1c), /* bx ip */
2671 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2674 /* V4T ARM -> ARM long branch stub, PIC. */
2675 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2677 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2678 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2679 ARM_INSN (0xe12fff1c), /* bx ip */
2680 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2683 /* V4T Thumb -> ARM long branch stub, PIC. */
2684 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2686 THUMB16_INSN (0x4778), /* bx pc */
2687 THUMB16_INSN (0xe7fd), /* b .-2 */
2688 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2689 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2690 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2693 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2695 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2697 THUMB16_INSN (0xb401), /* push {r0} */
2698 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2699 THUMB16_INSN (0x46fc), /* mov ip, pc */
2700 THUMB16_INSN (0x4484), /* add ip, r0 */
2701 THUMB16_INSN (0xbc01), /* pop {r0} */
2702 THUMB16_INSN (0x4760), /* bx ip */
2703 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2706 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2708 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2710 THUMB16_INSN (0x4778), /* bx pc */
2711 THUMB16_INSN (0xe7fd), /* b .-2 */
2712 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2713 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2714 ARM_INSN (0xe12fff1c), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2718 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2719 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2720 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2722 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2723 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2724 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2727 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2728 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2729 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2731 THUMB16_INSN (0x4778), /* bx pc */
2732 THUMB16_INSN (0xe7fd), /* b .-2 */
2733 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2734 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2735 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2738 /* NaCl ARM -> ARM long branch stub. */
2739 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2741 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2742 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2743 ARM_INSN (0xe12fff1c), /* bx ip */
2744 ARM_INSN (0xe320f000), /* nop */
2745 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2746 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2747 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2748 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2751 /* NaCl ARM -> ARM long branch stub, PIC. */
2752 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2754 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2755 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2756 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2757 ARM_INSN (0xe12fff1c), /* bx ip */
2758 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2759 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2760 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2761 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2764 /* Stub used for transition to secure state (aka SG veneer). */
2765 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only
[] =
2767 THUMB32_INSN (0xe97fe97f), /* sg. */
2768 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2772 /* Cortex-A8 erratum-workaround stubs. */
2774 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2775 can't use a conditional branch to reach this stub). */
2777 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2779 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2781 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2784 /* Stub used for b.w and bl.w instructions. */
2786 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2788 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2791 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2793 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2796 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2797 instruction (which switches to ARM mode) to point to this stub. Jump to the
2798 real destination using an ARM-mode branch. */
2800 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2802 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2805 /* For each section group there can be a specially created linker section
2806 to hold the stubs for that group. The name of the stub section is based
2807 upon the name of another section within that group with the suffix below
2810 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2811 create what appeared to be a linker stub section when it actually
2812 contained user code/data. For example, consider this fragment:
2814 const char * stubborn_problems[] = { "np" };
2816 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2819 .data.rel.local.stubborn_problems
2821 This then causes problems in arm32_arm_build_stubs() as it triggers:
2823 // Ignore non-stub sections.
2824 if (!strstr (stub_sec->name, STUB_SUFFIX))
2827 And so the section would be ignored instead of being processed. Hence
2828 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2830 #define STUB_SUFFIX ".__stub"
2832 /* One entry per long/short branch stub defined above. */
2834 DEF_STUB (long_branch_any_any) \
2835 DEF_STUB (long_branch_v4t_arm_thumb) \
2836 DEF_STUB (long_branch_thumb_only) \
2837 DEF_STUB (long_branch_v4t_thumb_thumb) \
2838 DEF_STUB (long_branch_v4t_thumb_arm) \
2839 DEF_STUB (short_branch_v4t_thumb_arm) \
2840 DEF_STUB (long_branch_any_arm_pic) \
2841 DEF_STUB (long_branch_any_thumb_pic) \
2842 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2843 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2844 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2845 DEF_STUB (long_branch_thumb_only_pic) \
2846 DEF_STUB (long_branch_any_tls_pic) \
2847 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2848 DEF_STUB (long_branch_arm_nacl) \
2849 DEF_STUB (long_branch_arm_nacl_pic) \
2850 DEF_STUB (cmse_branch_thumb_only) \
2851 DEF_STUB (a8_veneer_b_cond) \
2852 DEF_STUB (a8_veneer_b) \
2853 DEF_STUB (a8_veneer_bl) \
2854 DEF_STUB (a8_veneer_blx) \
2855 DEF_STUB (long_branch_thumb2_only) \
2856 DEF_STUB (long_branch_thumb2_only_pure)
2858 #define DEF_STUB(x) arm_stub_##x,
2859 enum elf32_arm_stub_type
2867 /* Note the first a8_veneer type. */
2868 const unsigned arm_stub_a8_veneer_lwm
= arm_stub_a8_veneer_b_cond
;
2872 const insn_sequence
* template_sequence
;
2876 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2877 static const stub_def stub_definitions
[] =
2883 struct elf32_arm_stub_hash_entry
2885 /* Base hash table entry structure. */
2886 struct bfd_hash_entry root
;
2888 /* The stub section. */
2891 /* Offset within stub_sec of the beginning of this stub. */
2892 bfd_vma stub_offset
;
2894 /* Given the symbol's value and its section we can determine its final
2895 value when building the stubs (so the stub knows where to jump). */
2896 bfd_vma target_value
;
2897 asection
*target_section
;
2899 /* Same as above but for the source of the branch to the stub. Used for
2900 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2901 such, source section does not need to be recorded since Cortex-A8 erratum
2902 workaround stubs are only generated when both source and target are in the
2904 bfd_vma source_value
;
2906 /* The instruction which caused this stub to be generated (only valid for
2907 Cortex-A8 erratum workaround stubs at present). */
2908 unsigned long orig_insn
;
2910 /* The stub type. */
2911 enum elf32_arm_stub_type stub_type
;
2912 /* Its encoding size in bytes. */
2915 const insn_sequence
*stub_template
;
2916 /* The size of the template (number of entries). */
2917 int stub_template_size
;
2919 /* The symbol table entry, if any, that this was derived from. */
2920 struct elf32_arm_link_hash_entry
*h
;
2922 /* Type of branch. */
2923 enum arm_st_branch_type branch_type
;
2925 /* Where this stub is being called from, or, in the case of combined
2926 stub sections, the first input section in the group. */
2929 /* The name for the local symbol at the start of this stub. The
2930 stub name in the hash table has to be unique; this does not, so
2931 it can be friendlier. */
2935 /* Used to build a map of a section. This is required for mixed-endian
2938 typedef struct elf32_elf_section_map
2943 elf32_arm_section_map
;
2945 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2949 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
,
2950 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
,
2951 VFP11_ERRATUM_ARM_VENEER
,
2952 VFP11_ERRATUM_THUMB_VENEER
2954 elf32_vfp11_erratum_type
;
2956 typedef struct elf32_vfp11_erratum_list
2958 struct elf32_vfp11_erratum_list
*next
;
2964 struct elf32_vfp11_erratum_list
*veneer
;
2965 unsigned int vfp_insn
;
2969 struct elf32_vfp11_erratum_list
*branch
;
2973 elf32_vfp11_erratum_type type
;
2975 elf32_vfp11_erratum_list
;
2977 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2981 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
,
2982 STM32L4XX_ERRATUM_VENEER
2984 elf32_stm32l4xx_erratum_type
;
2986 typedef struct elf32_stm32l4xx_erratum_list
2988 struct elf32_stm32l4xx_erratum_list
*next
;
2994 struct elf32_stm32l4xx_erratum_list
*veneer
;
2999 struct elf32_stm32l4xx_erratum_list
*branch
;
3003 elf32_stm32l4xx_erratum_type type
;
3005 elf32_stm32l4xx_erratum_list
;
3010 INSERT_EXIDX_CANTUNWIND_AT_END
3012 arm_unwind_edit_type
;
3014 /* A (sorted) list of edits to apply to an unwind table. */
3015 typedef struct arm_unwind_table_edit
3017 arm_unwind_edit_type type
;
3018 /* Note: we sometimes want to insert an unwind entry corresponding to a
3019 section different from the one we're currently writing out, so record the
3020 (text) section this edit relates to here. */
3021 asection
*linked_section
;
3023 struct arm_unwind_table_edit
*next
;
3025 arm_unwind_table_edit
;
3027 typedef struct _arm_elf_section_data
3029 /* Information about mapping symbols. */
3030 struct bfd_elf_section_data elf
;
3031 unsigned int mapcount
;
3032 unsigned int mapsize
;
3033 elf32_arm_section_map
*map
;
3034 /* Information about CPU errata. */
3035 unsigned int erratumcount
;
3036 elf32_vfp11_erratum_list
*erratumlist
;
3037 unsigned int stm32l4xx_erratumcount
;
3038 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
3039 unsigned int additional_reloc_count
;
3040 /* Information about unwind tables. */
3043 /* Unwind info attached to a text section. */
3046 asection
*arm_exidx_sec
;
3049 /* Unwind info attached to an .ARM.exidx section. */
3052 arm_unwind_table_edit
*unwind_edit_list
;
3053 arm_unwind_table_edit
*unwind_edit_tail
;
3057 _arm_elf_section_data
;
3059 #define elf32_arm_section_data(sec) \
3060 ((_arm_elf_section_data *) elf_section_data (sec))
3062 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3063 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3064 so may be created multiple times: we use an array of these entries whilst
3065 relaxing which we can refresh easily, then create stubs for each potentially
3066 erratum-triggering instruction once we've settled on a solution. */
3068 struct a8_erratum_fix
3073 bfd_vma target_offset
;
3074 unsigned long orig_insn
;
3076 enum elf32_arm_stub_type stub_type
;
3077 enum arm_st_branch_type branch_type
;
3080 /* A table of relocs applied to branches which might trigger Cortex-A8
3083 struct a8_erratum_reloc
3086 bfd_vma destination
;
3087 struct elf32_arm_link_hash_entry
*hash
;
3088 const char *sym_name
;
3089 unsigned int r_type
;
3090 enum arm_st_branch_type branch_type
;
3094 /* The size of the thread control block. */
3097 /* ARM-specific information about a PLT entry, over and above the usual
3101 /* We reference count Thumb references to a PLT entry separately,
3102 so that we can emit the Thumb trampoline only if needed. */
3103 bfd_signed_vma thumb_refcount
;
3105 /* Some references from Thumb code may be eliminated by BL->BLX
3106 conversion, so record them separately. */
3107 bfd_signed_vma maybe_thumb_refcount
;
3109 /* How many of the recorded PLT accesses were from non-call relocations.
3110 This information is useful when deciding whether anything takes the
3111 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3112 non-call references to the function should resolve directly to the
3113 real runtime target. */
3114 unsigned int noncall_refcount
;
3116 /* Since PLT entries have variable size if the Thumb prologue is
3117 used, we need to record the index into .got.plt instead of
3118 recomputing it from the PLT offset. */
3119 bfd_signed_vma got_offset
;
3122 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3123 struct arm_local_iplt_info
3125 /* The information that is usually found in the generic ELF part of
3126 the hash table entry. */
3127 union gotplt_union root
;
3129 /* The information that is usually found in the ARM-specific part of
3130 the hash table entry. */
3131 struct arm_plt_info arm
;
3133 /* A list of all potential dynamic relocations against this symbol. */
3134 struct elf_dyn_relocs
*dyn_relocs
;
/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local
{
  unsigned int funcdesc_cnt;		/* Count of R_ARM_FUNCDESC-style uses.  */
  unsigned int gotofffuncdesc_cnt;	/* Count of GOTOFFFUNCDESC uses.  */
  int funcdesc_offset;			/* Assigned function-descriptor offset.  */
};
3145 struct elf_arm_obj_tdata
3147 struct elf_obj_tdata root
;
3149 /* Zero to warn when linking objects with incompatible enum sizes. */
3150 int no_enum_size_warning
;
3152 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3153 int no_wchar_size_warning
;
3155 /* The number of entries in each of the arrays in this strcuture.
3156 Used to avoid buffer overruns. */
3157 bfd_size_type num_entries
;
3159 /* tls_type for each local got entry. */
3160 char *local_got_tls_type
;
3162 /* GOTPLT entries for TLS descriptors. */
3163 bfd_vma
*local_tlsdesc_gotent
;
3165 /* Information for local symbols that need entries in .iplt. */
3166 struct arm_local_iplt_info
**local_iplt
;
3168 /* Maintains FDPIC counters and funcdesc info. */
3169 struct fdpic_local
*local_fdpic_cnts
;
3172 #define elf_arm_tdata(bfd) \
3173 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3175 #define elf32_arm_num_entries(bfd) \
3176 (elf_arm_tdata (bfd)->num_entries)
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179 (elf_arm_tdata (bfd)->local_got_tls_type)
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3184 #define elf32_arm_local_iplt(bfd) \
3185 (elf_arm_tdata (bfd)->local_iplt)
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3190 #define is_arm_elf(bfd) \
3191 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192 && elf_tdata (bfd) != NULL \
3193 && elf_object_id (bfd) == ARM_ELF_DATA)
3196 elf32_arm_mkobject (bfd
*abfd
)
3198 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global
{
  unsigned int gotofffuncdesc_cnt;	/* GOTOFFFUNCDESC reloc count.  */
  unsigned int gotfuncdesc_cnt;		/* GOTFUNCDESC reloc count.  */
  unsigned int funcdesc_cnt;		/* FUNCDESC reloc count.  */
  int funcdesc_offset;			/* Assigned function-descriptor offset.  */
  int gotfuncdesc_offset;		/* Assigned GOT slot for the descriptor.  */
};
3213 /* Arm ELF linker hash entry. */
3214 struct elf32_arm_link_hash_entry
3216 struct elf_link_hash_entry root
;
3218 /* ARM-specific PLT information. */
3219 struct arm_plt_info plt
;
3221 #define GOT_UNKNOWN 0
3222 #define GOT_NORMAL 1
3223 #define GOT_TLS_GD 2
3224 #define GOT_TLS_IE 4
3225 #define GOT_TLS_GDESC 8
3226 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3227 unsigned int tls_type
: 8;
3229 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3230 unsigned int is_iplt
: 1;
3232 unsigned int unused
: 23;
3234 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3235 starting at the end of the jump table. */
3236 bfd_vma tlsdesc_got
;
3238 /* The symbol marking the real symbol location for exported thumb
3239 symbols with Arm stubs. */
3240 struct elf_link_hash_entry
*export_glue
;
3242 /* A pointer to the most recently used stub hash entry against this
3244 struct elf32_arm_stub_hash_entry
*stub_cache
;
3246 /* Counter for FDPIC relocations against this symbol. */
3247 struct fdpic_global fdpic_cnts
;
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bool (*) (struct elf_link_hash_entry *, void *)) (func),		\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Evaluates to NULL when the hash table is not the ARM backend's.  */
#define elf32_arm_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)					\
    && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
   ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)

/* Look up an entry in the stub hash table, casting to the ARM stub type.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3267 /* Array to keep track of which stub sections have been created, and
3268 information on stub grouping. */
3271 /* This is the section to which stubs in the group will be
3274 /* The stub section. */
3278 #define elf32_arm_compute_jump_table_size(htab) \
3279 ((htab)->next_tls_desc_index * 4)
3281 /* ARM ELF linker hash table. */
3282 struct elf32_arm_link_hash_table
3284 /* The main hash table. */
3285 struct elf_link_hash_table root
;
3287 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3288 bfd_size_type thumb_glue_size
;
3290 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3291 bfd_size_type arm_glue_size
;
3293 /* The size in bytes of section containing the ARMv4 BX veneers. */
3294 bfd_size_type bx_glue_size
;
3296 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3297 veneer has been populated. */
3298 bfd_vma bx_glue_offset
[15];
3300 /* The size in bytes of the section containing glue for VFP11 erratum
3302 bfd_size_type vfp11_erratum_glue_size
;
3304 /* The size in bytes of the section containing glue for STM32L4XX erratum
3306 bfd_size_type stm32l4xx_erratum_glue_size
;
3308 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3309 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3310 elf32_arm_write_section(). */
3311 struct a8_erratum_fix
*a8_erratum_fixes
;
3312 unsigned int num_a8_erratum_fixes
;
3314 /* An arbitrary input BFD chosen to hold the glue sections. */
3315 bfd
* bfd_of_glue_owner
;
3317 /* Nonzero to output a BE8 image. */
3320 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3321 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3324 /* The relocation to use for R_ARM_TARGET2 relocations. */
3327 /* 0 = Ignore R_ARM_V4BX.
3328 1 = Convert BX to MOV PC.
3329 2 = Generate v4 interworing stubs. */
3332 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3335 /* Whether we should fix the ARM1176 BLX immediate issue. */
3338 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3341 /* What sort of code sequences we should look for which may trigger the
3342 VFP11 denorm erratum. */
3343 bfd_arm_vfp11_fix vfp11_fix
;
3345 /* Global counter for the number of fixes we have emitted. */
3346 int num_vfp11_fixes
;
3348 /* What sort of code sequences we should look for which may trigger the
3349 STM32L4XX erratum. */
3350 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3352 /* Global counter for the number of fixes we have emitted. */
3353 int num_stm32l4xx_fixes
;
3355 /* Nonzero to force PIC branch veneers. */
3358 /* The number of bytes in the initial entry in the PLT. */
3359 bfd_size_type plt_header_size
;
3361 /* The number of bytes in the subsequent PLT etries. */
3362 bfd_size_type plt_entry_size
;
3364 /* True if the target uses REL relocations. */
3367 /* Nonzero if import library must be a secure gateway import library
3368 as per ARMv8-M Security Extensions. */
3371 /* The import library whose symbols' address must remain stable in
3372 the import library generated. */
3375 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3376 bfd_vma next_tls_desc_index
;
3378 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3379 bfd_vma num_tls_desc
;
3381 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3384 /* Offset in .plt section of tls_arm_trampoline. */
3385 bfd_vma tls_trampoline
;
3387 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3390 bfd_signed_vma refcount
;
3394 /* For convenience in allocate_dynrelocs. */
3397 /* The amount of space used by the reserved portion of the sgotplt
3398 section, plus whatever space is used by the jump slots. */
3399 bfd_vma sgotplt_jump_table_size
;
3401 /* The stub hash table. */
3402 struct bfd_hash_table stub_hash_table
;
3404 /* Linker stub bfd. */
3407 /* Linker call-backs. */
3408 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3410 void (*layout_sections_again
) (void);
3412 /* Array to keep track of which stub sections have been created, and
3413 information on stub grouping. */
3414 struct map_stub
*stub_group
;
3416 /* Input stub section holding secure gateway veneers. */
3417 asection
*cmse_stub_sec
;
3419 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3420 start to be allocated. */
3421 bfd_vma new_cmse_stub_offset
;
3423 /* Number of elements in stub_group. */
3424 unsigned int top_id
;
3426 /* Assorted information used by elf32_arm_size_stubs. */
3427 unsigned int bfd_count
;
3428 unsigned int top_index
;
3429 asection
**input_list
;
3431 /* True if the target system uses FDPIC. */
3434 /* Fixup section. Used for FDPIC. */
3438 /* Add an FDPIC read-only fixup. */
3440 arm_elf_add_rofixup (bfd
*output_bfd
, asection
*srofixup
, bfd_vma offset
)
3442 bfd_vma fixup_offset
;
3444 fixup_offset
= srofixup
->reloc_count
++ * 4;
3445 BFD_ASSERT (fixup_offset
< srofixup
->size
);
3446 bfd_put_32 (output_bfd
, offset
, srofixup
->contents
+ fixup_offset
);
/* Return the index of the least significant set bit of MASK, or -1 if
   MASK is zero.  Uses the compiler builtin when available; note that
   __builtin_ctz is undefined for a zero argument, so callers must not
   pass zero on that path.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	return i;
      mask = (mask >> 1);
    }
  return -1;
#endif
}
/* Return the number of set bits in MASK.  Uses the compiler builtin
   when available, otherwise counts bit-by-bit.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i;
  int sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3486 static void elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
3487 asection
*sreloc
, Elf_Internal_Rela
*rel
);
3490 arm_elf_fill_funcdesc (bfd
*output_bfd
,
3491 struct bfd_link_info
*info
,
3492 int *funcdesc_offset
,
3496 bfd_vma dynreloc_value
,
3499 if ((*funcdesc_offset
& 1) == 0)
3501 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
3502 asection
*sgot
= globals
->root
.sgot
;
3504 if (bfd_link_pic (info
))
3506 asection
*srelgot
= globals
->root
.srelgot
;
3507 Elf_Internal_Rela outrel
;
3509 outrel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
3510 outrel
.r_offset
= sgot
->output_section
->vma
+ sgot
->output_offset
+ offset
;
3511 outrel
.r_addend
= 0;
3513 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
3514 bfd_put_32 (output_bfd
, addr
, sgot
->contents
+ offset
);
3515 bfd_put_32 (output_bfd
, seg
, sgot
->contents
+ offset
+ 4);
3519 struct elf_link_hash_entry
*hgot
= globals
->root
.hgot
;
3520 bfd_vma got_value
= hgot
->root
.u
.def
.value
3521 + hgot
->root
.u
.def
.section
->output_section
->vma
3522 + hgot
->root
.u
.def
.section
->output_offset
;
3524 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3525 sgot
->output_section
->vma
+ sgot
->output_offset
3527 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3528 sgot
->output_section
->vma
+ sgot
->output_offset
3530 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ offset
);
3531 bfd_put_32 (output_bfd
, got_value
, sgot
->contents
+ offset
+ 4);
3533 *funcdesc_offset
|= 1;
3537 /* Create an entry in an ARM ELF linker hash table. */
3539 static struct bfd_hash_entry
*
3540 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3541 struct bfd_hash_table
* table
,
3542 const char * string
)
3544 struct elf32_arm_link_hash_entry
* ret
=
3545 (struct elf32_arm_link_hash_entry
*) entry
;
3547 /* Allocate the structure if it has not already been allocated by a
3550 ret
= (struct elf32_arm_link_hash_entry
*)
3551 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3553 return (struct bfd_hash_entry
*) ret
;
3555 /* Call the allocation method of the superclass. */
3556 ret
= ((struct elf32_arm_link_hash_entry
*)
3557 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3561 ret
->tls_type
= GOT_UNKNOWN
;
3562 ret
->tlsdesc_got
= (bfd_vma
) -1;
3563 ret
->plt
.thumb_refcount
= 0;
3564 ret
->plt
.maybe_thumb_refcount
= 0;
3565 ret
->plt
.noncall_refcount
= 0;
3566 ret
->plt
.got_offset
= -1;
3567 ret
->is_iplt
= false;
3568 ret
->export_glue
= NULL
;
3570 ret
->stub_cache
= NULL
;
3572 ret
->fdpic_cnts
.gotofffuncdesc_cnt
= 0;
3573 ret
->fdpic_cnts
.gotfuncdesc_cnt
= 0;
3574 ret
->fdpic_cnts
.funcdesc_cnt
= 0;
3575 ret
->fdpic_cnts
.funcdesc_offset
= -1;
3576 ret
->fdpic_cnts
.gotfuncdesc_offset
= -1;
3579 return (struct bfd_hash_entry
*) ret
;
3582 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3586 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3588 if (elf_local_got_refcounts (abfd
) == NULL
)
3590 bfd_size_type num_syms
;
3592 elf32_arm_num_entries (abfd
) = 0;
3594 /* Whilst it might be tempting to allocate a single block of memory and
3595 then divide it up amoungst the arrays in the elf_arm_obj_tdata
3596 structure, this interferes with the work of memory checkers looking
3597 for buffer overruns. So allocate each array individually. */
3599 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3601 elf_local_got_refcounts (abfd
) = bfd_zalloc
3602 (abfd
, num_syms
* sizeof (* elf_local_got_refcounts (abfd
)));
3604 if (elf_local_got_refcounts (abfd
) == NULL
)
3607 elf32_arm_local_tlsdesc_gotent (abfd
) = bfd_zalloc
3608 (abfd
, num_syms
* sizeof (* elf32_arm_local_tlsdesc_gotent (abfd
)));
3610 if (elf32_arm_local_tlsdesc_gotent (abfd
) == NULL
)
3613 elf32_arm_local_iplt (abfd
) = bfd_zalloc
3614 (abfd
, num_syms
* sizeof (* elf32_arm_local_iplt (abfd
)));
3616 if (elf32_arm_local_iplt (abfd
) == NULL
)
3619 elf32_arm_local_fdpic_cnts (abfd
) = bfd_zalloc
3620 (abfd
, num_syms
* sizeof (* elf32_arm_local_fdpic_cnts (abfd
)));
3622 if (elf32_arm_local_fdpic_cnts (abfd
) == NULL
)
3625 elf32_arm_local_got_tls_type (abfd
) = bfd_zalloc
3626 (abfd
, num_syms
* sizeof (* elf32_arm_local_got_tls_type (abfd
)));
3628 if (elf32_arm_local_got_tls_type (abfd
) == NULL
)
3631 elf32_arm_num_entries (abfd
) = num_syms
;
3633 #if GCC_VERSION >= 3000
3634 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
))
3635 <= __alignof__ (*elf_local_got_refcounts (abfd
)));
3636 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd
))
3637 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
)));
3638 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd
))
3639 <= __alignof__ (*elf32_arm_local_iplt (abfd
)));
3640 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd
))
3641 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd
)));
3647 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3648 to input bfd ABFD. Create the information if it doesn't already exist.
3649 Return null if an allocation fails. */
3651 static struct arm_local_iplt_info
*
3652 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3654 struct arm_local_iplt_info
**ptr
;
3656 if (!elf32_arm_allocate_local_sym_info (abfd
))
3659 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3660 BFD_ASSERT (r_symndx
< elf32_arm_num_entries (abfd
));
3661 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3663 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3667 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3668 in ABFD's symbol table. If the symbol is global, H points to its
3669 hash table entry, otherwise H is null.
3671 Return true if the symbol does have PLT information. When returning
3672 true, point *ROOT_PLT at the target-independent reference count/offset
3673 union and *ARM_PLT at the ARM-specific information. */
3676 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_table
*globals
,
3677 struct elf32_arm_link_hash_entry
*h
,
3678 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3679 struct arm_plt_info
**arm_plt
)
3681 struct arm_local_iplt_info
*local_iplt
;
3683 if (globals
->root
.splt
== NULL
&& globals
->root
.iplt
== NULL
)
3688 *root_plt
= &h
->root
.plt
;
3693 if (elf32_arm_local_iplt (abfd
) == NULL
)
3696 if (r_symndx
>= elf32_arm_num_entries (abfd
))
3699 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3700 if (local_iplt
== NULL
)
3703 *root_plt
= &local_iplt
->root
;
3704 *arm_plt
= &local_iplt
->arm
;
3708 static bool using_thumb_only (struct elf32_arm_link_hash_table
*globals
);
3710 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3714 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3715 struct arm_plt_info
*arm_plt
)
3717 struct elf32_arm_link_hash_table
*htab
;
3719 htab
= elf32_arm_hash_table (info
);
3721 return (!using_thumb_only (htab
) && (arm_plt
->thumb_refcount
!= 0
3722 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0)));
3725 /* Return a pointer to the head of the dynamic reloc list that should
3726 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3727 ABFD's symbol table. Return null if an error occurs. */
3729 static struct elf_dyn_relocs
**
3730 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3731 Elf_Internal_Sym
*isym
)
3733 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3735 struct arm_local_iplt_info
*local_iplt
;
3737 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3738 if (local_iplt
== NULL
)
3740 return &local_iplt
->dyn_relocs
;
3744 /* Track dynamic relocs needed for local syms too.
3745 We really need local syms available to do this
3750 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3754 vpp
= &elf_section_data (s
)->local_dynrel
;
3755 return (struct elf_dyn_relocs
**) vpp
;
3759 /* Initialize an entry in the stub hash table. */
3761 static struct bfd_hash_entry
*
3762 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3763 struct bfd_hash_table
*table
,
3766 /* Allocate the structure if it has not already been allocated by a
3770 entry
= (struct bfd_hash_entry
*)
3771 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3776 /* Call the allocation method of the superclass. */
3777 entry
= bfd_hash_newfunc (entry
, table
, string
);
3780 struct elf32_arm_stub_hash_entry
*eh
;
3782 /* Initialize the local fields. */
3783 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3784 eh
->stub_sec
= NULL
;
3785 eh
->stub_offset
= (bfd_vma
) -1;
3786 eh
->source_value
= 0;
3787 eh
->target_value
= 0;
3788 eh
->target_section
= NULL
;
3790 eh
->stub_type
= arm_stub_none
;
3792 eh
->stub_template
= NULL
;
3793 eh
->stub_template_size
= -1;
3796 eh
->output_name
= NULL
;
3802 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3803 shortcuts to them in our hash table. */
3806 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3808 struct elf32_arm_link_hash_table
*htab
;
3810 htab
= elf32_arm_hash_table (info
);
3814 if (! _bfd_elf_create_got_section (dynobj
, info
))
3817 /* Also create .rofixup. */
3820 htab
->srofixup
= bfd_make_section_with_flags (dynobj
, ".rofixup",
3821 (SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
3822 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY
));
3823 if (htab
->srofixup
== NULL
3824 || !bfd_set_section_alignment (htab
->srofixup
, 2))
3831 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3834 create_ifunc_sections (struct bfd_link_info
*info
)
3836 struct elf32_arm_link_hash_table
*htab
;
3837 const struct elf_backend_data
*bed
;
3842 htab
= elf32_arm_hash_table (info
);
3843 dynobj
= htab
->root
.dynobj
;
3844 bed
= get_elf_backend_data (dynobj
);
3845 flags
= bed
->dynamic_sec_flags
;
3847 if (htab
->root
.iplt
== NULL
)
3849 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3850 flags
| SEC_READONLY
| SEC_CODE
);
3852 || !bfd_set_section_alignment (s
, bed
->plt_alignment
))
3854 htab
->root
.iplt
= s
;
3857 if (htab
->root
.irelplt
== NULL
)
3859 s
= bfd_make_section_anyway_with_flags (dynobj
,
3860 RELOC_SECTION (htab
, ".iplt"),
3861 flags
| SEC_READONLY
);
3863 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3865 htab
->root
.irelplt
= s
;
3868 if (htab
->root
.igotplt
== NULL
)
3870 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3872 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3874 htab
->root
.igotplt
= s
;
3879 /* Determine if we're dealing with a Thumb only architecture. */
3882 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3885 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3886 Tag_CPU_arch_profile
);
3889 return profile
== 'M';
3891 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3893 /* Force return logic to be reviewed for each new architecture. */
3894 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3896 if (arch
== TAG_CPU_ARCH_V6_M
3897 || arch
== TAG_CPU_ARCH_V6S_M
3898 || arch
== TAG_CPU_ARCH_V7E_M
3899 || arch
== TAG_CPU_ARCH_V8M_BASE
3900 || arch
== TAG_CPU_ARCH_V8M_MAIN
3901 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
)
3907 /* Determine if we're dealing with a Thumb-2 object. */
3910 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3913 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3916 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3918 return thumb_isa
== 2;
3920 /* Variant of thumb is described by the architecture tag. */
3921 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3923 /* Force return logic to be reviewed for each new architecture. */
3924 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3926 return (arch
== TAG_CPU_ARCH_V6T2
3927 || arch
== TAG_CPU_ARCH_V7
3928 || arch
== TAG_CPU_ARCH_V7E_M
3929 || arch
== TAG_CPU_ARCH_V8
3930 || arch
== TAG_CPU_ARCH_V8R
3931 || arch
== TAG_CPU_ARCH_V8M_MAIN
3932 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
);
3935 /* Determine whether Thumb-2 BL instruction is available. */
3938 using_thumb2_bl (struct elf32_arm_link_hash_table
*globals
)
3941 bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3943 /* Force return logic to be reviewed for each new architecture. */
3944 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
3946 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3947 return (arch
== TAG_CPU_ARCH_V6T2
3948 || arch
>= TAG_CPU_ARCH_V7
);
3951 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3952 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3956 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3958 struct elf32_arm_link_hash_table
*htab
;
3960 htab
= elf32_arm_hash_table (info
);
3964 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3967 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3970 if (htab
->root
.target_os
== is_vxworks
)
3972 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3975 if (bfd_link_pic (info
))
3977 htab
->plt_header_size
= 0;
3978 htab
->plt_entry_size
3979 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3983 htab
->plt_header_size
3984 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3985 htab
->plt_entry_size
3986 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
3989 if (elf_elfheader (dynobj
))
3990 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
3995 Test for thumb only architectures. Note - we cannot just call
3996 using_thumb_only() as the attributes in the output bfd have not been
3997 initialised at this point, so instead we use the input bfd. */
3998 bfd
* saved_obfd
= htab
->obfd
;
4000 htab
->obfd
= dynobj
;
4001 if (using_thumb_only (htab
))
4003 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
4004 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
4006 htab
->obfd
= saved_obfd
;
4009 if (htab
->fdpic_p
) {
4010 htab
->plt_header_size
= 0;
4011 if (info
->flags
& DF_BIND_NOW
)
4012 htab
->plt_entry_size
= 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry
) - 5);
4014 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
);
4017 if (!htab
->root
.splt
4018 || !htab
->root
.srelplt
4019 || !htab
->root
.sdynbss
4020 || (!bfd_link_pic (info
) && !htab
->root
.srelbss
))
4026 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4029 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
4030 struct elf_link_hash_entry
*dir
,
4031 struct elf_link_hash_entry
*ind
)
4033 struct elf32_arm_link_hash_entry
*edir
, *eind
;
4035 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
4036 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
4038 if (ind
->root
.type
== bfd_link_hash_indirect
)
4040 /* Copy over PLT info. */
4041 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
4042 eind
->plt
.thumb_refcount
= 0;
4043 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
4044 eind
->plt
.maybe_thumb_refcount
= 0;
4045 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
4046 eind
->plt
.noncall_refcount
= 0;
4048 /* Copy FDPIC counters. */
4049 edir
->fdpic_cnts
.gotofffuncdesc_cnt
+= eind
->fdpic_cnts
.gotofffuncdesc_cnt
;
4050 edir
->fdpic_cnts
.gotfuncdesc_cnt
+= eind
->fdpic_cnts
.gotfuncdesc_cnt
;
4051 edir
->fdpic_cnts
.funcdesc_cnt
+= eind
->fdpic_cnts
.funcdesc_cnt
;
4053 /* We should only allocate a function to .iplt once the final
4054 symbol information is known. */
4055 BFD_ASSERT (!eind
->is_iplt
);
4057 if (dir
->got
.refcount
<= 0)
4059 edir
->tls_type
= eind
->tls_type
;
4060 eind
->tls_type
= GOT_UNKNOWN
;
4064 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
4067 /* Destroy an ARM elf linker hash table. */
4070 elf32_arm_link_hash_table_free (bfd
*obfd
)
4072 struct elf32_arm_link_hash_table
*ret
4073 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
4075 bfd_hash_table_free (&ret
->stub_hash_table
);
4076 _bfd_elf_link_hash_table_free (obfd
);
4079 /* Create an ARM elf linker hash table. */
4081 static struct bfd_link_hash_table
*
4082 elf32_arm_link_hash_table_create (bfd
*abfd
)
4084 struct elf32_arm_link_hash_table
*ret
;
4085 size_t amt
= sizeof (struct elf32_arm_link_hash_table
);
4087 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
4091 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
4092 elf32_arm_link_hash_newfunc
,
4093 sizeof (struct elf32_arm_link_hash_entry
),
4100 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
4101 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
4102 #ifdef FOUR_WORD_PLT
4103 ret
->plt_header_size
= 16;
4104 ret
->plt_entry_size
= 16;
4106 ret
->plt_header_size
= 20;
4107 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
4109 ret
->use_rel
= true;
4113 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
4114 sizeof (struct elf32_arm_stub_hash_entry
)))
4116 _bfd_elf_link_hash_table_free (abfd
);
4119 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
4121 return &ret
->root
.root
;
4124 /* Determine what kind of NOPs are available. */
4127 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
4129 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
4132 /* Force return logic to be reviewed for each new architecture. */
4133 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
4135 return (arch
== TAG_CPU_ARCH_V6T2
4136 || arch
== TAG_CPU_ARCH_V6K
4137 || arch
== TAG_CPU_ARCH_V7
4138 || arch
== TAG_CPU_ARCH_V8
4139 || arch
== TAG_CPU_ARCH_V8R
4140 || arch
== TAG_CPU_ARCH_V9
);
4144 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type
)
4148 case arm_stub_long_branch_thumb_only
:
4149 case arm_stub_long_branch_thumb2_only
:
4150 case arm_stub_long_branch_thumb2_only_pure
:
4151 case arm_stub_long_branch_v4t_thumb_arm
:
4152 case arm_stub_short_branch_v4t_thumb_arm
:
4153 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4154 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4155 case arm_stub_long_branch_thumb_only_pic
:
4156 case arm_stub_cmse_branch_thumb_only
:
4167 /* Determine the type of stub needed, if any, for a call. */
4169 static enum elf32_arm_stub_type
4170 arm_type_of_stub (struct bfd_link_info
*info
,
4171 asection
*input_sec
,
4172 const Elf_Internal_Rela
*rel
,
4173 unsigned char st_type
,
4174 enum arm_st_branch_type
*actual_branch_type
,
4175 struct elf32_arm_link_hash_entry
*hash
,
4176 bfd_vma destination
,
4182 bfd_signed_vma branch_offset
;
4183 unsigned int r_type
;
4184 struct elf32_arm_link_hash_table
* globals
;
4185 bool thumb2
, thumb2_bl
, thumb_only
;
4186 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4188 enum arm_st_branch_type branch_type
= *actual_branch_type
;
4189 union gotplt_union
*root_plt
;
4190 struct arm_plt_info
*arm_plt
;
4194 if (branch_type
== ST_BRANCH_LONG
)
4197 globals
= elf32_arm_hash_table (info
);
4198 if (globals
== NULL
)
4201 thumb_only
= using_thumb_only (globals
);
4202 thumb2
= using_thumb2 (globals
);
4203 thumb2_bl
= using_thumb2_bl (globals
);
4205 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
4207 /* True for architectures that implement the thumb2 movw instruction. */
4208 thumb2_movw
= thumb2
|| (arch
== TAG_CPU_ARCH_V8M_BASE
);
4210 /* Determine where the call point is. */
4211 location
= (input_sec
->output_offset
4212 + input_sec
->output_section
->vma
4215 r_type
= ELF32_R_TYPE (rel
->r_info
);
4217 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4218 are considering a function call relocation. */
4219 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4220 || r_type
== R_ARM_THM_JUMP19
)
4221 && branch_type
== ST_BRANCH_TO_ARM
)
4222 branch_type
= ST_BRANCH_TO_THUMB
;
4224 /* For TLS call relocs, it is the caller's responsibility to provide
4225 the address of the appropriate trampoline. */
4226 if (r_type
!= R_ARM_TLS_CALL
4227 && r_type
!= R_ARM_THM_TLS_CALL
4228 && elf32_arm_get_plt_info (input_bfd
, globals
, hash
,
4229 ELF32_R_SYM (rel
->r_info
), &root_plt
,
4231 && root_plt
->offset
!= (bfd_vma
) -1)
4235 if (hash
== NULL
|| hash
->is_iplt
)
4236 splt
= globals
->root
.iplt
;
4238 splt
= globals
->root
.splt
;
4243 /* Note when dealing with PLT entries: the main PLT stub is in
4244 ARM mode, so if the branch is in Thumb mode, another
4245 Thumb->ARM stub will be inserted later just before the ARM
4246 PLT stub. If a long branch stub is needed, we'll add a
4247 Thumb->Arm one and branch directly to the ARM PLT entry.
4248 Here, we have to check if a pre-PLT Thumb->ARM stub
4249 is needed and if it will be close enough. */
4251 destination
= (splt
->output_section
->vma
4252 + splt
->output_offset
4253 + root_plt
->offset
);
4256 /* Thumb branch/call to PLT: it can become a branch to ARM
4257 or to Thumb. We must perform the same checks and
4258 corrections as in elf32_arm_final_link_relocate. */
4259 if ((r_type
== R_ARM_THM_CALL
)
4260 || (r_type
== R_ARM_THM_JUMP24
))
4262 if (globals
->use_blx
4263 && r_type
== R_ARM_THM_CALL
4266 /* If the Thumb BLX instruction is available, convert
4267 the BL to a BLX instruction to call the ARM-mode
4269 branch_type
= ST_BRANCH_TO_ARM
;
4274 /* Target the Thumb stub before the ARM PLT entry. */
4275 destination
-= PLT_THUMB_STUB_SIZE
;
4276 branch_type
= ST_BRANCH_TO_THUMB
;
4281 branch_type
= ST_BRANCH_TO_ARM
;
4285 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4286 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
4288 branch_offset
= (bfd_signed_vma
)(destination
- location
);
4290 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4291 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
4293 /* Handle cases where:
4294 - this call goes too far (different Thumb/Thumb2 max
4296 - it's a Thumb->Arm call and blx is not available, or it's a
4297 Thumb->Arm branch (not bl). A stub is needed in this case,
4298 but only if this call is not through a PLT entry. Indeed,
4299 PLT stubs handle mode switching already. */
4301 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
4302 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
4304 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
4305 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
4307 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
4308 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
4309 && (r_type
== R_ARM_THM_JUMP19
))
4310 || (branch_type
== ST_BRANCH_TO_ARM
4311 && (((r_type
== R_ARM_THM_CALL
4312 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
4313 || (r_type
== R_ARM_THM_JUMP24
)
4314 || (r_type
== R_ARM_THM_JUMP19
))
4317 /* If we need to insert a Thumb-Thumb long branch stub to a
4318 PLT, use one that branches directly to the ARM PLT
4319 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4320 stub, undo this now. */
4321 if ((branch_type
== ST_BRANCH_TO_THUMB
) && use_plt
&& !thumb_only
)
4323 branch_type
= ST_BRANCH_TO_ARM
;
4324 branch_offset
+= PLT_THUMB_STUB_SIZE
;
4327 if (branch_type
== ST_BRANCH_TO_THUMB
)
4329 /* Thumb to thumb. */
4332 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4334 (_("%pB(%pA): warning: long branch veneers used in"
4335 " section with SHF_ARM_PURECODE section"
4336 " attribute is only supported for M-profile"
4337 " targets that implement the movw instruction"),
4338 input_bfd
, input_sec
);
4340 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4342 ? ((globals
->use_blx
4343 && (r_type
== R_ARM_THM_CALL
))
4344 /* V5T and above. Stub starts with ARM code, so
4345 we must be able to switch mode before
4346 reaching it, which is only possible for 'bl'
4347 (ie R_ARM_THM_CALL relocation). */
4348 ? arm_stub_long_branch_any_thumb_pic
4349 /* On V4T, use Thumb code only. */
4350 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
4352 /* non-PIC stubs. */
4353 : ((globals
->use_blx
4354 && (r_type
== R_ARM_THM_CALL
))
4355 /* V5T and above. */
4356 ? arm_stub_long_branch_any_any
4358 : arm_stub_long_branch_v4t_thumb_thumb
);
4362 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4363 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4366 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4368 (_("%pB(%pA): warning: long branch veneers used in"
4369 " section with SHF_ARM_PURECODE section"
4370 " attribute is only supported for M-profile"
4371 " targets that implement the movw instruction"),
4372 input_bfd
, input_sec
);
4374 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4376 ? arm_stub_long_branch_thumb_only_pic
4378 : (thumb2
? arm_stub_long_branch_thumb2_only
4379 : arm_stub_long_branch_thumb_only
);
4385 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4387 (_("%pB(%pA): warning: long branch veneers used in"
4388 " section with SHF_ARM_PURECODE section"
4389 " attribute is only supported" " for M-profile"
4390 " targets that implement the movw instruction"),
4391 input_bfd
, input_sec
);
4395 && sym_sec
->owner
!= NULL
4396 && !INTERWORK_FLAG (sym_sec
->owner
))
4399 (_("%pB(%s): warning: interworking not enabled;"
4400 " first occurrence: %pB: %s call to %s"),
4401 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
4405 (bfd_link_pic (info
) | globals
->pic_veneer
)
4407 ? (r_type
== R_ARM_THM_TLS_CALL
4408 /* TLS PIC stubs. */
4409 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4410 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4411 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4412 /* V5T PIC and above. */
4413 ? arm_stub_long_branch_any_arm_pic
4415 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4417 /* non-PIC stubs. */
4418 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4419 /* V5T and above. */
4420 ? arm_stub_long_branch_any_any
4422 : arm_stub_long_branch_v4t_thumb_arm
);
4424 /* Handle v4t short branches. */
4425 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4426 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4427 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4428 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4432 else if (r_type
== R_ARM_CALL
4433 || r_type
== R_ARM_JUMP24
4434 || r_type
== R_ARM_PLT32
4435 || r_type
== R_ARM_TLS_CALL
)
4437 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4439 (_("%pB(%pA): warning: long branch veneers used in"
4440 " section with SHF_ARM_PURECODE section"
4441 " attribute is only supported for M-profile"
4442 " targets that implement the movw instruction"),
4443 input_bfd
, input_sec
);
4444 if (branch_type
== ST_BRANCH_TO_THUMB
)
4449 && sym_sec
->owner
!= NULL
4450 && !INTERWORK_FLAG (sym_sec
->owner
))
4453 (_("%pB(%s): warning: interworking not enabled;"
4454 " first occurrence: %pB: %s call to %s"),
4455 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
4458 /* We have an extra 2-bytes reach because of
4459 the mode change (bit 24 (H) of BLX encoding). */
4460 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4461 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4462 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4463 || (r_type
== R_ARM_JUMP24
)
4464 || (r_type
== R_ARM_PLT32
))
4466 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4468 ? ((globals
->use_blx
)
4469 /* V5T and above. */
4470 ? arm_stub_long_branch_any_thumb_pic
4472 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4474 /* non-PIC stubs. */
4475 : ((globals
->use_blx
)
4476 /* V5T and above. */
4477 ? arm_stub_long_branch_any_any
4479 : arm_stub_long_branch_v4t_arm_thumb
);
4485 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4486 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4489 (bfd_link_pic (info
) | globals
->pic_veneer
)
4491 ? (r_type
== R_ARM_TLS_CALL
4493 ? arm_stub_long_branch_any_tls_pic
4494 : (globals
->root
.target_os
== is_nacl
4495 ? arm_stub_long_branch_arm_nacl_pic
4496 : arm_stub_long_branch_any_arm_pic
))
4497 /* non-PIC stubs. */
4498 : (globals
->root
.target_os
== is_nacl
4499 ? arm_stub_long_branch_arm_nacl
4500 : arm_stub_long_branch_any_any
);
4505 /* If a stub is needed, record the actual destination type. */
4506 if (stub_type
!= arm_stub_none
)
4507 *actual_branch_type
= branch_type
;
4512 /* Build a name for an entry in the stub hash table. */
4515 elf32_arm_stub_name (const asection
*input_section
,
4516 const asection
*sym_sec
,
4517 const struct elf32_arm_link_hash_entry
*hash
,
4518 const Elf_Internal_Rela
*rel
,
4519 enum elf32_arm_stub_type stub_type
)
4526 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 8 + 1 + 2 + 1;
4527 stub_name
= (char *) bfd_malloc (len
);
4528 if (stub_name
!= NULL
)
4529 sprintf (stub_name
, "%08x_%s+%x_%d",
4530 input_section
->id
& 0xffffffff,
4531 hash
->root
.root
.root
.string
,
4532 (int) rel
->r_addend
& 0xffffffff,
4537 len
= 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4538 stub_name
= (char *) bfd_malloc (len
);
4539 if (stub_name
!= NULL
)
4540 sprintf (stub_name
, "%08x_%x:%x+%x_%d",
4541 input_section
->id
& 0xffffffff,
4542 sym_sec
->id
& 0xffffffff,
4543 ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
4544 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
4545 ? 0 : (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
4546 (int) rel
->r_addend
& 0xffffffff,
4553 /* Look up an entry in the stub hash. Stub entries are cached because
4554 creating the stub name takes a bit of time. */
4556 static struct elf32_arm_stub_hash_entry
*
4557 elf32_arm_get_stub_entry (const asection
*input_section
,
4558 const asection
*sym_sec
,
4559 struct elf_link_hash_entry
*hash
,
4560 const Elf_Internal_Rela
*rel
,
4561 struct elf32_arm_link_hash_table
*htab
,
4562 enum elf32_arm_stub_type stub_type
)
4564 struct elf32_arm_stub_hash_entry
*stub_entry
;
4565 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4566 const asection
*id_sec
;
4568 if ((input_section
->flags
& SEC_CODE
) == 0)
4571 /* If the input section is the CMSE stubs one and it needs a long
4572 branch stub to reach it's final destination, give up with an
4573 error message: this is not supported. See PR ld/24709. */
4574 if (!strncmp (input_section
->name
, CMSE_STUB_NAME
, strlen (CMSE_STUB_NAME
)))
4576 bfd
*output_bfd
= htab
->obfd
;
4577 asection
*out_sec
= bfd_get_section_by_name (output_bfd
, CMSE_STUB_NAME
);
4579 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4580 "(%#" PRIx64
") from destination (%#" PRIx64
")"),
4582 (uint64_t)out_sec
->output_section
->vma
4583 + out_sec
->output_offset
,
4584 (uint64_t)sym_sec
->output_section
->vma
4585 + sym_sec
->output_offset
4586 + h
->root
.root
.u
.def
.value
);
4587 /* Exit, rather than leave incompletely processed
4592 /* If this input section is part of a group of sections sharing one
4593 stub section, then use the id of the first section in the group.
4594 Stub names need to include a section id, as there may well be
4595 more than one stub used to reach say, printf, and we need to
4596 distinguish between them. */
4597 BFD_ASSERT (input_section
->id
<= htab
->top_id
);
4598 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4600 if (h
!= NULL
&& h
->stub_cache
!= NULL
4601 && h
->stub_cache
->h
== h
4602 && h
->stub_cache
->id_sec
== id_sec
4603 && h
->stub_cache
->stub_type
== stub_type
)
4605 stub_entry
= h
->stub_cache
;
4611 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4612 if (stub_name
== NULL
)
4615 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4616 stub_name
, false, false);
4618 h
->stub_cache
= stub_entry
;
4626 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4630 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type
)
4632 if (stub_type
>= max_stub_type
)
4633 abort (); /* Should be unreachable. */
4637 case arm_stub_cmse_branch_thumb_only
:
4644 abort (); /* Should be unreachable. */
4647 /* Required alignment (as a power of 2) for the dedicated section holding
4648 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4649 with input sections. */
4652 arm_dedicated_stub_output_section_required_alignment
4653 (enum elf32_arm_stub_type stub_type
)
4655 if (stub_type
>= max_stub_type
)
4656 abort (); /* Should be unreachable. */
4660 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4662 case arm_stub_cmse_branch_thumb_only
:
4666 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4670 abort (); /* Should be unreachable. */
4673 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4674 NULL if veneers of this type are interspersed with input sections. */
4677 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type
)
4679 if (stub_type
>= max_stub_type
)
4680 abort (); /* Should be unreachable. */
4684 case arm_stub_cmse_branch_thumb_only
:
4685 return CMSE_STUB_NAME
;
4688 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4692 abort (); /* Should be unreachable. */
4695 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4696 returns the address of the hash table field in HTAB holding a pointer to the
4697 corresponding input section. Otherwise, returns NULL. */
4700 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table
*htab
,
4701 enum elf32_arm_stub_type stub_type
)
4703 if (stub_type
>= max_stub_type
)
4704 abort (); /* Should be unreachable. */
4708 case arm_stub_cmse_branch_thumb_only
:
4709 return &htab
->cmse_stub_sec
;
4712 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4716 abort (); /* Should be unreachable. */
4719 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4720 is the section that branch into veneer and can be NULL if stub should go in
4721 a dedicated output section. Returns a pointer to the stub section, and the
4722 section to which the stub section will be attached (in *LINK_SEC_P).
4723 LINK_SEC_P may be NULL. */
4726 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4727 struct elf32_arm_link_hash_table
*htab
,
4728 enum elf32_arm_stub_type stub_type
)
4730 asection
*link_sec
, *out_sec
, **stub_sec_p
;
4731 const char *stub_sec_prefix
;
4732 bool dedicated_output_section
=
4733 arm_dedicated_stub_output_section_required (stub_type
);
4736 if (dedicated_output_section
)
4738 bfd
*output_bfd
= htab
->obfd
;
4739 const char *out_sec_name
=
4740 arm_dedicated_stub_output_section_name (stub_type
);
4742 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
4743 stub_sec_prefix
= out_sec_name
;
4744 align
= arm_dedicated_stub_output_section_required_alignment (stub_type
);
4745 out_sec
= bfd_get_section_by_name (output_bfd
, out_sec_name
);
4746 if (out_sec
== NULL
)
4748 _bfd_error_handler (_("no address assigned to the veneers output "
4749 "section %s"), out_sec_name
);
4755 BFD_ASSERT (section
->id
<= htab
->top_id
);
4756 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4757 BFD_ASSERT (link_sec
!= NULL
);
4758 stub_sec_p
= &htab
->stub_group
[section
->id
].stub_sec
;
4759 if (*stub_sec_p
== NULL
)
4760 stub_sec_p
= &htab
->stub_group
[link_sec
->id
].stub_sec
;
4761 stub_sec_prefix
= link_sec
->name
;
4762 out_sec
= link_sec
->output_section
;
4763 align
= htab
->root
.target_os
== is_nacl
? 4 : 3;
4766 if (*stub_sec_p
== NULL
)
4772 namelen
= strlen (stub_sec_prefix
);
4773 len
= namelen
+ sizeof (STUB_SUFFIX
);
4774 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4778 memcpy (s_name
, stub_sec_prefix
, namelen
);
4779 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4780 *stub_sec_p
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4782 if (*stub_sec_p
== NULL
)
4785 out_sec
->flags
|= SEC_ALLOC
| SEC_LOAD
| SEC_READONLY
| SEC_CODE
4786 | SEC_HAS_CONTENTS
| SEC_RELOC
| SEC_IN_MEMORY
4790 if (!dedicated_output_section
)
4791 htab
->stub_group
[section
->id
].stub_sec
= *stub_sec_p
;
4794 *link_sec_p
= link_sec
;
4799 /* Add a new stub entry to the stub hash. Not all fields of the new
4800 stub entry are initialised. */
4802 static struct elf32_arm_stub_hash_entry
*
4803 elf32_arm_add_stub (const char *stub_name
, asection
*section
,
4804 struct elf32_arm_link_hash_table
*htab
,
4805 enum elf32_arm_stub_type stub_type
)
4809 struct elf32_arm_stub_hash_entry
*stub_entry
;
4811 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
,
4813 if (stub_sec
== NULL
)
4816 /* Enter this entry into the linker stub hash table. */
4817 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4819 if (stub_entry
== NULL
)
4821 if (section
== NULL
)
4823 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4824 section
->owner
, stub_name
);
4828 stub_entry
->stub_sec
= stub_sec
;
4829 stub_entry
->stub_offset
= (bfd_vma
) -1;
4830 stub_entry
->id_sec
= link_sec
;
4835 /* Store an Arm insn into an output section not processed by
4836 elf32_arm_write_section. */
4839 put_arm_insn (struct elf32_arm_link_hash_table
* htab
,
4840 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4842 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4843 bfd_putl32 (val
, ptr
);
4845 bfd_putb32 (val
, ptr
);
4848 /* Store a 16-bit Thumb insn into an output section not processed by
4849 elf32_arm_write_section. */
4852 put_thumb_insn (struct elf32_arm_link_hash_table
* htab
,
4853 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4855 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4856 bfd_putl16 (val
, ptr
);
4858 bfd_putb16 (val
, ptr
);
4861 /* Store a Thumb2 insn into an output section not processed by
4862 elf32_arm_write_section. */
4865 put_thumb2_insn (struct elf32_arm_link_hash_table
* htab
,
4866 bfd
* output_bfd
, bfd_vma val
, bfd_byte
* ptr
)
4868 /* T2 instructions are 16-bit streamed. */
4869 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4871 bfd_putl16 ((val
>> 16) & 0xffff, ptr
);
4872 bfd_putl16 ((val
& 0xffff), ptr
+ 2);
4876 bfd_putb16 ((val
>> 16) & 0xffff, ptr
);
4877 bfd_putb16 ((val
& 0xffff), ptr
+ 2);
4881 /* If it's possible to change R_TYPE to a more efficient access
4882 model, return the new reloc type. */
4885 elf32_arm_tls_transition (struct bfd_link_info
*info
, int r_type
,
4886 struct elf_link_hash_entry
*h
)
4888 int is_local
= (h
== NULL
);
4890 if (bfd_link_dll (info
)
4891 || (h
&& h
->root
.type
== bfd_link_hash_undefweak
))
4894 /* We do not support relaxations for Old TLS models. */
4897 case R_ARM_TLS_GOTDESC
:
4898 case R_ARM_TLS_CALL
:
4899 case R_ARM_THM_TLS_CALL
:
4900 case R_ARM_TLS_DESCSEQ
:
4901 case R_ARM_THM_TLS_DESCSEQ
:
4902 return is_local
? R_ARM_TLS_LE32
: R_ARM_TLS_IE32
;
4908 static bfd_reloc_status_type elf32_arm_final_link_relocate
4909 (reloc_howto_type
*, bfd
*, bfd
*, asection
*, bfd_byte
*,
4910 Elf_Internal_Rela
*, bfd_vma
, struct bfd_link_info
*, asection
*,
4911 const char *, unsigned char, enum arm_st_branch_type
,
4912 struct elf_link_hash_entry
*, bool *, char **);
4915 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type
)
4919 case arm_stub_a8_veneer_b_cond
:
4920 case arm_stub_a8_veneer_b
:
4921 case arm_stub_a8_veneer_bl
:
4924 case arm_stub_long_branch_any_any
:
4925 case arm_stub_long_branch_v4t_arm_thumb
:
4926 case arm_stub_long_branch_thumb_only
:
4927 case arm_stub_long_branch_thumb2_only
:
4928 case arm_stub_long_branch_thumb2_only_pure
:
4929 case arm_stub_long_branch_v4t_thumb_thumb
:
4930 case arm_stub_long_branch_v4t_thumb_arm
:
4931 case arm_stub_short_branch_v4t_thumb_arm
:
4932 case arm_stub_long_branch_any_arm_pic
:
4933 case arm_stub_long_branch_any_thumb_pic
:
4934 case arm_stub_long_branch_v4t_thumb_thumb_pic
:
4935 case arm_stub_long_branch_v4t_arm_thumb_pic
:
4936 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4937 case arm_stub_long_branch_thumb_only_pic
:
4938 case arm_stub_long_branch_any_tls_pic
:
4939 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4940 case arm_stub_cmse_branch_thumb_only
:
4941 case arm_stub_a8_veneer_blx
:
4944 case arm_stub_long_branch_arm_nacl
:
4945 case arm_stub_long_branch_arm_nacl_pic
:
4949 abort (); /* Should be unreachable. */
4953 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4954 veneering (TRUE) or have their own symbol (FALSE). */
4957 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type
)
4959 if (stub_type
>= max_stub_type
)
4960 abort (); /* Should be unreachable. */
4964 case arm_stub_cmse_branch_thumb_only
:
4971 abort (); /* Should be unreachable. */
4974 /* Returns the padding needed for the dedicated section used stubs of type
4978 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type
)
4980 if (stub_type
>= max_stub_type
)
4981 abort (); /* Should be unreachable. */
4985 case arm_stub_cmse_branch_thumb_only
:
4992 abort (); /* Should be unreachable. */
4995 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4996 returns the address of the hash table field in HTAB holding the offset at
4997 which new veneers should be layed out in the stub section. */
5000 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table
*htab
,
5001 enum elf32_arm_stub_type stub_type
)
5005 case arm_stub_cmse_branch_thumb_only
:
5006 return &htab
->new_cmse_stub_offset
;
5009 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
5015 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
5019 bool removed_sg_veneer
;
5020 struct elf32_arm_stub_hash_entry
*stub_entry
;
5021 struct elf32_arm_link_hash_table
*globals
;
5022 struct bfd_link_info
*info
;
5029 const insn_sequence
*template_sequence
;
5031 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
5032 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
5034 int just_allocated
= 0;
5036 /* Massage our args to the form they really have. */
5037 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
5038 info
= (struct bfd_link_info
*) in_arg
;
5040 /* Fail if the target section could not be assigned to an output
5041 section. The user should fix his linker script. */
5042 if (stub_entry
->target_section
->output_section
== NULL
5043 && info
->non_contiguous_regions
)
5044 info
->callbacks
->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5045 "Retry without --enable-non-contiguous-regions.\n"),
5046 stub_entry
->target_section
);
5048 globals
= elf32_arm_hash_table (info
);
5049 if (globals
== NULL
)
5052 stub_sec
= stub_entry
->stub_sec
;
5054 if ((globals
->fix_cortex_a8
< 0)
5055 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
5056 /* We have to do less-strictly-aligned fixes last. */
5059 /* Assign a slot at the end of section if none assigned yet. */
5060 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
5062 stub_entry
->stub_offset
= stub_sec
->size
;
5065 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
5067 stub_bfd
= stub_sec
->owner
;
5069 /* This is the address of the stub destination. */
5070 sym_value
= (stub_entry
->target_value
5071 + stub_entry
->target_section
->output_offset
5072 + stub_entry
->target_section
->output_section
->vma
);
5074 template_sequence
= stub_entry
->stub_template
;
5075 template_size
= stub_entry
->stub_template_size
;
5078 for (i
= 0; i
< template_size
; i
++)
5080 switch (template_sequence
[i
].type
)
5084 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
5085 if (template_sequence
[i
].reloc_addend
!= 0)
5087 /* We've borrowed the reloc_addend field to mean we should
5088 insert a condition code into this (Thumb-1 branch)
5089 instruction. See THUMB16_BCOND_INSN. */
5090 BFD_ASSERT ((data
& 0xff00) == 0xd000);
5091 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
5093 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
5099 bfd_put_16 (stub_bfd
,
5100 (template_sequence
[i
].data
>> 16) & 0xffff,
5102 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
5104 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
5106 stub_reloc_idx
[nrelocs
] = i
;
5107 stub_reloc_offset
[nrelocs
++] = size
;
5113 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
5115 /* Handle cases where the target is encoded within the
5117 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
5119 stub_reloc_idx
[nrelocs
] = i
;
5120 stub_reloc_offset
[nrelocs
++] = size
;
5126 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
5127 stub_reloc_idx
[nrelocs
] = i
;
5128 stub_reloc_offset
[nrelocs
++] = size
;
5139 stub_sec
->size
+= size
;
5141 /* Stub size has already been computed in arm_size_one_stub. Check
5143 BFD_ASSERT (size
== stub_entry
->stub_size
);
5145 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5146 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
5149 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5150 to relocate in each stub. */
5152 (size
== 0 && stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
5153 BFD_ASSERT (removed_sg_veneer
|| (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
));
5155 for (i
= 0; i
< nrelocs
; i
++)
5157 Elf_Internal_Rela rel
;
5158 bool unresolved_reloc
;
5159 char *error_message
;
5161 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
5163 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
5164 rel
.r_info
= ELF32_R_INFO (0,
5165 template_sequence
[stub_reloc_idx
[i
]].r_type
);
5168 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
5169 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5170 template should refer back to the instruction after the original
5171 branch. We use target_section as Cortex-A8 erratum workaround stubs
5172 are only generated when both source and target are in the same
5174 points_to
= stub_entry
->target_section
->output_section
->vma
5175 + stub_entry
->target_section
->output_offset
5176 + stub_entry
->source_value
;
5178 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5179 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
5180 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
5181 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
5182 stub_entry
->branch_type
,
5183 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
5191 /* Calculate the template, template size and instruction size for a stub.
5192 Return value is the instruction size. */
5195 find_stub_size_and_template (enum elf32_arm_stub_type stub_type
,
5196 const insn_sequence
**stub_template
,
5197 int *stub_template_size
)
5199 const insn_sequence
*template_sequence
= NULL
;
5200 int template_size
= 0, i
;
5203 template_sequence
= stub_definitions
[stub_type
].template_sequence
;
5205 *stub_template
= template_sequence
;
5207 template_size
= stub_definitions
[stub_type
].template_size
;
5208 if (stub_template_size
)
5209 *stub_template_size
= template_size
;
5212 for (i
= 0; i
< template_size
; i
++)
5214 switch (template_sequence
[i
].type
)
5235 /* As above, but don't actually build the stub. Just bump offset so
5236 we know stub section sizes. */
5239 arm_size_one_stub (struct bfd_hash_entry
*gen_entry
,
5240 void *in_arg ATTRIBUTE_UNUSED
)
5242 struct elf32_arm_stub_hash_entry
*stub_entry
;
5243 const insn_sequence
*template_sequence
;
5244 int template_size
, size
;
5246 /* Massage our args to the form they really have. */
5247 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
5249 BFD_ASSERT ((stub_entry
->stub_type
> arm_stub_none
)
5250 && stub_entry
->stub_type
< ARRAY_SIZE (stub_definitions
));
5252 size
= find_stub_size_and_template (stub_entry
->stub_type
, &template_sequence
,
5255 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5256 if (stub_entry
->stub_template_size
)
5258 stub_entry
->stub_size
= size
;
5259 stub_entry
->stub_template
= template_sequence
;
5260 stub_entry
->stub_template_size
= template_size
;
5263 /* Already accounted for. */
5264 if (stub_entry
->stub_offset
!= (bfd_vma
) -1)
5267 size
= (size
+ 7) & ~7;
5268 stub_entry
->stub_sec
->size
+= size
;
5273 /* External entry points for sizing and building linker stubs. */
5275 /* Set up various things so that we can make a list of input sections
5276 for each output section included in the link. Returns -1 on error,
5277 0 when no stubs will be needed, and 1 on success. */
5280 elf32_arm_setup_section_lists (bfd
*output_bfd
,
5281 struct bfd_link_info
*info
)
5284 unsigned int bfd_count
;
5285 unsigned int top_id
, top_index
;
5287 asection
**input_list
, **list
;
5289 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5294 /* Count the number of input BFDs and find the top input section id. */
5295 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
5297 input_bfd
= input_bfd
->link
.next
)
5300 for (section
= input_bfd
->sections
;
5302 section
= section
->next
)
5304 if (top_id
< section
->id
)
5305 top_id
= section
->id
;
5308 htab
->bfd_count
= bfd_count
;
5310 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
5311 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
5312 if (htab
->stub_group
== NULL
)
5314 htab
->top_id
= top_id
;
5316 /* We can't use output_bfd->section_count here to find the top output
5317 section index as some sections may have been removed, and
5318 _bfd_strip_section_from_output doesn't renumber the indices. */
5319 for (section
= output_bfd
->sections
, top_index
= 0;
5321 section
= section
->next
)
5323 if (top_index
< section
->index
)
5324 top_index
= section
->index
;
5327 htab
->top_index
= top_index
;
5328 amt
= sizeof (asection
*) * (top_index
+ 1);
5329 input_list
= (asection
**) bfd_malloc (amt
);
5330 htab
->input_list
= input_list
;
5331 if (input_list
== NULL
)
5334 /* For sections we aren't interested in, mark their entries with a
5335 value we can check later. */
5336 list
= input_list
+ top_index
;
5338 *list
= bfd_abs_section_ptr
;
5339 while (list
-- != input_list
);
5341 for (section
= output_bfd
->sections
;
5343 section
= section
->next
)
5345 if ((section
->flags
& SEC_CODE
) != 0)
5346 input_list
[section
->index
] = NULL
;
5352 /* The linker repeatedly calls this function for each input section,
5353 in the order that input sections are linked into output sections.
5354 Build lists of input sections to determine groupings between which
5355 we may insert linker stubs. */
5358 elf32_arm_next_input_section (struct bfd_link_info
*info
,
5361 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5366 if (isec
->output_section
->index
<= htab
->top_index
)
5368 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
5370 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
5372 /* Steal the link_sec pointer for our list. */
5373 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5374 /* This happens to make the list in reverse order,
5375 which we reverse later. */
5376 PREV_SEC (isec
) = *list
;
5382 /* See whether we can group stub sections together. Grouping stub
5383 sections may result in fewer stubs. More importantly, we need to
5384 put all .init* and .fini* stubs at the end of the .init or
5385 .fini output sections respectively, because glibc splits the
5386 _init and _fini functions into multiple parts. Putting a stub in
5387 the middle of a function is not a good idea. */
5390 group_sections (struct elf32_arm_link_hash_table
*htab
,
5391 bfd_size_type stub_group_size
,
5392 bool stubs_always_after_branch
)
5394 asection
**list
= htab
->input_list
;
5398 asection
*tail
= *list
;
5401 if (tail
== bfd_abs_section_ptr
)
5404 /* Reverse the list: we must avoid placing stubs at the
5405 beginning of the section because the beginning of the text
5406 section may be required for an interrupt vector in bare metal
5408 #define NEXT_SEC PREV_SEC
5410 while (tail
!= NULL
)
5412 /* Pop from tail. */
5413 asection
*item
= tail
;
5414 tail
= PREV_SEC (item
);
5417 NEXT_SEC (item
) = head
;
5421 while (head
!= NULL
)
5425 bfd_vma stub_group_start
= head
->output_offset
;
5426 bfd_vma end_of_next
;
5429 while (NEXT_SEC (curr
) != NULL
)
5431 next
= NEXT_SEC (curr
);
5432 end_of_next
= next
->output_offset
+ next
->size
;
5433 if (end_of_next
- stub_group_start
>= stub_group_size
)
5434 /* End of NEXT is too far from start, so stop. */
5436 /* Add NEXT to the group. */
5440 /* OK, the size from the start to the start of CURR is less
5441 than stub_group_size and thus can be handled by one stub
5442 section. (Or the head section is itself larger than
5443 stub_group_size, in which case we may be toast.)
5444 We should really be keeping track of the total size of
5445 stubs added here, as stubs contribute to the final output
5449 next
= NEXT_SEC (head
);
5450 /* Set up this stub group. */
5451 htab
->stub_group
[head
->id
].link_sec
= curr
;
5453 while (head
!= curr
&& (head
= next
) != NULL
);
5455 /* But wait, there's more! Input sections up to stub_group_size
5456 bytes after the stub section can be handled by it too. */
5457 if (!stubs_always_after_branch
)
5459 stub_group_start
= curr
->output_offset
+ curr
->size
;
5461 while (next
!= NULL
)
5463 end_of_next
= next
->output_offset
+ next
->size
;
5464 if (end_of_next
- stub_group_start
>= stub_group_size
)
5465 /* End of NEXT is too far from stubs, so stop. */
5467 /* Add NEXT to the stub group. */
5469 next
= NEXT_SEC (head
);
5470 htab
->stub_group
[head
->id
].link_sec
= curr
;
5476 while (list
++ != htab
->input_list
+ htab
->top_index
);
5478 free (htab
->input_list
);
5483 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5487 a8_reloc_compare (const void *a
, const void *b
)
5489 const struct a8_erratum_reloc
*ra
= (const struct a8_erratum_reloc
*) a
;
5490 const struct a8_erratum_reloc
*rb
= (const struct a8_erratum_reloc
*) b
;
5492 if (ra
->from
< rb
->from
)
5494 else if (ra
->from
> rb
->from
)
5500 static struct elf_link_hash_entry
*find_thumb_glue (struct bfd_link_info
*,
5501 const char *, char **);
5503 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5504 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5505 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5509 cortex_a8_erratum_scan (bfd
*input_bfd
,
5510 struct bfd_link_info
*info
,
5511 struct a8_erratum_fix
**a8_fixes_p
,
5512 unsigned int *num_a8_fixes_p
,
5513 unsigned int *a8_fix_table_size_p
,
5514 struct a8_erratum_reloc
*a8_relocs
,
5515 unsigned int num_a8_relocs
,
5516 unsigned prev_num_a8_fixes
,
5517 bool *stub_changed_p
)
5520 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5521 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
5522 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
5523 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
5528 for (section
= input_bfd
->sections
;
5530 section
= section
->next
)
5532 bfd_byte
*contents
= NULL
;
5533 struct _arm_elf_section_data
*sec_data
;
5537 if (elf_section_type (section
) != SHT_PROGBITS
5538 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
5539 || (section
->flags
& SEC_EXCLUDE
) != 0
5540 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
5541 || (section
->output_section
== bfd_abs_section_ptr
))
5544 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
5546 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
5547 contents
= elf_section_data (section
)->this_hdr
.contents
;
5548 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
5551 sec_data
= elf32_arm_section_data (section
);
5553 for (span
= 0; span
< sec_data
->mapcount
; span
++)
5555 unsigned int span_start
= sec_data
->map
[span
].vma
;
5556 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
5557 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
5559 char span_type
= sec_data
->map
[span
].type
;
5560 bool last_was_32bit
= false, last_was_branch
= false;
5562 if (span_type
!= 't')
5565 /* Span is entirely within a single 4KB region: skip scanning. */
5566 if (((base_vma
+ span_start
) & ~0xfff)
5567 == ((base_vma
+ span_end
) & ~0xfff))
5570 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5572 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5573 * The branch target is in the same 4KB region as the
5574 first half of the branch.
5575 * The instruction before the branch is a 32-bit
5576 length non-branch instruction. */
5577 for (i
= span_start
; i
< span_end
;)
5579 unsigned int insn
= bfd_getl16 (&contents
[i
]);
5580 bool insn_32bit
= false, is_blx
= false, is_b
= false;
5581 bool is_bl
= false, is_bcc
= false, is_32bit_branch
;
5583 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
5588 /* Load the rest of the insn (in manual-friendly order). */
5589 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
5591 /* Encoding T4: B<c>.W. */
5592 is_b
= (insn
& 0xf800d000) == 0xf0009000;
5593 /* Encoding T1: BL<c>.W. */
5594 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
5595 /* Encoding T2: BLX<c>.W. */
5596 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
5597 /* Encoding T3: B<c>.W (not permitted in IT block). */
5598 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5599 && (insn
& 0x07f00000) != 0x03800000;
5602 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5604 if (((base_vma
+ i
) & 0xfff) == 0xffe
5608 && ! last_was_branch
)
5610 bfd_signed_vma offset
= 0;
5611 bool force_target_arm
= false;
5612 bool force_target_thumb
= false;
5614 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5615 struct a8_erratum_reloc key
, *found
;
5616 bool use_plt
= false;
5618 key
.from
= base_vma
+ i
;
5619 found
= (struct a8_erratum_reloc
*)
5620 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5621 sizeof (struct a8_erratum_reloc
),
5626 char *error_message
= NULL
;
5627 struct elf_link_hash_entry
*entry
;
5629 /* We don't care about the error returned from this
5630 function, only if there is glue or not. */
5631 entry
= find_thumb_glue (info
, found
->sym_name
,
5635 found
->non_a8_stub
= true;
5637 /* Keep a simpler condition, for the sake of clarity. */
5638 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5639 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5642 if (found
->r_type
== R_ARM_THM_CALL
)
5644 if (found
->branch_type
== ST_BRANCH_TO_ARM
5646 force_target_arm
= true;
5648 force_target_thumb
= true;
5652 /* Check if we have an offending branch instruction. */
5654 if (found
&& found
->non_a8_stub
)
5655 /* We've already made a stub for this instruction, e.g.
5656 it's a long branch or a Thumb->ARM stub. Assume that
5657 stub will suffice to work around the A8 erratum (see
5658 setting of always_after_branch above). */
5662 offset
= (insn
& 0x7ff) << 1;
5663 offset
|= (insn
& 0x3f0000) >> 4;
5664 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5665 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5666 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5667 if (offset
& 0x100000)
5668 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5669 stub_type
= arm_stub_a8_veneer_b_cond
;
5671 else if (is_b
|| is_bl
|| is_blx
)
5673 int s
= (insn
& 0x4000000) != 0;
5674 int j1
= (insn
& 0x2000) != 0;
5675 int j2
= (insn
& 0x800) != 0;
5679 offset
= (insn
& 0x7ff) << 1;
5680 offset
|= (insn
& 0x3ff0000) >> 4;
5684 if (offset
& 0x1000000)
5685 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5688 offset
&= ~ ((bfd_signed_vma
) 3);
5690 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5691 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5694 if (stub_type
!= arm_stub_none
)
5696 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5698 /* The original instruction is a BL, but the target is
5699 an ARM instruction. If we were not making a stub,
5700 the BL would have been converted to a BLX. Use the
5701 BLX stub instead in that case. */
5702 if (htab
->use_blx
&& force_target_arm
5703 && stub_type
== arm_stub_a8_veneer_bl
)
5705 stub_type
= arm_stub_a8_veneer_blx
;
5709 /* Conversely, if the original instruction was
5710 BLX but the target is Thumb mode, use the BL
5712 else if (force_target_thumb
5713 && stub_type
== arm_stub_a8_veneer_blx
)
5715 stub_type
= arm_stub_a8_veneer_bl
;
5721 pc_for_insn
&= ~ ((bfd_vma
) 3);
5723 /* If we found a relocation, use the proper destination,
5724 not the offset in the (unrelocated) instruction.
5725 Note this is always done if we switched the stub type
5729 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5731 /* If the stub will use a Thumb-mode branch to a
5732 PLT target, redirect it to the preceding Thumb
5734 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5735 offset
-= PLT_THUMB_STUB_SIZE
;
5737 target
= pc_for_insn
+ offset
;
5739 /* The BLX stub is ARM-mode code. Adjust the offset to
5740 take the different PC value (+8 instead of +4) into
5742 if (stub_type
== arm_stub_a8_veneer_blx
)
5745 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5747 char *stub_name
= NULL
;
5749 if (num_a8_fixes
== a8_fix_table_size
)
5751 a8_fix_table_size
*= 2;
5752 a8_fixes
= (struct a8_erratum_fix
*)
5753 bfd_realloc (a8_fixes
,
5754 sizeof (struct a8_erratum_fix
)
5755 * a8_fix_table_size
);
5758 if (num_a8_fixes
< prev_num_a8_fixes
)
5760 /* If we're doing a subsequent scan,
5761 check if we've found the same fix as
5762 before, and try and reuse the stub
5764 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5765 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5766 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5770 *stub_changed_p
= true;
5776 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5777 if (stub_name
!= NULL
)
5778 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5781 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5782 a8_fixes
[num_a8_fixes
].section
= section
;
5783 a8_fixes
[num_a8_fixes
].offset
= i
;
5784 a8_fixes
[num_a8_fixes
].target_offset
=
5786 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5787 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5788 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5789 a8_fixes
[num_a8_fixes
].branch_type
=
5790 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5797 i
+= insn_32bit
? 4 : 2;
5798 last_was_32bit
= insn_32bit
;
5799 last_was_branch
= is_32bit_branch
;
5803 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5807 *a8_fixes_p
= a8_fixes
;
5808 *num_a8_fixes_p
= num_a8_fixes
;
5809 *a8_fix_table_size_p
= a8_fix_table_size
;
5814 /* Create or update a stub entry depending on whether the stub can already be
5815 found in HTAB. The stub is identified by:
5816 - its type STUB_TYPE
5817 - its source branch (note that several can share the same stub) whose
5818 section and relocation (if any) are given by SECTION and IRELA
5820 - its target symbol whose input section, hash, name, value and branch type
5821 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5824 If found, the value of the stub's target symbol is updated from SYM_VALUE
5825 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5826 TRUE and the stub entry is initialized.
5828 Returns the stub that was created or updated, or NULL if an error
/* NOTE(review): this block is line-shredded by extraction -- each original
   source line is split across several physical lines and carries its
   original line number; gaps in that numbering (e.g. 5836->5839, 5843->5845)
   show that lines (braces, conditions, returns) are missing here.  Code
   tokens are kept byte-identical below; only comments were added.  */
/* Create or update the stub entry keyed by STUB_TYPE and the branch
   source/target described by the remaining parameters (see the comment
   block immediately above this function).  Returns the stub hash entry,
   or NULL on allocation failure.  */
5831 static struct elf32_arm_stub_hash_entry
*
5832 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5833 enum elf32_arm_stub_type stub_type
, asection
*section
,
5834 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5835 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5836 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5839 const asection
*id_sec
;
5841 struct elf32_arm_stub_hash_entry
*stub_entry
;
5842 unsigned int r_type
;
5843 bool sym_claimed
= arm_stub_sym_claimed (stub_type
);
/* Requesting a stub of type arm_stub_none is a caller error.  */
5845 BFD_ASSERT (stub_type
!= arm_stub_none
);
/* NOTE(review): presumably guarded by an `if (sym_claimed)' whose line was
   lost in extraction -- claimed symbols (e.g. CMSE SG veneers) reuse the
   target symbol name as the stub name.  TODO confirm against upstream.  */
5849 stub_name
= sym_name
;
5853 BFD_ASSERT (section
);
5854 BFD_ASSERT (section
->id
<= htab
->top_id
);
5856 /* Support for grouping stub sections. */
5857 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5859 /* Get the name of this stub. */
5860 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
/* Look the stub up in the stub hash table by name.  */
5866 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, false,
5868 /* The proper stub has already been created, just update its value. */
5869 if (stub_entry
!= NULL
)
5873 stub_entry
->target_value
= sym_value
;
/* No existing stub: allocate a fresh entry in SECTION's stub group and
   fill in its target/type/branch fields below.  */
5877 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5878 if (stub_entry
== NULL
)
5885 stub_entry
->target_value
= sym_value
;
5886 stub_entry
->target_section
= sym_sec
;
5887 stub_entry
->stub_type
= stub_type
;
5888 stub_entry
->h
= hash
;
5889 stub_entry
->branch_type
= branch_type
;
/* NOTE(review): the branch structure around the next assignment is lost in
   extraction; it presumably belongs to the sym_claimed case, where the
   stub's output name is the target symbol name itself.  */
5892 stub_entry
->output_name
= sym_name
;
5895 if (sym_name
== NULL
)
5896 sym_name
= "unnamed";
/* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the candidate format
   strings, so this allocation fits any of the three sprintf's below.  */
5897 stub_entry
->output_name
= (char *)
5898 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5899 + strlen (sym_name
));
5900 if (stub_entry
->output_name
== NULL
)
5906 /* For historical reasons, use the existing names for ARM-to-Thumb and
5907 Thumb-to-ARM stubs. */
5908 r_type
= ELF32_R_TYPE (irela
->r_info
);
5909 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5910 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5911 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5912 && branch_type
== ST_BRANCH_TO_ARM
)
5913 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5914 else if ((r_type
== (unsigned int) R_ARM_CALL
5915 || r_type
== (unsigned int) R_ARM_JUMP24
)
5916 && branch_type
== ST_BRANCH_TO_THUMB
)
5917 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5919 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5926 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5927 gateway veneer to transition from non secure to secure state and create them
5930 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5931 defines the conditions that govern Secure Gateway veneer creation for a
5932 given symbol <SYM> as follows:
5933 - it has function type
5934 - it has non local binding
5935 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5936 same type, binding and value as <SYM> (called normal symbol).
5937 An entry function can handle secure state transition itself in which case
5938 its special symbol would have a different value from the normal symbol.
5940 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5941 entry mapping while HTAB gives the name to hash entry mapping.
5942 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5945 The return value gives whether a stub failed to be allocated. */
/* NOTE(review): line-shredded by extraction -- original line numbers are
   embedded and gaps in them (e.g. 5976->5980, 6007->6012) show missing
   lines.  Code tokens kept byte-identical; only comments added.  */
/* Scan INPUT_BFD's symbols for __acle_se_<SYM> special symbols (CMSE
   secure entry functions, see comment block above) and create a secure
   gateway veneer stub for each valid pair; *CMSE_STUB_CREATED counts the
   stubs made.  Returns false if a stub failed to be allocated.  */
5948 cmse_scan (bfd
*input_bfd
, struct elf32_arm_link_hash_table
*htab
,
5949 obj_attribute
*out_attr
, struct elf_link_hash_entry
**sym_hashes
,
5950 int *cmse_stub_created
)
5952 const struct elf_backend_data
*bed
;
5953 Elf_Internal_Shdr
*symtab_hdr
;
5954 unsigned i
, j
, sym_count
, ext_start
;
5955 Elf_Internal_Sym
*cmse_sym
, *local_syms
;
5956 struct elf32_arm_link_hash_entry
*hash
, *cmse_hash
= NULL
;
5957 enum arm_st_branch_type branch_type
;
5958 char *sym_name
, *lsym_name
;
5961 struct elf32_arm_stub_hash_entry
*stub_entry
;
5962 bool is_v8m
, new_stub
, cmse_invalid
, ret
= true;
5964 bed
= get_elf_backend_data (input_bfd
);
5965 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
/* Total symbol count; sh_info is the index of the first global symbol,
   so [0, ext_start) are locals and [ext_start, sym_count) are globals.  */
5966 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
5967 ext_start
= symtab_hdr
->sh_info
;
5968 is_v8m
= (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
5969 && out_attr
[Tag_CPU_arch_profile
].i
== 'M');
/* Use cached local symbols if available, otherwise read them in.  */
5971 local_syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5972 if (local_syms
== NULL
)
5973 local_syms
= bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5974 symtab_hdr
->sh_info
, 0, NULL
, NULL
,
5976 if (symtab_hdr
->sh_info
&& local_syms
== NULL
)
/* Walk every symbol, locals first then globals.  */
5980 for (i
= 0; i
< sym_count
; i
++)
5982 cmse_invalid
= false;
/* NOTE(review): presumably the i < ext_start (local symbol) branch --
   the surrounding condition lines were lost in extraction.  */
5986 cmse_sym
= &local_syms
[i
];
5987 sym_name
= bfd_elf_string_from_elf_section (input_bfd
,
5988 symtab_hdr
->sh_link
,
5990 if (!sym_name
|| !startswith (sym_name
, CMSE_PREFIX
))
5993 /* Special symbol with local binding. */
5994 cmse_invalid
= true;
/* Global symbol path: fetch its hash entry and name.  */
5998 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
5999 sym_name
= (char *) cmse_hash
->root
.root
.root
.string
;
6000 if (!startswith (sym_name
, CMSE_PREFIX
))
6003 /* Special symbol has incorrect binding or type. */
6004 if ((cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
6005 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6006 || cmse_hash
->root
.type
!= STT_FUNC
)
6007 cmse_invalid
= true;
/* Special symbols require ARMv8-M or later; warn only once per file.  */
6012 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6013 "ARMv8-M architecture or later"),
6014 input_bfd
, sym_name
);
6015 is_v8m
= true; /* Avoid multiple warning. */
6021 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6022 " a global or weak function symbol"),
6023 input_bfd
, sym_name
);
/* Strip the CMSE prefix to obtain the normal symbol's name, then look
   it up in the global hash table.  */
6029 sym_name
+= strlen (CMSE_PREFIX
);
6030 hash
= (struct elf32_arm_link_hash_entry
*)
6031 elf_link_hash_lookup (&(htab
)->root
, sym_name
, false, false, true);
6033 /* No associated normal symbol or it is neither global nor weak. */
6035 || (hash
->root
.root
.type
!= bfd_link_hash_defined
6036 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6037 || hash
->root
.type
!= STT_FUNC
)
6039 /* Initialize here to avoid warning about use of possibly
6040 uninitialized variable. */
6045 /* Searching for a normal symbol with local binding. */
6046 for (; j
< ext_start
; j
++)
6049 bfd_elf_string_from_elf_section (input_bfd
,
6050 symtab_hdr
->sh_link
,
6051 local_syms
[j
].st_name
);
6052 if (!strcmp (sym_name
, lsym_name
))
/* A normal symbol exists but is local (hash) or only found among the
   locals (j < ext_start): reject; otherwise it is absent entirely.  */
6057 if (hash
|| j
< ext_start
)
6060 (_("%pB: invalid standard symbol `%s'; it must be "
6061 "a global or weak function symbol"),
6062 input_bfd
, sym_name
);
6066 (_("%pB: absent standard symbol `%s'"), input_bfd
, sym_name
);
6072 sym_value
= hash
->root
.root
.u
.def
.value
;
6073 section
= hash
->root
.root
.u
.def
.section
;
/* Special and normal symbols must live in the same section...  */
6075 if (cmse_hash
->root
.root
.u
.def
.section
!= section
)
6078 (_("%pB: `%s' and its special symbol are in different sections"),
6079 input_bfd
, sym_name
);
/* ...but may differ in value when the function handles the state
   transition itself (starts with SG).  */
6082 if (cmse_hash
->root
.root
.u
.def
.value
!= sym_value
)
6083 continue; /* Ignore: could be an entry function starting with SG. */
6085 /* If this section is a link-once section that will be discarded, then
6086 don't create any stubs. */
6087 if (section
->output_section
== NULL
)
6090 (_("%pB: entry function `%s' not output"), input_bfd
, sym_name
);
6094 if (hash
->root
.size
== 0)
6097 (_("%pB: entry function `%s' is empty"), input_bfd
, sym_name
);
/* Create the secure gateway veneer stub for this entry function.  */
6103 branch_type
= ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6105 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6106 NULL
, NULL
, section
, hash
, sym_name
,
6107 sym_value
, branch_type
, &new_stub
);
6109 if (stub_entry
== NULL
)
6113 BFD_ASSERT (new_stub
);
6114 (*cmse_stub_created
)++;
/* NOTE(review): cleanup path -- presumably frees or caches the symbol
   buffer read above; the surrounding lines were lost in extraction.  */
6118 if (!symtab_hdr
->contents
)
6123 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6124 code entry function, ie can be called from non secure code without using a
/* NOTE(review): line-shredded by extraction (embedded original line numbers;
   gaps such as 6131->6136 and 6147->6151 show missing lines).  Code tokens
   kept byte-identical; only comments added.  */
/* Return TRUE iff the symbol in HASH is a defined function whose first
   32 bits are the SG instruction encoding (0xe97f e97f), i.e. a secure
   entry function callable from non-secure code.  */
6128 cmse_entry_fct_p (struct elf32_arm_link_hash_entry
*hash
)
6130 bfd_byte contents
[4];
6131 uint32_t first_insn
;
6136 /* Defined symbol of function type. */
6137 if (hash
->root
.root
.type
!= bfd_link_hash_defined
6138 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6140 if (hash
->root
.type
!= STT_FUNC
)
6143 /* Read first instruction. */
6144 section
= hash
->root
.root
.u
.def
.section
;
6145 abfd
= section
->owner
;
/* Symbol value is section-relative plus vma; convert back to a section
   offset before reading the contents.  */
6146 offset
= hash
->root
.root
.u
.def
.value
- section
->vma
;
6147 if (!bfd_get_section_contents (abfd
, section
, contents
, offset
,
6151 first_insn
= bfd_get_32 (abfd
, contents
);
6153 /* Starts by SG instruction. */
6154 return first_insn
== 0xe97fe97f;
6157 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6158 secure gateway veneers (ie. the veneers was not in the input import library)
6159 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
/* NOTE(review): line-shredded by extraction; code tokens kept byte-identical,
   only comments added.  */
/* bfd_hash_traverse callback (see comment block above): print the name of
   GEN_ENTRY's veneer when it is a new SG veneer and no output import
   library is being produced (GEN_INFO->out_implib_bfd is NULL).  */
6162 arm_list_new_cmse_stub (struct bfd_hash_entry
*gen_entry
, void *gen_info
)
6164 struct elf32_arm_stub_hash_entry
*stub_entry
;
6165 struct bfd_link_info
*info
;
6167 /* Massage our args to the form they really have. */
6168 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
6169 info
= (struct bfd_link_info
*) gen_info
;
/* Nothing to report when an output import library records the veneers.  */
6171 if (info
->out_implib_bfd
)
/* Only SG veneer stubs are of interest here.  */
6174 if (stub_entry
->stub_type
!= arm_stub_cmse_branch_thumb_only
)
/* A stub_offset of (bfd_vma) -1 marks a veneer whose address was not
   fixed from the input import library, i.e. a new one: list it.  */
6177 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
6178 _bfd_error_handler (" %s", stub_entry
->output_name
);
6183 /* Set offset of each secure gateway veneers so that its address remain
6184 identical to the one in the input import library referred by
6185 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6186 (present in input import library but absent from the executable being
6187 linked) or if new veneers appeared and there is no output import library
6188 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6189 number of secure gateway veneers found in the input import library.
6191 The function returns whether an error occurred. If no error occurred,
6192 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6193 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6194 veneer observed set for new veneers to be layed out after. */
/* NOTE(review): line-shredded by extraction -- embedded original line
   numbers with gaps (e.g. 6199->6206, 6227->6231) indicate missing lines
   (return type, braces, early returns).  Code tokens kept byte-identical;
   only comments added/adjusted.  */
/* Pin each SG veneer's offset so its address matches the input import
   library HTAB->in_implib_bfd (see comment block above).  On success,
   *CMSE_STUB_CREATED counts all SG veneers and HTAB->new_cmse_stub_offset
   is where new veneers get laid out.  Returns whether an error occurred.  */
6197 set_cmse_veneer_addr_from_implib (struct bfd_link_info
*info
,
6198 struct elf32_arm_link_hash_table
*htab
,
6199 int *cmse_stub_created
)
6206 asection
*stub_out_sec
;
6208 Elf_Internal_Sym
*intsym
;
6209 const char *out_sec_name
;
6210 bfd_size_type cmse_stub_size
;
6211 asymbol
**sympp
= NULL
, *sym
;
6212 struct elf32_arm_link_hash_entry
*hash
;
6213 const insn_sequence
*cmse_stub_template
;
6214 struct elf32_arm_stub_hash_entry
*stub_entry
;
6215 int cmse_stub_template_size
, new_cmse_stubs_created
= *cmse_stub_created
;
6216 bfd_vma veneer_value
, stub_offset
, next_cmse_stub_offset
;
6217 bfd_vma cmse_stub_array_start
= (bfd_vma
) -1, cmse_stub_sec_vma
= 0;
6219 /* No input secure gateway import library. */
6220 if (!htab
->in_implib_bfd
)
6223 in_implib_bfd
= htab
->in_implib_bfd
;
/* --in-implib is only meaningful together with --cmse-implib.  */
6224 if (!htab
->cmse_implib
)
6226 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6227 "Gateway import libraries"), in_implib_bfd
);
6231 /* Get symbol table size. */
6232 symsize
= bfd_get_symtab_upper_bound (in_implib_bfd
);
6236 /* Read in the input secure gateway import library's symbol table. */
6237 sympp
= (asymbol
**) bfd_malloc (symsize
);
6241 symcount
= bfd_canonicalize_symtab (in_implib_bfd
, sympp
);
6248 htab
->new_cmse_stub_offset
= 0;
/* Fetch the SG veneer template so its size is known for the offset and
   size checks below.  */
6250 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only
,
6251 &cmse_stub_template
,
6252 &cmse_stub_template_size
);
6254 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only
);
6256 bfd_get_section_by_name (htab
->obfd
, out_sec_name
);
6257 if (stub_out_sec
!= NULL
)
6258 cmse_stub_sec_vma
= stub_out_sec
->vma
;
6260 /* Set addresses of veneers mentioned in input secure gateway import
6261 library's symbol table. */
6262 for (i
= 0; i
< symcount
; i
++)
6266 sym_name
= (char *) bfd_asymbol_name (sym
);
6267 intsym
= &((elf_symbol_type
*) sym
)->internal_elf_sym
;
/* Import-library entries must be absolute, global-or-weak Thumb function
   symbols; anything else is a malformed library.  */
6269 if (sym
->section
!= bfd_abs_section_ptr
6270 || !(flags
& (BSF_GLOBAL
| BSF_WEAK
))
6271 || (flags
& BSF_FUNCTION
) != BSF_FUNCTION
6272 || (ARM_GET_SYM_BRANCH_TYPE (intsym
->st_target_internal
)
6273 != ST_BRANCH_TO_THUMB
))
6275 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6276 "symbol should be absolute, global and "
6277 "refer to Thumb functions"),
6278 in_implib_bfd
, sym_name
);
/* The veneer's required offset is its old address relative to the SG
   veneer output section's start.  */
6283 veneer_value
= bfd_asymbol_value (sym
);
6284 stub_offset
= veneer_value
- cmse_stub_sec_vma
;
6285 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, sym_name
,
6287 hash
= (struct elf32_arm_link_hash_entry
*)
6288 elf_link_hash_lookup (&(htab
)->root
, sym_name
, false, false, true);
6290 /* Stub entry should have been created by cmse_scan or the symbol be of
6291 a secure function callable from non secure code. */
6292 if (!stub_entry
&& !hash
)
/* Veneer present in the import library but gone from the code: warn and
   materialize a placeholder stub at the old address anyway.  */
6297 (_("entry function `%s' disappeared from secure code"), sym_name
);
6298 hash
= (struct elf32_arm_link_hash_entry
*)
6299 elf_link_hash_lookup (&(htab
)->root
, sym_name
, true, true, true);
6301 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6302 NULL
, NULL
, bfd_abs_section_ptr
, hash
,
6303 sym_name
, veneer_value
,
6304 ST_BRANCH_TO_THUMB
, &new_stub
);
6305 if (stub_entry
== NULL
)
6309 BFD_ASSERT (new_stub
);
6310 new_cmse_stubs_created
++;
6311 (*cmse_stub_created
)++;
6313 stub_entry
->stub_template_size
= stub_entry
->stub_size
= 0;
6314 stub_entry
->stub_offset
= stub_offset
;
6316 /* Symbol found is not callable from non secure code. */
6317 else if (!stub_entry
)
6319 if (!cmse_entry_fct_p (hash
))
6321 _bfd_error_handler (_("`%s' refers to a non entry function"),
6329 /* Only stubs for SG veneers should have been created. */
6330 BFD_ASSERT (stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
)
;
6332 /* Check visibility hasn't changed. */
6333 if (!!(flags
& BSF_GLOBAL
)
6334 != (hash
->root
.root
.type
== bfd_link_hash_defined
))
6336 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd
,
/* Existing stub: pin it at the offset demanded by the import library.  */
6339 stub_entry
->stub_offset
= stub_offset
;
6342 /* Size should match that of a SG veneer. */
6343 if (intsym
->st_size
!= cmse_stub_size
)
6345 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6346 in_implib_bfd
, sym_name
);
6350 /* Previous veneer address is before current SG veneer section. */
6351 if (veneer_value
< cmse_stub_sec_vma
)
6353 /* Avoid offset underflow. */
6355 stub_entry
->stub_offset
= 0;
6360 /* Complain if stub offset not a multiple of stub size. */
6361 if (stub_offset
% cmse_stub_size
)
6364 (_("offset of veneer for entry function `%s' not a multiple of "
6365 "its size"), sym_name
);
/* This veneer was pre-existing, not new; also track the lowest veneer
   address seen and the first offset free for genuinely new veneers
   (stub size rounded up to an 8-byte boundary).  */
6372 new_cmse_stubs_created
--;
6373 if (veneer_value
< cmse_stub_array_start
)
6374 cmse_stub_array_start
= veneer_value
;
6375 next_cmse_stub_offset
= stub_offset
+ ((cmse_stub_size
+ 7) & ~7);
6376 if (next_cmse_stub_offset
> htab
->new_cmse_stub_offset
)
6377 htab
->new_cmse_stub_offset
= next_cmse_stub_offset
;
/* New veneers appeared but there is no output import library to record
   them: report each one by traversing the stub hash table.  */
6380 if (!info
->out_implib_bfd
&& new_cmse_stubs_created
!= 0)
6382 BFD_ASSERT (new_cmse_stubs_created
> 0);
6384 (_("new entry function(s) introduced but no output import library "
6386 bfd_hash_traverse (&htab
->stub_hash_table
, arm_list_new_cmse_stub
, info
);
/* The SG veneer array must start exactly where it did last link.  */
6389 if (cmse_stub_array_start
!= cmse_stub_sec_vma
)
6392 (_("start address of `%s' is different from previous link"),
6402 /* Determine and set the size of the stub section for a final link.
6404 The basic idea here is to examine all the relocations looking for
6405 PC-relative calls to a target that is unreachable with a "bl"
6409 elf32_arm_size_stubs (bfd
*output_bfd
,
6411 struct bfd_link_info
*info
,
6412 bfd_signed_vma group_size
,
6413 asection
* (*add_stub_section
) (const char *, asection
*,
6416 void (*layout_sections_again
) (void))
6419 obj_attribute
*out_attr
;
6420 int cmse_stub_created
= 0;
6421 bfd_size_type stub_group_size
;
6422 bool m_profile
, stubs_always_after_branch
, first_veneer_scan
= true;
6423 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
6424 struct a8_erratum_fix
*a8_fixes
= NULL
;
6425 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
6426 struct a8_erratum_reloc
*a8_relocs
= NULL
;
6427 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
6432 if (htab
->fix_cortex_a8
)
6434 a8_fixes
= (struct a8_erratum_fix
*)
6435 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
6436 a8_relocs
= (struct a8_erratum_reloc
*)
6437 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
6440 /* Propagate mach to stub bfd, because it may not have been
6441 finalized when we created stub_bfd. */
6442 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
6443 bfd_get_mach (output_bfd
));
6445 /* Stash our params away. */
6446 htab
->stub_bfd
= stub_bfd
;
6447 htab
->add_stub_section
= add_stub_section
;
6448 htab
->layout_sections_again
= layout_sections_again
;
6449 stubs_always_after_branch
= group_size
< 0;
6451 out_attr
= elf_known_obj_attributes_proc (output_bfd
);
6452 m_profile
= out_attr
[Tag_CPU_arch_profile
].i
== 'M';
6454 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6455 as the first half of a 32-bit branch straddling two 4K pages. This is a
6456 crude way of enforcing that. */
6457 if (htab
->fix_cortex_a8
)
6458 stubs_always_after_branch
= 1;
6461 stub_group_size
= -group_size
;
6463 stub_group_size
= group_size
;
6465 if (stub_group_size
== 1)
6467 /* Default values. */
6468 /* Thumb branch range is +-4MB has to be used as the default
6469 maximum size (a given section can contain both ARM and Thumb
6470 code, so the worst case has to be taken into account).
6472 This value is 24K less than that, which allows for 2025
6473 12-byte stubs. If we exceed that, then we will fail to link.
6474 The user will have to relink with an explicit group size
6476 stub_group_size
= 4170000;
6479 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
6481 /* If we're applying the cortex A8 fix, we need to determine the
6482 program header size now, because we cannot change it later --
6483 that could alter section placements. Notice the A8 erratum fix
6484 ends up requiring the section addresses to remain unchanged
6485 modulo the page size. That's something we cannot represent
6486 inside BFD, and we don't want to force the section alignment to
6487 be the page size. */
6488 if (htab
->fix_cortex_a8
)
6489 (*htab
->layout_sections_again
) ();
6494 unsigned int bfd_indx
;
6496 enum elf32_arm_stub_type stub_type
;
6497 bool stub_changed
= false;
6498 unsigned prev_num_a8_fixes
= num_a8_fixes
;
6501 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
6503 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
6505 Elf_Internal_Shdr
*symtab_hdr
;
6507 Elf_Internal_Sym
*local_syms
= NULL
;
6509 if (!is_arm_elf (input_bfd
))
6511 if ((input_bfd
->flags
& DYNAMIC
) != 0
6512 && (elf_sym_hashes (input_bfd
) == NULL
6513 || (elf_dyn_lib_class (input_bfd
) & DYN_AS_NEEDED
) != 0))
6518 /* We'll need the symbol table in a second. */
6519 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
6520 if (symtab_hdr
->sh_info
== 0)
6523 /* Limit scan of symbols to object file whose profile is
6524 Microcontroller to not hinder performance in the general case. */
6525 if (m_profile
&& first_veneer_scan
)
6527 struct elf_link_hash_entry
**sym_hashes
;
6529 sym_hashes
= elf_sym_hashes (input_bfd
);
6530 if (!cmse_scan (input_bfd
, htab
, out_attr
, sym_hashes
,
6531 &cmse_stub_created
))
6532 goto error_ret_free_local
;
6534 if (cmse_stub_created
!= 0)
6535 stub_changed
= true;
6538 /* Walk over each section attached to the input bfd. */
6539 for (section
= input_bfd
->sections
;
6541 section
= section
->next
)
6543 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
6545 /* If there aren't any relocs, then there's nothing more
6547 if ((section
->flags
& SEC_RELOC
) == 0
6548 || section
->reloc_count
== 0
6549 || (section
->flags
& SEC_CODE
) == 0)
6552 /* If this section is a link-once section that will be
6553 discarded, then don't create any stubs. */
6554 if (section
->output_section
== NULL
6555 || section
->output_section
->owner
!= output_bfd
)
6558 /* Get the relocs. */
6560 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
6561 NULL
, info
->keep_memory
);
6562 if (internal_relocs
== NULL
)
6563 goto error_ret_free_local
;
6565 /* Now examine each relocation. */
6566 irela
= internal_relocs
;
6567 irelaend
= irela
+ section
->reloc_count
;
6568 for (; irela
< irelaend
; irela
++)
6570 unsigned int r_type
, r_indx
;
6573 bfd_vma destination
;
6574 struct elf32_arm_link_hash_entry
*hash
;
6575 const char *sym_name
;
6576 unsigned char st_type
;
6577 enum arm_st_branch_type branch_type
;
6578 bool created_stub
= false;
6580 r_type
= ELF32_R_TYPE (irela
->r_info
);
6581 r_indx
= ELF32_R_SYM (irela
->r_info
);
6583 if (r_type
>= (unsigned int) R_ARM_max
)
6585 bfd_set_error (bfd_error_bad_value
);
6586 error_ret_free_internal
:
6587 if (elf_section_data (section
)->relocs
== NULL
)
6588 free (internal_relocs
);
6590 error_ret_free_local
:
6591 if (symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6597 if (r_indx
>= symtab_hdr
->sh_info
)
6598 hash
= elf32_arm_hash_entry
6599 (elf_sym_hashes (input_bfd
)
6600 [r_indx
- symtab_hdr
->sh_info
]);
6602 /* Only look for stubs on branch instructions, or
6603 non-relaxed TLSCALL */
6604 if ((r_type
!= (unsigned int) R_ARM_CALL
)
6605 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
6606 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
6607 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
6608 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
6609 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
6610 && (r_type
!= (unsigned int) R_ARM_PLT32
)
6611 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
6612 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6613 && r_type
== (elf32_arm_tls_transition
6615 (struct elf_link_hash_entry
*) hash
))
6616 && ((hash
? hash
->tls_type
6617 : (elf32_arm_local_got_tls_type
6618 (input_bfd
)[r_indx
]))
6619 & GOT_TLS_GDESC
) != 0))
6622 /* Now determine the call target, its name, value,
6629 if (r_type
== (unsigned int) R_ARM_TLS_CALL
6630 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6632 /* A non-relaxed TLS call. The target is the
6633 plt-resident trampoline and nothing to do
6635 BFD_ASSERT (htab
->tls_trampoline
> 0);
6636 sym_sec
= htab
->root
.splt
;
6637 sym_value
= htab
->tls_trampoline
;
6640 branch_type
= ST_BRANCH_TO_ARM
;
6644 /* It's a local symbol. */
6645 Elf_Internal_Sym
*sym
;
6647 if (local_syms
== NULL
)
6650 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6651 if (local_syms
== NULL
)
6653 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6654 symtab_hdr
->sh_info
, 0,
6656 if (local_syms
== NULL
)
6657 goto error_ret_free_internal
;
6660 sym
= local_syms
+ r_indx
;
6661 if (sym
->st_shndx
== SHN_UNDEF
)
6662 sym_sec
= bfd_und_section_ptr
;
6663 else if (sym
->st_shndx
== SHN_ABS
)
6664 sym_sec
= bfd_abs_section_ptr
;
6665 else if (sym
->st_shndx
== SHN_COMMON
)
6666 sym_sec
= bfd_com_section_ptr
;
6669 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
6672 /* This is an undefined symbol. It can never
6676 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
6677 sym_value
= sym
->st_value
;
6678 destination
= (sym_value
+ irela
->r_addend
6679 + sym_sec
->output_offset
6680 + sym_sec
->output_section
->vma
);
6681 st_type
= ELF_ST_TYPE (sym
->st_info
);
6683 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
6685 = bfd_elf_string_from_elf_section (input_bfd
,
6686 symtab_hdr
->sh_link
,
6691 /* It's an external symbol. */
6692 while (hash
->root
.root
.type
== bfd_link_hash_indirect
6693 || hash
->root
.root
.type
== bfd_link_hash_warning
)
6694 hash
= ((struct elf32_arm_link_hash_entry
*)
6695 hash
->root
.root
.u
.i
.link
);
6697 if (hash
->root
.root
.type
== bfd_link_hash_defined
6698 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
6700 sym_sec
= hash
->root
.root
.u
.def
.section
;
6701 sym_value
= hash
->root
.root
.u
.def
.value
;
6703 struct elf32_arm_link_hash_table
*globals
=
6704 elf32_arm_hash_table (info
);
6706 /* For a destination in a shared library,
6707 use the PLT stub as target address to
6708 decide whether a branch stub is
6711 && globals
->root
.splt
!= NULL
6713 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6715 sym_sec
= globals
->root
.splt
;
6716 sym_value
= hash
->root
.plt
.offset
;
6717 if (sym_sec
->output_section
!= NULL
)
6718 destination
= (sym_value
6719 + sym_sec
->output_offset
6720 + sym_sec
->output_section
->vma
);
6722 else if (sym_sec
->output_section
!= NULL
)
6723 destination
= (sym_value
+ irela
->r_addend
6724 + sym_sec
->output_offset
6725 + sym_sec
->output_section
->vma
);
6727 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
6728 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
6730 /* For a shared library, use the PLT stub as
6731 target address to decide whether a long
6732 branch stub is needed.
6733 For absolute code, they cannot be handled. */
6734 struct elf32_arm_link_hash_table
*globals
=
6735 elf32_arm_hash_table (info
);
6738 && globals
->root
.splt
!= NULL
6740 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6742 sym_sec
= globals
->root
.splt
;
6743 sym_value
= hash
->root
.plt
.offset
;
6744 if (sym_sec
->output_section
!= NULL
)
6745 destination
= (sym_value
6746 + sym_sec
->output_offset
6747 + sym_sec
->output_section
->vma
);
6754 bfd_set_error (bfd_error_bad_value
);
6755 goto error_ret_free_internal
;
6757 st_type
= hash
->root
.type
;
6759 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6760 sym_name
= hash
->root
.root
.root
.string
;
6766 struct elf32_arm_stub_hash_entry
*stub_entry
;
6768 /* Determine what (if any) linker stub is needed. */
6769 stub_type
= arm_type_of_stub (info
, section
, irela
,
6770 st_type
, &branch_type
,
6771 hash
, destination
, sym_sec
,
6772 input_bfd
, sym_name
);
6773 if (stub_type
== arm_stub_none
)
6776 /* We've either created a stub for this reloc already,
6777 or we are about to. */
6779 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
6781 (char *) sym_name
, sym_value
,
6782 branch_type
, &new_stub
);
6784 created_stub
= stub_entry
!= NULL
;
6786 goto error_ret_free_internal
;
6790 stub_changed
= true;
6794 /* Look for relocations which might trigger Cortex-A8
6796 if (htab
->fix_cortex_a8
6797 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
6798 || r_type
== (unsigned int) R_ARM_THM_JUMP19
6799 || r_type
== (unsigned int) R_ARM_THM_CALL
6800 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
6802 bfd_vma from
= section
->output_section
->vma
6803 + section
->output_offset
6806 if ((from
& 0xfff) == 0xffe)
6808 /* Found a candidate. Note we haven't checked the
6809 destination is within 4K here: if we do so (and
6810 don't create an entry in a8_relocs) we can't tell
6811 that a branch should have been relocated when
6813 if (num_a8_relocs
== a8_reloc_table_size
)
6815 a8_reloc_table_size
*= 2;
6816 a8_relocs
= (struct a8_erratum_reloc
*)
6817 bfd_realloc (a8_relocs
,
6818 sizeof (struct a8_erratum_reloc
)
6819 * a8_reloc_table_size
);
6822 a8_relocs
[num_a8_relocs
].from
= from
;
6823 a8_relocs
[num_a8_relocs
].destination
= destination
;
6824 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
6825 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
6826 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
6827 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
6828 a8_relocs
[num_a8_relocs
].hash
= hash
;
6835 /* We're done with the internal relocs, free them. */
6836 if (elf_section_data (section
)->relocs
== NULL
)
6837 free (internal_relocs
);
6840 if (htab
->fix_cortex_a8
)
6842 /* Sort relocs which might apply to Cortex-A8 erratum. */
6843 qsort (a8_relocs
, num_a8_relocs
,
6844 sizeof (struct a8_erratum_reloc
),
6847 /* Scan for branches which might trigger Cortex-A8 erratum. */
6848 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
6849 &num_a8_fixes
, &a8_fix_table_size
,
6850 a8_relocs
, num_a8_relocs
,
6851 prev_num_a8_fixes
, &stub_changed
)
6853 goto error_ret_free_local
;
6856 if (local_syms
!= NULL
6857 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6859 if (!info
->keep_memory
)
6862 symtab_hdr
->contents
= (unsigned char *) local_syms
;
6866 if (first_veneer_scan
6867 && !set_cmse_veneer_addr_from_implib (info
, htab
,
6868 &cmse_stub_created
))
6871 if (prev_num_a8_fixes
!= num_a8_fixes
)
6872 stub_changed
= true;
6877 /* OK, we've added some stubs. Find out the new size of the
6879 for (stub_sec
= htab
->stub_bfd
->sections
;
6881 stub_sec
= stub_sec
->next
)
6883 /* Ignore non-stub sections. */
6884 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6890 /* Add new SG veneers after those already in the input import
6892 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6895 bfd_vma
*start_offset_p
;
6896 asection
**stub_sec_p
;
6898 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6899 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6900 if (start_offset_p
== NULL
)
6903 BFD_ASSERT (stub_sec_p
!= NULL
);
6904 if (*stub_sec_p
!= NULL
)
6905 (*stub_sec_p
)->size
= *start_offset_p
;
6908 /* Compute stub section size, considering padding. */
6909 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
6910 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6914 asection
**stub_sec_p
;
6916 padding
= arm_dedicated_stub_section_padding (stub_type
);
6917 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6918 /* Skip if no stub input section or no stub section padding
6920 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
6922 /* Stub section padding required but no dedicated section. */
6923 BFD_ASSERT (stub_sec_p
);
6925 size
= (*stub_sec_p
)->size
;
6926 size
= (size
+ padding
- 1) & ~(padding
- 1);
6927 (*stub_sec_p
)->size
= size
;
6930 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6931 if (htab
->fix_cortex_a8
)
6932 for (i
= 0; i
< num_a8_fixes
; i
++)
6934 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
6935 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
6937 if (stub_sec
== NULL
)
6941 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
6946 /* Ask the linker to do its stuff. */
6947 (*htab
->layout_sections_again
) ();
6948 first_veneer_scan
= false;
6951 /* Add stubs for Cortex-A8 erratum fixes now. */
6952 if (htab
->fix_cortex_a8
)
6954 for (i
= 0; i
< num_a8_fixes
; i
++)
6956 struct elf32_arm_stub_hash_entry
*stub_entry
;
6957 char *stub_name
= a8_fixes
[i
].stub_name
;
6958 asection
*section
= a8_fixes
[i
].section
;
6959 unsigned int section_id
= a8_fixes
[i
].section
->id
;
6960 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
6961 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
6962 const insn_sequence
*template_sequence
;
6963 int template_size
, size
= 0;
6965 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
6967 if (stub_entry
== NULL
)
6969 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6970 section
->owner
, stub_name
);
6974 stub_entry
->stub_sec
= stub_sec
;
6975 stub_entry
->stub_offset
= (bfd_vma
) -1;
6976 stub_entry
->id_sec
= link_sec
;
6977 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
6978 stub_entry
->source_value
= a8_fixes
[i
].offset
;
6979 stub_entry
->target_section
= a8_fixes
[i
].section
;
6980 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
6981 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
6982 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
6984 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
6988 stub_entry
->stub_size
= size
;
6989 stub_entry
->stub_template
= template_sequence
;
6990 stub_entry
->stub_template_size
= template_size
;
6993 /* Stash the Cortex-A8 erratum fix array for use later in
6994 elf32_arm_write_section(). */
6995 htab
->a8_erratum_fixes
= a8_fixes
;
6996 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
7000 htab
->a8_erratum_fixes
= NULL
;
7001 htab
->num_a8_erratum_fixes
= 0;
7006 /* Build all the stubs associated with the current output file. The
7007 stubs are kept in a hash table attached to the main linker hash
7008 table. We also set up the .plt entries for statically linked PIC
7009 functions here. This function is called via arm_elf_finish in the
7013 elf32_arm_build_stubs (struct bfd_link_info
*info
)
7016 struct bfd_hash_table
*table
;
7017 enum elf32_arm_stub_type stub_type
;
7018 struct elf32_arm_link_hash_table
*htab
;
7020 htab
= elf32_arm_hash_table (info
);
7024 for (stub_sec
= htab
->stub_bfd
->sections
;
7026 stub_sec
= stub_sec
->next
)
7030 /* Ignore non-stub sections. */
7031 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
7034 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7035 must at least be done for stub section requiring padding and for SG
7036 veneers to ensure that a non secure code branching to a removed SG
7037 veneer causes an error. */
7038 size
= stub_sec
->size
;
7039 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
7040 if (stub_sec
->contents
== NULL
&& size
!= 0)
7046 /* Add new SG veneers after those already in the input import library. */
7047 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7049 bfd_vma
*start_offset_p
;
7050 asection
**stub_sec_p
;
7052 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
7053 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
7054 if (start_offset_p
== NULL
)
7057 BFD_ASSERT (stub_sec_p
!= NULL
);
7058 if (*stub_sec_p
!= NULL
)
7059 (*stub_sec_p
)->size
= *start_offset_p
;
7062 /* Build the stubs as directed by the stub hash table. */
7063 table
= &htab
->stub_hash_table
;
7064 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7065 if (htab
->fix_cortex_a8
)
7067 /* Place the cortex a8 stubs last. */
7068 htab
->fix_cortex_a8
= -1;
7069 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7075 /* Locate the Thumb encoded calling stub for NAME. */
7077 static struct elf_link_hash_entry
*
7078 find_thumb_glue (struct bfd_link_info
*link_info
,
7080 char **error_message
)
7083 struct elf_link_hash_entry
*hash
;
7084 struct elf32_arm_link_hash_table
*hash_table
;
7086 /* We need a pointer to the armelf specific hash table. */
7087 hash_table
= elf32_arm_hash_table (link_info
);
7088 if (hash_table
== NULL
)
7091 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7092 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
7094 BFD_ASSERT (tmp_name
);
7096 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
7098 hash
= elf_link_hash_lookup
7099 (&(hash_table
)->root
, tmp_name
, false, false, true);
7102 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7103 "Thumb", tmp_name
, name
) == -1)
7104 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
7111 /* Locate the ARM encoded calling stub for NAME. */
7113 static struct elf_link_hash_entry
*
7114 find_arm_glue (struct bfd_link_info
*link_info
,
7116 char **error_message
)
7119 struct elf_link_hash_entry
*myh
;
7120 struct elf32_arm_link_hash_table
*hash_table
;
7122 /* We need a pointer to the elfarm specific hash table. */
7123 hash_table
= elf32_arm_hash_table (link_info
);
7124 if (hash_table
== NULL
)
7127 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7128 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7129 BFD_ASSERT (tmp_name
);
7131 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7133 myh
= elf_link_hash_lookup
7134 (&(hash_table
)->root
, tmp_name
, false, false, true);
7137 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7138 "ARM", tmp_name
, name
) == -1)
7139 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
7146 /* ARM->Thumb glue (static images):
7150 ldr r12, __func_addr
7153 .word func @ behave as if you saw a ARM_32 reloc.
7160 .word func @ behave as if you saw a ARM_32 reloc.
7162 (relocatable images)
7165 ldr r12, __func_offset
7171 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7172 static const insn32 a2t1_ldr_insn
= 0xe59fc000;
7173 static const insn32 a2t2_bx_r12_insn
= 0xe12fff1c;
7174 static const insn32 a2t3_func_addr_insn
= 0x00000001;
7176 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7177 static const insn32 a2t1v5_ldr_insn
= 0xe51ff004;
7178 static const insn32 a2t2v5_func_addr_insn
= 0x00000001;
7180 #define ARM2THUMB_PIC_GLUE_SIZE 16
7181 static const insn32 a2t1p_ldr_insn
= 0xe59fc004;
7182 static const insn32 a2t2p_add_pc_insn
= 0xe08cc00f;
7183 static const insn32 a2t3p_bx_r12_insn
= 0xe12fff1c;
7185 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7189 __func_from_thumb: __func_from_thumb:
7191 nop ldr r6, __func_addr
7201 #define THUMB2ARM_GLUE_SIZE 8
7202 static const insn16 t2a1_bx_pc_insn
= 0x4778;
7203 static const insn16 t2a2_noop_insn
= 0x46c0;
7204 static const insn32 t2a3_b_insn
= 0xea000000;
7206 #define VFP11_ERRATUM_VENEER_SIZE 8
7207 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7208 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7210 #define ARM_BX_VENEER_SIZE 12
7211 static const insn32 armbx1_tst_insn
= 0xe3100001;
7212 static const insn32 armbx2_moveq_insn
= 0x01a0f000;
7213 static const insn32 armbx3_bx_insn
= 0xe12fff10;
7215 #ifndef ELFARM_NABI_C_INCLUDED
7217 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
7220 bfd_byte
* contents
;
7224 /* Do not include empty glue sections in the output. */
7227 s
= bfd_get_linker_section (abfd
, name
);
7229 s
->flags
|= SEC_EXCLUDE
;
7234 BFD_ASSERT (abfd
!= NULL
);
7236 s
= bfd_get_linker_section (abfd
, name
);
7237 BFD_ASSERT (s
!= NULL
);
7239 contents
= (bfd_byte
*) bfd_zalloc (abfd
, size
);
7241 BFD_ASSERT (s
->size
== size
);
7242 s
->contents
= contents
;
7246 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
7248 struct elf32_arm_link_hash_table
* globals
;
7250 globals
= elf32_arm_hash_table (info
);
7251 BFD_ASSERT (globals
!= NULL
);
7253 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7254 globals
->arm_glue_size
,
7255 ARM2THUMB_GLUE_SECTION_NAME
);
7257 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7258 globals
->thumb_glue_size
,
7259 THUMB2ARM_GLUE_SECTION_NAME
);
7261 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7262 globals
->vfp11_erratum_glue_size
,
7263 VFP11_ERRATUM_VENEER_SECTION_NAME
);
7265 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7266 globals
->stm32l4xx_erratum_glue_size
,
7267 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7269 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7270 globals
->bx_glue_size
,
7271 ARM_BX_GLUE_SECTION_NAME
);
7276 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7277 returns the symbol identifying the stub. */
7279 static struct elf_link_hash_entry
*
7280 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
7281 struct elf_link_hash_entry
* h
)
7283 const char * name
= h
->root
.root
.string
;
7286 struct elf_link_hash_entry
* myh
;
7287 struct bfd_link_hash_entry
* bh
;
7288 struct elf32_arm_link_hash_table
* globals
;
7292 globals
= elf32_arm_hash_table (link_info
);
7293 BFD_ASSERT (globals
!= NULL
);
7294 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7296 s
= bfd_get_linker_section
7297 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
7299 BFD_ASSERT (s
!= NULL
);
7301 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7302 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7303 BFD_ASSERT (tmp_name
);
7305 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7307 myh
= elf_link_hash_lookup
7308 (&(globals
)->root
, tmp_name
, false, false, true);
7312 /* We've already seen this guy. */
7317 /* The only trick here is using hash_table->arm_glue_size as the value.
7318 Even though the section isn't allocated yet, this is where we will be
7319 putting it. The +1 on the value marks that the stub has not been
7320 output yet - not that it is a Thumb function. */
7322 val
= globals
->arm_glue_size
+ 1;
7323 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7324 tmp_name
, BSF_GLOBAL
, s
, val
,
7325 NULL
, true, false, &bh
);
7327 myh
= (struct elf_link_hash_entry
*) bh
;
7328 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7329 myh
->forced_local
= 1;
7333 if (bfd_link_pic (link_info
)
7334 || globals
->root
.is_relocatable_executable
7335 || globals
->pic_veneer
)
7336 size
= ARM2THUMB_PIC_GLUE_SIZE
;
7337 else if (globals
->use_blx
)
7338 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
7340 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
7343 globals
->arm_glue_size
+= size
;
7348 /* Allocate space for ARMv4 BX veneers. */
7351 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
7354 struct elf32_arm_link_hash_table
*globals
;
7356 struct elf_link_hash_entry
*myh
;
7357 struct bfd_link_hash_entry
*bh
;
7360 /* BX PC does not need a veneer. */
7364 globals
= elf32_arm_hash_table (link_info
);
7365 BFD_ASSERT (globals
!= NULL
);
7366 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7368 /* Check if this veneer has already been allocated. */
7369 if (globals
->bx_glue_offset
[reg
])
7372 s
= bfd_get_linker_section
7373 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
7375 BFD_ASSERT (s
!= NULL
);
7377 /* Add symbol for veneer. */
7379 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
7380 BFD_ASSERT (tmp_name
);
7382 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
7384 myh
= elf_link_hash_lookup
7385 (&(globals
)->root
, tmp_name
, false, false, false);
7387 BFD_ASSERT (myh
== NULL
);
7390 val
= globals
->bx_glue_size
;
7391 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7392 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7393 NULL
, true, false, &bh
);
7395 myh
= (struct elf_link_hash_entry
*) bh
;
7396 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7397 myh
->forced_local
= 1;
7399 s
->size
+= ARM_BX_VENEER_SIZE
;
7400 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
7401 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
7405 /* Add an entry to the code/data map for section SEC. */
7408 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
7410 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7411 unsigned int newidx
;
7413 if (sec_data
->map
== NULL
)
7415 sec_data
->map
= (elf32_arm_section_map
*)
7416 bfd_malloc (sizeof (elf32_arm_section_map
));
7417 sec_data
->mapcount
= 0;
7418 sec_data
->mapsize
= 1;
7421 newidx
= sec_data
->mapcount
++;
7423 if (sec_data
->mapcount
> sec_data
->mapsize
)
7425 sec_data
->mapsize
*= 2;
7426 sec_data
->map
= (elf32_arm_section_map
*)
7427 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
7428 * sizeof (elf32_arm_section_map
));
7433 sec_data
->map
[newidx
].vma
= vma
;
7434 sec_data
->map
[newidx
].type
= type
;
7439 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7440 veneers are handled for now. */
7443 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
7444 elf32_vfp11_erratum_list
*branch
,
7446 asection
*branch_sec
,
7447 unsigned int offset
)
7450 struct elf32_arm_link_hash_table
*hash_table
;
7452 struct elf_link_hash_entry
*myh
;
7453 struct bfd_link_hash_entry
*bh
;
7455 struct _arm_elf_section_data
*sec_data
;
7456 elf32_vfp11_erratum_list
*newerr
;
7458 hash_table
= elf32_arm_hash_table (link_info
);
7459 BFD_ASSERT (hash_table
!= NULL
);
7460 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7462 s
= bfd_get_linker_section
7463 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
7465 sec_data
= elf32_arm_section_data (s
);
7467 BFD_ASSERT (s
!= NULL
);
7469 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7470 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7471 BFD_ASSERT (tmp_name
);
7473 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7474 hash_table
->num_vfp11_fixes
);
7476 myh
= elf_link_hash_lookup
7477 (&(hash_table
)->root
, tmp_name
, false, false, false);
7479 BFD_ASSERT (myh
== NULL
);
7482 val
= hash_table
->vfp11_erratum_glue_size
;
7483 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7484 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7485 NULL
, true, false, &bh
);
7487 myh
= (struct elf_link_hash_entry
*) bh
;
7488 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7489 myh
->forced_local
= 1;
7491 /* Link veneer back to calling location. */
7492 sec_data
->erratumcount
+= 1;
7493 newerr
= (elf32_vfp11_erratum_list
*)
7494 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7496 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
7498 newerr
->u
.v
.branch
= branch
;
7499 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
7500 branch
->u
.b
.veneer
= newerr
;
7502 newerr
->next
= sec_data
->erratumlist
;
7503 sec_data
->erratumlist
= newerr
;
7505 /* A symbol for the return from the veneer. */
7506 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7507 hash_table
->num_vfp11_fixes
);
7509 myh
= elf_link_hash_lookup
7510 (&(hash_table
)->root
, tmp_name
, false, false, false);
7517 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7518 branch_sec
, val
, NULL
, true, false, &bh
);
7520 myh
= (struct elf_link_hash_entry
*) bh
;
7521 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7522 myh
->forced_local
= 1;
7526 /* Generate a mapping symbol for the veneer section, and explicitly add an
7527 entry for that symbol to the code/data map for the section. */
7528 if (hash_table
->vfp11_erratum_glue_size
== 0)
7531 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7532 ever requires this erratum fix. */
7533 _bfd_generic_link_add_one_symbol (link_info
,
7534 hash_table
->bfd_of_glue_owner
, "$a",
7535 BSF_LOCAL
, s
, 0, NULL
,
7538 myh
= (struct elf_link_hash_entry
*) bh
;
7539 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7540 myh
->forced_local
= 1;
7542 /* The elf32_arm_init_maps function only cares about symbols from input
7543 BFDs. We must make a note of this generated mapping symbol
7544 ourselves so that code byteswapping works properly in
7545 elf32_arm_write_section. */
7546 elf32_arm_section_map_add (s
, 'a', 0);
7549 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
7550 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
7551 hash_table
->num_vfp11_fixes
++;
7553 /* The offset of the veneer. */
7557 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7558 veneers need to be handled because used only in Cortex-M. */
7561 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
7562 elf32_stm32l4xx_erratum_list
*branch
,
7564 asection
*branch_sec
,
7565 unsigned int offset
,
7566 bfd_size_type veneer_size
)
7569 struct elf32_arm_link_hash_table
*hash_table
;
7571 struct elf_link_hash_entry
*myh
;
7572 struct bfd_link_hash_entry
*bh
;
7574 struct _arm_elf_section_data
*sec_data
;
7575 elf32_stm32l4xx_erratum_list
*newerr
;
7577 hash_table
= elf32_arm_hash_table (link_info
);
7578 BFD_ASSERT (hash_table
!= NULL
);
7579 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7581 s
= bfd_get_linker_section
7582 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7584 BFD_ASSERT (s
!= NULL
);
7586 sec_data
= elf32_arm_section_data (s
);
7588 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7589 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7590 BFD_ASSERT (tmp_name
);
7592 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7593 hash_table
->num_stm32l4xx_fixes
);
7595 myh
= elf_link_hash_lookup
7596 (&(hash_table
)->root
, tmp_name
, false, false, false);
7598 BFD_ASSERT (myh
== NULL
);
7601 val
= hash_table
->stm32l4xx_erratum_glue_size
;
7602 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7603 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7604 NULL
, true, false, &bh
);
7606 myh
= (struct elf_link_hash_entry
*) bh
;
7607 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7608 myh
->forced_local
= 1;
7610 /* Link veneer back to calling location. */
7611 sec_data
->stm32l4xx_erratumcount
+= 1;
7612 newerr
= (elf32_stm32l4xx_erratum_list
*)
7613 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
7615 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
7617 newerr
->u
.v
.branch
= branch
;
7618 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
7619 branch
->u
.b
.veneer
= newerr
;
7621 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7622 sec_data
->stm32l4xx_erratumlist
= newerr
;
7624 /* A symbol for the return from the veneer. */
7625 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7626 hash_table
->num_stm32l4xx_fixes
);
7628 myh
= elf_link_hash_lookup
7629 (&(hash_table
)->root
, tmp_name
, false, false, false);
7636 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7637 branch_sec
, val
, NULL
, true, false, &bh
);
7639 myh
= (struct elf_link_hash_entry
*) bh
;
7640 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7641 myh
->forced_local
= 1;
7645 /* Generate a mapping symbol for the veneer section, and explicitly add an
7646 entry for that symbol to the code/data map for the section. */
7647 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
7650 /* Creates a THUMB symbol since there is no other choice. */
7651 _bfd_generic_link_add_one_symbol (link_info
,
7652 hash_table
->bfd_of_glue_owner
, "$t",
7653 BSF_LOCAL
, s
, 0, NULL
,
7656 myh
= (struct elf_link_hash_entry
*) bh
;
7657 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7658 myh
->forced_local
= 1;
7660 /* The elf32_arm_init_maps function only cares about symbols from input
7661 BFDs. We must make a note of this generated mapping symbol
7662 ourselves so that code byteswapping works properly in
7663 elf32_arm_write_section. */
7664 elf32_arm_section_map_add (s
, 't', 0);
7667 s
->size
+= veneer_size
;
7668 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
7669 hash_table
->num_stm32l4xx_fixes
++;
7671 /* The offset of the veneer. */
7675 #define ARM_GLUE_SECTION_FLAGS \
7676 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7677 | SEC_READONLY | SEC_LINKER_CREATED)
7679 /* Create a fake section for use by the ARM backend of the linker. */
7682 arm_make_glue_section (bfd
* abfd
, const char * name
)
7686 sec
= bfd_get_linker_section (abfd
, name
);
7691 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
7694 || !bfd_set_section_alignment (sec
, 2))
7697 /* Set the gc mark to prevent the section from being removed by garbage
7698 collection, despite the fact that no relocs refer to this section. */
7704 /* Set size of .plt entries. This function is called from the
7705 linker scripts in ld/emultempl/{armelf}.em. */
7708 bfd_elf32_arm_use_long_plt (void)
7710 elf32_arm_use_long_plt_entry
= true;
7713 /* Add the glue sections to ABFD. This function is called from the
7714 linker scripts in ld/emultempl/{armelf}.em. */
7717 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
7718 struct bfd_link_info
*info
)
7720 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
7721 bool dostm32l4xx
= globals
7722 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
7725 /* If we are only performing a partial
7726 link do not bother adding the glue. */
7727 if (bfd_link_relocatable (info
))
7730 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
7731 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
7732 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
7733 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
7739 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7742 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7743 ensures they are not marked for deletion by
7744 strip_excluded_output_sections () when veneers are going to be created
7745 later. Not doing so would trigger assert on empty section size in
7746 lang_size_sections_1 (). */
7749 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
7751 enum elf32_arm_stub_type stub_type
;
7753 /* If we are only performing a partial
7754 link do not bother adding the glue. */
7755 if (bfd_link_relocatable (info
))
7758 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7761 const char *out_sec_name
;
7763 if (!arm_dedicated_stub_output_section_required (stub_type
))
7766 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
7767 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
7768 if (out_sec
!= NULL
)
7769 out_sec
->flags
|= SEC_KEEP
;
7773 /* Select a BFD to be used to hold the sections used by the glue code.
7774 This function is called from the linker scripts in ld/emultempl/
7778 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
7780 struct elf32_arm_link_hash_table
*globals
;
7782 /* If we are only performing a partial link
7783 do not bother getting a bfd to hold the glue. */
7784 if (bfd_link_relocatable (info
))
7787 /* Make sure we don't attach the glue sections to a dynamic object. */
7788 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
7790 globals
= elf32_arm_hash_table (info
);
7791 BFD_ASSERT (globals
!= NULL
);
7793 if (globals
->bfd_of_glue_owner
!= NULL
)
7796 /* Save the bfd for later use. */
7797 globals
->bfd_of_glue_owner
= abfd
;
7803 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
7807 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
7810 if (globals
->fix_arm1176
)
7812 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
7813 globals
->use_blx
= 1;
7817 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
7818 globals
->use_blx
= 1;
7823 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
7824 struct bfd_link_info
*link_info
)
7826 Elf_Internal_Shdr
*symtab_hdr
;
7827 Elf_Internal_Rela
*internal_relocs
= NULL
;
7828 Elf_Internal_Rela
*irel
, *irelend
;
7829 bfd_byte
*contents
= NULL
;
7832 struct elf32_arm_link_hash_table
*globals
;
7834 /* If we are only performing a partial link do not bother
7835 to construct any glue. */
7836 if (bfd_link_relocatable (link_info
))
7839 /* Here we have a bfd that is to be included on the link. We have a
7840 hook to do reloc rummaging, before section sizes are nailed down. */
7841 globals
= elf32_arm_hash_table (link_info
);
7842 BFD_ASSERT (globals
!= NULL
);
7844 check_use_blx (globals
);
7846 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
7848 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7853 /* PR 5398: If we have not decided to include any loadable sections in
7854 the output then we will not have a glue owner bfd. This is OK, it
7855 just means that there is nothing else for us to do here. */
7856 if (globals
->bfd_of_glue_owner
== NULL
)
7859 /* Rummage around all the relocs and map the glue vectors. */
7860 sec
= abfd
->sections
;
7865 for (; sec
!= NULL
; sec
= sec
->next
)
7867 if (sec
->reloc_count
== 0)
7870 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
7873 symtab_hdr
= & elf_symtab_hdr (abfd
);
7875 /* Load the relocs. */
7877 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, false);
7879 if (internal_relocs
== NULL
)
7882 irelend
= internal_relocs
+ sec
->reloc_count
;
7883 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
7886 unsigned long r_index
;
7888 struct elf_link_hash_entry
*h
;
7890 r_type
= ELF32_R_TYPE (irel
->r_info
);
7891 r_index
= ELF32_R_SYM (irel
->r_info
);
7893 /* These are the only relocation types we care about. */
7894 if ( r_type
!= R_ARM_PC24
7895 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
7898 /* Get the section contents if we haven't done so already. */
7899 if (contents
== NULL
)
7901 /* Get cached copy if it exists. */
7902 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7903 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7906 /* Go get them off disk. */
7907 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7912 if (r_type
== R_ARM_V4BX
)
7916 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
7917 record_arm_bx_glue (link_info
, reg
);
7921 /* If the relocation is not against a symbol it cannot concern us. */
7924 /* We don't care about local symbols. */
7925 if (r_index
< symtab_hdr
->sh_info
)
7928 /* This is an external symbol. */
7929 r_index
-= symtab_hdr
->sh_info
;
7930 h
= (struct elf_link_hash_entry
*)
7931 elf_sym_hashes (abfd
)[r_index
];
7933 /* If the relocation is against a static symbol it must be within
7934 the current section and so cannot be a cross ARM/Thumb relocation. */
7938 /* If the call will go through a PLT entry then we do not need
7940 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
7946 /* This one is a call from arm code. We need to look up
7947 the target of the call. If it is a thumb target, we
7949 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
7950 == ST_BRANCH_TO_THUMB
)
7951 record_arm_to_thumb_glue (link_info
, h
);
7959 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7963 if (elf_section_data (sec
)->relocs
!= internal_relocs
)
7964 free (internal_relocs
);
7965 internal_relocs
= NULL
;
7971 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7973 if (elf_section_data (sec
)->relocs
!= internal_relocs
)
7974 free (internal_relocs
);
7981 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7984 bfd_elf32_arm_init_maps (bfd
*abfd
)
7986 Elf_Internal_Sym
*isymbuf
;
7987 Elf_Internal_Shdr
*hdr
;
7988 unsigned int i
, localsyms
;
7990 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7991 if (! is_arm_elf (abfd
))
7994 if ((abfd
->flags
& DYNAMIC
) != 0)
7997 hdr
= & elf_symtab_hdr (abfd
);
7998 localsyms
= hdr
->sh_info
;
8000 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8001 should contain the number of local symbols, which should come before any
8002 global symbols. Mapping symbols are always local. */
8003 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
8006 /* No internal symbols read? Skip this BFD. */
8007 if (isymbuf
== NULL
)
8010 for (i
= 0; i
< localsyms
; i
++)
8012 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
8013 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
8017 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
8019 name
= bfd_elf_string_from_elf_section (abfd
,
8020 hdr
->sh_link
, isym
->st_name
);
8022 if (bfd_is_arm_special_symbol_name (name
,
8023 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
8024 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
8030 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8031 say what they wanted. */
/* NOTE(review): extraction-mangled text; comments only added.  Decides
   globals->fix_cortex_a8 from the output's build attributes when the user
   left it at the "ask the linker" value (-1).  */
8034 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8036 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8037 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8039 if (globals
== NULL
)
/* -1 == no explicit user choice; derive from Tag_CPU_arch/profile.  */
8042 if (globals
->fix_cortex_a8
== -1)
8044 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8045 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
/* Profile 'A', or 0 (unspecified -- could still be A-profile).  */
8046 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
8047 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
8048 globals
->fix_cortex_a8
= 1;
8050 globals
->fix_cortex_a8
= 0;
/* Auto-select the VFP11 denormal erratum fix mode.  Downgrades the fix to
   NONE on ARMv7+ (warning if the user explicitly asked for it) and leaves
   the default as NONE for earlier architectures.
   NOTE(review): extraction-mangled text; comments only added.  */
8056 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8058 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8059 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8061 if (globals
== NULL
)
8063 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8064 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
8066 switch (globals
->vfp11_fix
)
8068 case BFD_ARM_VFP11_FIX_DEFAULT
:
8069 case BFD_ARM_VFP11_FIX_NONE
:
8070 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
/* User explicitly selected SCALAR/VECTOR on an arch that cannot need it.  */
8074 /* Give a warning, but do as the user requests anyway. */
8075 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8076 "workaround is not necessary for target architecture"), obfd
);
8079 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
8080 /* For earlier architectures, we might need the workaround, but do not
8081 enable it by default. If users is running with broken hardware, they
8082 must enable the erratum fix explicitly. */
8083 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
/* Validate the user's STM32L4XX erratum fix selection against the output's
   build attributes: the fix is only relevant for Cortex-M4 class cores
   (ARMv7E-M, profile 'M'); warn if it was requested for anything else.
   NOTE(review): extraction-mangled text; comments only added.  */
8087 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8089 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8090 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8092 if (globals
== NULL
)
8095 /* We assume only Cortex-M4 may require the fix. */
8096 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
8097 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
8099 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
8100 /* Give a warning, but do as the user requests anyway. */
8102 (_("%pB: warning: selected STM32L4XX erratum "
8103 "workaround is not necessary for target architecture"), obfd
);
/* Which VFP11 pipeline an instruction executes in; used by the VFP11
   denormal-erratum scanner.  (The enumerator body was dropped by the
   extraction; reconstructed from the binutils source.)  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a VFP instruction / not decodable.  */
};
/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The
   return value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.
   (Extraction dropped the signature tail and braces; reconstructed.)  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    /* DP register: X:RX, biased by 32 to distinguish from SP registers.  */
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    /* SP register: RX:X.  */
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  A DP register marks both of the
   two SP bits it overlaps.  (Extraction dropped the SP branch of this
   function; reconstructed from the binutils source.)  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision register s0-s31: one bit each.  */
    *wmask |= 1 << reg;
  else if (reg < 48)
    /* Double-precision register d0-d15: the pair of SP bits it aliases.  */
    *wmask |= 3 << ((reg - 32) * 2);
}
/* Return TRUE if WMASK overwrites anything in REGS (an array of NUMREGS
   register numbers as produced by bfd_arm_vfp11_regno).  (Extraction dropped
   the return statements and declarations; reconstructed.)  */

static bool
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    {
      unsigned int reg = regs[i];

      /* Single-precision register: one bit per register.  */
      if (reg < 32 && (wmask & (1 << reg)) != 0)
	return true;

      reg -= 32;

      /* Ignore d16-d31 (and, via unsigned wrap-around, SP numbers).  */
      if (reg >= 16)
	continue;

      /* Double-precision register: overlaps two SP bits.  */
      if ((wmask & (3 << (reg * 2))) != 0)
	return true;
    }

  return false;
}
8175 /* In this function, we're interested in two things: finding input registers
8176 for VFP data-processing instructions, and finding the set of registers which
8177 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8178 hold the written set, so FLDM etc. are easy to deal with (we're only
8179 interested in 32 SP registers or 16 dp registers, due to the VFP version
8180 implemented by the chip in question). DP registers are marked by setting
8181 both SP registers in the write mask). */
/* NOTE(review): extraction-mangled and incomplete (many case bodies,
   returns and braces dropped -- see jumps in embedded line numbers).
   Comments only added; code text untouched.  Classifies INSN into a
   bfd_arm_vfp11_pipe, fills *DESTMASK with written registers and regs[]
   with the input operands of FMAC-pipeline instructions.  */
8183 static enum bfd_arm_vfp11_pipe
8184 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
8187 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
8188 bool is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
8190 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8193 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8194 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
/* pqrs selects among the data-processing opcodes below.  */
8196 pqrs
= ((insn
& 0x00800000) >> 20)
8197 | ((insn
& 0x00300000) >> 19)
8198 | ((insn
& 0x00000040) >> 6);
8202 case 0: /* fmac[sd]. */
8203 case 1: /* fnmac[sd]. */
8204 case 2: /* fmsc[sd]. */
8205 case 3: /* fnmsc[sd]. */
8207 bfd_arm_vfp11_write_mask (destmask
, fd
);
8209 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8214 case 4: /* fmul[sd]. */
8215 case 5: /* fnmul[sd]. */
8216 case 6: /* fadd[sd]. */
8217 case 7: /* fsub[sd]. */
8221 case 8: /* fdiv[sd]. */
8224 bfd_arm_vfp11_write_mask (destmask
, fd
);
8225 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8230 case 15: /* extended opcode. */
8232 unsigned int extn
= ((insn
>> 15) & 0x1e)
8233 | ((insn
>> 7) & 1);
8237 case 0: /* fcpy[sd]. */
8238 case 1: /* fabs[sd]. */
8239 case 2: /* fneg[sd]. */
8240 case 8: /* fcmp[sd]. */
8241 case 9: /* fcmpe[sd]. */
8242 case 10: /* fcmpz[sd]. */
8243 case 11: /* fcmpez[sd]. */
8244 case 16: /* fuito[sd]. */
8245 case 17: /* fsito[sd]. */
8246 case 24: /* ftoui[sd]. */
8247 case 25: /* ftouiz[sd]. */
8248 case 26: /* ftosi[sd]. */
8249 case 27: /* ftosiz[sd]. */
8250 /* These instructions will not bounce due to underflow. */
8255 case 3: /* fsqrt[sd]. */
8256 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8257 registers to cause the erratum in previous instructions. */
8258 bfd_arm_vfp11_write_mask (destmask
, fd
);
8262 case 15: /* fcvt{ds,sd}. */
8266 bfd_arm_vfp11_write_mask (destmask
, fd
);
8268 /* Only FCVTSD can underflow. */
8269 if ((insn
& 0x100) != 0)
8288 /* Two-register transfer. */
8289 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
8291 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
/* Direction bit: ARM-to-VFP transfers write VFP registers.  */
8293 if ((insn
& 0x100000) == 0)
8296 bfd_arm_vfp11_write_mask (destmask
, fm
);
8299 bfd_arm_vfp11_write_mask (destmask
, fm
);
8300 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
8306 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
8308 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8309 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
8313 case 0: /* Two-reg transfer. We should catch these above. */
8316 case 2: /* fldm[sdx]. */
8320 unsigned int i
, offset
= insn
& 0xff;
/* Mark every register in the load-multiple range as written.  */
8325 for (i
= fd
; i
< fd
+ offset
; i
++)
8326 bfd_arm_vfp11_write_mask (destmask
, i
);
8330 case 4: /* fld[sd]. */
8332 bfd_arm_vfp11_write_mask (destmask
, fd
);
8341 /* Single-register transfer. Note L==0. */
8342 else if ((insn
& 0x0f100e10) == 0x0e000a10)
8344 unsigned int opcode
= (insn
>> 21) & 7;
8345 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
8349 case 0: /* fmsr/fmdlr. */
8350 case 1: /* fmdhr. */
8351 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8352 destination register. I don't know if this is exactly right,
8353 but it is the conservative choice. */
8354 bfd_arm_vfp11_write_mask (destmask
, fn
);
8368 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
8371 /* Look for potentially-troublesome code sequences which might trigger the
8372 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8373 (available from ARM) for details of the erratum. A short version is
8374 described in ld.texinfo. */
/* NOTE(review): extraction-mangled and incomplete (state-machine branches,
   returns and braces dropped -- see jumps in embedded line numbers).
   Comments only added; code text untouched.  Scans ARM-mode spans of every
   eligible input section and records veneers for anti-dependent VFP11
   instruction pairs.  */
8377 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
8380 bfd_byte
*contents
= NULL
;
8382 int regs
[3], numregs
= 0;
8383 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8384 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
8386 if (globals
== NULL
)
8389 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8390 The states transition as follows:
8392 0 -> 1 (vector) or 0 -> 2 (scalar)
8393 A VFP FMAC-pipeline instruction has been seen. Fill
8394 regs[0]..regs[numregs-1] with its input operands. Remember this
8395 instruction in 'first_fmac'.
8398 Any instruction, except for a VFP instruction which overwrites
8403 A VFP instruction has been seen which overwrites any of regs[*].
8404 We must make a veneer! Reset state to 0 before examining next
8408 If we fail to match anything in state 2, reset to state 0 and reset
8409 the instruction pointer to the instruction after 'first_fmac'.
8411 If the VFP11 vector mode is in use, there must be at least two unrelated
8412 instructions between anti-dependent VFP11 instructions to properly avoid
8413 triggering the erratum, hence the use of the extra state 1. */
8415 /* If we are only performing a partial link do not bother
8416 to construct any glue. */
8417 if (bfd_link_relocatable (link_info
))
8420 /* Skip if this bfd does not correspond to an ELF image. */
8421 if (! is_arm_elf (abfd
))
8424 /* We should have chosen a fix type by the time we get here. */
8425 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
8427 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
8430 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8431 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8434 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8436 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
8437 struct _arm_elf_section_data
*sec_data
;
8439 /* If we don't have executable progbits, we're not interested in this
8440 section. Also skip if section is to be excluded. */
8441 if (elf_section_type (sec
) != SHT_PROGBITS
8442 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8443 || (sec
->flags
& SEC_EXCLUDE
) != 0
8444 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8445 || sec
->output_section
== bfd_abs_section_ptr
8446 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
8449 sec_data
= elf32_arm_section_data (sec
);
8451 if (sec_data
->mapcount
== 0)
8454 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8455 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8456 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
/* Sort the mapping symbols so spans can be walked in address order.  */
8459 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8460 elf32_arm_compare_mapping
);
8462 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8464 unsigned int span_start
= sec_data
->map
[span
].vma
;
8465 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8466 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8467 char span_type
= sec_data
->map
[span
].type
;
8469 /* FIXME: Only ARM mode is supported at present. We may need to
8470 support Thumb-2 mode also at some point. */
8471 if (span_type
!= 'a')
8474 for (i
= span_start
; i
< span_end
;)
8476 unsigned int next_i
= i
+ 4;
8477 unsigned int insn
= bfd_big_endian (abfd
)
8478 ? (((unsigned) contents
[i
] << 24)
8479 | (contents
[i
+ 1] << 16)
8480 | (contents
[i
+ 2] << 8)
8482 : (((unsigned) contents
[i
+ 3] << 24)
8483 | (contents
[i
+ 2] << 16)
8484 | (contents
[i
+ 1] << 8)
8486 unsigned int writemask
= 0;
8487 enum bfd_arm_vfp11_pipe vpipe
;
8492 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
8494 /* I'm assuming the VFP11 erratum can trigger with denorm
8495 operands on either the FMAC or the DS pipeline. This might
8496 lead to slightly overenthusiastic veneer insertion. */
8497 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
8499 state
= use_vector
? 1 : 2;
8501 veneer_of_insn
= insn
;
8507 int other_regs
[3], other_numregs
;
8508 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8511 if (vpipe
!= VFP11_BAD
8512 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8522 int other_regs
[3], other_numregs
;
8523 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8526 if (vpipe
!= VFP11_BAD
8527 && bfd_arm_vfp11_antidependency (writemask
, regs
,
/* FSM mismatch: rewind to the instruction after first_fmac.  */
8533 next_i
= first_fmac
+ 4;
8539 abort (); /* Should be unreachable. */
/* Anti-dependency found: record an erratum veneer for this section.  */
8544 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
8545 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
8547 elf32_arm_section_data (sec
)->erratumcount
+= 1;
8549 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
8554 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
8561 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
8566 newerr
->next
= sec_data
->erratumlist
;
8567 sec_data
->erratumlist
= newerr
;
8576 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8584 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8590 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8591 after sections have been laid out, using specially-named symbols. */
/* NOTE(review): extraction-mangled and incomplete (returns, braces and some
   statements dropped).  Comments only added; code text untouched.  */
8594 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
8595 struct bfd_link_info
*link_info
)
8598 struct elf32_arm_link_hash_table
*globals
;
8601 if (bfd_link_relocatable (link_info
))
8604 /* Skip if this bfd does not correspond to an ELF image. */
8605 if (! is_arm_elf (abfd
))
8608 globals
= elf32_arm_hash_table (link_info
)
;
8609 if (globals
== NULL
)
/* Scratch buffer for the veneer/return symbol names; +10 covers the
   formatted id digits.  */
8612 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8613 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8614 BFD_ASSERT (tmp_name
);
8616 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8618 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8619 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
8621 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8623 struct elf_link_hash_entry
*myh
;
8626 switch (errnode
->type
)
8628 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
8629 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
8630 /* Find veneer symbol. */
8631 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
8632 errnode
->u
.b
.veneer
->u
.v
.id
);
8634 myh
= elf_link_hash_lookup
8635 (&(globals
)->root
, tmp_name
, false, false, true);
8638 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8639 abfd
, "VFP11", tmp_name
);
/* Resolve the laid-out address of the veneer symbol.  */
8641 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8642 + myh
->root
.u
.def
.section
->output_offset
8643 + myh
->root
.u
.def
.value
;
8645 errnode
->u
.b
.veneer
->vma
= vma
;
8648 case VFP11_ERRATUM_ARM_VENEER
:
8649 case VFP11_ERRATUM_THUMB_VENEER
:
8650 /* Find return location. */
8651 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
8654 myh
= elf_link_hash_lookup
8655 (&(globals
)->root
, tmp_name
, false, false, true);
8658 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8659 abfd
, "VFP11", tmp_name
);
8661 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8662 + myh
->root
.u
.def
.section
->output_offset
8663 + myh
->root
.u
.def
.value
;
8665 errnode
->u
.v
.branch
->vma
= vma
;
8677 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8678 return locations after sections have been laid out, using
8679 specially-named symbols. */
/* NOTE(review): extraction-mangled and incomplete; structurally parallel to
   bfd_elf32_arm_vfp11_fix_veneer_locations.  Comments only added.  */
8682 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
8683 struct bfd_link_info
*link_info
)
8686 struct elf32_arm_link_hash_table
*globals
;
8689 if (bfd_link_relocatable (link_info
))
8692 /* Skip if this bfd does not correspond to an ELF image. */
8693 if (! is_arm_elf (abfd
))
8696 globals
= elf32_arm_hash_table (link_info
)
;
8697 if (globals
== NULL
)
8700 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8701 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8702 BFD_ASSERT (tmp_name
);
8704 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8706 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8707 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
8709 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8711 struct elf_link_hash_entry
*myh
;
8714 switch (errnode
->type
)
8716 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
8717 /* Find veneer symbol. */
8718 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
8719 errnode
->u
.b
.veneer
->u
.v
.id
);
8721 myh
= elf_link_hash_lookup
8722 (&(globals
)->root
, tmp_name
, false, false, true);
8725 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8726 abfd
, "STM32L4XX", tmp_name
);
8728 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8729 + myh
->root
.u
.def
.section
->output_offset
8730 + myh
->root
.u
.def
.value
;
8732 errnode
->u
.b
.veneer
->vma
= vma
;
8735 case STM32L4XX_ERRATUM_VENEER
:
8736 /* Find return location. */
8737 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
8740 myh
= elf_link_hash_lookup
8741 (&(globals
)->root
, tmp_name
, false, false, true);
8744 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8745 abfd
, "STM32L4XX", tmp_name
);
8747 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8748 + myh
->root
.u
.def
.section
->output_offset
8749 + myh
->root
.u
.def
.value
;
8751 errnode
->u
.v
.branch
->vma
= vma
;
8764 is_thumb2_ldmia (const insn32 insn
)
8766 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8767 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8768 return (insn
& 0xffd02000) == 0xe8900000;
8772 is_thumb2_ldmdb (const insn32 insn
)
8774 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8775 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8776 return (insn
& 0xffd02000) == 0xe9100000;
8780 is_thumb2_vldm (const insn32 insn
)
8782 /* A6.5 Extension register load or store instruction
8784 We look for SP 32-bit and DP 64-bit registers.
8785 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8786 <list> is consecutive 64-bit registers
8787 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8788 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8789 <list> is consecutive 32-bit registers
8790 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8791 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8792 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8794 (((insn
& 0xfe100f00) == 0xec100b00) ||
8795 ((insn
& 0xfe100f00) == 0xec100a00))
8796 && /* (IA without !). */
8797 (((((insn
<< 7) >> 28) & 0xd) == 0x4)
8798 /* (IA with !), includes VPOP (when reg number is SP). */
8799 || ((((insn
<< 7) >> 28) & 0xd) == 0x5)
8801 || ((((insn
<< 7) >> 28) & 0xd) == 0x9));
8804 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8806 - computes the number and the mode of memory accesses
8807 - decides if the replacement should be done:
8808 . replaces only if > 8-word accesses
8809 . or (testing purposes only) replaces all accesses. */
/* NOTE(review): extraction-mangled -- the nb_words declaration and the
   middle of the final conditional expression are missing.  Comments only
   added; code text untouched.  */
8812 stm32l4xx_need_create_replacing_stub (const insn32 insn
,
8813 bfd_arm_stm32l4xx_fix stm32l4xx_fix
)
8817 /* The field encoding the register list is the same for both LDMIA
8818 and LDMDB encodings. */
8819 if (is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
))
8820 nb_words
= elf32_arm_popcount (insn
& 0x0000ffff);
8821 else if (is_thumb2_vldm (insn
))
/* VLDM: the low byte is the register count.  */
8822 nb_words
= (insn
& 0xff);
8824 /* DEFAULT mode accounts for the real bug condition situation,
8825 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8826 return (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_DEFAULT
8828 : stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_ALL
);
8831 /* Look for potentially-troublesome code sequences which might trigger
8832 the STM STM32L4XX erratum. */
/* NOTE(review): extraction-mangled and incomplete (returns, braces and some
   statements dropped -- see jumps in embedded line numbers).  Comments only
   added; code text untouched.  Scans Thumb spans of eligible sections for
   LDM/VLDM instructions needing replacement stubs.  */
8835 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
8836 struct bfd_link_info
*link_info
)
8839 bfd_byte
*contents
= NULL
;
8840 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8842 if (globals
== NULL
)
8845 /* If we are only performing a partial link do not bother
8846 to construct any glue. */
8847 if (bfd_link_relocatable (link_info
))
8850 /* Skip if this bfd does not correspond to an ELF image. */
8851 if (! is_arm_elf (abfd
))
8854 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
8857 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8858 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8861 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8863 unsigned int i
, span
;
8864 struct _arm_elf_section_data
*sec_data
;
8866 /* If we don't have executable progbits, we're not interested in this
8867 section. Also skip if section is to be excluded. */
8868 if (elf_section_type (sec
) != SHT_PROGBITS
8869 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8870 || (sec
->flags
& SEC_EXCLUDE
) != 0
8871 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8872 || sec
->output_section
== bfd_abs_section_ptr
8873 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
8876 sec_data
= elf32_arm_section_data (sec
);
8878 if (sec_data
->mapcount
== 0)
8881 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8882 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8883 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
/* Sort mapping symbols so spans are walked in address order.  */
8886 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8887 elf32_arm_compare_mapping
);
8889 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8891 unsigned int span_start
= sec_data
->map
[span
].vma
;
8892 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8893 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8894 char span_type
= sec_data
->map
[span
].type
;
8895 int itblock_current_pos
= 0;
8897 /* Only Thumb2 mode need be supported with this CM4 specific
8898 code, we should not encounter any arm mode eg span_type
8900 if (span_type
!= 't')
8903 for (i
= span_start
; i
< span_end
;)
8905 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
8906 bool insn_32bit
= false;
8907 bool is_ldm
= false;
8908 bool is_vldm
= false;
8909 bool is_not_last_in_it_block
= false;
8911 /* The first 16-bits of all 32-bit thumb2 instructions start
8912 with opcode[15..13]=0b111 and the encoded op1 can be anything
8913 except opcode[12..11]!=0b00.
8914 See 32-bit Thumb instruction encoding. */
8915 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
8918 /* Compute the predicate that tells if the instruction
8919 is concerned by the IT block
8920 - Creates an error if there is a ldm that is not
8921 last in the IT block thus cannot be replaced
8922 - Otherwise we can create a branch at the end of the
8923 IT block, it will be controlled naturally by IT
8924 with the proper pseudo-predicate
8925 - So the only interesting predicate is the one that
8926 tells that we are not on the last item of an IT
8928 if (itblock_current_pos
!= 0)
8929 is_not_last_in_it_block
= !!--itblock_current_pos
;
8933 /* Load the rest of the insn (in manual-friendly order). */
8934 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
8935 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
8936 is_vldm
= is_thumb2_vldm (insn
);
8938 /* Veneers are created for (v)ldm depending on
8939 option flags and memory accesses conditions; but
8940 if the instruction is not the last instruction of
8941 an IT block, we cannot create a jump there, so we
8943 if ((is_ldm
|| is_vldm
)
8944 && stm32l4xx_need_create_replacing_stub
8945 (insn
, globals
->stm32l4xx_fix
))
8947 if (is_not_last_in_it_block
)
8950 /* xgettext:c-format */
8951 (_("%pB(%pA+%#x): error: multiple load detected"
8952 " in non-last IT block instruction:"
8953 " STM32L4XX veneer cannot be generated; "
8954 "use gcc option -mrestrict-it to generate"
8955 " only one instruction per IT block"),
/* Record a new erratum node and its replacing veneer.  */
8960 elf32_stm32l4xx_erratum_list
*newerr
=
8961 (elf32_stm32l4xx_erratum_list
*)
8963 (sizeof (elf32_stm32l4xx_erratum_list
));
8965 elf32_arm_section_data (sec
)
8966 ->stm32l4xx_erratumcount
+= 1;
8967 newerr
->u
.b
.insn
= insn
;
8968 /* We create only thumb branches. */
8970 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
8971 record_stm32l4xx_erratum_veneer
8972 (link_info
, newerr
, abfd
, sec
,
8975 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
8976 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
8978 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
8979 sec_data
->stm32l4xx_erratumlist
= newerr
;
8986 IT blocks are only encoded in T1
8987 Encoding T1: IT{x{y{z}}} <firstcond>
8988 1 0 1 1 - 1 1 1 1 - firstcond - mask
8989 if mask = '0000' then see 'related encodings'
8990 We don't deal with UNPREDICTABLE, just ignore these.
8991 There can be no nested IT blocks so an IT block
8992 is naturally a new one for which it is worth
8993 computing its size. */
8994 bool is_newitblock
= ((insn
& 0xff00) == 0xbf00)
8995 && ((insn
& 0x000f) != 0x0000);
8996 /* If we have a new IT block we compute its size. */
8999 /* Compute the number of instructions controlled
9000 by the IT block, it will be used to decide
9001 whether we are inside an IT block or not. */
9002 unsigned int mask
= insn
& 0x000f;
9003 itblock_current_pos
= 4 - ctz (mask
);
/* Advance by the size of the instruction just examined.  */
9007 i
+= insn_32bit
? 4 : 2;
9011 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
9019 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
9025 /* Set target relocation values needed during linking. */
/* NOTE(review): extraction-mangled; comments only added.  Copies the
   command-line parameter block PARAMS into the link hash table and the
   output BFD's tdata.  FDPIC outputs force TARGET2=R_ARM_GOT32 and PIC
   veneers regardless of PARAMS.  */
9028 bfd_elf32_arm_set_target_params (struct bfd
*output_bfd
,
9029 struct bfd_link_info
*link_info
,
9030 struct elf32_arm_params
*params
)
9032 struct elf32_arm_link_hash_table
*globals
;
9034 globals
= elf32_arm_hash_table (link_info
)
;
9035 if (globals
== NULL
)
9038 globals
->target1_is_rel
= params
->target1_is_rel
;
9039 if (globals
->fdpic_p
)
9040 globals
->target2_reloc
= R_ARM_GOT32
;
9041 else if (strcmp (params
->target2_type
, "rel") == 0)
9042 globals
->target2_reloc
= R_ARM_REL32
;
9043 else if (strcmp (params
->target2_type
, "abs") == 0)
9044 globals
->target2_reloc
= R_ARM_ABS32
;
9045 else if (strcmp (params
->target2_type
, "got-rel") == 0)
9046 globals
->target2_reloc
= R_ARM_GOT_PREL
;
/* Unknown TARGET2 type string: report it.  */
9049 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9050 params
->target2_type
);
9052 globals
->fix_v4bx
= params
->fix_v4bx
;
9053 globals
->use_blx
|= params
->use_blx
;
9054 globals
->vfp11_fix
= params
->vfp11_denorm_fix
;
9055 globals
->stm32l4xx_fix
= params
->stm32l4xx_fix
;
9056 if (globals
->fdpic_p
)
9057 globals
->pic_veneer
= 1;
9059 globals
->pic_veneer
= params
->pic_veneer
;
9060 globals
->fix_cortex_a8
= params
->fix_cortex_a8
;
9061 globals
->fix_arm1176
= params
->fix_arm1176
;
9062 globals
->cmse_implib
= params
->cmse_implib
;
9063 globals
->in_implib_bfd
= params
->in_implib_bfd
;
9065 BFD_ASSERT (is_arm_elf (output_bfd
));
9066 elf_arm_tdata (output_bfd
)->no_enum_size_warning
9067 = params
->no_enum_size_warning
;
9068 elf_arm_tdata (output_bfd
)->no_wchar_size_warning
9069 = params
->no_wchar_size_warning
;
9072 /* Replace the target offset of a Thumb bl or b.w instruction. */
/* NOTE(review): extraction-mangled (the declarations of upper/lower/
   reloc_sign were dropped).  Comments only added; code text untouched.
   Rewrites the imm10/imm11 and J1/J2/S fields of the two halfwords at INSN
   to encode OFFSET (which must be halfword-aligned).  */
9075 insert_thumb_branch (bfd
*abfd
, long int offset
, bfd_byte
*insn
)
9081 BFD_ASSERT ((offset
& 1) == 0);
9083 upper
= bfd_get_16 (abfd
, insn
);
9084 lower
= bfd_get_16 (abfd
, insn
+ 2);
/* S bit: sign of the (pre-shifted) branch offset.  */
9085 reloc_sign
= (offset
< 0) ? 1 : 0;
9086 upper
= (upper
& ~(bfd_vma
) 0x7ff)
9087 | ((offset
>> 12) & 0x3ff)
9088 | (reloc_sign
<< 10);
/* J1/J2 are the I1/I2 bits XORed with S (branch T4 encoding).  */
9089 lower
= (lower
& ~(bfd_vma
) 0x2fff)
9090 | (((!((offset
>> 23) & 1)) ^ reloc_sign
) << 13)
9091 | (((!((offset
>> 22) & 1)) ^ reloc_sign
) << 11)
9092 | ((offset
>> 1) & 0x7ff);
9093 bfd_put_16 (abfd
, upper
, insn
);
9094 bfd_put_16 (abfd
, lower
, insn
+ 2);
9097 /* Thumb code calling an ARM function. */
/* NOTE(review): extraction-mangled and incomplete (several parameters,
   returns and the ret_offset computation middle are missing).  Comments
   only added; code text untouched.  Emits the Thumb->ARM interworking glue
   (bx pc; nop; b <target>) and retargets the original BL at it.  */
9100 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
9104 asection
* input_section
,
9105 bfd_byte
* hit_data
,
9108 bfd_signed_vma addend
,
9110 char **error_message
)
9114 long int ret_offset
;
9115 struct elf_link_hash_entry
* myh
;
9116 struct elf32_arm_link_hash_table
* globals
;
9118 myh
= find_thumb_glue (info
, name
, error_message
);
9122 globals
= elf32_arm_hash_table (info
)
;
9123 BFD_ASSERT (globals
!= NULL
);
9124 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9126 my_offset
= myh
->root
.u
.def
.value
;
9128 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9129 THUMB2ARM_GLUE_SECTION_NAME
);
9131 BFD_ASSERT (s
!= NULL
);
9132 BFD_ASSERT (s
->contents
!= NULL
);
9133 BFD_ASSERT (s
->output_section
!= NULL
);
/* Odd offset means the glue for this symbol is not yet created.  */
9135 if ((my_offset
& 0x01) == 0x01)
9138 && sym_sec
->owner
!= NULL
9139 && !INTERWORK_FLAG (sym_sec
->owner
))
9142 (_("%pB(%s): warning: interworking not enabled;"
9143 " first occurrence: %pB: %s call to %s"),
9144 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
9150 myh
->root
.u
.def
.value
= my_offset
;
/* Emit the two Thumb instructions of the stub.  */
9152 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
9153 s
->contents
+ my_offset
);
9155 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
9156 s
->contents
+ my_offset
+ 2);
9159 /* Address of destination of the stub. */
9160 ((bfd_signed_vma
) val
)
9162 /* Offset from the start of the current section
9163 to the start of the stubs. */
9165 /* Offset of the start of this stub from the start of the stubs. */
9167 /* Address of the start of the current section. */
9168 + s
->output_section
->vma
)
9169 /* The branch instruction is 4 bytes into the stub. */
9171 /* ARM branches work from the pc of the instruction + 8. */
9174 put_arm_insn (globals
, output_bfd
,
9175 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
9176 s
->contents
+ my_offset
+ 4);
9179 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
9181 /* Now go back and fix up the original BL insn to point to here. */
9183 /* Address of where the stub is located. */
9184 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
9185 /* Address of where the BL is located. */
9186 - (input_section
->output_section
->vma
+ input_section
->output_offset
9188 /* Addend in the relocation. */
9190 /* Biassing for PC-relative addressing. */
9193 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
9198 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
/* NOTE(review): extraction-mangled and incomplete (several parameters and
   statements missing).  Comments only added; code text untouched.  Writes
   one of three ARM->Thumb glue sequences (PIC, BLX-capable, or classic
   v4t) into the glue section and returns the stub's hash entry.  */
9200 static struct elf_link_hash_entry
*
9201 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
9208 char ** error_message
)
9211 long int ret_offset
;
9212 struct elf_link_hash_entry
* myh
;
9213 struct elf32_arm_link_hash_table
* globals
;
9215 myh
= find_arm_glue (info
, name
, error_message
);
9219 globals
= elf32_arm_hash_table (info
)
;
9220 BFD_ASSERT (globals
!= NULL
);
9221 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9223 my_offset
= myh
->root
.u
.def
.value
;
/* Odd offset means the glue for this symbol is not yet created.  */
9225 if ((my_offset
& 0x01) == 0x01)
9228 && sym_sec
->owner
!= NULL
9229 && !INTERWORK_FLAG (sym_sec
->owner
))
9232 (_("%pB(%s): warning: interworking not enabled;"
9233 " first occurrence: %pB: %s call to %s"),
9234 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
9238 myh
->root
.u
.def
.value
= my_offset
;
9240 if (bfd_link_pic (info
)
9241 || globals
->root
.is_relocatable_executable
9242 || globals
->pic_veneer
)
9244 /* For relocatable objects we can't use absolute addresses,
9245 so construct the address from a relative offset. */
9246 /* TODO: If the offset is small it's probably worth
9247 constructing the address with adds. */
9248 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
9249 s
->contents
+ my_offset
);
9250 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
9251 s
->contents
+ my_offset
+ 4);
9252 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
9253 s
->contents
+ my_offset
+ 8);
9254 /* Adjust the offset by 4 for the position of the add,
9255 and 8 for the pipeline offset. */
9256 ret_offset
= (val
- (s
->output_offset
9257 + s
->output_section
->vma
9260 bfd_put_32 (output_bfd
, ret_offset
,
9261 s
->contents
+ my_offset
+ 12);
9263 else if (globals
->use_blx
)
/* v5+: a single LDR pc-relative plus the literal target address.  */
9265 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
9266 s
->contents
+ my_offset
);
9268 /* It's a thumb address. Add the low order bit. */
9269 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
9270 s
->contents
+ my_offset
+ 4);
/* Classic v4t sequence: ldr ip, [pc]; bx ip; .word target|1.  */
9274 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
9275 s
->contents
+ my_offset
);
9277 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
9278 s
->contents
+ my_offset
+ 4);
9280 /* It's a thumb address. Add the low order bit. */
9281 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
9282 s
->contents
+ my_offset
+ 8);
9288 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
9293 /* Arm code calling a Thumb function. */
/* NOTE(review): extraction-mangled and incomplete (several parameters,
   returns and parts of the ret_offset computation missing).  Comments only
   added; code text untouched.  Creates/reuses the ARM->Thumb stub via
   elf32_arm_create_thumb_stub and patches the original ARM branch (whose
   condition field is preserved) to jump to the stub.  */
9296 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
9300 asection
* input_section
,
9301 bfd_byte
* hit_data
,
9304 bfd_signed_vma addend
,
9306 char **error_message
)
9308 unsigned long int tmp
;
9311 long int ret_offset
;
9312 struct elf_link_hash_entry
* myh
;
9313 struct elf32_arm_link_hash_table
* globals
;
9315 globals
= elf32_arm_hash_table (info
)
;
9316 BFD_ASSERT (globals
!= NULL
);
9317 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9319 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9320 ARM2THUMB_GLUE_SECTION_NAME
);
9321 BFD_ASSERT (s
!= NULL
);
9322 BFD_ASSERT (s
->contents
!= NULL
);
9323 BFD_ASSERT (s
->output_section
!= NULL
);
9325 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
9326 sym_sec
, val
, s
, error_message
)
;
9330 my_offset
= myh
->root
.u
.def
.value
;
/* Keep only the condition/opcode byte of the original branch.  */
9331 tmp
= bfd_get_32 (input_bfd
, hit_data
);
9332 tmp
= tmp
& 0xFF000000;
9334 /* Somehow these are both 4 too far, so subtract 8. */
9335 ret_offset
= (s
->output_offset
9337 + s
->output_section
->vma
9338 - (input_section
->output_offset
9339 + input_section
->output_section
->vma
/* Re-insert the 24-bit branch offset field.  */
9343 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
9345 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
9350 /* Populate Arm stub for an exported Thumb function. */
9353 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
9355 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
9357 struct elf_link_hash_entry
* myh
;
9358 struct elf32_arm_link_hash_entry
*eh
;
9359 struct elf32_arm_link_hash_table
* globals
;
9362 char *error_message
;
9364 eh
= elf32_arm_hash_entry (h
);
9365 /* Allocate stubs for exported Thumb functions on v4t. */
9366 if (eh
->export_glue
== NULL
)
9369 globals
= elf32_arm_hash_table (info
);
9370 BFD_ASSERT (globals
!= NULL
);
9371 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9373 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9374 ARM2THUMB_GLUE_SECTION_NAME
);
9375 BFD_ASSERT (s
!= NULL
);
9376 BFD_ASSERT (s
->contents
!= NULL
);
9377 BFD_ASSERT (s
->output_section
!= NULL
);
9379 sec
= eh
->export_glue
->root
.u
.def
.section
;
9381 BFD_ASSERT (sec
->output_section
!= NULL
);
9383 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
9384 + sec
->output_section
->vma
;
9386 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
9387 h
->root
.u
.def
.section
->owner
,
9388 globals
->obfd
, sec
, val
, s
,
9394 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9397 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
9402 struct elf32_arm_link_hash_table
*globals
;
9404 globals
= elf32_arm_hash_table (info
);
9405 BFD_ASSERT (globals
!= NULL
);
9406 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9408 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9409 ARM_BX_GLUE_SECTION_NAME
);
9410 BFD_ASSERT (s
!= NULL
);
9411 BFD_ASSERT (s
->contents
!= NULL
);
9412 BFD_ASSERT (s
->output_section
!= NULL
);
9414 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
9416 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
9418 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
9420 p
= s
->contents
+ glue_addr
;
9421 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
9422 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
9423 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
9424 globals
->bx_glue_offset
[reg
] |= 1;
9427 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
9430 /* Generate Arm stubs for exported Thumb symbols. */
9432 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
9433 struct bfd_link_info
*link_info
)
9435 struct elf32_arm_link_hash_table
* globals
;
9437 if (link_info
== NULL
)
9438 /* Ignore this if we are not called by the ELF backend linker. */
9441 globals
= elf32_arm_hash_table (link_info
);
9442 if (globals
== NULL
)
9445 /* If blx is available then exported Thumb symbols are OK and there is
9447 if (globals
->use_blx
)
9450 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
9454 /* Reserve space for COUNT dynamic relocations in relocation selection
9458 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9459 bfd_size_type count
)
9461 struct elf32_arm_link_hash_table
*htab
;
9463 htab
= elf32_arm_hash_table (info
);
9464 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
9467 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9470 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9471 dynamic, the relocations should go in SRELOC, otherwise they should
9472 go in the special .rel.iplt section. */
9475 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9476 bfd_size_type count
)
9478 struct elf32_arm_link_hash_table
*htab
;
9480 htab
= elf32_arm_hash_table (info
);
9481 if (!htab
->root
.dynamic_sections_created
)
9482 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
9485 BFD_ASSERT (sreloc
!= NULL
);
9486 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9490 /* Add relocation REL to the end of relocation section SRELOC. */
9493 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
9494 asection
*sreloc
, Elf_Internal_Rela
*rel
)
9497 struct elf32_arm_link_hash_table
*htab
;
9499 htab
= elf32_arm_hash_table (info
);
9500 if (!htab
->root
.dynamic_sections_created
9501 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
9502 sreloc
= htab
->root
.irelplt
;
9505 loc
= sreloc
->contents
;
9506 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
9507 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
9509 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
9512 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9513 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9517 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
9519 union gotplt_union
*root_plt
,
9520 struct arm_plt_info
*arm_plt
)
9522 struct elf32_arm_link_hash_table
*htab
;
9526 htab
= elf32_arm_hash_table (info
);
9530 splt
= htab
->root
.iplt
;
9531 sgotplt
= htab
->root
.igotplt
;
9533 /* NaCl uses a special first entry in .iplt too. */
9534 if (htab
->root
.target_os
== is_nacl
&& splt
->size
== 0)
9535 splt
->size
+= htab
->plt_header_size
;
9537 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9538 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
9542 splt
= htab
->root
.splt
;
9543 sgotplt
= htab
->root
.sgotplt
;
9547 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9548 /* For lazy binding, relocations will be put into .rel.plt, in
9549 .rel.got otherwise. */
9550 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9551 if (info
->flags
& DF_BIND_NOW
)
9552 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
9554 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9558 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9559 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9562 /* If this is the first .plt entry, make room for the special
9564 if (splt
->size
== 0)
9565 splt
->size
+= htab
->plt_header_size
;
9567 htab
->next_tls_desc_index
++;
9570 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9571 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9572 splt
->size
+= PLT_THUMB_STUB_SIZE
;
9573 root_plt
->offset
= splt
->size
;
9574 splt
->size
+= htab
->plt_entry_size
;
9576 /* We also need to make an entry in the .got.plt section, which
9577 will be placed in the .got section by the linker script. */
9579 arm_plt
->got_offset
= sgotplt
->size
;
9581 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
9583 /* Function descriptor takes 64 bits in GOT. */
9590 arm_movw_immediate (bfd_vma value
)
9592 return (value
& 0x00000fff) | ((value
& 0x0000f000) << 4);
9596 arm_movt_immediate (bfd_vma value
)
9598 return ((value
& 0x0fff0000) >> 16) | ((value
& 0xf0000000) >> 12);
9601 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9602 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9603 Otherwise, DYNINDX is the index of the symbol in the dynamic
9604 symbol table and SYM_VALUE is undefined.
9606 ROOT_PLT points to the offset of the PLT entry from the start of its
9607 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9608 bookkeeping information.
9610 Returns FALSE if there was a problem. */
9613 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
9614 union gotplt_union
*root_plt
,
9615 struct arm_plt_info
*arm_plt
,
9616 int dynindx
, bfd_vma sym_value
)
9618 struct elf32_arm_link_hash_table
*htab
;
9624 Elf_Internal_Rela rel
;
9625 bfd_vma got_header_size
;
9627 htab
= elf32_arm_hash_table (info
);
9629 /* Pick the appropriate sections and sizes. */
9632 splt
= htab
->root
.iplt
;
9633 sgot
= htab
->root
.igotplt
;
9634 srel
= htab
->root
.irelplt
;
9636 /* There are no reserved entries in .igot.plt, and no special
9637 first entry in .iplt. */
9638 got_header_size
= 0;
9642 splt
= htab
->root
.splt
;
9643 sgot
= htab
->root
.sgotplt
;
9644 srel
= htab
->root
.srelplt
;
9646 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
9648 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
9650 bfd_vma got_offset
, got_address
, plt_address
;
9651 bfd_vma got_displacement
, initial_got_entry
;
9654 BFD_ASSERT (sgot
!= NULL
);
9656 /* Get the offset into the .(i)got.plt table of the entry that
9657 corresponds to this function. */
9658 got_offset
= (arm_plt
->got_offset
& -2);
9660 /* Get the index in the procedure linkage table which
9661 corresponds to this symbol. This is the index of this symbol
9662 in all the symbols for which we are making plt entries.
9663 After the reserved .got.plt entries, all symbols appear in
9664 the same order as in .plt. */
9666 /* Function descriptor takes 8 bytes. */
9667 plt_index
= (got_offset
- got_header_size
) / 8;
9669 plt_index
= (got_offset
- got_header_size
) / 4;
9671 /* Calculate the address of the GOT entry. */
9672 got_address
= (sgot
->output_section
->vma
9673 + sgot
->output_offset
9676 /* ...and the address of the PLT entry. */
9677 plt_address
= (splt
->output_section
->vma
9678 + splt
->output_offset
9679 + root_plt
->offset
);
9681 ptr
= splt
->contents
+ root_plt
->offset
;
9682 if (htab
->root
.target_os
== is_vxworks
&& bfd_link_pic (info
))
9687 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9689 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
9691 val
|= got_address
- sgot
->output_section
->vma
;
9693 val
|= plt_index
* RELOC_SIZE (htab
);
9694 if (i
== 2 || i
== 5)
9695 bfd_put_32 (output_bfd
, val
, ptr
);
9697 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9700 else if (htab
->root
.target_os
== is_vxworks
)
9705 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9707 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
9711 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
9713 val
|= plt_index
* RELOC_SIZE (htab
);
9714 if (i
== 2 || i
== 5)
9715 bfd_put_32 (output_bfd
, val
, ptr
);
9717 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9720 loc
= (htab
->srelplt2
->contents
9721 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
9723 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9724 referencing the GOT for this PLT entry. */
9725 rel
.r_offset
= plt_address
+ 8;
9726 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
9727 rel
.r_addend
= got_offset
;
9728 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9729 loc
+= RELOC_SIZE (htab
);
9731 /* Create the R_ARM_ABS32 relocation referencing the
9732 beginning of the PLT for this GOT entry. */
9733 rel
.r_offset
= got_address
;
9734 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
9736 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9738 else if (htab
->root
.target_os
== is_nacl
)
9740 /* Calculate the displacement between the PLT slot and the
9741 common tail that's part of the special initial PLT slot. */
9742 int32_t tail_displacement
9743 = ((splt
->output_section
->vma
+ splt
->output_offset
9744 + ARM_NACL_PLT_TAIL_OFFSET
)
9745 - (plt_address
+ htab
->plt_entry_size
+ 4));
9746 BFD_ASSERT ((tail_displacement
& 3) == 0);
9747 tail_displacement
>>= 2;
9749 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
9750 || (-tail_displacement
& 0xff000000) == 0);
9752 /* Calculate the displacement between the PLT slot and the entry
9753 in the GOT. The offset accounts for the value produced by
9754 adding to pc in the penultimate instruction of the PLT stub. */
9755 got_displacement
= (got_address
9756 - (plt_address
+ htab
->plt_entry_size
));
9758 /* NaCl does not support interworking at all. */
9759 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
9761 put_arm_insn (htab
, output_bfd
,
9762 elf32_arm_nacl_plt_entry
[0]
9763 | arm_movw_immediate (got_displacement
),
9765 put_arm_insn (htab
, output_bfd
,
9766 elf32_arm_nacl_plt_entry
[1]
9767 | arm_movt_immediate (got_displacement
),
9769 put_arm_insn (htab
, output_bfd
,
9770 elf32_arm_nacl_plt_entry
[2],
9772 put_arm_insn (htab
, output_bfd
,
9773 elf32_arm_nacl_plt_entry
[3]
9774 | (tail_displacement
& 0x00ffffff),
9777 else if (htab
->fdpic_p
)
9779 const bfd_vma
*plt_entry
= using_thumb_only (htab
)
9780 ? elf32_arm_fdpic_thumb_plt_entry
9781 : elf32_arm_fdpic_plt_entry
;
9783 /* Fill-up Thumb stub if needed. */
9784 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9786 put_thumb_insn (htab
, output_bfd
,
9787 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9788 put_thumb_insn (htab
, output_bfd
,
9789 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9791 /* As we are using 32 bit instructions even for the Thumb
9792 version, we have to use 'put_arm_insn' instead of
9793 'put_thumb_insn'. */
9794 put_arm_insn (htab
, output_bfd
, plt_entry
[0], ptr
+ 0);
9795 put_arm_insn (htab
, output_bfd
, plt_entry
[1], ptr
+ 4);
9796 put_arm_insn (htab
, output_bfd
, plt_entry
[2], ptr
+ 8);
9797 put_arm_insn (htab
, output_bfd
, plt_entry
[3], ptr
+ 12);
9798 bfd_put_32 (output_bfd
, got_offset
, ptr
+ 16);
9800 if (!(info
->flags
& DF_BIND_NOW
))
9802 /* funcdesc_value_reloc_offset. */
9803 bfd_put_32 (output_bfd
,
9804 htab
->root
.srelplt
->reloc_count
* RELOC_SIZE (htab
),
9806 put_arm_insn (htab
, output_bfd
, plt_entry
[6], ptr
+ 24);
9807 put_arm_insn (htab
, output_bfd
, plt_entry
[7], ptr
+ 28);
9808 put_arm_insn (htab
, output_bfd
, plt_entry
[8], ptr
+ 32);
9809 put_arm_insn (htab
, output_bfd
, plt_entry
[9], ptr
+ 36);
9812 else if (using_thumb_only (htab
))
9814 /* PR ld/16017: Generate thumb only PLT entries. */
9815 if (!using_thumb2 (htab
))
9817 /* FIXME: We ought to be able to generate thumb-1 PLT
9819 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9824 /* Calculate the displacement between the PLT slot and the entry in
9825 the GOT. The 12-byte offset accounts for the value produced by
9826 adding to pc in the 3rd instruction of the PLT stub. */
9827 got_displacement
= got_address
- (plt_address
+ 12);
9829 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9830 instead of 'put_thumb_insn'. */
9831 put_arm_insn (htab
, output_bfd
,
9832 elf32_thumb2_plt_entry
[0]
9833 | ((got_displacement
& 0x000000ff) << 16)
9834 | ((got_displacement
& 0x00000700) << 20)
9835 | ((got_displacement
& 0x00000800) >> 1)
9836 | ((got_displacement
& 0x0000f000) >> 12),
9838 put_arm_insn (htab
, output_bfd
,
9839 elf32_thumb2_plt_entry
[1]
9840 | ((got_displacement
& 0x00ff0000) )
9841 | ((got_displacement
& 0x07000000) << 4)
9842 | ((got_displacement
& 0x08000000) >> 17)
9843 | ((got_displacement
& 0xf0000000) >> 28),
9845 put_arm_insn (htab
, output_bfd
,
9846 elf32_thumb2_plt_entry
[2],
9848 put_arm_insn (htab
, output_bfd
,
9849 elf32_thumb2_plt_entry
[3],
9854 /* Calculate the displacement between the PLT slot and the
9855 entry in the GOT. The eight-byte offset accounts for the
9856 value produced by adding to pc in the first instruction
9858 got_displacement
= got_address
- (plt_address
+ 8);
9860 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9862 put_thumb_insn (htab
, output_bfd
,
9863 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9864 put_thumb_insn (htab
, output_bfd
,
9865 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9868 if (!elf32_arm_use_long_plt_entry
)
9870 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
9872 put_arm_insn (htab
, output_bfd
,
9873 elf32_arm_plt_entry_short
[0]
9874 | ((got_displacement
& 0x0ff00000) >> 20),
9876 put_arm_insn (htab
, output_bfd
,
9877 elf32_arm_plt_entry_short
[1]
9878 | ((got_displacement
& 0x000ff000) >> 12),
9880 put_arm_insn (htab
, output_bfd
,
9881 elf32_arm_plt_entry_short
[2]
9882 | (got_displacement
& 0x00000fff),
9884 #ifdef FOUR_WORD_PLT
9885 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
9890 put_arm_insn (htab
, output_bfd
,
9891 elf32_arm_plt_entry_long
[0]
9892 | ((got_displacement
& 0xf0000000) >> 28),
9894 put_arm_insn (htab
, output_bfd
,
9895 elf32_arm_plt_entry_long
[1]
9896 | ((got_displacement
& 0x0ff00000) >> 20),
9898 put_arm_insn (htab
, output_bfd
,
9899 elf32_arm_plt_entry_long
[2]
9900 | ((got_displacement
& 0x000ff000) >> 12),
9902 put_arm_insn (htab
, output_bfd
,
9903 elf32_arm_plt_entry_long
[3]
9904 | (got_displacement
& 0x00000fff),
9909 /* Fill in the entry in the .rel(a).(i)plt section. */
9910 rel
.r_offset
= got_address
;
9914 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9915 The dynamic linker or static executable then calls SYM_VALUE
9916 to determine the correct run-time value of the .igot.plt entry. */
9917 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
9918 initial_got_entry
= sym_value
;
9922 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9923 used by PLT entry. */
9926 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
9927 initial_got_entry
= 0;
9931 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
9932 initial_got_entry
= (splt
->output_section
->vma
9933 + splt
->output_offset
);
9936 When thumb only we need to set the LSB for any address that
9937 will be used with an interworking branch instruction. */
9938 if (using_thumb_only (htab
))
9939 initial_got_entry
|= 1;
9943 /* Fill in the entry in the global offset table. */
9944 bfd_put_32 (output_bfd
, initial_got_entry
,
9945 sgot
->contents
+ got_offset
);
9947 if (htab
->fdpic_p
&& !(info
->flags
& DF_BIND_NOW
))
9949 /* Setup initial funcdesc value. */
9950 /* FIXME: we don't support lazy binding because there is a
9951 race condition between both words getting written and
9952 some other thread attempting to read them. The ARM
9953 architecture does not have an atomic 64 bit load/store
9954 instruction that could be used to prevent it; it is
9955 recommended that threaded FDPIC applications run with the
9956 LD_BIND_NOW environment variable set. */
9957 bfd_put_32 (output_bfd
, plt_address
+ 0x18,
9958 sgot
->contents
+ got_offset
);
9959 bfd_put_32 (output_bfd
, -1 /*TODO*/,
9960 sgot
->contents
+ got_offset
+ 4);
9964 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
9969 /* For FDPIC we put PLT relocationss into .rel.got when not
9970 lazy binding otherwise we put them in .rel.plt. For now,
9971 we don't support lazy binding so put it in .rel.got. */
9972 if (info
->flags
& DF_BIND_NOW
)
9973 elf32_arm_add_dynreloc (output_bfd
, info
, htab
->root
.srelgot
, &rel
);
9975 elf32_arm_add_dynreloc (output_bfd
, info
, htab
->root
.srelplt
, &rel
);
9979 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
9980 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9987 /* Some relocations map to different relocations depending on the
9988 target. Return the real relocation. */
9991 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
9997 if (globals
->target1_is_rel
)
10000 return R_ARM_ABS32
;
10002 case R_ARM_TARGET2
:
10003 return globals
->target2_reloc
;
10010 /* Return the base VMA address which should be subtracted from real addresses
10011 when resolving @dtpoff relocation.
10012 This is PT_TLS segment p_vaddr. */
10015 dtpoff_base (struct bfd_link_info
*info
)
10017 /* If tls_sec is NULL, we should have signalled an error already. */
10018 if (elf_hash_table (info
)->tls_sec
== NULL
)
10020 return elf_hash_table (info
)->tls_sec
->vma
;
10023 /* Return the relocation value for @tpoff relocation
10024 if STT_TLS virtual address is ADDRESS. */
10027 tpoff (struct bfd_link_info
*info
, bfd_vma address
)
10029 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
10032 /* If tls_sec is NULL, we should have signalled an error already. */
10033 if (htab
->tls_sec
== NULL
)
10035 base
= align_power ((bfd_vma
) TCB_SIZE
, htab
->tls_sec
->alignment_power
);
10036 return address
- htab
->tls_sec
->vma
+ base
;
10039 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10040 VALUE is the relocation value. */
10042 static bfd_reloc_status_type
10043 elf32_arm_abs12_reloc (bfd
*abfd
, void *data
, bfd_vma value
)
10046 return bfd_reloc_overflow
;
10048 value
|= bfd_get_32 (abfd
, data
) & 0xfffff000;
10049 bfd_put_32 (abfd
, value
, data
);
10050 return bfd_reloc_ok
;
10053 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10054 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10055 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10057 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10058 is to then call final_link_relocate. Return other values in the
10061 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10062 the pre-relaxed code. It would be nice if the relocs were updated
10063 to match the optimization. */
10065 static bfd_reloc_status_type
10066 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
10067 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
10068 Elf_Internal_Rela
*rel
, unsigned long is_local
)
10070 unsigned long insn
;
10072 switch (ELF32_R_TYPE (rel
->r_info
))
10075 return bfd_reloc_notsupported
;
10077 case R_ARM_TLS_GOTDESC
:
10082 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10084 insn
-= 5; /* THUMB */
10086 insn
-= 8; /* ARM */
10088 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10089 return bfd_reloc_continue
;
10091 case R_ARM_THM_TLS_DESCSEQ
:
10093 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
10094 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
10098 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10100 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10104 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10107 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
10109 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
10113 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10116 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
10117 contents
+ rel
->r_offset
);
10121 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10122 /* It's a 32 bit instruction, fetch the rest of it for
10123 error generation. */
10124 insn
= (insn
<< 16)
10125 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
10127 /* xgettext:c-format */
10128 (_("%pB(%pA+%#" PRIx64
"): "
10129 "unexpected %s instruction '%#lx' in TLS trampoline"),
10130 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10132 return bfd_reloc_notsupported
;
10136 case R_ARM_TLS_DESCSEQ
:
10138 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10139 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10143 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
10144 contents
+ rel
->r_offset
);
10146 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10150 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10153 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
10154 contents
+ rel
->r_offset
);
10156 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
10160 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10163 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
10164 contents
+ rel
->r_offset
);
10169 /* xgettext:c-format */
10170 (_("%pB(%pA+%#" PRIx64
"): "
10171 "unexpected %s instruction '%#lx' in TLS trampoline"),
10172 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10174 return bfd_reloc_notsupported
;
10178 case R_ARM_TLS_CALL
:
10179 /* GD->IE relaxation, turn the instruction into 'nop' or
10180 'ldr r0, [pc,r0]' */
10181 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
10182 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10185 case R_ARM_THM_TLS_CALL
:
10186 /* GD->IE relaxation. */
10188 /* add r0,pc; ldr r0, [r0] */
10190 else if (using_thumb2 (globals
))
10197 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
10198 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
10201 return bfd_reloc_ok
;
10204 /* For a given value of n, calculate the value of G_n as required to
10205 deal with group relocations. We return it in the form of an
10206 encoded constant-and-rotation, together with the final residual. If n is
10207 specified as less than zero, then final_residual is filled with the
10208 input value and no further action is performed. */
10211 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
10215 bfd_vma encoded_g_n
= 0;
10216 bfd_vma residual
= value
; /* Also known as Y_n. */
10218 for (current_n
= 0; current_n
<= n
; current_n
++)
10222 /* Calculate which part of the value to mask. */
10229 /* Determine the most significant bit in the residual and
10230 align the resulting value to a 2-bit boundary. */
10231 for (msb
= 30; msb
>= 0; msb
-= 2)
10232 if (residual
& (3u << msb
))
10235 /* The desired shift is now (msb - 6), or zero, whichever
10242 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10243 g_n
= residual
& (0xff << shift
);
10244 encoded_g_n
= (g_n
>> shift
)
10245 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
10247 /* Calculate the residual for the next time around. */
10251 *final_residual
= residual
;
10253 return encoded_g_n
;
10256 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10257 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10260 identify_add_or_sub (bfd_vma insn
)
10262 int opcode
= insn
& 0x1e00000;
10264 if (opcode
== 1 << 23) /* ADD */
10267 if (opcode
== 1 << 22) /* SUB */
10273 /* Perform a relocation as part of a final link. */
10275 static bfd_reloc_status_type
10276 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
10279 asection
* input_section
,
10280 bfd_byte
* contents
,
10281 Elf_Internal_Rela
* rel
,
10283 struct bfd_link_info
* info
,
10284 asection
* sym_sec
,
10285 const char * sym_name
,
10286 unsigned char st_type
,
10287 enum arm_st_branch_type branch_type
,
10288 struct elf_link_hash_entry
* h
,
10289 bool * unresolved_reloc_p
,
10290 char ** error_message
)
10292 unsigned long r_type
= howto
->type
;
10293 unsigned long r_symndx
;
10294 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
10295 bfd_vma
* local_got_offsets
;
10296 bfd_vma
* local_tlsdesc_gotents
;
10299 asection
* sreloc
= NULL
;
10300 asection
* srelgot
;
10302 bfd_signed_vma signed_addend
;
10303 unsigned char dynreloc_st_type
;
10304 bfd_vma dynreloc_value
;
10305 struct elf32_arm_link_hash_table
* globals
;
10306 struct elf32_arm_link_hash_entry
*eh
;
10307 union gotplt_union
*root_plt
;
10308 struct arm_plt_info
*arm_plt
;
10309 bfd_vma plt_offset
;
10310 bfd_vma gotplt_offset
;
10311 bool has_iplt_entry
;
10312 bool resolved_to_zero
;
10314 globals
= elf32_arm_hash_table (info
);
10315 if (globals
== NULL
)
10316 return bfd_reloc_notsupported
;
10318 BFD_ASSERT (is_arm_elf (input_bfd
));
10319 BFD_ASSERT (howto
!= NULL
);
10321 /* Some relocation types map to different relocations depending on the
10322 target. We pick the right one here. */
10323 r_type
= arm_real_reloc_type (globals
, r_type
);
10325 /* It is possible to have linker relaxations on some TLS access
10326 models. Update our information here. */
10327 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
10329 if (r_type
!= howto
->type
)
10330 howto
= elf32_arm_howto_from_type (r_type
);
10332 eh
= (struct elf32_arm_link_hash_entry
*) h
;
10333 sgot
= globals
->root
.sgot
;
10334 local_got_offsets
= elf_local_got_offsets (input_bfd
);
10335 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
10337 if (globals
->root
.dynamic_sections_created
)
10338 srelgot
= globals
->root
.srelgot
;
10342 r_symndx
= ELF32_R_SYM (rel
->r_info
);
10344 if (globals
->use_rel
)
10348 switch (howto
->size
)
10350 case 0: addend
= bfd_get_8 (input_bfd
, hit_data
); break;
10351 case 1: addend
= bfd_get_16 (input_bfd
, hit_data
); break;
10352 case 2: addend
= bfd_get_32 (input_bfd
, hit_data
); break;
10353 default: addend
= 0; break;
10355 /* Note: the addend and signed_addend calculated here are
10356 incorrect for any split field. */
10357 addend
&= howto
->src_mask
;
10358 sign
= howto
->src_mask
& ~(howto
->src_mask
>> 1);
10359 signed_addend
= (addend
^ sign
) - sign
;
10360 signed_addend
= (bfd_vma
) signed_addend
<< howto
->rightshift
;
10361 addend
<<= howto
->rightshift
;
10364 addend
= signed_addend
= rel
->r_addend
;
10366 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10367 are resolving a function call relocation. */
10368 if (using_thumb_only (globals
)
10369 && (r_type
== R_ARM_THM_CALL
10370 || r_type
== R_ARM_THM_JUMP24
)
10371 && branch_type
== ST_BRANCH_TO_ARM
)
10372 branch_type
= ST_BRANCH_TO_THUMB
;
10374 /* Record the symbol information that should be used in dynamic
10376 dynreloc_st_type
= st_type
;
10377 dynreloc_value
= value
;
10378 if (branch_type
== ST_BRANCH_TO_THUMB
)
10379 dynreloc_value
|= 1;
10381 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10382 VALUE appropriately for relocations that we resolve at link time. */
10383 has_iplt_entry
= false;
10384 if (elf32_arm_get_plt_info (input_bfd
, globals
, eh
, r_symndx
, &root_plt
,
10386 && root_plt
->offset
!= (bfd_vma
) -1)
10388 plt_offset
= root_plt
->offset
;
10389 gotplt_offset
= arm_plt
->got_offset
;
10391 if (h
== NULL
|| eh
->is_iplt
)
10393 has_iplt_entry
= true;
10394 splt
= globals
->root
.iplt
;
10396 /* Populate .iplt entries here, because not all of them will
10397 be seen by finish_dynamic_symbol. The lower bit is set if
10398 we have already populated the entry. */
10399 if (plt_offset
& 1)
10403 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
10404 -1, dynreloc_value
))
10405 root_plt
->offset
|= 1;
10407 return bfd_reloc_notsupported
;
10410 /* Static relocations always resolve to the .iplt entry. */
10411 st_type
= STT_FUNC
;
10412 value
= (splt
->output_section
->vma
10413 + splt
->output_offset
10415 branch_type
= ST_BRANCH_TO_ARM
;
10417 /* If there are non-call relocations that resolve to the .iplt
10418 entry, then all dynamic ones must too. */
10419 if (arm_plt
->noncall_refcount
!= 0)
10421 dynreloc_st_type
= st_type
;
10422 dynreloc_value
= value
;
10426 /* We populate the .plt entry in finish_dynamic_symbol. */
10427 splt
= globals
->root
.splt
;
10432 plt_offset
= (bfd_vma
) -1;
10433 gotplt_offset
= (bfd_vma
) -1;
10436 resolved_to_zero
= (h
!= NULL
10437 && UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
));
10442 /* We don't need to find a value for this symbol. It's just a
10444 *unresolved_reloc_p
= false;
10445 return bfd_reloc_ok
;
10448 if (globals
->root
.target_os
!= is_vxworks
)
10449 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10450 /* Fall through. */
10454 case R_ARM_ABS32_NOI
:
10456 case R_ARM_REL32_NOI
:
10462 /* Handle relocations which should use the PLT entry. ABS32/REL32
10463 will use the symbol's value, which may point to a PLT entry, but we
10464 don't need to handle that here. If we created a PLT entry, all
10465 branches in this object should go to it, except if the PLT is too
10466 far away, in which case a long branch stub should be inserted. */
10467 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
10468 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
10469 && r_type
!= R_ARM_CALL
10470 && r_type
!= R_ARM_JUMP24
10471 && r_type
!= R_ARM_PLT32
)
10472 && plt_offset
!= (bfd_vma
) -1)
10474 /* If we've created a .plt section, and assigned a PLT entry
10475 to this function, it must either be a STT_GNU_IFUNC reference
10476 or not be known to bind locally. In other cases, we should
10477 have cleared the PLT entry by now. */
10478 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
10480 value
= (splt
->output_section
->vma
10481 + splt
->output_offset
10483 *unresolved_reloc_p
= false;
10484 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10485 contents
, rel
->r_offset
, value
,
10489 /* When generating a shared object or relocatable executable, these
10490 relocations are copied into the output file to be resolved at
10492 if ((bfd_link_pic (info
)
10493 || globals
->root
.is_relocatable_executable
10494 || globals
->fdpic_p
)
10495 && (input_section
->flags
& SEC_ALLOC
)
10496 && !(globals
->root
.target_os
== is_vxworks
10497 && strcmp (input_section
->output_section
->name
,
10499 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
10500 || !SYMBOL_CALLS_LOCAL (info
, h
))
10501 && !(input_bfd
== globals
->stub_bfd
10502 && strstr (input_section
->name
, STUB_SUFFIX
))
10504 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10505 && !resolved_to_zero
)
10506 || h
->root
.type
!= bfd_link_hash_undefweak
)
10507 && r_type
!= R_ARM_PC24
10508 && r_type
!= R_ARM_CALL
10509 && r_type
!= R_ARM_JUMP24
10510 && r_type
!= R_ARM_PREL31
10511 && r_type
!= R_ARM_PLT32
)
10513 Elf_Internal_Rela outrel
;
10514 bool skip
, relocate
;
10517 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
10518 && !h
->def_regular
)
10520 char *v
= _("shared object");
10522 if (bfd_link_executable (info
))
10523 v
= _("PIE executable");
10526 (_("%pB: relocation %s against external or undefined symbol `%s'"
10527 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
10528 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
10529 return bfd_reloc_notsupported
;
10532 *unresolved_reloc_p
= false;
10534 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
10536 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
10537 ! globals
->use_rel
);
10539 if (sreloc
== NULL
)
10540 return bfd_reloc_notsupported
;
10546 outrel
.r_addend
= addend
;
10548 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
10550 if (outrel
.r_offset
== (bfd_vma
) -1)
10552 else if (outrel
.r_offset
== (bfd_vma
) -2)
10553 skip
= true, relocate
= true;
10554 outrel
.r_offset
+= (input_section
->output_section
->vma
10555 + input_section
->output_offset
);
10558 memset (&outrel
, 0, sizeof outrel
);
10560 && h
->dynindx
!= -1
10561 && (!bfd_link_pic (info
)
10562 || !(bfd_link_pie (info
)
10563 || SYMBOLIC_BIND (info
, h
))
10564 || !h
->def_regular
))
10565 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
10570 /* This symbol is local, or marked to become local. */
10571 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
10572 || (globals
->fdpic_p
&& !bfd_link_pic (info
)));
10573 /* On SVR4-ish systems, the dynamic loader cannot
10574 relocate the text and data segments independently,
10575 so the symbol does not matter. */
10577 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10578 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10579 to the .iplt entry. Instead, every non-call reference
10580 must use an R_ARM_IRELATIVE relocation to obtain the
10581 correct run-time address. */
10582 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
10583 else if (globals
->fdpic_p
&& !bfd_link_pic (info
))
10586 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
10587 if (globals
->use_rel
)
10590 outrel
.r_addend
+= dynreloc_value
;
10594 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
10596 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
10598 /* If this reloc is against an external symbol, we do not want to
10599 fiddle with the addend. Otherwise, we need to include the symbol
10600 value so that it becomes an addend for the dynamic reloc. */
10602 return bfd_reloc_ok
;
10604 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10605 contents
, rel
->r_offset
,
10606 dynreloc_value
, (bfd_vma
) 0);
10608 else switch (r_type
)
10611 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10613 case R_ARM_XPC25
: /* Arm BLX instruction. */
10616 case R_ARM_PC24
: /* Arm B/BL instruction. */
10619 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
10621 if (r_type
== R_ARM_XPC25
)
10623 /* Check for Arm calling Arm function. */
10624 /* FIXME: Should we translate the instruction into a BL
10625 instruction instead ? */
10626 if (branch_type
!= ST_BRANCH_TO_THUMB
)
10628 (_("\%pB: warning: %s BLX instruction targets"
10629 " %s function '%s'"),
10631 "ARM", h
? h
->root
.root
.string
: "(local)");
10633 else if (r_type
== R_ARM_PC24
)
10635 /* Check for Arm calling Thumb function. */
10636 if (branch_type
== ST_BRANCH_TO_THUMB
)
10638 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
10639 output_bfd
, input_section
,
10640 hit_data
, sym_sec
, rel
->r_offset
,
10641 signed_addend
, value
,
10643 return bfd_reloc_ok
;
10645 return bfd_reloc_dangerous
;
10649 /* Check if a stub has to be inserted because the
10650 destination is too far or we are changing mode. */
10651 if ( r_type
== R_ARM_CALL
10652 || r_type
== R_ARM_JUMP24
10653 || r_type
== R_ARM_PLT32
)
10655 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10656 struct elf32_arm_link_hash_entry
*hash
;
10658 hash
= (struct elf32_arm_link_hash_entry
*) h
;
10659 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10660 st_type
, &branch_type
,
10661 hash
, value
, sym_sec
,
10662 input_bfd
, sym_name
);
10664 if (stub_type
!= arm_stub_none
)
10666 /* The target is out of reach, so redirect the
10667 branch to the local stub for this function. */
10668 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10673 if (stub_entry
!= NULL
)
10674 value
= (stub_entry
->stub_offset
10675 + stub_entry
->stub_sec
->output_offset
10676 + stub_entry
->stub_sec
->output_section
->vma
);
10678 if (plt_offset
!= (bfd_vma
) -1)
10679 *unresolved_reloc_p
= false;
10684 /* If the call goes through a PLT entry, make sure to
10685 check distance to the right destination address. */
10686 if (plt_offset
!= (bfd_vma
) -1)
10688 value
= (splt
->output_section
->vma
10689 + splt
->output_offset
10691 *unresolved_reloc_p
= false;
10692 /* The PLT entry is in ARM mode, regardless of the
10693 target function. */
10694 branch_type
= ST_BRANCH_TO_ARM
;
10699 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10701 S is the address of the symbol in the relocation.
10702 P is address of the instruction being relocated.
10703 A is the addend (extracted from the instruction) in bytes.
10705 S is held in 'value'.
10706 P is the base address of the section containing the
10707 instruction plus the offset of the reloc into that
10709 (input_section->output_section->vma +
10710 input_section->output_offset +
10712 A is the addend, converted into bytes, ie:
10713 (signed_addend * 4)
10715 Note: None of these operations have knowledge of the pipeline
10716 size of the processor, thus it is up to the assembler to
10717 encode this information into the addend. */
10718 value
-= (input_section
->output_section
->vma
10719 + input_section
->output_offset
);
10720 value
-= rel
->r_offset
;
10721 value
+= signed_addend
;
10723 signed_addend
= value
;
10724 signed_addend
>>= howto
->rightshift
;
10726 /* A branch to an undefined weak symbol is turned into a jump to
10727 the next instruction unless a PLT entry will be created.
10728 Do the same for local undefined symbols (but not for STN_UNDEF).
10729 The jump to the next instruction is optimized as a NOP depending
10730 on the architecture. */
10731 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
10732 && plt_offset
== (bfd_vma
) -1)
10733 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
10735 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
10737 if (arch_has_arm_nop (globals
))
10738 value
|= 0x0320f000;
10740 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10744 /* Perform a signed range check. */
10745 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
10746 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
10747 return bfd_reloc_overflow
;
10749 addend
= (value
& 2);
10751 value
= (signed_addend
& howto
->dst_mask
)
10752 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
10754 if (r_type
== R_ARM_CALL
)
10756 /* Set the H bit in the BLX instruction. */
10757 if (branch_type
== ST_BRANCH_TO_THUMB
)
10760 value
|= (1 << 24);
10762 value
&= ~(bfd_vma
)(1 << 24);
10765 /* Select the correct instruction (BL or BLX). */
10766 /* Only if we are not handling a BL to a stub. In this
10767 case, mode switching is performed by the stub. */
10768 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
10769 value
|= (1 << 28);
10770 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
10772 value
&= ~(bfd_vma
)(1 << 28);
10773 value
|= (1 << 24);
10782 if (branch_type
== ST_BRANCH_TO_THUMB
)
10786 case R_ARM_ABS32_NOI
:
10792 if (branch_type
== ST_BRANCH_TO_THUMB
)
10794 value
-= (input_section
->output_section
->vma
10795 + input_section
->output_offset
+ rel
->r_offset
);
10798 case R_ARM_REL32_NOI
:
10800 value
-= (input_section
->output_section
->vma
10801 + input_section
->output_offset
+ rel
->r_offset
);
10805 value
-= (input_section
->output_section
->vma
10806 + input_section
->output_offset
+ rel
->r_offset
);
10807 value
+= signed_addend
;
10808 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
10810 /* Check for overflow. */
10811 if ((value
^ (value
>> 1)) & (1 << 30))
10812 return bfd_reloc_overflow
;
10814 value
&= 0x7fffffff;
10815 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
10816 if (branch_type
== ST_BRANCH_TO_THUMB
)
10821 bfd_put_32 (input_bfd
, value
, hit_data
);
10822 return bfd_reloc_ok
;
10827 /* There is no way to tell whether the user intended to use a signed or
10828 unsigned addend. When checking for overflow we accept either,
10829 as specified by the AAELF. */
10830 if ((long) value
> 0xff || (long) value
< -0x80)
10831 return bfd_reloc_overflow
;
10833 bfd_put_8 (input_bfd
, value
, hit_data
);
10834 return bfd_reloc_ok
;
10839 /* See comment for R_ARM_ABS8. */
10840 if ((long) value
> 0xffff || (long) value
< -0x8000)
10841 return bfd_reloc_overflow
;
10843 bfd_put_16 (input_bfd
, value
, hit_data
);
10844 return bfd_reloc_ok
;
10846 case R_ARM_THM_ABS5
:
10847 /* Support ldr and str instructions for the thumb. */
10848 if (globals
->use_rel
)
10850 /* Need to refetch addend. */
10851 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
10852 /* ??? Need to determine shift amount from operand size. */
10853 addend
>>= howto
->rightshift
;
10857 /* ??? Isn't value unsigned? */
10858 if ((long) value
> 0x1f || (long) value
< -0x10)
10859 return bfd_reloc_overflow
;
10861 /* ??? Value needs to be properly shifted into place first. */
10862 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
10863 bfd_put_16 (input_bfd
, value
, hit_data
);
10864 return bfd_reloc_ok
;
10866 case R_ARM_THM_ALU_PREL_11_0
:
10867 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10870 bfd_signed_vma relocation
;
10872 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10873 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10875 if (globals
->use_rel
)
10877 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
10878 | ((insn
& (1 << 26)) >> 15);
10879 if (insn
& 0xf00000)
10880 signed_addend
= -signed_addend
;
10883 relocation
= value
+ signed_addend
;
10884 relocation
-= Pa (input_section
->output_section
->vma
10885 + input_section
->output_offset
10888 /* PR 21523: Use an absolute value. The user of this reloc will
10889 have already selected an ADD or SUB insn appropriately. */
10890 value
= llabs (relocation
);
10892 if (value
>= 0x1000)
10893 return bfd_reloc_overflow
;
10895 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10896 if (branch_type
== ST_BRANCH_TO_THUMB
)
10899 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
10900 | ((value
& 0x700) << 4)
10901 | ((value
& 0x800) << 15);
10902 if (relocation
< 0)
10905 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10906 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10908 return bfd_reloc_ok
;
10911 case R_ARM_THM_PC8
:
10912 /* PR 10073: This reloc is not generated by the GNU toolchain,
10913 but it is supported for compatibility with third party libraries
10914 generated by other compilers, specifically the ARM/IAR. */
10917 bfd_signed_vma relocation
;
10919 insn
= bfd_get_16 (input_bfd
, hit_data
);
10921 if (globals
->use_rel
)
10922 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
10924 relocation
= value
+ addend
;
10925 relocation
-= Pa (input_section
->output_section
->vma
10926 + input_section
->output_offset
10929 value
= relocation
;
10931 /* We do not check for overflow of this reloc. Although strictly
10932 speaking this is incorrect, it appears to be necessary in order
10933 to work with IAR generated relocs. Since GCC and GAS do not
10934 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10935 a problem for them. */
10938 insn
= (insn
& 0xff00) | (value
>> 2);
10940 bfd_put_16 (input_bfd
, insn
, hit_data
);
10942 return bfd_reloc_ok
;
10945 case R_ARM_THM_PC12
:
10946 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10949 bfd_signed_vma relocation
;
10951 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10952 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10954 if (globals
->use_rel
)
10956 signed_addend
= insn
& 0xfff;
10957 if (!(insn
& (1 << 23)))
10958 signed_addend
= -signed_addend
;
10961 relocation
= value
+ signed_addend
;
10962 relocation
-= Pa (input_section
->output_section
->vma
10963 + input_section
->output_offset
10966 value
= relocation
;
10968 if (value
>= 0x1000)
10969 return bfd_reloc_overflow
;
10971 insn
= (insn
& 0xff7ff000) | value
;
10972 if (relocation
>= 0)
10975 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10976 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10978 return bfd_reloc_ok
;
10981 case R_ARM_THM_XPC22
:
10982 case R_ARM_THM_CALL
:
10983 case R_ARM_THM_JUMP24
:
10984 /* Thumb BL (branch long instruction). */
10986 bfd_vma relocation
;
10987 bfd_vma reloc_sign
;
10988 bool overflow
= false;
10989 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
10990 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
10991 bfd_signed_vma reloc_signed_max
;
10992 bfd_signed_vma reloc_signed_min
;
10994 bfd_signed_vma signed_check
;
10996 const int thumb2
= using_thumb2 (globals
);
10997 const int thumb2_bl
= using_thumb2_bl (globals
);
10999 /* A branch to an undefined weak symbol is turned into a jump to
11000 the next instruction unless a PLT entry will be created.
11001 The jump to the next instruction is optimized as a NOP.W for
11002 Thumb-2 enabled architectures. */
11003 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
11004 && plt_offset
== (bfd_vma
) -1)
11008 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
11009 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
11013 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
11014 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
11016 return bfd_reloc_ok
;
11019 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11020 with Thumb-1) involving the J1 and J2 bits. */
11021 if (globals
->use_rel
)
11023 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
11024 bfd_vma upper
= upper_insn
& 0x3ff;
11025 bfd_vma lower
= lower_insn
& 0x7ff;
11026 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
11027 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
11028 bfd_vma i1
= j1
^ s
? 0 : 1;
11029 bfd_vma i2
= j2
^ s
? 0 : 1;
11031 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
11033 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
11035 signed_addend
= addend
;
11038 if (r_type
== R_ARM_THM_XPC22
)
11040 /* Check for Thumb to Thumb call. */
11041 /* FIXME: Should we translate the instruction into a BL
11042 instruction instead ? */
11043 if (branch_type
== ST_BRANCH_TO_THUMB
)
11045 (_("%pB: warning: %s BLX instruction targets"
11046 " %s function '%s'"),
11047 input_bfd
, "Thumb",
11048 "Thumb", h
? h
->root
.root
.string
: "(local)");
11052 /* If it is not a call to Thumb, assume call to Arm.
11053 If it is a call relative to a section name, then it is not a
11054 function call at all, but rather a long jump. Calls through
11055 the PLT do not require stubs. */
11056 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
11058 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
11060 /* Convert BL to BLX. */
11061 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11063 else if (( r_type
!= R_ARM_THM_CALL
)
11064 && (r_type
!= R_ARM_THM_JUMP24
))
11066 if (elf32_thumb_to_arm_stub
11067 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
11068 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
11070 return bfd_reloc_ok
;
11072 return bfd_reloc_dangerous
;
11075 else if (branch_type
== ST_BRANCH_TO_THUMB
11076 && globals
->use_blx
11077 && r_type
== R_ARM_THM_CALL
)
11079 /* Make sure this is a BL. */
11080 lower_insn
|= 0x1800;
11084 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11085 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
11087 /* Check if a stub has to be inserted because the destination
11089 struct elf32_arm_stub_hash_entry
*stub_entry
;
11090 struct elf32_arm_link_hash_entry
*hash
;
11092 hash
= (struct elf32_arm_link_hash_entry
*) h
;
11094 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11095 st_type
, &branch_type
,
11096 hash
, value
, sym_sec
,
11097 input_bfd
, sym_name
);
11099 if (stub_type
!= arm_stub_none
)
11101 /* The target is out of reach or we are changing modes, so
11102 redirect the branch to the local stub for this
11104 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11108 if (stub_entry
!= NULL
)
11110 value
= (stub_entry
->stub_offset
11111 + stub_entry
->stub_sec
->output_offset
11112 + stub_entry
->stub_sec
->output_section
->vma
);
11114 if (plt_offset
!= (bfd_vma
) -1)
11115 *unresolved_reloc_p
= false;
11118 /* If this call becomes a call to Arm, force BLX. */
11119 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
11122 && !arm_stub_is_thumb (stub_entry
->stub_type
))
11123 || branch_type
!= ST_BRANCH_TO_THUMB
)
11124 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11129 /* Handle calls via the PLT. */
11130 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
11132 value
= (splt
->output_section
->vma
11133 + splt
->output_offset
11136 if (globals
->use_blx
11137 && r_type
== R_ARM_THM_CALL
11138 && ! using_thumb_only (globals
))
11140 /* If the Thumb BLX instruction is available, convert
11141 the BL to a BLX instruction to call the ARM-mode
11143 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11144 branch_type
= ST_BRANCH_TO_ARM
;
11148 if (! using_thumb_only (globals
))
11149 /* Target the Thumb stub before the ARM PLT entry. */
11150 value
-= PLT_THUMB_STUB_SIZE
;
11151 branch_type
= ST_BRANCH_TO_THUMB
;
11153 *unresolved_reloc_p
= false;
11156 relocation
= value
+ signed_addend
;
11158 relocation
-= (input_section
->output_section
->vma
11159 + input_section
->output_offset
11162 check
= relocation
>> howto
->rightshift
;
11164 /* If this is a signed value, the rightshift just dropped
11165 leading 1 bits (assuming twos complement). */
11166 if ((bfd_signed_vma
) relocation
>= 0)
11167 signed_check
= check
;
11169 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
11171 /* Calculate the permissable maximum and minimum values for
11172 this relocation according to whether we're relocating for
11174 bitsize
= howto
->bitsize
;
11177 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
11178 reloc_signed_min
= ~reloc_signed_max
;
11180 /* Assumes two's complement. */
11181 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11184 if ((lower_insn
& 0x5000) == 0x4000)
11185 /* For a BLX instruction, make sure that the relocation is rounded up
11186 to a word boundary. This follows the semantics of the instruction
11187 which specifies that bit 1 of the target address will come from bit
11188 1 of the base address. */
11189 relocation
= (relocation
+ 2) & ~ 3;
11191 /* Put RELOCATION back into the insn. Assumes two's complement.
11192 We use the Thumb-2 encoding, which is safe even if dealing with
11193 a Thumb-1 instruction by virtue of our overflow check above. */
11194 reloc_sign
= (signed_check
< 0) ? 1 : 0;
11195 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
11196 | ((relocation
>> 12) & 0x3ff)
11197 | (reloc_sign
<< 10);
11198 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
11199 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
11200 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
11201 | ((relocation
>> 1) & 0x7ff);
11203 /* Put the relocated value back in the object file: */
11204 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11205 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11207 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
11211 case R_ARM_THM_JUMP19
:
11212 /* Thumb32 conditional branch instruction. */
11214 bfd_vma relocation
;
11215 bool overflow
= false;
11216 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
11217 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
11218 bfd_signed_vma reloc_signed_max
= 0xffffe;
11219 bfd_signed_vma reloc_signed_min
= -0x100000;
11220 bfd_signed_vma signed_check
;
11221 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11222 struct elf32_arm_stub_hash_entry
*stub_entry
;
11223 struct elf32_arm_link_hash_entry
*hash
;
11225 /* Need to refetch the addend, reconstruct the top three bits,
11226 and squish the two 11 bit pieces together. */
11227 if (globals
->use_rel
)
11229 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
11230 bfd_vma upper
= (upper_insn
& 0x003f);
11231 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
11232 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
11233 bfd_vma lower
= (lower_insn
& 0x07ff);
11237 upper
|= (!S
) << 8;
11238 upper
-= 0x0100; /* Sign extend. */
11240 addend
= (upper
<< 12) | (lower
<< 1);
11241 signed_addend
= addend
;
11244 /* Handle calls via the PLT. */
11245 if (plt_offset
!= (bfd_vma
) -1)
11247 value
= (splt
->output_section
->vma
11248 + splt
->output_offset
11250 /* Target the Thumb stub before the ARM PLT entry. */
11251 value
-= PLT_THUMB_STUB_SIZE
;
11252 *unresolved_reloc_p
= false;
11255 hash
= (struct elf32_arm_link_hash_entry
*)h
;
11257 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11258 st_type
, &branch_type
,
11259 hash
, value
, sym_sec
,
11260 input_bfd
, sym_name
);
11261 if (stub_type
!= arm_stub_none
)
11263 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11267 if (stub_entry
!= NULL
)
11269 value
= (stub_entry
->stub_offset
11270 + stub_entry
->stub_sec
->output_offset
11271 + stub_entry
->stub_sec
->output_section
->vma
);
11275 relocation
= value
+ signed_addend
;
11276 relocation
-= (input_section
->output_section
->vma
11277 + input_section
->output_offset
11279 signed_check
= (bfd_signed_vma
) relocation
;
11281 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11284 /* Put RELOCATION back into the insn. */
11286 bfd_vma S
= (relocation
& 0x00100000) >> 20;
11287 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
11288 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
11289 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
11290 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
11292 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
11293 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
11296 /* Put the relocated value back in the object file: */
11297 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11298 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11300 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
11303 case R_ARM_THM_JUMP11
:
11304 case R_ARM_THM_JUMP8
:
11305 case R_ARM_THM_JUMP6
:
11306 /* Thumb B (branch) instruction). */
11308 bfd_signed_vma relocation
;
11309 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
11310 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
11311 bfd_signed_vma signed_check
;
11313 /* CZB cannot jump backward. */
11314 if (r_type
== R_ARM_THM_JUMP6
)
11316 reloc_signed_min
= 0;
11317 if (globals
->use_rel
)
11318 signed_addend
= ((addend
& 0x200) >> 3) | ((addend
& 0xf8) >> 2);
11321 relocation
= value
+ signed_addend
;
11323 relocation
-= (input_section
->output_section
->vma
11324 + input_section
->output_offset
11327 relocation
>>= howto
->rightshift
;
11328 signed_check
= relocation
;
11330 if (r_type
== R_ARM_THM_JUMP6
)
11331 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
11333 relocation
&= howto
->dst_mask
;
11334 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
11336 bfd_put_16 (input_bfd
, relocation
, hit_data
);
11338 /* Assumes two's complement. */
11339 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11340 return bfd_reloc_overflow
;
11342 return bfd_reloc_ok
;
11345 case R_ARM_ALU_PCREL7_0
:
11346 case R_ARM_ALU_PCREL15_8
:
11347 case R_ARM_ALU_PCREL23_15
:
11350 bfd_vma relocation
;
11352 insn
= bfd_get_32 (input_bfd
, hit_data
);
11353 if (globals
->use_rel
)
11355 /* Extract the addend. */
11356 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
11357 signed_addend
= addend
;
11359 relocation
= value
+ signed_addend
;
11361 relocation
-= (input_section
->output_section
->vma
11362 + input_section
->output_offset
11364 insn
= (insn
& ~0xfff)
11365 | ((howto
->bitpos
<< 7) & 0xf00)
11366 | ((relocation
>> howto
->bitpos
) & 0xff);
11367 bfd_put_32 (input_bfd
, value
, hit_data
);
11369 return bfd_reloc_ok
;
11371 case R_ARM_GNU_VTINHERIT
:
11372 case R_ARM_GNU_VTENTRY
:
11373 return bfd_reloc_ok
;
11375 case R_ARM_GOTOFF32
:
11376 /* Relocation is relative to the start of the
11377 global offset table. */
11379 BFD_ASSERT (sgot
!= NULL
);
11381 return bfd_reloc_notsupported
;
11383 /* If we are addressing a Thumb function, we need to adjust the
11384 address by one, so that attempts to call the function pointer will
11385 correctly interpret it as Thumb code. */
11386 if (branch_type
== ST_BRANCH_TO_THUMB
)
11389 /* Note that sgot->output_offset is not involved in this
11390 calculation. We always want the start of .got. If we
11391 define _GLOBAL_OFFSET_TABLE in a different way, as is
11392 permitted by the ABI, we might have to change this
11394 value
-= sgot
->output_section
->vma
;
11395 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11396 contents
, rel
->r_offset
, value
,
11400 /* Use global offset table as symbol value. */
11401 BFD_ASSERT (sgot
!= NULL
);
11404 return bfd_reloc_notsupported
;
11406 *unresolved_reloc_p
= false;
11407 value
= sgot
->output_section
->vma
;
11408 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11409 contents
, rel
->r_offset
, value
,
11413 case R_ARM_GOT_PREL
:
11414 /* Relocation is to the entry for this symbol in the
11415 global offset table. */
11417 return bfd_reloc_notsupported
;
11419 if (dynreloc_st_type
== STT_GNU_IFUNC
11420 && plt_offset
!= (bfd_vma
) -1
11421 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
11423 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11424 symbol, and the relocation resolves directly to the runtime
11425 target rather than to the .iplt entry. This means that any
11426 .got entry would be the same value as the .igot.plt entry,
11427 so there's no point creating both. */
11428 sgot
= globals
->root
.igotplt
;
11429 value
= sgot
->output_offset
+ gotplt_offset
;
11431 else if (h
!= NULL
)
11435 off
= h
->got
.offset
;
11436 BFD_ASSERT (off
!= (bfd_vma
) -1);
11437 if ((off
& 1) != 0)
11439 /* We have already processsed one GOT relocation against
11442 if (globals
->root
.dynamic_sections_created
11443 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11444 *unresolved_reloc_p
= false;
11448 Elf_Internal_Rela outrel
;
11451 if (((h
->dynindx
!= -1) || globals
->fdpic_p
)
11452 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11454 /* If the symbol doesn't resolve locally in a static
11455 object, we have an undefined reference. If the
11456 symbol doesn't resolve locally in a dynamic object,
11457 it should be resolved by the dynamic linker. */
11458 if (globals
->root
.dynamic_sections_created
)
11460 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
11461 *unresolved_reloc_p
= false;
11465 outrel
.r_addend
= 0;
11469 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11470 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11471 else if (bfd_link_pic (info
)
11472 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
11473 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11477 if (globals
->fdpic_p
)
11480 outrel
.r_addend
= dynreloc_value
;
11483 /* The GOT entry is initialized to zero by default.
11484 See if we should install a different value. */
11485 if (outrel
.r_addend
!= 0
11486 && (globals
->use_rel
|| outrel
.r_info
== 0))
11488 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11489 sgot
->contents
+ off
);
11490 outrel
.r_addend
= 0;
11494 arm_elf_add_rofixup (output_bfd
,
11495 elf32_arm_hash_table (info
)->srofixup
,
11496 sgot
->output_section
->vma
11497 + sgot
->output_offset
+ off
);
11499 else if (outrel
.r_info
!= 0)
11501 outrel
.r_offset
= (sgot
->output_section
->vma
11502 + sgot
->output_offset
11504 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11507 h
->got
.offset
|= 1;
11509 value
= sgot
->output_offset
+ off
;
11515 BFD_ASSERT (local_got_offsets
!= NULL
11516 && local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
11518 off
= local_got_offsets
[r_symndx
];
11520 /* The offset must always be a multiple of 4. We use the
11521 least significant bit to record whether we have already
11522 generated the necessary reloc. */
11523 if ((off
& 1) != 0)
11527 Elf_Internal_Rela outrel
;
11530 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11531 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11532 else if (bfd_link_pic (info
))
11533 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11537 if (globals
->fdpic_p
)
11541 /* The GOT entry is initialized to zero by default.
11542 See if we should install a different value. */
11543 if (globals
->use_rel
|| outrel
.r_info
== 0)
11544 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
11547 arm_elf_add_rofixup (output_bfd
,
11549 sgot
->output_section
->vma
11550 + sgot
->output_offset
+ off
);
11552 else if (outrel
.r_info
!= 0)
11554 outrel
.r_addend
= addend
+ dynreloc_value
;
11555 outrel
.r_offset
= (sgot
->output_section
->vma
11556 + sgot
->output_offset
11558 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11561 local_got_offsets
[r_symndx
] |= 1;
11564 value
= sgot
->output_offset
+ off
;
11566 if (r_type
!= R_ARM_GOT32
)
11567 value
+= sgot
->output_section
->vma
;
11569 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11570 contents
, rel
->r_offset
, value
,
11573 case R_ARM_TLS_LDO32
:
11574 value
= value
- dtpoff_base (info
);
11576 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11577 contents
, rel
->r_offset
, value
,
11580 case R_ARM_TLS_LDM32
:
11581 case R_ARM_TLS_LDM32_FDPIC
:
11588 off
= globals
->tls_ldm_got
.offset
;
11590 if ((off
& 1) != 0)
11594 /* If we don't know the module number, create a relocation
11596 if (bfd_link_dll (info
))
11598 Elf_Internal_Rela outrel
;
11600 if (srelgot
== NULL
)
11603 outrel
.r_addend
= 0;
11604 outrel
.r_offset
= (sgot
->output_section
->vma
11605 + sgot
->output_offset
+ off
);
11606 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
11608 if (globals
->use_rel
)
11609 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11610 sgot
->contents
+ off
);
11612 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11615 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
11617 globals
->tls_ldm_got
.offset
|= 1;
11620 if (r_type
== R_ARM_TLS_LDM32_FDPIC
)
11622 bfd_put_32 (output_bfd
,
11623 globals
->root
.sgot
->output_offset
+ off
,
11624 contents
+ rel
->r_offset
);
11626 return bfd_reloc_ok
;
11630 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
11631 - (input_section
->output_section
->vma
11632 + input_section
->output_offset
+ rel
->r_offset
);
11634 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11635 contents
, rel
->r_offset
, value
,
11640 case R_ARM_TLS_CALL
:
11641 case R_ARM_THM_TLS_CALL
:
11642 case R_ARM_TLS_GD32
:
11643 case R_ARM_TLS_GD32_FDPIC
:
11644 case R_ARM_TLS_IE32
:
11645 case R_ARM_TLS_IE32_FDPIC
:
11646 case R_ARM_TLS_GOTDESC
:
11647 case R_ARM_TLS_DESCSEQ
:
11648 case R_ARM_THM_TLS_DESCSEQ
:
11650 bfd_vma off
, offplt
;
11654 BFD_ASSERT (sgot
!= NULL
);
11659 dyn
= globals
->root
.dynamic_sections_created
;
11660 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
11661 bfd_link_pic (info
),
11663 && (!bfd_link_pic (info
)
11664 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
11666 *unresolved_reloc_p
= false;
11669 off
= h
->got
.offset
;
11670 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
11671 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
11675 BFD_ASSERT (local_got_offsets
!= NULL
);
11677 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
11679 _bfd_error_handler (_("\
11680 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11682 (unsigned long) elf32_arm_num_entries (input_bfd
),
11686 off
= local_got_offsets
[r_symndx
];
11687 offplt
= local_tlsdesc_gotents
[r_symndx
];
11688 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
11691 /* Linker relaxations happens from one of the
11692 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11693 if (ELF32_R_TYPE (rel
->r_info
) != r_type
)
11694 tls_type
= GOT_TLS_IE
;
11696 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
11698 if ((off
& 1) != 0)
11702 bool need_relocs
= false;
11703 Elf_Internal_Rela outrel
;
11706 /* The GOT entries have not been initialized yet. Do it
11707 now, and emit any relocations. If both an IE GOT and a
11708 GD GOT are necessary, we emit the GD first. */
11710 if ((bfd_link_dll (info
) || indx
!= 0)
11712 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11713 && !resolved_to_zero
)
11714 || h
->root
.type
!= bfd_link_hash_undefweak
))
11716 need_relocs
= true;
11717 BFD_ASSERT (srelgot
!= NULL
);
11720 if (tls_type
& GOT_TLS_GDESC
)
11724 /* We should have relaxed, unless this is an undefined
11726 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
11727 || bfd_link_dll (info
));
11728 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
11729 <= globals
->root
.sgotplt
->size
);
11731 outrel
.r_addend
= 0;
11732 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
11733 + globals
->root
.sgotplt
->output_offset
11735 + globals
->sgotplt_jump_table_size
);
11737 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
11738 sreloc
= globals
->root
.srelplt
;
11739 loc
= sreloc
->contents
;
11740 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
11741 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
11742 <= sreloc
->contents
+ sreloc
->size
);
11744 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
11746 /* For globals, the first word in the relocation gets
11747 the relocation index and the top bit set, or zero,
11748 if we're binding now. For locals, it gets the
11749 symbol's offset in the tls section. */
11750 bfd_put_32 (output_bfd
,
11751 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
11752 : info
->flags
& DF_BIND_NOW
? 0
11753 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
11754 globals
->root
.sgotplt
->contents
+ offplt
11755 + globals
->sgotplt_jump_table_size
);
11757 /* Second word in the relocation is always zero. */
11758 bfd_put_32 (output_bfd
, 0,
11759 globals
->root
.sgotplt
->contents
+ offplt
11760 + globals
->sgotplt_jump_table_size
+ 4);
11762 if (tls_type
& GOT_TLS_GD
)
11766 outrel
.r_addend
= 0;
11767 outrel
.r_offset
= (sgot
->output_section
->vma
11768 + sgot
->output_offset
11770 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
11772 if (globals
->use_rel
)
11773 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11774 sgot
->contents
+ cur_off
);
11776 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11779 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11780 sgot
->contents
+ cur_off
+ 4);
11783 outrel
.r_addend
= 0;
11784 outrel
.r_info
= ELF32_R_INFO (indx
,
11785 R_ARM_TLS_DTPOFF32
);
11786 outrel
.r_offset
+= 4;
11788 if (globals
->use_rel
)
11789 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11790 sgot
->contents
+ cur_off
+ 4);
11792 elf32_arm_add_dynreloc (output_bfd
, info
,
11798 /* If we are not emitting relocations for a
11799 general dynamic reference, then we must be in a
11800 static link or an executable link with the
11801 symbol binding locally. Mark it as belonging
11802 to module 1, the executable. */
11803 bfd_put_32 (output_bfd
, 1,
11804 sgot
->contents
+ cur_off
);
11805 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11806 sgot
->contents
+ cur_off
+ 4);
11812 if (tls_type
& GOT_TLS_IE
)
11817 outrel
.r_addend
= value
- dtpoff_base (info
);
11819 outrel
.r_addend
= 0;
11820 outrel
.r_offset
= (sgot
->output_section
->vma
11821 + sgot
->output_offset
11823 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
11825 if (globals
->use_rel
)
11826 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11827 sgot
->contents
+ cur_off
);
11829 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11832 bfd_put_32 (output_bfd
, tpoff (info
, value
),
11833 sgot
->contents
+ cur_off
);
11838 h
->got
.offset
|= 1;
11840 local_got_offsets
[r_symndx
] |= 1;
11843 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
&& r_type
!= R_ARM_TLS_GD32_FDPIC
)
11845 else if (tls_type
& GOT_TLS_GDESC
)
11848 if (ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
11849 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
)
11851 bfd_signed_vma offset
;
11852 /* TLS stubs are arm mode. The original symbol is a
11853 data object, so branch_type is bogus. */
11854 branch_type
= ST_BRANCH_TO_ARM
;
11855 enum elf32_arm_stub_type stub_type
11856 = arm_type_of_stub (info
, input_section
, rel
,
11857 st_type
, &branch_type
,
11858 (struct elf32_arm_link_hash_entry
*)h
,
11859 globals
->tls_trampoline
, globals
->root
.splt
,
11860 input_bfd
, sym_name
);
11862 if (stub_type
!= arm_stub_none
)
11864 struct elf32_arm_stub_hash_entry
*stub_entry
11865 = elf32_arm_get_stub_entry
11866 (input_section
, globals
->root
.splt
, 0, rel
,
11867 globals
, stub_type
);
11868 offset
= (stub_entry
->stub_offset
11869 + stub_entry
->stub_sec
->output_offset
11870 + stub_entry
->stub_sec
->output_section
->vma
);
11873 offset
= (globals
->root
.splt
->output_section
->vma
11874 + globals
->root
.splt
->output_offset
11875 + globals
->tls_trampoline
);
11877 if (ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
)
11879 unsigned long inst
;
11881 offset
-= (input_section
->output_section
->vma
11882 + input_section
->output_offset
11883 + rel
->r_offset
+ 8);
11885 inst
= offset
>> 2;
11886 inst
&= 0x00ffffff;
11887 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
11891 /* Thumb blx encodes the offset in a complicated
11893 unsigned upper_insn
, lower_insn
;
11896 offset
-= (input_section
->output_section
->vma
11897 + input_section
->output_offset
11898 + rel
->r_offset
+ 4);
11900 if (stub_type
!= arm_stub_none
11901 && arm_stub_is_thumb (stub_type
))
11903 lower_insn
= 0xd000;
11907 lower_insn
= 0xc000;
11908 /* Round up the offset to a word boundary. */
11909 offset
= (offset
+ 2) & ~2;
11913 upper_insn
= (0xf000
11914 | ((offset
>> 12) & 0x3ff)
11916 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
11917 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
11918 | ((offset
>> 1) & 0x7ff);
11919 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11920 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11921 return bfd_reloc_ok
;
11924 /* These relocations needs special care, as besides the fact
11925 they point somewhere in .gotplt, the addend must be
11926 adjusted accordingly depending on the type of instruction
11928 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
11930 unsigned long data
, insn
;
11933 data
= bfd_get_signed_32 (input_bfd
, hit_data
);
11939 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
11940 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
11941 insn
= (insn
<< 16)
11942 | bfd_get_16 (input_bfd
,
11943 contents
+ rel
->r_offset
- data
+ 2);
11944 if ((insn
& 0xf800c000) == 0xf000c000)
11947 else if ((insn
& 0xffffff00) == 0x4400)
11953 /* xgettext:c-format */
11954 (_("%pB(%pA+%#" PRIx64
"): "
11955 "unexpected %s instruction '%#lx' "
11956 "referenced by TLS_GOTDESC"),
11957 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
11959 return bfd_reloc_notsupported
;
11964 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
11966 switch (insn
>> 24)
11968 case 0xeb: /* bl */
11969 case 0xfa: /* blx */
11973 case 0xe0: /* add */
11979 /* xgettext:c-format */
11980 (_("%pB(%pA+%#" PRIx64
"): "
11981 "unexpected %s instruction '%#lx' "
11982 "referenced by TLS_GOTDESC"),
11983 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
11985 return bfd_reloc_notsupported
;
11989 value
+= ((globals
->root
.sgotplt
->output_section
->vma
11990 + globals
->root
.sgotplt
->output_offset
+ off
)
11991 - (input_section
->output_section
->vma
11992 + input_section
->output_offset
11994 + globals
->sgotplt_jump_table_size
);
11997 value
= ((globals
->root
.sgot
->output_section
->vma
11998 + globals
->root
.sgot
->output_offset
+ off
)
11999 - (input_section
->output_section
->vma
12000 + input_section
->output_offset
+ rel
->r_offset
));
12002 if (globals
->fdpic_p
&& (r_type
== R_ARM_TLS_GD32_FDPIC
||
12003 r_type
== R_ARM_TLS_IE32_FDPIC
))
12005 /* For FDPIC relocations, resolve to the offset of the GOT
12006 entry from the start of GOT. */
12007 bfd_put_32 (output_bfd
,
12008 globals
->root
.sgot
->output_offset
+ off
,
12009 contents
+ rel
->r_offset
);
12011 return bfd_reloc_ok
;
12015 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12016 contents
, rel
->r_offset
, value
,
12021 case R_ARM_TLS_LE32
:
12022 if (bfd_link_dll (info
))
12025 /* xgettext:c-format */
12026 (_("%pB(%pA+%#" PRIx64
"): %s relocation not permitted "
12027 "in shared object"),
12028 input_bfd
, input_section
, (uint64_t) rel
->r_offset
, howto
->name
);
12029 return bfd_reloc_notsupported
;
12032 value
= tpoff (info
, value
);
12034 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12035 contents
, rel
->r_offset
, value
,
12039 if (globals
->fix_v4bx
)
12041 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12043 /* Ensure that we have a BX instruction. */
12044 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
12046 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
12048 /* Branch to veneer. */
12050 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
12051 glue_addr
-= input_section
->output_section
->vma
12052 + input_section
->output_offset
12053 + rel
->r_offset
+ 8;
12054 insn
= (insn
& 0xf0000000) | 0x0a000000
12055 | ((glue_addr
>> 2) & 0x00ffffff);
12059 /* Preserve Rm (lowest four bits) and the condition code
12060 (highest four bits). Other bits encode MOV PC,Rm. */
12061 insn
= (insn
& 0xf000000f) | 0x01a0f000;
12064 bfd_put_32 (input_bfd
, insn
, hit_data
);
12066 return bfd_reloc_ok
;
12068 case R_ARM_MOVW_ABS_NC
:
12069 case R_ARM_MOVT_ABS
:
12070 case R_ARM_MOVW_PREL_NC
:
12071 case R_ARM_MOVT_PREL
:
12072 /* Until we properly support segment-base-relative addressing then
12073 we assume the segment base to be zero, as for the group relocations.
12074 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12075 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12076 case R_ARM_MOVW_BREL_NC
:
12077 case R_ARM_MOVW_BREL
:
12078 case R_ARM_MOVT_BREL
:
12080 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12082 if (globals
->use_rel
)
12084 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
12085 signed_addend
= (addend
^ 0x8000) - 0x8000;
12088 value
+= signed_addend
;
12090 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
12091 value
-= (input_section
->output_section
->vma
12092 + input_section
->output_offset
+ rel
->r_offset
);
12094 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
12095 return bfd_reloc_overflow
;
12097 if (branch_type
== ST_BRANCH_TO_THUMB
)
12100 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
12101 || r_type
== R_ARM_MOVT_BREL
)
12104 insn
&= 0xfff0f000;
12105 insn
|= value
& 0xfff;
12106 insn
|= (value
& 0xf000) << 4;
12107 bfd_put_32 (input_bfd
, insn
, hit_data
);
12109 return bfd_reloc_ok
;
12111 case R_ARM_THM_MOVW_ABS_NC
:
12112 case R_ARM_THM_MOVT_ABS
:
12113 case R_ARM_THM_MOVW_PREL_NC
:
12114 case R_ARM_THM_MOVT_PREL
:
12115 /* Until we properly support segment-base-relative addressing then
12116 we assume the segment base to be zero, as for the above relocations.
12117 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12118 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12119 as R_ARM_THM_MOVT_ABS. */
12120 case R_ARM_THM_MOVW_BREL_NC
:
12121 case R_ARM_THM_MOVW_BREL
:
12122 case R_ARM_THM_MOVT_BREL
:
12126 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
12127 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
12129 if (globals
->use_rel
)
12131 addend
= ((insn
>> 4) & 0xf000)
12132 | ((insn
>> 15) & 0x0800)
12133 | ((insn
>> 4) & 0x0700)
12135 signed_addend
= (addend
^ 0x8000) - 0x8000;
12138 value
+= signed_addend
;
12140 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
12141 value
-= (input_section
->output_section
->vma
12142 + input_section
->output_offset
+ rel
->r_offset
);
12144 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
12145 return bfd_reloc_overflow
;
12147 if (branch_type
== ST_BRANCH_TO_THUMB
)
12150 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
12151 || r_type
== R_ARM_THM_MOVT_BREL
)
12154 insn
&= 0xfbf08f00;
12155 insn
|= (value
& 0xf000) << 4;
12156 insn
|= (value
& 0x0800) << 15;
12157 insn
|= (value
& 0x0700) << 4;
12158 insn
|= (value
& 0x00ff);
12160 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
12161 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
12163 return bfd_reloc_ok
;
12165 case R_ARM_ALU_PC_G0_NC
:
12166 case R_ARM_ALU_PC_G1_NC
:
12167 case R_ARM_ALU_PC_G0
:
12168 case R_ARM_ALU_PC_G1
:
12169 case R_ARM_ALU_PC_G2
:
12170 case R_ARM_ALU_SB_G0_NC
:
12171 case R_ARM_ALU_SB_G1_NC
:
12172 case R_ARM_ALU_SB_G0
:
12173 case R_ARM_ALU_SB_G1
:
12174 case R_ARM_ALU_SB_G2
:
12176 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12177 bfd_vma pc
= input_section
->output_section
->vma
12178 + input_section
->output_offset
+ rel
->r_offset
;
12179 /* sb is the origin of the *segment* containing the symbol. */
12180 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12183 bfd_signed_vma signed_value
;
12186 /* Determine which group of bits to select. */
12189 case R_ARM_ALU_PC_G0_NC
:
12190 case R_ARM_ALU_PC_G0
:
12191 case R_ARM_ALU_SB_G0_NC
:
12192 case R_ARM_ALU_SB_G0
:
12196 case R_ARM_ALU_PC_G1_NC
:
12197 case R_ARM_ALU_PC_G1
:
12198 case R_ARM_ALU_SB_G1_NC
:
12199 case R_ARM_ALU_SB_G1
:
12203 case R_ARM_ALU_PC_G2
:
12204 case R_ARM_ALU_SB_G2
:
12212 /* If REL, extract the addend from the insn. If RELA, it will
12213 have already been fetched for us. */
12214 if (globals
->use_rel
)
12217 bfd_vma constant
= insn
& 0xff;
12218 bfd_vma rotation
= (insn
& 0xf00) >> 8;
12221 signed_addend
= constant
;
12224 /* Compensate for the fact that in the instruction, the
12225 rotation is stored in multiples of 2 bits. */
12228 /* Rotate "constant" right by "rotation" bits. */
12229 signed_addend
= (constant
>> rotation
) |
12230 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
12233 /* Determine if the instruction is an ADD or a SUB.
12234 (For REL, this determines the sign of the addend.) */
12235 negative
= identify_add_or_sub (insn
);
12239 /* xgettext:c-format */
12240 (_("%pB(%pA+%#" PRIx64
"): only ADD or SUB instructions "
12241 "are allowed for ALU group relocations"),
12242 input_bfd
, input_section
, (uint64_t) rel
->r_offset
);
12243 return bfd_reloc_overflow
;
12246 signed_addend
*= negative
;
12249 /* Compute the value (X) to go in the place. */
12250 if (r_type
== R_ARM_ALU_PC_G0_NC
12251 || r_type
== R_ARM_ALU_PC_G1_NC
12252 || r_type
== R_ARM_ALU_PC_G0
12253 || r_type
== R_ARM_ALU_PC_G1
12254 || r_type
== R_ARM_ALU_PC_G2
)
12256 signed_value
= value
- pc
+ signed_addend
;
12258 /* Section base relative. */
12259 signed_value
= value
- sb
+ signed_addend
;
12261 /* If the target symbol is a Thumb function, then set the
12262 Thumb bit in the address. */
12263 if (branch_type
== ST_BRANCH_TO_THUMB
)
12266 /* Calculate the value of the relevant G_n, in encoded
12267 constant-with-rotation format. */
12268 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12271 /* Check for overflow if required. */
12272 if ((r_type
== R_ARM_ALU_PC_G0
12273 || r_type
== R_ARM_ALU_PC_G1
12274 || r_type
== R_ARM_ALU_PC_G2
12275 || r_type
== R_ARM_ALU_SB_G0
12276 || r_type
== R_ARM_ALU_SB_G1
12277 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
12280 /* xgettext:c-format */
12281 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12282 "splitting %#" PRIx64
" for group relocation %s"),
12283 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12284 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12286 return bfd_reloc_overflow
;
12289 /* Mask out the value and the ADD/SUB part of the opcode; take care
12290 not to destroy the S bit. */
12291 insn
&= 0xff1ff000;
12293 /* Set the opcode according to whether the value to go in the
12294 place is negative. */
12295 if (signed_value
< 0)
12300 /* Encode the offset. */
12303 bfd_put_32 (input_bfd
, insn
, hit_data
);
12305 return bfd_reloc_ok
;
12307 case R_ARM_LDR_PC_G0
:
12308 case R_ARM_LDR_PC_G1
:
12309 case R_ARM_LDR_PC_G2
:
12310 case R_ARM_LDR_SB_G0
:
12311 case R_ARM_LDR_SB_G1
:
12312 case R_ARM_LDR_SB_G2
:
12314 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12315 bfd_vma pc
= input_section
->output_section
->vma
12316 + input_section
->output_offset
+ rel
->r_offset
;
12317 /* sb is the origin of the *segment* containing the symbol. */
12318 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12320 bfd_signed_vma signed_value
;
12323 /* Determine which groups of bits to calculate. */
12326 case R_ARM_LDR_PC_G0
:
12327 case R_ARM_LDR_SB_G0
:
12331 case R_ARM_LDR_PC_G1
:
12332 case R_ARM_LDR_SB_G1
:
12336 case R_ARM_LDR_PC_G2
:
12337 case R_ARM_LDR_SB_G2
:
12345 /* If REL, extract the addend from the insn. If RELA, it will
12346 have already been fetched for us. */
12347 if (globals
->use_rel
)
12349 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12350 signed_addend
= negative
* (insn
& 0xfff);
12353 /* Compute the value (X) to go in the place. */
12354 if (r_type
== R_ARM_LDR_PC_G0
12355 || r_type
== R_ARM_LDR_PC_G1
12356 || r_type
== R_ARM_LDR_PC_G2
)
12358 signed_value
= value
- pc
+ signed_addend
;
12360 /* Section base relative. */
12361 signed_value
= value
- sb
+ signed_addend
;
12363 /* Calculate the value of the relevant G_{n-1} to obtain
12364 the residual at that stage. */
12365 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12366 group
- 1, &residual
);
12368 /* Check for overflow. */
12369 if (residual
>= 0x1000)
12372 /* xgettext:c-format */
12373 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12374 "splitting %#" PRIx64
" for group relocation %s"),
12375 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12376 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12378 return bfd_reloc_overflow
;
12381 /* Mask out the value and U bit. */
12382 insn
&= 0xff7ff000;
12384 /* Set the U bit if the value to go in the place is non-negative. */
12385 if (signed_value
>= 0)
12388 /* Encode the offset. */
12391 bfd_put_32 (input_bfd
, insn
, hit_data
);
12393 return bfd_reloc_ok
;
12395 case R_ARM_LDRS_PC_G0
:
12396 case R_ARM_LDRS_PC_G1
:
12397 case R_ARM_LDRS_PC_G2
:
12398 case R_ARM_LDRS_SB_G0
:
12399 case R_ARM_LDRS_SB_G1
:
12400 case R_ARM_LDRS_SB_G2
:
12402 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12403 bfd_vma pc
= input_section
->output_section
->vma
12404 + input_section
->output_offset
+ rel
->r_offset
;
12405 /* sb is the origin of the *segment* containing the symbol. */
12406 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12408 bfd_signed_vma signed_value
;
12411 /* Determine which groups of bits to calculate. */
12414 case R_ARM_LDRS_PC_G0
:
12415 case R_ARM_LDRS_SB_G0
:
12419 case R_ARM_LDRS_PC_G1
:
12420 case R_ARM_LDRS_SB_G1
:
12424 case R_ARM_LDRS_PC_G2
:
12425 case R_ARM_LDRS_SB_G2
:
12433 /* If REL, extract the addend from the insn. If RELA, it will
12434 have already been fetched for us. */
12435 if (globals
->use_rel
)
12437 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12438 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
12441 /* Compute the value (X) to go in the place. */
12442 if (r_type
== R_ARM_LDRS_PC_G0
12443 || r_type
== R_ARM_LDRS_PC_G1
12444 || r_type
== R_ARM_LDRS_PC_G2
)
12446 signed_value
= value
- pc
+ signed_addend
;
12448 /* Section base relative. */
12449 signed_value
= value
- sb
+ signed_addend
;
12451 /* Calculate the value of the relevant G_{n-1} to obtain
12452 the residual at that stage. */
12453 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12454 group
- 1, &residual
);
12456 /* Check for overflow. */
12457 if (residual
>= 0x100)
12460 /* xgettext:c-format */
12461 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12462 "splitting %#" PRIx64
" for group relocation %s"),
12463 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12464 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12466 return bfd_reloc_overflow
;
12469 /* Mask out the value and U bit. */
12470 insn
&= 0xff7ff0f0;
12472 /* Set the U bit if the value to go in the place is non-negative. */
12473 if (signed_value
>= 0)
12476 /* Encode the offset. */
12477 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
12479 bfd_put_32 (input_bfd
, insn
, hit_data
);
12481 return bfd_reloc_ok
;
12483 case R_ARM_LDC_PC_G0
:
12484 case R_ARM_LDC_PC_G1
:
12485 case R_ARM_LDC_PC_G2
:
12486 case R_ARM_LDC_SB_G0
:
12487 case R_ARM_LDC_SB_G1
:
12488 case R_ARM_LDC_SB_G2
:
12490 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12491 bfd_vma pc
= input_section
->output_section
->vma
12492 + input_section
->output_offset
+ rel
->r_offset
;
12493 /* sb is the origin of the *segment* containing the symbol. */
12494 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12496 bfd_signed_vma signed_value
;
12499 /* Determine which groups of bits to calculate. */
12502 case R_ARM_LDC_PC_G0
:
12503 case R_ARM_LDC_SB_G0
:
12507 case R_ARM_LDC_PC_G1
:
12508 case R_ARM_LDC_SB_G1
:
12512 case R_ARM_LDC_PC_G2
:
12513 case R_ARM_LDC_SB_G2
:
12521 /* If REL, extract the addend from the insn. If RELA, it will
12522 have already been fetched for us. */
12523 if (globals
->use_rel
)
12525 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12526 signed_addend
= negative
* ((insn
& 0xff) << 2);
12529 /* Compute the value (X) to go in the place. */
12530 if (r_type
== R_ARM_LDC_PC_G0
12531 || r_type
== R_ARM_LDC_PC_G1
12532 || r_type
== R_ARM_LDC_PC_G2
)
12534 signed_value
= value
- pc
+ signed_addend
;
12536 /* Section base relative. */
12537 signed_value
= value
- sb
+ signed_addend
;
12539 /* Calculate the value of the relevant G_{n-1} to obtain
12540 the residual at that stage. */
12541 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12542 group
- 1, &residual
);
12544 /* Check for overflow. (The absolute value to go in the place must be
12545 divisible by four and, after having been divided by four, must
12546 fit in eight bits.) */
12547 if ((residual
& 0x3) != 0 || residual
>= 0x400)
12550 /* xgettext:c-format */
12551 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12552 "splitting %#" PRIx64
" for group relocation %s"),
12553 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12554 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12556 return bfd_reloc_overflow
;
12559 /* Mask out the value and U bit. */
12560 insn
&= 0xff7fff00;
12562 /* Set the U bit if the value to go in the place is non-negative. */
12563 if (signed_value
>= 0)
12566 /* Encode the offset. */
12567 insn
|= residual
>> 2;
12569 bfd_put_32 (input_bfd
, insn
, hit_data
);
12571 return bfd_reloc_ok
;
12573 case R_ARM_THM_ALU_ABS_G0_NC
:
12574 case R_ARM_THM_ALU_ABS_G1_NC
:
12575 case R_ARM_THM_ALU_ABS_G2_NC
:
12576 case R_ARM_THM_ALU_ABS_G3_NC
:
12578 const int shift_array
[4] = {0, 8, 16, 24};
12579 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
12580 bfd_vma addr
= value
;
12581 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
12583 /* Compute address. */
12584 if (globals
->use_rel
)
12585 signed_addend
= insn
& 0xff;
12586 addr
+= signed_addend
;
12587 if (branch_type
== ST_BRANCH_TO_THUMB
)
12589 /* Clean imm8 insn. */
12591 /* And update with correct part of address. */
12592 insn
|= (addr
>> shift
) & 0xff;
12594 bfd_put_16 (input_bfd
, insn
, hit_data
);
12597 *unresolved_reloc_p
= false;
12598 return bfd_reloc_ok
;
12600 case R_ARM_GOTOFFFUNCDESC
:
12604 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (input_bfd
);
12605 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12607 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
12609 * error_message
= _("local symbol index too big");
12610 return bfd_reloc_dangerous
;
12613 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12614 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12617 if (bfd_link_pic (info
) && dynindx
== 0)
12619 * error_message
= _("no dynamic index information available");
12620 return bfd_reloc_dangerous
;
12623 /* Resolve relocation. */
12624 bfd_put_32 (output_bfd
, (offset
+ sgot
->output_offset
)
12625 , contents
+ rel
->r_offset
);
12626 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12628 arm_elf_fill_funcdesc (output_bfd
, info
,
12629 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12630 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12635 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12639 /* For static binaries, sym_sec can be null. */
12642 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12643 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12651 if (bfd_link_pic (info
) && dynindx
== 0)
12653 * error_message
= _("no dynamic index information available");
12654 return bfd_reloc_dangerous
;
12657 /* This case cannot occur since funcdesc is allocated by
12658 the dynamic loader so we cannot resolve the relocation. */
12659 if (h
->dynindx
!= -1)
12661 * error_message
= _("invalid dynamic index");
12662 return bfd_reloc_dangerous
;
12665 /* Resolve relocation. */
12666 bfd_put_32 (output_bfd
, (offset
+ sgot
->output_offset
),
12667 contents
+ rel
->r_offset
);
12668 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12669 arm_elf_fill_funcdesc (output_bfd
, info
,
12670 &eh
->fdpic_cnts
.funcdesc_offset
,
12671 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12674 *unresolved_reloc_p
= false;
12675 return bfd_reloc_ok
;
12677 case R_ARM_GOTFUNCDESC
:
12681 Elf_Internal_Rela outrel
;
12683 /* Resolve relocation. */
12684 bfd_put_32 (output_bfd
, ((eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1)
12685 + sgot
->output_offset
),
12686 contents
+ rel
->r_offset
);
12687 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12688 if (h
->dynindx
== -1)
12691 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12695 /* For static binaries sym_sec can be null. */
12698 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12699 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12707 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12708 arm_elf_fill_funcdesc (output_bfd
, info
,
12709 &eh
->fdpic_cnts
.funcdesc_offset
,
12710 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12713 /* Add a dynamic relocation on GOT entry if not already done. */
12714 if ((eh
->fdpic_cnts
.gotfuncdesc_offset
& 1) == 0)
12716 if (h
->dynindx
== -1)
12718 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12719 if (h
->root
.type
== bfd_link_hash_undefweak
)
12720 bfd_put_32 (output_bfd
, 0, sgot
->contents
12721 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12723 bfd_put_32 (output_bfd
, sgot
->output_section
->vma
12724 + sgot
->output_offset
12725 + (eh
->fdpic_cnts
.funcdesc_offset
& ~1),
12727 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12731 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12733 outrel
.r_offset
= sgot
->output_section
->vma
12734 + sgot
->output_offset
12735 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1);
12736 outrel
.r_addend
= 0;
12737 if (h
->dynindx
== -1 && !bfd_link_pic (info
))
12738 if (h
->root
.type
== bfd_link_hash_undefweak
)
12739 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, -1);
12741 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
12744 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12745 eh
->fdpic_cnts
.gotfuncdesc_offset
|= 1;
12750 /* Such relocation on static function should not have been
12751 emitted by the compiler. */
12752 return bfd_reloc_notsupported
;
12755 *unresolved_reloc_p
= false;
12756 return bfd_reloc_ok
;
12758 case R_ARM_FUNCDESC
:
12762 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (input_bfd
);
12763 Elf_Internal_Rela outrel
;
12764 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12766 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
12768 * error_message
= _("local symbol index too big");
12769 return bfd_reloc_dangerous
;
12772 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12773 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12776 if (bfd_link_pic (info
) && dynindx
== 0)
12778 * error_message
= _("dynamic index information not available");
12779 return bfd_reloc_dangerous
;
12782 /* Replace static FUNCDESC relocation with a
12783 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12785 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12786 outrel
.r_offset
= input_section
->output_section
->vma
12787 + input_section
->output_offset
+ rel
->r_offset
;
12788 outrel
.r_addend
= 0;
12789 if (bfd_link_pic (info
))
12790 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12792 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12794 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12795 + sgot
->output_offset
+ offset
, hit_data
);
12797 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12798 arm_elf_fill_funcdesc (output_bfd
, info
,
12799 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12800 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12804 if (h
->dynindx
== -1)
12807 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12810 Elf_Internal_Rela outrel
;
12812 /* For static binaries sym_sec can be null. */
12815 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12816 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12824 if (bfd_link_pic (info
) && dynindx
== 0)
12827 /* Replace static FUNCDESC relocation with a
12828 R_ARM_RELATIVE dynamic relocation. */
12829 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12830 outrel
.r_offset
= input_section
->output_section
->vma
12831 + input_section
->output_offset
+ rel
->r_offset
;
12832 outrel
.r_addend
= 0;
12833 if (bfd_link_pic (info
))
12834 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12836 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12838 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12839 + sgot
->output_offset
+ offset
, hit_data
);
12841 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12842 arm_elf_fill_funcdesc (output_bfd
, info
,
12843 &eh
->fdpic_cnts
.funcdesc_offset
,
12844 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12848 Elf_Internal_Rela outrel
;
12850 /* Add a dynamic relocation. */
12851 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12852 outrel
.r_offset
= input_section
->output_section
->vma
12853 + input_section
->output_offset
+ rel
->r_offset
;
12854 outrel
.r_addend
= 0;
12855 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12859 *unresolved_reloc_p
= false;
12860 return bfd_reloc_ok
;
12862 case R_ARM_THM_BF16
:
12864 bfd_vma relocation
;
12865 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
12866 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
12868 if (globals
->use_rel
)
12870 bfd_vma immA
= (upper_insn
& 0x001f);
12871 bfd_vma immB
= (lower_insn
& 0x07fe) >> 1;
12872 bfd_vma immC
= (lower_insn
& 0x0800) >> 11;
12873 addend
= (immA
<< 12);
12874 addend
|= (immB
<< 2);
12875 addend
|= (immC
<< 1);
12878 signed_addend
= (addend
& 0x10000) ? addend
- (1 << 17) : addend
;
12881 relocation
= value
+ signed_addend
;
12882 relocation
-= (input_section
->output_section
->vma
12883 + input_section
->output_offset
12886 /* Put RELOCATION back into the insn. */
12888 bfd_vma immA
= (relocation
& 0x0001f000) >> 12;
12889 bfd_vma immB
= (relocation
& 0x00000ffc) >> 2;
12890 bfd_vma immC
= (relocation
& 0x00000002) >> 1;
12892 upper_insn
= (upper_insn
& 0xffe0) | immA
;
12893 lower_insn
= (lower_insn
& 0xf001) | (immC
<< 11) | (immB
<< 1);
12896 /* Put the relocated value back in the object file: */
12897 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
12898 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
12900 return bfd_reloc_ok
;
12903 case R_ARM_THM_BF12
:
12905 bfd_vma relocation
;
12906 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
12907 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
12909 if (globals
->use_rel
)
12911 bfd_vma immA
= (upper_insn
& 0x0001);
12912 bfd_vma immB
= (lower_insn
& 0x07fe) >> 1;
12913 bfd_vma immC
= (lower_insn
& 0x0800) >> 11;
12914 addend
= (immA
<< 12);
12915 addend
|= (immB
<< 2);
12916 addend
|= (immC
<< 1);
12919 addend
= (addend
& 0x1000) ? addend
- (1 << 13) : addend
;
12920 signed_addend
= addend
;
12923 relocation
= value
+ signed_addend
;
12924 relocation
-= (input_section
->output_section
->vma
12925 + input_section
->output_offset
12928 /* Put RELOCATION back into the insn. */
12930 bfd_vma immA
= (relocation
& 0x00001000) >> 12;
12931 bfd_vma immB
= (relocation
& 0x00000ffc) >> 2;
12932 bfd_vma immC
= (relocation
& 0x00000002) >> 1;
12934 upper_insn
= (upper_insn
& 0xfffe) | immA
;
12935 lower_insn
= (lower_insn
& 0xf001) | (immC
<< 11) | (immB
<< 1);
12938 /* Put the relocated value back in the object file: */
12939 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
12940 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
12942 return bfd_reloc_ok
;
12945 case R_ARM_THM_BF18
:
12947 bfd_vma relocation
;
12948 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
12949 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
12951 if (globals
->use_rel
)
12953 bfd_vma immA
= (upper_insn
& 0x007f);
12954 bfd_vma immB
= (lower_insn
& 0x07fe) >> 1;
12955 bfd_vma immC
= (lower_insn
& 0x0800) >> 11;
12956 addend
= (immA
<< 12);
12957 addend
|= (immB
<< 2);
12958 addend
|= (immC
<< 1);
12961 addend
= (addend
& 0x40000) ? addend
- (1 << 19) : addend
;
12962 signed_addend
= addend
;
12965 relocation
= value
+ signed_addend
;
12966 relocation
-= (input_section
->output_section
->vma
12967 + input_section
->output_offset
12970 /* Put RELOCATION back into the insn. */
12972 bfd_vma immA
= (relocation
& 0x0007f000) >> 12;
12973 bfd_vma immB
= (relocation
& 0x00000ffc) >> 2;
12974 bfd_vma immC
= (relocation
& 0x00000002) >> 1;
12976 upper_insn
= (upper_insn
& 0xff80) | immA
;
12977 lower_insn
= (lower_insn
& 0xf001) | (immC
<< 11) | (immB
<< 1);
12980 /* Put the relocated value back in the object file: */
12981 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
12982 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
12984 return bfd_reloc_ok
;
12988 return bfd_reloc_notsupported
;
12992 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12994 arm_add_to_rel (bfd
* abfd
,
12995 bfd_byte
* address
,
12996 reloc_howto_type
* howto
,
12997 bfd_signed_vma increment
)
12999 bfd_signed_vma addend
;
13001 if (howto
->type
== R_ARM_THM_CALL
13002 || howto
->type
== R_ARM_THM_JUMP24
)
13004 int upper_insn
, lower_insn
;
13007 upper_insn
= bfd_get_16 (abfd
, address
);
13008 lower_insn
= bfd_get_16 (abfd
, address
+ 2);
13009 upper
= upper_insn
& 0x7ff;
13010 lower
= lower_insn
& 0x7ff;
13012 addend
= (upper
<< 12) | (lower
<< 1);
13013 addend
+= increment
;
13016 upper_insn
= (upper_insn
& 0xf800) | ((addend
>> 11) & 0x7ff);
13017 lower_insn
= (lower_insn
& 0xf800) | (addend
& 0x7ff);
13019 bfd_put_16 (abfd
, (bfd_vma
) upper_insn
, address
);
13020 bfd_put_16 (abfd
, (bfd_vma
) lower_insn
, address
+ 2);
13026 contents
= bfd_get_32 (abfd
, address
);
13028 /* Get the (signed) value from the instruction. */
13029 addend
= contents
& howto
->src_mask
;
13030 if (addend
& ((howto
->src_mask
+ 1) >> 1))
13032 bfd_signed_vma mask
;
13035 mask
&= ~ howto
->src_mask
;
13039 /* Add in the increment, (which is a byte value). */
13040 switch (howto
->type
)
13043 addend
+= increment
;
13050 addend
<<= howto
->size
;
13051 addend
+= increment
;
13053 /* Should we check for overflow here ? */
13055 /* Drop any undesired bits. */
13056 addend
>>= howto
->rightshift
;
13060 contents
= (contents
& ~ howto
->dst_mask
) | (addend
& howto
->dst_mask
);
13062 bfd_put_32 (abfd
, contents
, address
);
13066 #define IS_ARM_TLS_RELOC(R_TYPE) \
13067 ((R_TYPE) == R_ARM_TLS_GD32 \
13068 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
13069 || (R_TYPE) == R_ARM_TLS_LDO32 \
13070 || (R_TYPE) == R_ARM_TLS_LDM32 \
13071 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
13072 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
13073 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
13074 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
13075 || (R_TYPE) == R_ARM_TLS_LE32 \
13076 || (R_TYPE) == R_ARM_TLS_IE32 \
13077 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
13078 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13080 /* Specific set of relocations for the gnu tls dialect. */
13081 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
13082 ((R_TYPE) == R_ARM_TLS_GOTDESC \
13083 || (R_TYPE) == R_ARM_TLS_CALL \
13084 || (R_TYPE) == R_ARM_THM_TLS_CALL \
13085 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
13086 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13088 /* Relocate an ARM ELF section. */
13091 elf32_arm_relocate_section (bfd
* output_bfd
,
13092 struct bfd_link_info
* info
,
13094 asection
* input_section
,
13095 bfd_byte
* contents
,
13096 Elf_Internal_Rela
* relocs
,
13097 Elf_Internal_Sym
* local_syms
,
13098 asection
** local_sections
)
13100 Elf_Internal_Shdr
*symtab_hdr
;
13101 struct elf_link_hash_entry
**sym_hashes
;
13102 Elf_Internal_Rela
*rel
;
13103 Elf_Internal_Rela
*relend
;
13105 struct elf32_arm_link_hash_table
* globals
;
13107 globals
= elf32_arm_hash_table (info
);
13108 if (globals
== NULL
)
13111 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
13112 sym_hashes
= elf_sym_hashes (input_bfd
);
13115 relend
= relocs
+ input_section
->reloc_count
;
13116 for (; rel
< relend
; rel
++)
13119 reloc_howto_type
* howto
;
13120 unsigned long r_symndx
;
13121 Elf_Internal_Sym
* sym
;
13123 struct elf_link_hash_entry
* h
;
13124 bfd_vma relocation
;
13125 bfd_reloc_status_type r
;
13128 bool unresolved_reloc
= false;
13129 char *error_message
= NULL
;
13131 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13132 r_type
= ELF32_R_TYPE (rel
->r_info
);
13133 r_type
= arm_real_reloc_type (globals
, r_type
);
13135 if ( r_type
== R_ARM_GNU_VTENTRY
13136 || r_type
== R_ARM_GNU_VTINHERIT
)
13139 howto
= bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
13142 return _bfd_unrecognized_reloc (input_bfd
, input_section
, r_type
);
13148 if (r_symndx
< symtab_hdr
->sh_info
)
13150 sym
= local_syms
+ r_symndx
;
13151 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
13152 sec
= local_sections
[r_symndx
];
13154 /* An object file might have a reference to a local
13155 undefined symbol. This is a daft object file, but we
13156 should at least do something about it. V4BX & NONE
13157 relocations do not use the symbol and are explicitly
13158 allowed to use the undefined symbol, so allow those.
13159 Likewise for relocations against STN_UNDEF. */
13160 if (r_type
!= R_ARM_V4BX
13161 && r_type
!= R_ARM_NONE
13162 && r_symndx
!= STN_UNDEF
13163 && bfd_is_und_section (sec
)
13164 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
13165 (*info
->callbacks
->undefined_symbol
)
13166 (info
, bfd_elf_string_from_elf_section
13167 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
13168 input_bfd
, input_section
,
13169 rel
->r_offset
, true);
13171 if (globals
->use_rel
)
13173 relocation
= (sec
->output_section
->vma
13174 + sec
->output_offset
13176 if (!bfd_link_relocatable (info
)
13177 && (sec
->flags
& SEC_MERGE
)
13178 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13181 bfd_vma addend
, value
;
13185 case R_ARM_MOVW_ABS_NC
:
13186 case R_ARM_MOVT_ABS
:
13187 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13188 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
13189 addend
= (addend
^ 0x8000) - 0x8000;
13192 case R_ARM_THM_MOVW_ABS_NC
:
13193 case R_ARM_THM_MOVT_ABS
:
13194 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
13196 value
|= bfd_get_16 (input_bfd
,
13197 contents
+ rel
->r_offset
+ 2);
13198 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
13199 | ((value
& 0x04000000) >> 15);
13200 addend
= (addend
^ 0x8000) - 0x8000;
13204 if (howto
->rightshift
13205 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
13208 /* xgettext:c-format */
13209 (_("%pB(%pA+%#" PRIx64
"): "
13210 "%s relocation against SEC_MERGE section"),
13211 input_bfd
, input_section
,
13212 (uint64_t) rel
->r_offset
, howto
->name
);
13216 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13218 /* Get the (signed) value from the instruction. */
13219 addend
= value
& howto
->src_mask
;
13220 if (addend
& ((howto
->src_mask
+ 1) >> 1))
13222 bfd_signed_vma mask
;
13225 mask
&= ~ howto
->src_mask
;
13233 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
13235 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
13237 /* Cases here must match those in the preceding
13238 switch statement. */
13241 case R_ARM_MOVW_ABS_NC
:
13242 case R_ARM_MOVT_ABS
:
13243 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
13244 | (addend
& 0xfff);
13245 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13248 case R_ARM_THM_MOVW_ABS_NC
:
13249 case R_ARM_THM_MOVT_ABS
:
13250 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
13251 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
13252 bfd_put_16 (input_bfd
, value
>> 16,
13253 contents
+ rel
->r_offset
);
13254 bfd_put_16 (input_bfd
, value
,
13255 contents
+ rel
->r_offset
+ 2);
13259 value
= (value
& ~ howto
->dst_mask
)
13260 | (addend
& howto
->dst_mask
);
13261 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13267 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
13271 bool warned
, ignored
;
13273 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
13274 r_symndx
, symtab_hdr
, sym_hashes
,
13275 h
, sec
, relocation
,
13276 unresolved_reloc
, warned
, ignored
);
13278 sym_type
= h
->type
;
13281 if (sec
!= NULL
&& discarded_section (sec
))
13282 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
13283 rel
, 1, relend
, howto
, 0, contents
);
13285 if (bfd_link_relocatable (info
))
13287 /* This is a relocatable link. We don't have to change
13288 anything, unless the reloc is against a section symbol,
13289 in which case we have to adjust according to where the
13290 section symbol winds up in the output section. */
13291 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13293 if (globals
->use_rel
)
13294 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
13295 howto
, (bfd_signed_vma
) sec
->output_offset
);
13297 rel
->r_addend
+= sec
->output_offset
;
13303 name
= h
->root
.root
.string
;
13306 name
= (bfd_elf_string_from_elf_section
13307 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
13308 if (name
== NULL
|| *name
== '\0')
13309 name
= bfd_section_name (sec
);
13312 if (r_symndx
!= STN_UNDEF
13313 && r_type
!= R_ARM_NONE
13315 || h
->root
.type
== bfd_link_hash_defined
13316 || h
->root
.type
== bfd_link_hash_defweak
)
13317 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
13320 ((sym_type
== STT_TLS
13321 /* xgettext:c-format */
13322 ? _("%pB(%pA+%#" PRIx64
"): %s used with TLS symbol %s")
13323 /* xgettext:c-format */
13324 : _("%pB(%pA+%#" PRIx64
"): %s used with non-TLS symbol %s")),
13327 (uint64_t) rel
->r_offset
,
13332 /* We call elf32_arm_final_link_relocate unless we're completely
13333 done, i.e., the relaxation produced the final output we want,
13334 and we won't let anybody mess with it. Also, we have to do
13335 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13336 both in relaxed and non-relaxed cases. */
13337 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
13338 || (IS_ARM_TLS_GNU_RELOC (r_type
)
13339 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
13340 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
13343 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
13344 contents
, rel
, h
== NULL
);
13345 /* This may have been marked unresolved because it came from
13346 a shared library. But we've just dealt with that. */
13347 unresolved_reloc
= 0;
13350 r
= bfd_reloc_continue
;
13352 if (r
== bfd_reloc_continue
)
13354 unsigned char branch_type
=
13355 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
13356 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
13358 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
13359 input_section
, contents
, rel
,
13360 relocation
, info
, sec
, name
,
13361 sym_type
, branch_type
, h
,
13366 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13367 because such sections are not SEC_ALLOC and thus ld.so will
13368 not process them. */
13369 if (unresolved_reloc
13370 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
13372 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
13373 rel
->r_offset
) != (bfd_vma
) -1)
13376 /* xgettext:c-format */
13377 (_("%pB(%pA+%#" PRIx64
"): "
13378 "unresolvable %s relocation against symbol `%s'"),
13381 (uint64_t) rel
->r_offset
,
13383 h
->root
.root
.string
);
13387 if (r
!= bfd_reloc_ok
)
13391 case bfd_reloc_overflow
:
13392 /* If the overflowing reloc was to an undefined symbol,
13393 we have already printed one error message and there
13394 is no point complaining again. */
13395 if (!h
|| h
->root
.type
!= bfd_link_hash_undefined
)
13396 (*info
->callbacks
->reloc_overflow
)
13397 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
13398 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
13401 case bfd_reloc_undefined
:
13402 (*info
->callbacks
->undefined_symbol
)
13403 (info
, name
, input_bfd
, input_section
, rel
->r_offset
, true);
13406 case bfd_reloc_outofrange
:
13407 error_message
= _("out of range");
13410 case bfd_reloc_notsupported
:
13411 error_message
= _("unsupported relocation");
13414 case bfd_reloc_dangerous
:
13415 /* error_message should already be set. */
13419 error_message
= _("unknown error");
13420 /* Fall through. */
13423 BFD_ASSERT (error_message
!= NULL
);
13424 (*info
->callbacks
->reloc_dangerous
)
13425 (info
, error_message
, input_bfd
, input_section
, rel
->r_offset
);
13434 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13435 adds the edit to the start of the list. (The list must be built in order of
13436 ascending TINDEX: the function's callers are primarily responsible for
13437 maintaining that condition). */
13440 add_unwind_table_edit (arm_unwind_table_edit
**head
,
13441 arm_unwind_table_edit
**tail
,
13442 arm_unwind_edit_type type
,
13443 asection
*linked_section
,
13444 unsigned int tindex
)
13446 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
13447 xmalloc (sizeof (arm_unwind_table_edit
));
13449 new_edit
->type
= type
;
13450 new_edit
->linked_section
= linked_section
;
13451 new_edit
->index
= tindex
;
13455 new_edit
->next
= NULL
;
13458 (*tail
)->next
= new_edit
;
13460 (*tail
) = new_edit
;
13463 (*head
) = new_edit
;
13467 new_edit
->next
= *head
;
13476 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
13478 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13481 adjust_exidx_size (asection
*exidx_sec
, int adjust
)
13485 if (!exidx_sec
->rawsize
)
13486 exidx_sec
->rawsize
= exidx_sec
->size
;
13488 bfd_set_section_size (exidx_sec
, exidx_sec
->size
+ adjust
);
13489 out_sec
= exidx_sec
->output_section
;
13490 /* Adjust size of output section. */
13491 bfd_set_section_size (out_sec
, out_sec
->size
+ adjust
);
13494 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13497 insert_cantunwind_after (asection
*text_sec
, asection
*exidx_sec
)
13499 struct _arm_elf_section_data
*exidx_arm_data
;
13501 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13502 add_unwind_table_edit
13503 (&exidx_arm_data
->u
.exidx
.unwind_edit_list
,
13504 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
13505 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
13507 exidx_arm_data
->additional_reloc_count
++;
13509 adjust_exidx_size (exidx_sec
, 8);
13512 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13513 made to those tables, such that:
13515 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13516 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13517 codes which have been inlined into the index).
13519 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13521 The edits are applied when the tables are written
13522 (in elf32_arm_write_section). */
13525 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
13526 unsigned int num_text_sections
,
13527 struct bfd_link_info
*info
,
13528 bool merge_exidx_entries
)
13531 unsigned int last_second_word
= 0, i
;
13532 asection
*last_exidx_sec
= NULL
;
13533 asection
*last_text_sec
= NULL
;
13534 int last_unwind_type
= -1;
13536 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
13538 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
13542 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
13544 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
13545 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
13547 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
13550 if (elf_sec
->linked_to
)
13552 Elf_Internal_Shdr
*linked_hdr
13553 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
13554 struct _arm_elf_section_data
*linked_sec_arm_data
13555 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
13557 if (linked_sec_arm_data
== NULL
)
13560 /* Link this .ARM.exidx section back from the text section it
13562 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
13567 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
13568 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13569 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13571 for (i
= 0; i
< num_text_sections
; i
++)
13573 asection
*sec
= text_section_order
[i
];
13574 asection
*exidx_sec
;
13575 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
13576 struct _arm_elf_section_data
*exidx_arm_data
;
13577 bfd_byte
*contents
= NULL
;
13578 int deleted_exidx_bytes
= 0;
13580 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
13581 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
13582 Elf_Internal_Shdr
*hdr
;
13585 if (arm_data
== NULL
)
13588 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
13589 if (exidx_sec
== NULL
)
13591 /* Section has no unwind data. */
13592 if (last_unwind_type
== 0 || !last_exidx_sec
)
13595 /* Ignore zero sized sections. */
13596 if (sec
->size
== 0)
13599 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13600 last_unwind_type
= 0;
13604 /* Skip /DISCARD/ sections. */
13605 if (bfd_is_abs_section (exidx_sec
->output_section
))
13608 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
13609 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
13612 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13613 if (exidx_arm_data
== NULL
)
13616 ibfd
= exidx_sec
->owner
;
13618 if (hdr
->contents
!= NULL
)
13619 contents
= hdr
->contents
;
13620 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
13624 if (last_unwind_type
> 0)
13626 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
13627 /* Add cantunwind if first unwind item does not match section
13629 if (first_word
!= sec
->vma
)
13631 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13632 last_unwind_type
= 0;
13636 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
13638 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
13642 /* An EXIDX_CANTUNWIND entry. */
13643 if (second_word
== 1)
13645 if (last_unwind_type
== 0)
13649 /* Inlined unwinding data. Merge if equal to previous. */
13650 else if ((second_word
& 0x80000000) != 0)
13652 if (merge_exidx_entries
13653 && last_second_word
== second_word
&& last_unwind_type
== 1)
13656 last_second_word
= second_word
;
13658 /* Normal table entry. In theory we could merge these too,
13659 but duplicate entries are likely to be much less common. */
13663 if (elide
&& !bfd_link_relocatable (info
))
13665 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
13666 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
13668 deleted_exidx_bytes
+= 8;
13671 last_unwind_type
= unwind_type
;
13674 /* Free contents if we allocated it ourselves. */
13675 if (contents
!= hdr
->contents
)
13678 /* Record edits to be applied later (in elf32_arm_write_section). */
13679 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
13680 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
13682 if (deleted_exidx_bytes
> 0)
13683 adjust_exidx_size (exidx_sec
, - deleted_exidx_bytes
);
13685 last_exidx_sec
= exidx_sec
;
13686 last_text_sec
= sec
;
13689 /* Add terminating CANTUNWIND entry. */
13690 if (!bfd_link_relocatable (info
) && last_exidx_sec
13691 && last_unwind_type
!= 0)
13692 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13698 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
13699 bfd
*ibfd
, const char *name
)
13701 asection
*sec
, *osec
;
13703 sec
= bfd_get_linker_section (ibfd
, name
);
13704 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
13707 osec
= sec
->output_section
;
13708 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
13711 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
13712 sec
->output_offset
, sec
->size
))
13719 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
13721 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
13722 asection
*sec
, *osec
;
13724 if (globals
== NULL
)
13727 /* Invoke the regular ELF backend linker to do all the work. */
13728 if (!bfd_elf_final_link (abfd
, info
))
13731 /* Process stub sections (eg BE8 encoding, ...). */
13732 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
13734 for (i
=0; i
<htab
->top_id
; i
++)
13736 sec
= htab
->stub_group
[i
].stub_sec
;
13737 /* Only process it once, in its link_sec slot. */
13738 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
13740 osec
= sec
->output_section
;
13741 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
13742 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
13743 sec
->output_offset
, sec
->size
))
13748 /* Write out any glue sections now that we have created all the
13750 if (globals
->bfd_of_glue_owner
!= NULL
)
13752 if (! elf32_arm_output_glue_section (info
, abfd
,
13753 globals
->bfd_of_glue_owner
,
13754 ARM2THUMB_GLUE_SECTION_NAME
))
13757 if (! elf32_arm_output_glue_section (info
, abfd
,
13758 globals
->bfd_of_glue_owner
,
13759 THUMB2ARM_GLUE_SECTION_NAME
))
13762 if (! elf32_arm_output_glue_section (info
, abfd
,
13763 globals
->bfd_of_glue_owner
,
13764 VFP11_ERRATUM_VENEER_SECTION_NAME
))
13767 if (! elf32_arm_output_glue_section (info
, abfd
,
13768 globals
->bfd_of_glue_owner
,
13769 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
13772 if (! elf32_arm_output_glue_section (info
, abfd
,
13773 globals
->bfd_of_glue_owner
,
13774 ARM_BX_GLUE_SECTION_NAME
))
13781 /* Return a best guess for the machine number based on the attributes. */
13783 static unsigned int
13784 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
13786 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
13790 case TAG_CPU_ARCH_PRE_V4
: return bfd_mach_arm_3M
;
13791 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
13792 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
13793 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
13795 case TAG_CPU_ARCH_V5TE
:
13799 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13800 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
13804 if (strcmp (name
, "IWMMXT2") == 0)
13805 return bfd_mach_arm_iWMMXt2
;
13807 if (strcmp (name
, "IWMMXT") == 0)
13808 return bfd_mach_arm_iWMMXt
;
13810 if (strcmp (name
, "XSCALE") == 0)
13814 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13815 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
13818 case 1: return bfd_mach_arm_iWMMXt
;
13819 case 2: return bfd_mach_arm_iWMMXt2
;
13820 default: return bfd_mach_arm_XScale
;
13825 return bfd_mach_arm_5TE
;
13828 case TAG_CPU_ARCH_V5TEJ
:
13829 return bfd_mach_arm_5TEJ
;
13830 case TAG_CPU_ARCH_V6
:
13831 return bfd_mach_arm_6
;
13832 case TAG_CPU_ARCH_V6KZ
:
13833 return bfd_mach_arm_6KZ
;
13834 case TAG_CPU_ARCH_V6T2
:
13835 return bfd_mach_arm_6T2
;
13836 case TAG_CPU_ARCH_V6K
:
13837 return bfd_mach_arm_6K
;
13838 case TAG_CPU_ARCH_V7
:
13839 return bfd_mach_arm_7
;
13840 case TAG_CPU_ARCH_V6_M
:
13841 return bfd_mach_arm_6M
;
13842 case TAG_CPU_ARCH_V6S_M
:
13843 return bfd_mach_arm_6SM
;
13844 case TAG_CPU_ARCH_V7E_M
:
13845 return bfd_mach_arm_7EM
;
13846 case TAG_CPU_ARCH_V8
:
13847 return bfd_mach_arm_8
;
13848 case TAG_CPU_ARCH_V8R
:
13849 return bfd_mach_arm_8R
;
13850 case TAG_CPU_ARCH_V8M_BASE
:
13851 return bfd_mach_arm_8M_BASE
;
13852 case TAG_CPU_ARCH_V8M_MAIN
:
13853 return bfd_mach_arm_8M_MAIN
;
13854 case TAG_CPU_ARCH_V8_1M_MAIN
:
13855 return bfd_mach_arm_8_1M_MAIN
;
13856 case TAG_CPU_ARCH_V9
:
13857 return bfd_mach_arm_9
;
13860 /* Force entry to be added for any new known Tag_CPU_arch value. */
13861 BFD_ASSERT (arch
> MAX_TAG_CPU_ARCH
);
13863 /* Unknown Tag_CPU_arch value. */
13864 return bfd_mach_arm_unknown
;
13868 /* Set the right machine number. */
13871 elf32_arm_object_p (bfd
*abfd
)
13875 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
13877 if (mach
== bfd_mach_arm_unknown
)
13879 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
13880 mach
= bfd_mach_arm_ep9312
;
13882 mach
= bfd_arm_get_mach_from_attributes (abfd
);
13885 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
13889 /* Function to keep ARM specific flags in the ELF header. */
13892 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
13894 if (elf_flags_init (abfd
)
13895 && elf_elfheader (abfd
)->e_flags
!= flags
)
13897 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
13899 if (flags
& EF_ARM_INTERWORK
)
13901 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13905 (_("warning: clearing the interworking flag of %pB due to outside request"),
13911 elf_elfheader (abfd
)->e_flags
= flags
;
13912 elf_flags_init (abfd
) = true;
13918 /* Copy backend specific data from one object module to another. */
13921 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
13924 flagword out_flags
;
13926 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
13929 in_flags
= elf_elfheader (ibfd
)->e_flags
;
13930 out_flags
= elf_elfheader (obfd
)->e_flags
;
13932 if (elf_flags_init (obfd
)
13933 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
13934 && in_flags
!= out_flags
)
13936 /* Cannot mix APCS26 and APCS32 code. */
13937 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
13940 /* Cannot mix float APCS and non-float APCS code. */
13941 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
13944 /* If the src and dest have different interworking flags
13945 then turn off the interworking bit. */
13946 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
13948 if (out_flags
& EF_ARM_INTERWORK
)
13950 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13953 in_flags
&= ~EF_ARM_INTERWORK
;
13956 /* Likewise for PIC, though don't warn for this case. */
13957 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
13958 in_flags
&= ~EF_ARM_PIC
;
13961 elf_elfheader (obfd
)->e_flags
= in_flags
;
13962 elf_flags_init (obfd
) = true;
13964 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
13967 /* Values for Tag_ABI_PCS_R9_use. */
13976 /* Values for Tag_ABI_PCS_RW_data. */
13979 AEABI_PCS_RW_data_absolute
,
13980 AEABI_PCS_RW_data_PCrel
,
13981 AEABI_PCS_RW_data_SBrel
,
13982 AEABI_PCS_RW_data_unused
13985 /* Values for Tag_ABI_enum_size. */
13991 AEABI_enum_forced_wide
13994 /* Determine whether an object attribute tag takes an integer, a
13998 elf32_arm_obj_attrs_arg_type (int tag
)
14000 if (tag
== Tag_compatibility
)
14001 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
14002 else if (tag
== Tag_nodefaults
)
14003 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
14004 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
14005 return ATTR_TYPE_FLAG_STR_VAL
;
14007 return ATTR_TYPE_FLAG_INT_VAL
;
14009 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
14012 /* The ABI defines that Tag_conformance should be emitted first, and that
14013 Tag_nodefaults should be second (if either is defined). This sets those
14014 two positions, and bumps up the position of all the remaining tags to
14017 elf32_arm_obj_attrs_order (int num
)
14019 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
14020 return Tag_conformance
;
14021 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
14022 return Tag_nodefaults
;
14023 if ((num
- 2) < Tag_nodefaults
)
14025 if ((num
- 1) < Tag_conformance
)
14030 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14032 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
14034 if ((tag
& 127) < 64)
14037 (_("%pB: unknown mandatory EABI object attribute %d"),
14039 bfd_set_error (bfd_error_bad_value
);
14045 (_("warning: %pB: unknown EABI object attribute %d"),
14051 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14052 Returns -1 if no architecture could be read. */
14055 get_secondary_compatible_arch (bfd
*abfd
)
14057 obj_attribute
*attr
=
14058 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
14060 /* Note: the tag and its argument below are uleb128 values, though
14061 currently-defined values fit in one byte for each. */
14063 && attr
->s
[0] == Tag_CPU_arch
14064 && (attr
->s
[1] & 128) != 128
14065 && attr
->s
[2] == 0)
14068 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14072 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14073 The tag is removed if ARCH is -1. */
14076 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
14078 obj_attribute
*attr
=
14079 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
14087 /* Note: the tag and its argument below are uleb128 values, though
14088 currently-defined values fit in one byte for each. */
14090 attr
->s
= (char *) bfd_alloc (abfd
, 3);
14091 attr
->s
[0] = Tag_CPU_arch
;
14096 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14100 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
14101 int newtag
, int secondary_compat
)
14103 #define T(X) TAG_CPU_ARCH_##X
14104 int tagl
, tagh
, result
;
14107 T(V6T2
), /* PRE_V4. */
14109 T(V6T2
), /* V4T. */
14110 T(V6T2
), /* V5T. */
14111 T(V6T2
), /* V5TE. */
14112 T(V6T2
), /* V5TEJ. */
14115 T(V6T2
) /* V6T2. */
14119 T(V6K
), /* PRE_V4. */
14123 T(V6K
), /* V5TE. */
14124 T(V6K
), /* V5TEJ. */
14126 T(V6KZ
), /* V6KZ. */
14132 T(V7
), /* PRE_V4. */
14137 T(V7
), /* V5TEJ. */
14150 T(V6K
), /* V5TE. */
14151 T(V6K
), /* V5TEJ. */
14153 T(V6KZ
), /* V6KZ. */
14157 T(V6_M
) /* V6_M. */
14159 const int v6s_m
[] =
14165 T(V6K
), /* V5TE. */
14166 T(V6K
), /* V5TEJ. */
14168 T(V6KZ
), /* V6KZ. */
14172 T(V6S_M
), /* V6_M. */
14173 T(V6S_M
) /* V6S_M. */
14175 const int v7e_m
[] =
14179 T(V7E_M
), /* V4T. */
14180 T(V7E_M
), /* V5T. */
14181 T(V7E_M
), /* V5TE. */
14182 T(V7E_M
), /* V5TEJ. */
14183 T(V7E_M
), /* V6. */
14184 T(V7E_M
), /* V6KZ. */
14185 T(V7E_M
), /* V6T2. */
14186 T(V7E_M
), /* V6K. */
14187 T(V7E_M
), /* V7. */
14188 T(V7E_M
), /* V6_M. */
14189 T(V7E_M
), /* V6S_M. */
14190 T(V7E_M
) /* V7E_M. */
14194 T(V8
), /* PRE_V4. */
14199 T(V8
), /* V5TEJ. */
14206 T(V8
), /* V6S_M. */
14207 T(V8
), /* V7E_M. */
14210 T(V8
), /* V8-M.BASE. */
14211 T(V8
), /* V8-M.MAIN. */
14215 T(V8
), /* V8.1-M.MAIN. */
14219 T(V8R
), /* PRE_V4. */
14223 T(V8R
), /* V5TE. */
14224 T(V8R
), /* V5TEJ. */
14226 T(V8R
), /* V6KZ. */
14227 T(V8R
), /* V6T2. */
14230 T(V8R
), /* V6_M. */
14231 T(V8R
), /* V6S_M. */
14232 T(V8R
), /* V7E_M. */
14236 const int v8m_baseline
[] =
14249 T(V8M_BASE
), /* V6_M. */
14250 T(V8M_BASE
), /* V6S_M. */
14254 T(V8M_BASE
) /* V8-M BASELINE. */
14256 const int v8m_mainline
[] =
14268 T(V8M_MAIN
), /* V7. */
14269 T(V8M_MAIN
), /* V6_M. */
14270 T(V8M_MAIN
), /* V6S_M. */
14271 T(V8M_MAIN
), /* V7E_M. */
14274 T(V8M_MAIN
), /* V8-M BASELINE. */
14275 T(V8M_MAIN
) /* V8-M MAINLINE. */
14277 const int v8_1m_mainline
[] =
14289 T(V8_1M_MAIN
), /* V7. */
14290 T(V8_1M_MAIN
), /* V6_M. */
14291 T(V8_1M_MAIN
), /* V6S_M. */
14292 T(V8_1M_MAIN
), /* V7E_M. */
14295 T(V8_1M_MAIN
), /* V8-M BASELINE. */
14296 T(V8_1M_MAIN
), /* V8-M MAINLINE. */
14297 -1, /* Unused (18). */
14298 -1, /* Unused (19). */
14299 -1, /* Unused (20). */
14300 T(V8_1M_MAIN
) /* V8.1-M MAINLINE. */
14304 T(V9
), /* PRE_V4. */
14309 T(V9
), /* V5TEJ. */
14316 T(V9
), /* V6S_M. */
14317 T(V9
), /* V7E_M. */
14320 T(V9
), /* V8-M.BASE. */
14321 T(V9
), /* V8-M.MAIN. */
14325 T(V9
), /* V8.1-M.MAIN. */
14328 const int v4t_plus_v6_m
[] =
14334 T(V5TE
), /* V5TE. */
14335 T(V5TEJ
), /* V5TEJ. */
14337 T(V6KZ
), /* V6KZ. */
14338 T(V6T2
), /* V6T2. */
14341 T(V6_M
), /* V6_M. */
14342 T(V6S_M
), /* V6S_M. */
14343 T(V7E_M
), /* V7E_M. */
14346 T(V8M_BASE
), /* V8-M BASELINE. */
14347 T(V8M_MAIN
), /* V8-M MAINLINE. */
14348 -1, /* Unused (18). */
14349 -1, /* Unused (19). */
14350 -1, /* Unused (20). */
14351 T(V8_1M_MAIN
), /* V8.1-M MAINLINE. */
14353 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
14355 const int *comb
[] =
14372 /* Pseudo-architecture. */
14376 /* Check we've not got a higher architecture than we know about. */
14378 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
14380 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd
);
14384 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14386 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
14387 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
14388 oldtag
= T(V4T_PLUS_V6_M
);
14390 /* And override the new tag if we have a Tag_also_compatible_with on the
14393 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
14394 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
14395 newtag
= T(V4T_PLUS_V6_M
);
14397 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
14398 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
14400 /* Architectures before V6KZ add features monotonically. */
14401 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
14404 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
14406 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14407 as the canonical version. */
14408 if (result
== T(V4T_PLUS_V6_M
))
14411 *secondary_compat_out
= T(V6_M
);
14414 *secondary_compat_out
= -1;
14418 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14419 ibfd
, oldtag
, newtag
);
14427 /* Query attributes object to see if integer divide instructions may be
14428 present in an object. */
14430 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
14432 int arch
= attr
[Tag_CPU_arch
].i
;
14433 int profile
= attr
[Tag_CPU_arch_profile
].i
;
14435 switch (attr
[Tag_DIV_use
].i
)
14438 /* Integer divide allowed if instruction contained in archetecture. */
14439 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
14441 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
14447 /* Integer divide explicitly prohibited. */
14451 /* Unrecognised case - treat as allowing divide everywhere. */
14453 /* Integer divide allowed in ARM state. */
14458 /* Query attributes object to see if integer divide instructions are
14459 forbidden to be in the object. This is not the inverse of
14460 elf32_arm_attributes_accept_div. */
14462 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
14464 return attr
[Tag_DIV_use
].i
== 1;
14467 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14468 are conflicting attributes. */
14471 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14473 bfd
*obfd
= info
->output_bfd
;
14474 obj_attribute
*in_attr
;
14475 obj_attribute
*out_attr
;
14476 /* Some tags have 0 = don't care, 1 = strong requirement,
14477 2 = weak requirement. */
14478 static const int order_021
[3] = {0, 2, 1};
14480 bool result
= true;
14481 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
14483 /* Skip the linker stubs file. This preserves previous behavior
14484 of accepting unknown attributes in the first input file - but
14486 if (ibfd
->flags
& BFD_LINKER_CREATED
)
14489 /* Skip any input that hasn't attribute section.
14490 This enables to link object files without attribute section with
14492 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14495 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14497 /* This is the first object. Copy the attributes. */
14498 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14500 out_attr
= elf_known_obj_attributes_proc (obfd
);
14502 /* Use the Tag_null value to indicate the attributes have been
14506 /* We do not output objects with Tag_MPextension_use_legacy - we move
14507 the attribute's value to Tag_MPextension_use. */
14508 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14510 if (out_attr
[Tag_MPextension_use
].i
!= 0
14511 && out_attr
[Tag_MPextension_use_legacy
].i
14512 != out_attr
[Tag_MPextension_use
].i
)
14515 (_("Error: %pB has both the current and legacy "
14516 "Tag_MPextension_use attributes"), ibfd
);
14520 out_attr
[Tag_MPextension_use
] =
14521 out_attr
[Tag_MPextension_use_legacy
];
14522 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14523 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14529 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14530 out_attr
= elf_known_obj_attributes_proc (obfd
);
14531 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14532 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14534 /* Ignore mismatches if the object doesn't use floating point or is
14535 floating point ABI independent. */
14536 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14537 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14538 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14539 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14540 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14541 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14544 (_("error: %pB uses VFP register arguments, %pB does not"),
14545 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14546 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14551 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14553 /* Merge this attribute with existing attributes. */
14556 case Tag_CPU_raw_name
:
14558 /* These are merged after Tag_CPU_arch. */
14561 case Tag_ABI_optimization_goals
:
14562 case Tag_ABI_FP_optimization_goals
:
14563 /* Use the first value seen. */
14568 int secondary_compat
= -1, secondary_compat_out
= -1;
14569 unsigned int saved_out_attr
= out_attr
[i
].i
;
14571 static const char *name_table
[] =
14573 /* These aren't real CPU names, but we can't guess
14574 that from the architecture version alone. */
14591 "ARM v8-M.baseline",
14592 "ARM v8-M.mainline",
14596 "ARM v8.1-M.mainline",
14600 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14601 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14602 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14603 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14604 &secondary_compat_out
,
14608 /* Return with error if failed to merge. */
14609 if (arch_attr
== -1)
14612 out_attr
[i
].i
= arch_attr
;
14614 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14616 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14617 if (out_attr
[i
].i
== saved_out_attr
)
14618 ; /* Leave the names alone. */
14619 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14621 /* The output architecture has been changed to match the
14622 input architecture. Use the input names. */
14623 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14624 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14626 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14627 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14632 out_attr
[Tag_CPU_name
].s
= NULL
;
14633 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14636 /* If we still don't have a value for Tag_CPU_name,
14637 make one up now. Tag_CPU_raw_name remains blank. */
14638 if (out_attr
[Tag_CPU_name
].s
== NULL
14639 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14640 out_attr
[Tag_CPU_name
].s
=
14641 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
14645 case Tag_ARM_ISA_use
:
14646 case Tag_THUMB_ISA_use
:
14647 case Tag_WMMX_arch
:
14648 case Tag_Advanced_SIMD_arch
:
14649 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14650 case Tag_ABI_FP_rounding
:
14651 case Tag_ABI_FP_exceptions
:
14652 case Tag_ABI_FP_user_exceptions
:
14653 case Tag_ABI_FP_number_model
:
14654 case Tag_FP_HP_extension
:
14655 case Tag_CPU_unaligned_access
:
14657 case Tag_MPextension_use
:
14659 case Tag_PAC_extension
:
14660 case Tag_BTI_extension
:
14662 case Tag_PACRET_use
:
14663 /* Use the largest value specified. */
14664 if (in_attr
[i
].i
> out_attr
[i
].i
)
14665 out_attr
[i
].i
= in_attr
[i
].i
;
14668 case Tag_ABI_align_preserved
:
14669 case Tag_ABI_PCS_RO_data
:
14670 /* Use the smallest value specified. */
14671 if (in_attr
[i
].i
< out_attr
[i
].i
)
14672 out_attr
[i
].i
= in_attr
[i
].i
;
14675 case Tag_ABI_align_needed
:
14676 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
14677 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
14678 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
14680 /* This error message should be enabled once all non-conformant
14681 binaries in the toolchain have had the attributes set
14684 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14688 /* Fall through. */
14689 case Tag_ABI_FP_denormal
:
14690 case Tag_ABI_PCS_GOT_use
:
14691 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14692 value if greater than 2 (for future-proofing). */
14693 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
14694 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
14695 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
14696 out_attr
[i
].i
= in_attr
[i
].i
;
14699 case Tag_Virtualization_use
:
14700 /* The virtualization tag effectively stores two bits of
14701 information: the intended use of TrustZone (in bit 0), and the
14702 intended use of Virtualization (in bit 1). */
14703 if (out_attr
[i
].i
== 0)
14704 out_attr
[i
].i
= in_attr
[i
].i
;
14705 else if (in_attr
[i
].i
!= 0
14706 && in_attr
[i
].i
!= out_attr
[i
].i
)
14708 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
14713 (_("error: %pB: unable to merge virtualization attributes "
14721 case Tag_CPU_arch_profile
:
14722 if (out_attr
[i
].i
!= in_attr
[i
].i
)
14724 /* 0 will merge with anything.
14725 'A' and 'S' merge to 'A'.
14726 'R' and 'S' merge to 'R'.
14727 'M' and 'A|R|S' is an error. */
14728 if (out_attr
[i
].i
== 0
14729 || (out_attr
[i
].i
== 'S'
14730 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
14731 out_attr
[i
].i
= in_attr
[i
].i
;
14732 else if (in_attr
[i
].i
== 0
14733 || (in_attr
[i
].i
== 'S'
14734 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
14735 ; /* Do nothing. */
14739 (_("error: %pB: conflicting architecture profiles %c/%c"),
14741 in_attr
[i
].i
? in_attr
[i
].i
: '0',
14742 out_attr
[i
].i
? out_attr
[i
].i
: '0');
14748 case Tag_DSP_extension
:
14749 /* No need to change output value if any of:
14750 - pre (<=) ARMv5T input architecture (do not have DSP)
14751 - M input profile not ARMv7E-M and do not have DSP. */
14752 if (in_attr
[Tag_CPU_arch
].i
<= 3
14753 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
14754 && in_attr
[Tag_CPU_arch
].i
!= 13
14755 && in_attr
[i
].i
== 0))
14756 ; /* Do nothing. */
14757 /* Output value should be 0 if DSP part of architecture, ie.
14758 - post (>=) ARMv5te architecture output
14759 - A, R or S profile output or ARMv7E-M output architecture. */
14760 else if (out_attr
[Tag_CPU_arch
].i
>= 4
14761 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
14762 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
14763 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
14764 || out_attr
[Tag_CPU_arch
].i
== 13))
14766 /* Otherwise, DSP instructions are added and not part of output
14774 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14775 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14776 when it's 0. It might mean absence of FP hardware if
14777 Tag_FP_arch is zero. */
14779 #define VFP_VERSION_COUNT 9
14780 static const struct
14784 } vfp_versions
[VFP_VERSION_COUNT
] =
14800 /* If the output has no requirement about FP hardware,
14801 follow the requirement of the input. */
14802 if (out_attr
[i
].i
== 0)
14804 /* This assert is still reasonable, we shouldn't
14805 produce the suspicious build attribute
14806 combination (See below for in_attr). */
14807 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
14808 out_attr
[i
].i
= in_attr
[i
].i
;
14809 out_attr
[Tag_ABI_HardFP_use
].i
14810 = in_attr
[Tag_ABI_HardFP_use
].i
;
14813 /* If the input has no requirement about FP hardware, do
14815 else if (in_attr
[i
].i
== 0)
14817 /* We used to assert that Tag_ABI_HardFP_use was
14818 zero here, but we should never assert when
14819 consuming an object file that has suspicious
14820 build attributes. The single precision variant
14821 of 'no FP architecture' is still 'no FP
14822 architecture', so we just ignore the tag in this
14827 /* Both the input and the output have nonzero Tag_FP_arch.
14828 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14830 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14832 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
14833 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
14835 /* If the input and the output have different Tag_ABI_HardFP_use,
14836 the combination of them is 0 (implied by Tag_FP_arch). */
14837 else if (in_attr
[Tag_ABI_HardFP_use
].i
14838 != out_attr
[Tag_ABI_HardFP_use
].i
)
14839 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
14841 /* Now we can handle Tag_FP_arch. */
14843 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14844 pick the biggest. */
14845 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
14846 && in_attr
[i
].i
> out_attr
[i
].i
)
14848 out_attr
[i
] = in_attr
[i
];
14851 /* The output uses the superset of input features
14852 (ISA version) and registers. */
14853 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
14854 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
14855 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
14856 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
14857 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
14858 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
14859 /* This assumes all possible supersets are also a valid
14861 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
14863 if (regs
== vfp_versions
[newval
].regs
14864 && ver
== vfp_versions
[newval
].ver
)
14867 out_attr
[i
].i
= newval
;
14870 case Tag_PCS_config
:
14871 if (out_attr
[i
].i
== 0)
14872 out_attr
[i
].i
= in_attr
[i
].i
;
14873 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
14875 /* It's sometimes ok to mix different configs, so this is only
14878 (_("warning: %pB: conflicting platform configuration"), ibfd
);
14881 case Tag_ABI_PCS_R9_use
:
14882 if (in_attr
[i
].i
!= out_attr
[i
].i
14883 && out_attr
[i
].i
!= AEABI_R9_unused
14884 && in_attr
[i
].i
!= AEABI_R9_unused
)
14887 (_("error: %pB: conflicting use of R9"), ibfd
);
14890 if (out_attr
[i
].i
== AEABI_R9_unused
)
14891 out_attr
[i
].i
= in_attr
[i
].i
;
14893 case Tag_ABI_PCS_RW_data
:
14894 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
14895 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
14896 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
14899 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14903 /* Use the smallest value specified. */
14904 if (in_attr
[i
].i
< out_attr
[i
].i
)
14905 out_attr
[i
].i
= in_attr
[i
].i
;
14907 case Tag_ABI_PCS_wchar_t
:
14908 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
14909 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
14912 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14913 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
14915 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
14916 out_attr
[i
].i
= in_attr
[i
].i
;
14918 case Tag_ABI_enum_size
:
14919 if (in_attr
[i
].i
!= AEABI_enum_unused
)
14921 if (out_attr
[i
].i
== AEABI_enum_unused
14922 || out_attr
[i
].i
== AEABI_enum_forced_wide
)
14924 /* The existing object is compatible with anything.
14925 Use whatever requirements the new object has. */
14926 out_attr
[i
].i
= in_attr
[i
].i
;
14928 else if (in_attr
[i
].i
!= AEABI_enum_forced_wide
14929 && out_attr
[i
].i
!= in_attr
[i
].i
14930 && !elf_arm_tdata (obfd
)->no_enum_size_warning
)
14932 static const char *aeabi_enum_names
[] =
14933 { "", "variable-size", "32-bit", "" };
14934 const char *in_name
=
14935 in_attr
[i
].i
< ARRAY_SIZE (aeabi_enum_names
)
14936 ? aeabi_enum_names
[in_attr
[i
].i
]
14938 const char *out_name
=
14939 out_attr
[i
].i
< ARRAY_SIZE (aeabi_enum_names
)
14940 ? aeabi_enum_names
[out_attr
[i
].i
]
14943 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14944 ibfd
, in_name
, out_name
);
14948 case Tag_ABI_VFP_args
:
14951 case Tag_ABI_WMMX_args
:
14952 if (in_attr
[i
].i
!= out_attr
[i
].i
)
14955 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14960 case Tag_compatibility
:
14961 /* Merged in target-independent code. */
14963 case Tag_ABI_HardFP_use
:
14964 /* This is handled along with Tag_FP_arch. */
14966 case Tag_ABI_FP_16bit_format
:
14967 if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= 0)
14969 if (in_attr
[i
].i
!= out_attr
[i
].i
)
14972 (_("error: fp16 format mismatch between %pB and %pB"),
14977 if (in_attr
[i
].i
!= 0)
14978 out_attr
[i
].i
= in_attr
[i
].i
;
14982 /* A value of zero on input means that the divide instruction may
14983 be used if available in the base architecture as specified via
14984 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14985 the user did not want divide instructions. A value of 2
14986 explicitly means that divide instructions were allowed in ARM
14987 and Thumb state. */
14988 if (in_attr
[i
].i
== out_attr
[i
].i
)
14989 /* Do nothing. */ ;
14990 else if (elf32_arm_attributes_forbid_div (in_attr
)
14991 && !elf32_arm_attributes_accept_div (out_attr
))
14993 else if (elf32_arm_attributes_forbid_div (out_attr
)
14994 && elf32_arm_attributes_accept_div (in_attr
))
14995 out_attr
[i
].i
= in_attr
[i
].i
;
14996 else if (in_attr
[i
].i
== 2)
14997 out_attr
[i
].i
= in_attr
[i
].i
;
15000 case Tag_MPextension_use_legacy
:
15001 /* We don't output objects with Tag_MPextension_use_legacy - we
15002 move the value to Tag_MPextension_use. */
15003 if (in_attr
[i
].i
!= 0 && in_attr
[Tag_MPextension_use
].i
!= 0)
15005 if (in_attr
[Tag_MPextension_use
].i
!= in_attr
[i
].i
)
15008 (_("%pB has both the current and legacy "
15009 "Tag_MPextension_use attributes"),
15015 if (in_attr
[i
].i
> out_attr
[Tag_MPextension_use
].i
)
15016 out_attr
[Tag_MPextension_use
] = in_attr
[i
];
15020 case Tag_nodefaults
:
15021 /* This tag is set if it exists, but the value is unused (and is
15022 typically zero). We don't actually need to do anything here -
15023 the merge happens automatically when the type flags are merged
15026 case Tag_also_compatible_with
:
15027 /* Already done in Tag_CPU_arch. */
15029 case Tag_conformance
:
15030 /* Keep the attribute if it matches. Throw it away otherwise.
15031 No attribute means no claim to conform. */
15032 if (!in_attr
[i
].s
|| !out_attr
[i
].s
15033 || strcmp (in_attr
[i
].s
, out_attr
[i
].s
) != 0)
15034 out_attr
[i
].s
= NULL
;
15039 = result
&& _bfd_elf_merge_unknown_attribute_low (ibfd
, obfd
, i
);
15042 /* If out_attr was copied from in_attr then it won't have a type yet. */
15043 if (in_attr
[i
].type
&& !out_attr
[i
].type
)
15044 out_attr
[i
].type
= in_attr
[i
].type
;
15047 /* Merge Tag_compatibility attributes and any common GNU ones. */
15048 if (!_bfd_elf_merge_object_attributes (ibfd
, info
))
15051 /* Check for any attributes not known on ARM. */
15052 result
&= _bfd_elf_merge_unknown_attribute_list (ibfd
, obfd
);
15058 /* Return TRUE if the two EABI versions are incompatible. */
15061 elf32_arm_versions_compatible (unsigned iver
, unsigned over
)
15063 /* v4 and v5 are the same spec before and after it was released,
15064 so allow mixing them. */
15065 if ((iver
== EF_ARM_EABI_VER4
&& over
== EF_ARM_EABI_VER5
)
15066 || (iver
== EF_ARM_EABI_VER5
&& over
== EF_ARM_EABI_VER4
))
15069 return (iver
== over
);
15072 /* Merge backend specific data from an object file to the output
15073 object file when linking. */
15076 elf32_arm_merge_private_bfd_data (bfd
*, struct bfd_link_info
*);
15078 /* Display the flags field. */
15081 elf32_arm_print_private_bfd_data (bfd
*abfd
, void * ptr
)
15083 FILE * file
= (FILE *) ptr
;
15084 unsigned long flags
;
15086 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
15088 /* Print normal ELF private data. */
15089 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
15091 flags
= elf_elfheader (abfd
)->e_flags
;
15092 /* Ignore init flag - it may not be set, despite the flags field
15093 containing valid data. */
15095 fprintf (file
, _("private flags = 0x%lx:"), elf_elfheader (abfd
)->e_flags
);
15097 switch (EF_ARM_EABI_VERSION (flags
))
15099 case EF_ARM_EABI_UNKNOWN
:
15100 /* The following flag bits are GNU extensions and not part of the
15101 official ARM ELF extended ABI. Hence they are only decoded if
15102 the EABI version is not set. */
15103 if (flags
& EF_ARM_INTERWORK
)
15104 fprintf (file
, _(" [interworking enabled]"));
15106 if (flags
& EF_ARM_APCS_26
)
15107 fprintf (file
, " [APCS-26]");
15109 fprintf (file
, " [APCS-32]");
15111 if (flags
& EF_ARM_VFP_FLOAT
)
15112 fprintf (file
, _(" [VFP float format]"));
15113 else if (flags
& EF_ARM_MAVERICK_FLOAT
)
15114 fprintf (file
, _(" [Maverick float format]"));
15116 fprintf (file
, _(" [FPA float format]"));
15118 if (flags
& EF_ARM_APCS_FLOAT
)
15119 fprintf (file
, _(" [floats passed in float registers]"));
15121 if (flags
& EF_ARM_PIC
)
15122 fprintf (file
, _(" [position independent]"));
15124 if (flags
& EF_ARM_NEW_ABI
)
15125 fprintf (file
, _(" [new ABI]"));
15127 if (flags
& EF_ARM_OLD_ABI
)
15128 fprintf (file
, _(" [old ABI]"));
15130 if (flags
& EF_ARM_SOFT_FLOAT
)
15131 fprintf (file
, _(" [software FP]"));
15133 flags
&= ~(EF_ARM_INTERWORK
| EF_ARM_APCS_26
| EF_ARM_APCS_FLOAT
15134 | EF_ARM_PIC
| EF_ARM_NEW_ABI
| EF_ARM_OLD_ABI
15135 | EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
15136 | EF_ARM_MAVERICK_FLOAT
);
15139 case EF_ARM_EABI_VER1
:
15140 fprintf (file
, _(" [Version1 EABI]"));
15142 if (flags
& EF_ARM_SYMSARESORTED
)
15143 fprintf (file
, _(" [sorted symbol table]"));
15145 fprintf (file
, _(" [unsorted symbol table]"));
15147 flags
&= ~ EF_ARM_SYMSARESORTED
;
15150 case EF_ARM_EABI_VER2
:
15151 fprintf (file
, _(" [Version2 EABI]"));
15153 if (flags
& EF_ARM_SYMSARESORTED
)
15154 fprintf (file
, _(" [sorted symbol table]"));
15156 fprintf (file
, _(" [unsorted symbol table]"));
15158 if (flags
& EF_ARM_DYNSYMSUSESEGIDX
)
15159 fprintf (file
, _(" [dynamic symbols use segment index]"));
15161 if (flags
& EF_ARM_MAPSYMSFIRST
)
15162 fprintf (file
, _(" [mapping symbols precede others]"));
15164 flags
&= ~(EF_ARM_SYMSARESORTED
| EF_ARM_DYNSYMSUSESEGIDX
15165 | EF_ARM_MAPSYMSFIRST
);
15168 case EF_ARM_EABI_VER3
:
15169 fprintf (file
, _(" [Version3 EABI]"));
15172 case EF_ARM_EABI_VER4
:
15173 fprintf (file
, _(" [Version4 EABI]"));
15176 case EF_ARM_EABI_VER5
:
15177 fprintf (file
, _(" [Version5 EABI]"));
15179 if (flags
& EF_ARM_ABI_FLOAT_SOFT
)
15180 fprintf (file
, _(" [soft-float ABI]"));
15182 if (flags
& EF_ARM_ABI_FLOAT_HARD
)
15183 fprintf (file
, _(" [hard-float ABI]"));
15185 flags
&= ~(EF_ARM_ABI_FLOAT_SOFT
| EF_ARM_ABI_FLOAT_HARD
);
15188 if (flags
& EF_ARM_BE8
)
15189 fprintf (file
, _(" [BE8]"));
15191 if (flags
& EF_ARM_LE8
)
15192 fprintf (file
, _(" [LE8]"));
15194 flags
&= ~(EF_ARM_LE8
| EF_ARM_BE8
);
15198 fprintf (file
, _(" <EABI version unrecognised>"));
15202 flags
&= ~ EF_ARM_EABIMASK
;
15204 if (flags
& EF_ARM_RELEXEC
)
15205 fprintf (file
, _(" [relocatable executable]"));
15207 if (flags
& EF_ARM_PIC
)
15208 fprintf (file
, _(" [position independent]"));
15210 if (elf_elfheader (abfd
)->e_ident
[EI_OSABI
] == ELFOSABI_ARM_FDPIC
)
15211 fprintf (file
, _(" [FDPIC ABI supplement]"));
15213 flags
&= ~ (EF_ARM_RELEXEC
| EF_ARM_PIC
);
15216 fprintf (file
, _(" <Unrecognised flag bits set>"));
15218 fputc ('\n', file
);
15224 elf32_arm_get_symbol_type (Elf_Internal_Sym
* elf_sym
, int type
)
15226 switch (ELF_ST_TYPE (elf_sym
->st_info
))
15228 case STT_ARM_TFUNC
:
15229 return ELF_ST_TYPE (elf_sym
->st_info
);
15231 case STT_ARM_16BIT
:
15232 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15233 This allows us to distinguish between data used by Thumb instructions
15234 and non-data (which is probably code) inside Thumb regions of an
15236 if (type
!= STT_OBJECT
&& type
!= STT_TLS
)
15237 return ELF_ST_TYPE (elf_sym
->st_info
);
15248 elf32_arm_gc_mark_hook (asection
*sec
,
15249 struct bfd_link_info
*info
,
15250 Elf_Internal_Rela
*rel
,
15251 struct elf_link_hash_entry
*h
,
15252 Elf_Internal_Sym
*sym
)
15255 switch (ELF32_R_TYPE (rel
->r_info
))
15257 case R_ARM_GNU_VTINHERIT
:
15258 case R_ARM_GNU_VTENTRY
:
15262 return _bfd_elf_gc_mark_hook (sec
, info
, rel
, h
, sym
);
15265 /* Look through the relocs for a section during the first phase. */
15268 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
15269 asection
*sec
, const Elf_Internal_Rela
*relocs
)
15271 Elf_Internal_Shdr
*symtab_hdr
;
15272 struct elf_link_hash_entry
**sym_hashes
;
15273 const Elf_Internal_Rela
*rel
;
15274 const Elf_Internal_Rela
*rel_end
;
15277 struct elf32_arm_link_hash_table
*htab
;
15279 bool may_become_dynamic_p
;
15280 bool may_need_local_target_p
;
15281 unsigned long nsyms
;
15283 if (bfd_link_relocatable (info
))
15286 BFD_ASSERT (is_arm_elf (abfd
));
15288 htab
= elf32_arm_hash_table (info
);
15294 /* Create dynamic sections for relocatable executables so that we can
15295 copy relocations. */
15296 if (htab
->root
.is_relocatable_executable
15297 && ! htab
->root
.dynamic_sections_created
)
15299 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
15303 if (htab
->root
.dynobj
== NULL
)
15304 htab
->root
.dynobj
= abfd
;
15305 if (!create_ifunc_sections (info
))
15308 dynobj
= htab
->root
.dynobj
;
15310 symtab_hdr
= & elf_symtab_hdr (abfd
);
15311 sym_hashes
= elf_sym_hashes (abfd
);
15312 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
15314 rel_end
= relocs
+ sec
->reloc_count
;
15315 for (rel
= relocs
; rel
< rel_end
; rel
++)
15317 Elf_Internal_Sym
*isym
;
15318 struct elf_link_hash_entry
*h
;
15319 struct elf32_arm_link_hash_entry
*eh
;
15320 unsigned int r_symndx
;
15323 r_symndx
= ELF32_R_SYM (rel
->r_info
);
15324 r_type
= ELF32_R_TYPE (rel
->r_info
);
15325 r_type
= arm_real_reloc_type (htab
, r_type
);
15327 if (r_symndx
>= nsyms
15328 /* PR 9934: It is possible to have relocations that do not
15329 refer to symbols, thus it is also possible to have an
15330 object file containing relocations but no symbol table. */
15331 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
15333 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd
,
15342 if (r_symndx
< symtab_hdr
->sh_info
)
15344 /* A local symbol. */
15345 isym
= bfd_sym_from_r_symndx (&htab
->root
.sym_cache
,
15352 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
15353 while (h
->root
.type
== bfd_link_hash_indirect
15354 || h
->root
.type
== bfd_link_hash_warning
)
15355 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
15359 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15361 call_reloc_p
= false;
15362 may_become_dynamic_p
= false;
15363 may_need_local_target_p
= false;
15365 /* Could be done earlier, if h were already available. */
15366 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
15369 case R_ARM_GOTOFFFUNCDESC
:
15373 if (!elf32_arm_allocate_local_sym_info (abfd
))
15375 if (r_symndx
>= elf32_arm_num_entries (abfd
))
15377 elf32_arm_local_fdpic_cnts (abfd
) [r_symndx
].gotofffuncdesc_cnt
+= 1;
15378 elf32_arm_local_fdpic_cnts (abfd
) [r_symndx
].funcdesc_offset
= -1;
15382 eh
->fdpic_cnts
.gotofffuncdesc_cnt
++;
15387 case R_ARM_GOTFUNCDESC
:
15391 /* Such a relocation is not supposed to be generated
15392 by gcc on a static function. */
15393 /* Anyway if needed it could be handled. */
15398 eh
->fdpic_cnts
.gotfuncdesc_cnt
++;
15403 case R_ARM_FUNCDESC
:
15407 if (!elf32_arm_allocate_local_sym_info (abfd
))
15409 if (r_symndx
>= elf32_arm_num_entries (abfd
))
15411 elf32_arm_local_fdpic_cnts (abfd
) [r_symndx
].funcdesc_cnt
+= 1;
15412 elf32_arm_local_fdpic_cnts (abfd
) [r_symndx
].funcdesc_offset
= -1;
15416 eh
->fdpic_cnts
.funcdesc_cnt
++;
15422 case R_ARM_GOT_PREL
:
15423 case R_ARM_TLS_GD32
:
15424 case R_ARM_TLS_GD32_FDPIC
:
15425 case R_ARM_TLS_IE32
:
15426 case R_ARM_TLS_IE32_FDPIC
:
15427 case R_ARM_TLS_GOTDESC
:
15428 case R_ARM_TLS_DESCSEQ
:
15429 case R_ARM_THM_TLS_DESCSEQ
:
15430 case R_ARM_TLS_CALL
:
15431 case R_ARM_THM_TLS_CALL
:
15432 /* This symbol requires a global offset table entry. */
15434 int tls_type
, old_tls_type
;
15438 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
15439 case R_ARM_TLS_GD32_FDPIC
: tls_type
= GOT_TLS_GD
; break;
15441 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
15442 case R_ARM_TLS_IE32_FDPIC
: tls_type
= GOT_TLS_IE
; break;
15444 case R_ARM_TLS_GOTDESC
:
15445 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
15446 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
15447 tls_type
= GOT_TLS_GDESC
; break;
15449 default: tls_type
= GOT_NORMAL
; break;
15452 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
15453 info
->flags
|= DF_STATIC_TLS
;
15458 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
15462 /* This is a global offset table entry for a local symbol. */
15463 if (!elf32_arm_allocate_local_sym_info (abfd
))
15465 if (r_symndx
>= elf32_arm_num_entries (abfd
))
15467 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd
,
15472 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
15473 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
15476 /* If a variable is accessed with both tls methods, two
15477 slots may be created. */
15478 if (GOT_TLS_GD_ANY_P (old_tls_type
)
15479 && GOT_TLS_GD_ANY_P (tls_type
))
15480 tls_type
|= old_tls_type
;
15482 /* We will already have issued an error message if there
15483 is a TLS/non-TLS mismatch, based on the symbol
15484 type. So just combine any TLS types needed. */
15485 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
15486 && tls_type
!= GOT_NORMAL
)
15487 tls_type
|= old_tls_type
;
15489 /* If the symbol is accessed in both IE and GDESC
15490 method, we're able to relax. Turn off the GDESC flag,
15491 without messing up with any other kind of tls types
15492 that may be involved. */
15493 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
15494 tls_type
&= ~GOT_TLS_GDESC
;
15496 if (old_tls_type
!= tls_type
)
15499 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
15501 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
15504 /* Fall through. */
15506 case R_ARM_TLS_LDM32
:
15507 case R_ARM_TLS_LDM32_FDPIC
:
15508 if (r_type
== R_ARM_TLS_LDM32
|| r_type
== R_ARM_TLS_LDM32_FDPIC
)
15509 htab
->tls_ldm_got
.refcount
++;
15510 /* Fall through. */
15512 case R_ARM_GOTOFF32
:
15514 if (htab
->root
.sgot
== NULL
15515 && !create_got_section (htab
->root
.dynobj
, info
))
15524 case R_ARM_THM_CALL
:
15525 case R_ARM_THM_JUMP24
:
15526 case R_ARM_THM_JUMP19
:
15527 call_reloc_p
= true;
15528 may_need_local_target_p
= true;
15532 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15533 ldr __GOTT_INDEX__ offsets. */
15534 if (htab
->root
.target_os
!= is_vxworks
)
15536 may_need_local_target_p
= true;
15539 else goto jump_over
;
15541 /* Fall through. */
15543 case R_ARM_MOVW_ABS_NC
:
15544 case R_ARM_MOVT_ABS
:
15545 case R_ARM_THM_MOVW_ABS_NC
:
15546 case R_ARM_THM_MOVT_ABS
:
15547 if (bfd_link_pic (info
))
15550 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15551 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
15552 (h
) ? h
->root
.root
.string
: "a local symbol");
15553 bfd_set_error (bfd_error_bad_value
);
15557 /* Fall through. */
15559 case R_ARM_ABS32_NOI
:
15561 if (h
!= NULL
&& bfd_link_executable (info
))
15563 h
->pointer_equality_needed
= 1;
15565 /* Fall through. */
15567 case R_ARM_REL32_NOI
:
15568 case R_ARM_MOVW_PREL_NC
:
15569 case R_ARM_MOVT_PREL
:
15570 case R_ARM_THM_MOVW_PREL_NC
:
15571 case R_ARM_THM_MOVT_PREL
:
15573 /* Should the interworking branches be listed here? */
15574 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
15576 && (sec
->flags
& SEC_ALLOC
) != 0)
15579 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
15581 /* In shared libraries and relocatable executables,
15582 we treat local relative references as calls;
15583 see the related SYMBOL_CALLS_LOCAL code in
15584 allocate_dynrelocs. */
15585 call_reloc_p
= true;
15586 may_need_local_target_p
= true;
15589 /* We are creating a shared library or relocatable
15590 executable, and this is a reloc against a global symbol,
15591 or a non-PC-relative reloc against a local symbol.
15592 We may need to copy the reloc into the output. */
15593 may_become_dynamic_p
= true;
15596 may_need_local_target_p
= true;
15599 /* This relocation describes the C++ object vtable hierarchy.
15600 Reconstruct it for later use during GC. */
15601 case R_ARM_GNU_VTINHERIT
:
15602 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
15606 /* This relocation describes which C++ vtable entries are actually
15607 used. Record for later use during GC. */
15608 case R_ARM_GNU_VTENTRY
:
15609 if (!bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
15617 /* We may need a .plt entry if the function this reloc
15618 refers to is in a different object, regardless of the
15619 symbol's type. We can't tell for sure yet, because
15620 something later might force the symbol local. */
15622 else if (may_need_local_target_p
)
15623 /* If this reloc is in a read-only section, we might
15624 need a copy reloc. We can't check reliably at this
15625 stage whether the section is read-only, as input
15626 sections have not yet been mapped to output sections.
15627 Tentatively set the flag for now, and correct in
15628 adjust_dynamic_symbol. */
15629 h
->non_got_ref
= 1;
15632 if (may_need_local_target_p
15633 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
15635 union gotplt_union
*root_plt
;
15636 struct arm_plt_info
*arm_plt
;
15637 struct arm_local_iplt_info
*local_iplt
;
15641 root_plt
= &h
->plt
;
15642 arm_plt
= &eh
->plt
;
15646 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
15647 if (local_iplt
== NULL
)
15649 root_plt
= &local_iplt
->root
;
15650 arm_plt
= &local_iplt
->arm
;
15653 /* If the symbol is a function that doesn't bind locally,
15654 this relocation will need a PLT entry. */
15655 if (root_plt
->refcount
!= -1)
15656 root_plt
->refcount
+= 1;
15659 arm_plt
->noncall_refcount
++;
15661 /* It's too early to use htab->use_blx here, so we have to
15662 record possible blx references separately from
15663 relocs that definitely need a thumb stub. */
15665 if (r_type
== R_ARM_THM_CALL
)
15666 arm_plt
->maybe_thumb_refcount
+= 1;
15668 if (r_type
== R_ARM_THM_JUMP24
15669 || r_type
== R_ARM_THM_JUMP19
)
15670 arm_plt
->thumb_refcount
+= 1;
15673 if (may_become_dynamic_p
)
15675 struct elf_dyn_relocs
*p
, **head
;
15677 /* Create a reloc section in dynobj. */
15678 if (sreloc
== NULL
)
15680 sreloc
= _bfd_elf_make_dynamic_reloc_section
15681 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
15683 if (sreloc
== NULL
)
15687 /* If this is a global symbol, count the number of
15688 relocations we need for this symbol. */
15690 head
= &h
->dyn_relocs
;
15693 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
15699 if (p
== NULL
|| p
->sec
!= sec
)
15701 size_t amt
= sizeof *p
;
15703 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
15713 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
15716 if (h
== NULL
&& htab
->fdpic_p
&& !bfd_link_pic (info
)
15717 && r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_ABS32_NOI
)
15719 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15720 that will become rofixup. */
15721 /* This is due to the fact that we suppose all will become rofixup. */
15723 (_("FDPIC does not yet support %s relocation"
15724 " to become dynamic for executable"),
15725 elf32_arm_howto_table_1
[r_type
].name
);
15735 elf32_arm_update_relocs (asection
*o
,
15736 struct bfd_elf_section_reloc_data
*reldata
)
15738 void (*swap_in
) (bfd
*, const bfd_byte
*, Elf_Internal_Rela
*);
15739 void (*swap_out
) (bfd
*, const Elf_Internal_Rela
*, bfd_byte
*);
15740 const struct elf_backend_data
*bed
;
15741 _arm_elf_section_data
*eado
;
15742 struct bfd_link_order
*p
;
15743 bfd_byte
*erela_head
, *erela
;
15744 Elf_Internal_Rela
*irela_head
, *irela
;
15745 Elf_Internal_Shdr
*rel_hdr
;
15747 unsigned int count
;
15749 eado
= get_arm_elf_section_data (o
);
15751 if (!eado
|| eado
->elf
.this_hdr
.sh_type
!= SHT_ARM_EXIDX
)
15755 bed
= get_elf_backend_data (abfd
);
15756 rel_hdr
= reldata
->hdr
;
15758 if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rel
)
15760 swap_in
= bed
->s
->swap_reloc_in
;
15761 swap_out
= bed
->s
->swap_reloc_out
;
15763 else if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rela
)
15765 swap_in
= bed
->s
->swap_reloca_in
;
15766 swap_out
= bed
->s
->swap_reloca_out
;
15771 erela_head
= rel_hdr
->contents
;
15772 irela_head
= (Elf_Internal_Rela
*) bfd_zmalloc
15773 ((NUM_SHDR_ENTRIES (rel_hdr
) + 1) * sizeof (*irela_head
));
15775 erela
= erela_head
;
15776 irela
= irela_head
;
15779 for (p
= o
->map_head
.link_order
; p
; p
= p
->next
)
15781 if (p
->type
== bfd_section_reloc_link_order
15782 || p
->type
== bfd_symbol_reloc_link_order
)
15784 (*swap_in
) (abfd
, erela
, irela
);
15785 erela
+= rel_hdr
->sh_entsize
;
15789 else if (p
->type
== bfd_indirect_link_order
)
15791 struct bfd_elf_section_reloc_data
*input_reldata
;
15792 arm_unwind_table_edit
*edit_list
, *edit_tail
;
15793 _arm_elf_section_data
*eadi
;
15798 i
= p
->u
.indirect
.section
;
15800 eadi
= get_arm_elf_section_data (i
);
15801 edit_list
= eadi
->u
.exidx
.unwind_edit_list
;
15802 edit_tail
= eadi
->u
.exidx
.unwind_edit_tail
;
15803 offset
= i
->output_offset
;
15805 if (eadi
->elf
.rel
.hdr
&&
15806 eadi
->elf
.rel
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15807 input_reldata
= &eadi
->elf
.rel
;
15808 else if (eadi
->elf
.rela
.hdr
&&
15809 eadi
->elf
.rela
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15810 input_reldata
= &eadi
->elf
.rela
;
15816 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15818 arm_unwind_table_edit
*edit_node
, *edit_next
;
15820 bfd_vma reloc_index
;
15822 (*swap_in
) (abfd
, erela
, irela
);
15823 reloc_index
= (irela
->r_offset
- offset
) / 8;
15826 edit_node
= edit_list
;
15827 for (edit_next
= edit_list
;
15828 edit_next
&& edit_next
->index
<= reloc_index
;
15829 edit_next
= edit_node
->next
)
15832 edit_node
= edit_next
;
15835 if (edit_node
->type
!= DELETE_EXIDX_ENTRY
15836 || edit_node
->index
!= reloc_index
)
15838 irela
->r_offset
-= bias
* 8;
15843 erela
+= rel_hdr
->sh_entsize
;
15846 if (edit_tail
->type
== INSERT_EXIDX_CANTUNWIND_AT_END
)
15848 /* New relocation entity. */
15849 asection
*text_sec
= edit_tail
->linked_section
;
15850 asection
*text_out
= text_sec
->output_section
;
15851 bfd_vma exidx_offset
= offset
+ i
->size
- 8;
15853 irela
->r_addend
= 0;
15854 irela
->r_offset
= exidx_offset
;
15855 irela
->r_info
= ELF32_R_INFO
15856 (text_out
->target_index
, R_ARM_PREL31
);
15863 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15865 (*swap_in
) (abfd
, erela
, irela
);
15866 erela
+= rel_hdr
->sh_entsize
;
15870 count
+= NUM_SHDR_ENTRIES (input_reldata
->hdr
);
15875 reldata
->count
= count
;
15876 rel_hdr
->sh_size
= count
* rel_hdr
->sh_entsize
;
15878 erela
= erela_head
;
15879 irela
= irela_head
;
15882 (*swap_out
) (abfd
, irela
, erela
);
15883 erela
+= rel_hdr
->sh_entsize
;
15890 /* Hashes are no longer valid. */
15891 free (reldata
->hashes
);
15892 reldata
->hashes
= NULL
;
15895 /* Unwinding tables are not referenced directly. This pass marks them as
15896 required if the corresponding code section is marked. Similarly, ARMv8-M
15897 secure entry functions can only be referenced by SG veneers which are
15898 created after the GC process. They need to be marked in case they reside in
15899 their own section (as would be the case if code was compiled with
15900 -ffunction-sections). */
15903 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
15904 elf_gc_mark_hook_fn gc_mark_hook
)
15907 Elf_Internal_Shdr
**elf_shdrp
;
15908 asection
*cmse_sec
;
15909 obj_attribute
*out_attr
;
15910 Elf_Internal_Shdr
*symtab_hdr
;
15911 unsigned i
, sym_count
, ext_start
;
15912 const struct elf_backend_data
*bed
;
15913 struct elf_link_hash_entry
**sym_hashes
;
15914 struct elf32_arm_link_hash_entry
*cmse_hash
;
15915 bool again
, is_v8m
, first_bfd_browse
= true;
15916 bool debug_sec_need_to_be_marked
= false;
15919 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
15921 out_attr
= elf_known_obj_attributes_proc (info
->output_bfd
);
15922 is_v8m
= out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
15923 && out_attr
[Tag_CPU_arch_profile
].i
== 'M';
15925 /* Marking EH data may cause additional code sections to be marked,
15926 requiring multiple passes. */
15931 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
15935 if (! is_arm_elf (sub
))
15938 elf_shdrp
= elf_elfsections (sub
);
15939 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
15941 Elf_Internal_Shdr
*hdr
;
15943 hdr
= &elf_section_data (o
)->this_hdr
;
15944 if (hdr
->sh_type
== SHT_ARM_EXIDX
15946 && hdr
->sh_link
< elf_numsections (sub
)
15948 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
15951 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
15956 /* Mark section holding ARMv8-M secure entry functions. We mark all
15957 of them so no need for a second browsing. */
15958 if (is_v8m
&& first_bfd_browse
)
15960 sym_hashes
= elf_sym_hashes (sub
);
15961 bed
= get_elf_backend_data (sub
);
15962 symtab_hdr
= &elf_tdata (sub
)->symtab_hdr
;
15963 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
15964 ext_start
= symtab_hdr
->sh_info
;
15966 /* Scan symbols. */
15967 for (i
= ext_start
; i
< sym_count
; i
++)
15969 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
15971 /* Assume it is a special symbol. If not, cmse_scan will
15972 warn about it and user can do something about it. */
15973 if (startswith (cmse_hash
->root
.root
.root
.string
,
15976 cmse_sec
= cmse_hash
->root
.root
.u
.def
.section
;
15977 if (!cmse_sec
->gc_mark
15978 && !_bfd_elf_gc_mark (info
, cmse_sec
, gc_mark_hook
))
15980 /* The debug sections related to these secure entry
15981 functions are marked on enabling below flag. */
15982 debug_sec_need_to_be_marked
= true;
15986 if (debug_sec_need_to_be_marked
)
15988 /* Looping over all the sections of the object file containing
15989 Armv8-M secure entry functions and marking all the debug
15991 for (isec
= sub
->sections
; isec
!= NULL
; isec
= isec
->next
)
15993 /* If not a debug sections, skip it. */
15994 if (!isec
->gc_mark
&& (isec
->flags
& SEC_DEBUGGING
))
15995 isec
->gc_mark
= 1 ;
15997 debug_sec_need_to_be_marked
= false;
16001 first_bfd_browse
= false;
16007 /* Treat mapping symbols as special target symbols. */
16010 elf32_arm_is_target_special_symbol (bfd
* abfd ATTRIBUTE_UNUSED
, asymbol
* sym
)
16012 return bfd_is_arm_special_symbol_name (sym
->name
,
16013 BFD_ARM_SPECIAL_SYM_TYPE_ANY
);
16016 /* If the ELF symbol SYM might be a function in SEC, return the
16017 function size and set *CODE_OFF to the function's entry point,
16018 otherwise return zero. */
16020 static bfd_size_type
16021 elf32_arm_maybe_function_sym (const asymbol
*sym
, asection
*sec
,
16024 bfd_size_type size
;
16025 elf_symbol_type
* elf_sym
= (elf_symbol_type
*) sym
;
16027 if ((sym
->flags
& (BSF_SECTION_SYM
| BSF_FILE
| BSF_OBJECT
16028 | BSF_THREAD_LOCAL
| BSF_RELC
| BSF_SRELC
)) != 0
16029 || sym
->section
!= sec
)
16032 size
= (sym
->flags
& BSF_SYNTHETIC
) ? 0 : elf_sym
->internal_elf_sym
.st_size
;
16034 if (!(sym
->flags
& BSF_SYNTHETIC
))
16035 switch (ELF_ST_TYPE (elf_sym
->internal_elf_sym
.st_info
))
16038 /* Ignore symbols created by the annobin plugin for gcc and clang.
16039 These symbols are hidden, local, notype and have a size of 0. */
16041 && sym
->flags
& BSF_LOCAL
16042 && ELF_ST_VISIBILITY (elf_sym
->internal_elf_sym
.st_other
) == STV_HIDDEN
)
16044 /* Fall through. */
16046 case STT_ARM_TFUNC
:
16047 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16053 if ((sym
->flags
& BSF_LOCAL
)
16054 && bfd_is_arm_special_symbol_name (sym
->name
,
16055 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
16058 *code_off
= sym
->value
;
16060 /* Do not return 0 for the function's size. */
16061 return size
? size
: 1;
16066 elf32_arm_find_inliner_info (bfd
* abfd
,
16067 const char ** filename_ptr
,
16068 const char ** functionname_ptr
,
16069 unsigned int * line_ptr
)
16072 found
= _bfd_dwarf2_find_inliner_info (abfd
, filename_ptr
,
16073 functionname_ptr
, line_ptr
,
16074 & elf_tdata (abfd
)->dwarf2_find_line_info
);
16078 /* Adjust a symbol defined by a dynamic object and referenced by a
16079 regular object. The current definition is in some section of the
16080 dynamic object, but we're not including those sections. We have to
16081 change the definition to something the rest of the link can
16085 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
16086 struct elf_link_hash_entry
* h
)
16089 asection
*s
, *srel
;
16090 struct elf32_arm_link_hash_entry
* eh
;
16091 struct elf32_arm_link_hash_table
*globals
;
16093 globals
= elf32_arm_hash_table (info
);
16094 if (globals
== NULL
)
16097 dynobj
= elf_hash_table (info
)->dynobj
;
16099 /* Make sure we know what is going on here. */
16100 BFD_ASSERT (dynobj
!= NULL
16102 || h
->type
== STT_GNU_IFUNC
16106 && !h
->def_regular
)));
16108 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16110 /* If this is a function, put it in the procedure linkage table. We
16111 will fill in the contents of the procedure linkage table later,
16112 when we know the address of the .got section. */
16113 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
16115 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16116 symbol binds locally. */
16117 if (h
->plt
.refcount
<= 0
16118 || (h
->type
!= STT_GNU_IFUNC
16119 && (SYMBOL_CALLS_LOCAL (info
, h
)
16120 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
16121 && h
->root
.type
== bfd_link_hash_undefweak
))))
16123 /* This case can occur if we saw a PLT32 reloc in an input
16124 file, but the symbol was never referred to by a dynamic
16125 object, or if all references were garbage collected. In
16126 such a case, we don't actually need to build a procedure
16127 linkage table, and we can just do a PC24 reloc instead. */
16128 h
->plt
.offset
= (bfd_vma
) -1;
16129 eh
->plt
.thumb_refcount
= 0;
16130 eh
->plt
.maybe_thumb_refcount
= 0;
16131 eh
->plt
.noncall_refcount
= 0;
16139 /* It's possible that we incorrectly decided a .plt reloc was
16140 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16141 in check_relocs. We can't decide accurately between function
16142 and non-function syms in check-relocs; Objects loaded later in
16143 the link may change h->type. So fix it now. */
16144 h
->plt
.offset
= (bfd_vma
) -1;
16145 eh
->plt
.thumb_refcount
= 0;
16146 eh
->plt
.maybe_thumb_refcount
= 0;
16147 eh
->plt
.noncall_refcount
= 0;
16150 /* If this is a weak symbol, and there is a real definition, the
16151 processor independent code will have arranged for us to see the
16152 real definition first, and we can just use the same value. */
16153 if (h
->is_weakalias
)
16155 struct elf_link_hash_entry
*def
= weakdef (h
);
16156 BFD_ASSERT (def
->root
.type
== bfd_link_hash_defined
);
16157 h
->root
.u
.def
.section
= def
->root
.u
.def
.section
;
16158 h
->root
.u
.def
.value
= def
->root
.u
.def
.value
;
16162 /* If there are no non-GOT references, we do not need a copy
16164 if (!h
->non_got_ref
)
16167 /* This is a reference to a symbol defined by a dynamic object which
16168 is not a function. */
16170 /* If we are creating a shared library, we must presume that the
16171 only references to the symbol are via the global offset table.
16172 For such cases we need not do anything here; the relocations will
16173 be handled correctly by relocate_section. Relocatable executables
16174 can reference data in shared objects directly, so we don't need to
16175 do anything here. */
16176 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
16179 /* We must allocate the symbol in our .dynbss section, which will
16180 become part of the .bss section of the executable. There will be
16181 an entry for this symbol in the .dynsym section. The dynamic
16182 object will contain position independent code, so all references
16183 from the dynamic object to this symbol will go through the global
16184 offset table. The dynamic linker will use the .dynsym entry to
16185 determine the address it must put in the global offset table, so
16186 both the dynamic object and the regular object will refer to the
16187 same memory location for the variable. */
16188 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16189 linker to copy the initial value out of the dynamic object and into
16190 the runtime process image. We need to remember the offset into the
16191 .rel(a).bss section we are going to use. */
16192 if ((h
->root
.u
.def
.section
->flags
& SEC_READONLY
) != 0)
16194 s
= globals
->root
.sdynrelro
;
16195 srel
= globals
->root
.sreldynrelro
;
16199 s
= globals
->root
.sdynbss
;
16200 srel
= globals
->root
.srelbss
;
16202 if (info
->nocopyreloc
== 0
16203 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
16206 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16210 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
16213 /* Allocate space in .plt, .got and associated reloc sections for
16217 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
16219 struct bfd_link_info
*info
;
16220 struct elf32_arm_link_hash_table
*htab
;
16221 struct elf32_arm_link_hash_entry
*eh
;
16222 struct elf_dyn_relocs
*p
;
16224 if (h
->root
.type
== bfd_link_hash_indirect
)
16227 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16229 info
= (struct bfd_link_info
*) inf
;
16230 htab
= elf32_arm_hash_table (info
);
16234 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
16235 && h
->plt
.refcount
> 0)
16237 /* Make sure this symbol is output as a dynamic symbol.
16238 Undefined weak syms won't yet be marked as dynamic. */
16239 if (h
->dynindx
== -1 && !h
->forced_local
16240 && h
->root
.type
== bfd_link_hash_undefweak
)
16242 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16246 /* If the call in the PLT entry binds locally, the associated
16247 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16248 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16249 than the .plt section. */
16250 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
16253 if (eh
->plt
.noncall_refcount
== 0
16254 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16255 /* All non-call references can be resolved directly.
16256 This means that they can (and in some cases, must)
16257 resolve directly to the run-time target, rather than
16258 to the PLT. That in turns means that any .got entry
16259 would be equal to the .igot.plt entry, so there's
16260 no point having both. */
16261 h
->got
.refcount
= 0;
16264 if (bfd_link_pic (info
)
16266 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
16268 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
16270 /* If this symbol is not defined in a regular file, and we are
16271 not generating a shared library, then set the symbol to this
16272 location in the .plt. This is required to make function
16273 pointers compare as equal between the normal executable and
16274 the shared library. */
16275 if (! bfd_link_pic (info
)
16276 && !h
->def_regular
)
16278 h
->root
.u
.def
.section
= htab
->root
.splt
;
16279 h
->root
.u
.def
.value
= h
->plt
.offset
;
16281 /* Make sure the function is not marked as Thumb, in case
16282 it is the target of an ABS32 relocation, which will
16283 point to the PLT entry. */
16284 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16287 /* VxWorks executables have a second set of relocations for
16288 each PLT entry. They go in a separate relocation section,
16289 which is processed by the kernel loader. */
16290 if (htab
->root
.target_os
== is_vxworks
&& !bfd_link_pic (info
))
16292 /* There is a relocation for the initial PLT entry:
16293 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16294 if (h
->plt
.offset
== htab
->plt_header_size
)
16295 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
16297 /* There are two extra relocations for each subsequent
16298 PLT entry: an R_ARM_32 relocation for the GOT entry,
16299 and an R_ARM_32 relocation for the PLT entry. */
16300 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
16305 h
->plt
.offset
= (bfd_vma
) -1;
16311 h
->plt
.offset
= (bfd_vma
) -1;
16315 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16316 eh
->tlsdesc_got
= (bfd_vma
) -1;
16318 if (h
->got
.refcount
> 0)
16322 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
16325 /* Make sure this symbol is output as a dynamic symbol.
16326 Undefined weak syms won't yet be marked as dynamic. */
16327 if (htab
->root
.dynamic_sections_created
16328 && h
->dynindx
== -1
16329 && !h
->forced_local
16330 && h
->root
.type
== bfd_link_hash_undefweak
)
16332 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16336 s
= htab
->root
.sgot
;
16337 h
->got
.offset
= s
->size
;
16339 if (tls_type
== GOT_UNKNOWN
)
16342 if (tls_type
== GOT_NORMAL
)
16343 /* Non-TLS symbols need one GOT slot. */
16347 if (tls_type
& GOT_TLS_GDESC
)
16349 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16351 = (htab
->root
.sgotplt
->size
16352 - elf32_arm_compute_jump_table_size (htab
));
16353 htab
->root
.sgotplt
->size
+= 8;
16354 h
->got
.offset
= (bfd_vma
) -2;
16355 /* plt.got_offset needs to know there's a TLS_DESC
16356 reloc in the middle of .got.plt. */
16357 htab
->num_tls_desc
++;
16360 if (tls_type
& GOT_TLS_GD
)
16362 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16363 consecutive GOT slots. If the symbol is both GD
16364 and GDESC, got.offset may have been
16366 h
->got
.offset
= s
->size
;
16370 if (tls_type
& GOT_TLS_IE
)
16371 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16376 dyn
= htab
->root
.dynamic_sections_created
;
16379 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, bfd_link_pic (info
), h
)
16380 && (!bfd_link_pic (info
)
16381 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
16384 if (tls_type
!= GOT_NORMAL
16385 && (bfd_link_dll (info
) || indx
!= 0)
16386 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
16387 || h
->root
.type
!= bfd_link_hash_undefweak
))
16389 if (tls_type
& GOT_TLS_IE
)
16390 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16392 if (tls_type
& GOT_TLS_GD
)
16393 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16395 if (tls_type
& GOT_TLS_GDESC
)
16397 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
16398 /* GDESC needs a trampoline to jump to. */
16399 htab
->tls_trampoline
= -1;
16402 /* Only GD needs it. GDESC just emits one relocation per
16404 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
16405 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16407 else if (((indx
!= -1) || htab
->fdpic_p
)
16408 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
16410 if (htab
->root
.dynamic_sections_created
)
16411 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16412 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16414 else if (h
->type
== STT_GNU_IFUNC
16415 && eh
->plt
.noncall_refcount
== 0)
16416 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16417 they all resolve dynamically instead. Reserve room for the
16418 GOT entry's R_ARM_IRELATIVE relocation. */
16419 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
16420 else if (bfd_link_pic (info
)
16421 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
16422 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16423 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16424 else if (htab
->fdpic_p
&& tls_type
== GOT_NORMAL
)
16425 /* Reserve room for rofixup for FDPIC executable. */
16426 /* TLS relocs do not need space since they are completely
16428 htab
->srofixup
->size
+= 4;
16431 h
->got
.offset
= (bfd_vma
) -1;
16433 /* FDPIC support. */
16434 if (eh
->fdpic_cnts
.gotofffuncdesc_cnt
> 0)
16436 /* Symbol musn't be exported. */
16437 if (h
->dynindx
!= -1)
16440 /* We only allocate one function descriptor with its associated
16442 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16444 asection
*s
= htab
->root
.sgot
;
16446 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16448 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16449 if (bfd_link_pic (info
))
16450 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16452 htab
->srofixup
->size
+= 8;
16456 if (eh
->fdpic_cnts
.gotfuncdesc_cnt
> 0)
16458 asection
*s
= htab
->root
.sgot
;
16460 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16461 && !h
->forced_local
)
16462 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16465 if (h
->dynindx
== -1)
16467 /* We only allocate one function descriptor with its
16468 associated relocation. */
16469 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16472 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16474 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16476 if (bfd_link_pic (info
))
16477 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16479 htab
->srofixup
->size
+= 8;
16483 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16484 R_ARM_RELATIVE/rofixup relocation on it. */
16485 eh
->fdpic_cnts
.gotfuncdesc_offset
= s
->size
;
16487 if (h
->dynindx
== -1 && !bfd_link_pic (info
))
16488 htab
->srofixup
->size
+= 4;
16490 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16493 if (eh
->fdpic_cnts
.funcdesc_cnt
> 0)
16495 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16496 && !h
->forced_local
)
16497 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16500 if (h
->dynindx
== -1)
16502 /* We only allocate one function descriptor with its
16503 associated relocation. */
16504 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16506 asection
*s
= htab
->root
.sgot
;
16508 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16510 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16512 if (bfd_link_pic (info
))
16513 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16515 htab
->srofixup
->size
+= 8;
16518 if (h
->dynindx
== -1 && !bfd_link_pic (info
))
16520 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16521 htab
->srofixup
->size
+= 4 * eh
->fdpic_cnts
.funcdesc_cnt
;
16525 /* Will need one dynamic reloc per reference. will be either
16526 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16527 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
,
16528 eh
->fdpic_cnts
.funcdesc_cnt
);
16532 /* Allocate stubs for exported Thumb functions on v4t. */
16533 if (!htab
->use_blx
&& h
->dynindx
!= -1
16535 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
16536 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
16538 struct elf_link_hash_entry
* th
;
16539 struct bfd_link_hash_entry
* bh
;
16540 struct elf_link_hash_entry
* myh
;
16544 /* Create a new symbol to regist the real location of the function. */
16545 s
= h
->root
.u
.def
.section
;
16546 sprintf (name
, "__real_%s", h
->root
.root
.string
);
16547 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
16548 name
, BSF_GLOBAL
, s
,
16549 h
->root
.u
.def
.value
,
16550 NULL
, true, false, &bh
);
16552 myh
= (struct elf_link_hash_entry
*) bh
;
16553 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
16554 myh
->forced_local
= 1;
16555 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
16556 eh
->export_glue
= myh
;
16557 th
= record_arm_to_thumb_glue (info
, h
);
16558 /* Point the symbol at the stub. */
16559 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
16560 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16561 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
16562 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
16565 if (h
->dyn_relocs
== NULL
)
16568 /* In the shared -Bsymbolic case, discard space allocated for
16569 dynamic pc-relative relocs against symbols which turn out to be
16570 defined in regular objects. For the normal shared case, discard
16571 space for pc-relative relocs that have become local due to symbol
16572 visibility changes. */
16574 if (bfd_link_pic (info
)
16575 || htab
->root
.is_relocatable_executable
16578 /* Relocs that use pc_count are PC-relative forms, which will appear
16579 on something like ".long foo - ." or "movw REG, foo - .". We want
16580 calls to protected symbols to resolve directly to the function
16581 rather than going via the plt. If people want function pointer
16582 comparisons to work as expected then they should avoid writing
16583 assembly like ".long foo - .". */
16584 if (SYMBOL_CALLS_LOCAL (info
, h
))
16586 struct elf_dyn_relocs
**pp
;
16588 for (pp
= &h
->dyn_relocs
; (p
= *pp
) != NULL
; )
16590 p
->count
-= p
->pc_count
;
16599 if (htab
->root
.target_os
== is_vxworks
)
16601 struct elf_dyn_relocs
**pp
;
16603 for (pp
= &h
->dyn_relocs
; (p
= *pp
) != NULL
; )
16605 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
16612 /* Also discard relocs on undefined weak syms with non-default
16614 if (h
->dyn_relocs
!= NULL
16615 && h
->root
.type
== bfd_link_hash_undefweak
)
16617 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
16618 || UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
16619 h
->dyn_relocs
= NULL
;
16621 /* Make sure undefined weak symbols are output as a dynamic
16623 else if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16624 && !h
->forced_local
)
16626 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16631 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
16632 && h
->root
.type
== bfd_link_hash_new
)
16634 /* Output absolute symbols so that we can create relocations
16635 against them. For normal symbols we output a relocation
16636 against the section that contains them. */
16637 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16644 /* For the non-shared case, discard space for relocs against
16645 symbols which turn out to need copy relocs or are not
16648 if (!h
->non_got_ref
16649 && ((h
->def_dynamic
16650 && !h
->def_regular
)
16651 || (htab
->root
.dynamic_sections_created
16652 && (h
->root
.type
== bfd_link_hash_undefweak
16653 || h
->root
.type
== bfd_link_hash_undefined
))))
16655 /* Make sure this symbol is output as a dynamic symbol.
16656 Undefined weak syms won't yet be marked as dynamic. */
16657 if (h
->dynindx
== -1 && !h
->forced_local
16658 && h
->root
.type
== bfd_link_hash_undefweak
)
16660 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16664 /* If that succeeded, we know we'll be keeping all the
16666 if (h
->dynindx
!= -1)
16670 h
->dyn_relocs
= NULL
;
16675 /* Finally, allocate space. */
16676 for (p
= h
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16678 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
16680 if (h
->type
== STT_GNU_IFUNC
16681 && eh
->plt
.noncall_refcount
== 0
16682 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16683 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
16684 else if (h
->dynindx
!= -1
16685 && (!bfd_link_pic (info
) || !info
->symbolic
|| !h
->def_regular
))
16686 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
16687 else if (htab
->fdpic_p
&& !bfd_link_pic (info
))
16688 htab
->srofixup
->size
+= 4 * p
->count
;
16690 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
16697 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info
*info
,
16700 struct elf32_arm_link_hash_table
*globals
;
16702 globals
= elf32_arm_hash_table (info
);
16703 if (globals
== NULL
)
16706 globals
->byteswap_code
= byteswap_code
;
16709 /* Set the sizes of the dynamic sections. */
16712 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
16713 struct bfd_link_info
* info
)
16719 struct elf32_arm_link_hash_table
*htab
;
16721 htab
= elf32_arm_hash_table (info
);
16725 dynobj
= elf_hash_table (info
)->dynobj
;
16726 BFD_ASSERT (dynobj
!= NULL
);
16727 check_use_blx (htab
);
16729 if (elf_hash_table (info
)->dynamic_sections_created
)
16731 /* Set the contents of the .interp section to the interpreter. */
16732 if (bfd_link_executable (info
) && !info
->nointerp
)
16734 s
= bfd_get_linker_section (dynobj
, ".interp");
16735 BFD_ASSERT (s
!= NULL
);
16736 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
16737 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
16741 /* Set up .got offsets for local syms, and space for local dynamic
16743 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16745 bfd_signed_vma
*local_got
;
16746 bfd_signed_vma
*end_local_got
;
16747 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
16748 char *local_tls_type
;
16749 bfd_vma
*local_tlsdesc_gotent
;
16750 bfd_size_type locsymcount
;
16751 Elf_Internal_Shdr
*symtab_hdr
;
16753 unsigned int symndx
;
16754 struct fdpic_local
*local_fdpic_cnts
;
16756 if (! is_arm_elf (ibfd
))
16759 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
16761 struct elf_dyn_relocs
*p
;
16763 for (p
= (struct elf_dyn_relocs
*)
16764 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
16766 if (!bfd_is_abs_section (p
->sec
)
16767 && bfd_is_abs_section (p
->sec
->output_section
))
16769 /* Input section has been discarded, either because
16770 it is a copy of a linkonce section or due to
16771 linker script /DISCARD/, so we'll be discarding
16774 else if (htab
->root
.target_os
== is_vxworks
16775 && strcmp (p
->sec
->output_section
->name
,
16778 /* Relocations in vxworks .tls_vars sections are
16779 handled specially by the loader. */
16781 else if (p
->count
!= 0)
16783 srel
= elf_section_data (p
->sec
)->sreloc
;
16784 if (htab
->fdpic_p
&& !bfd_link_pic (info
))
16785 htab
->srofixup
->size
+= 4 * p
->count
;
16787 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
16788 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
16789 info
->flags
|= DF_TEXTREL
;
16794 local_got
= elf_local_got_refcounts (ibfd
);
16795 if (local_got
== NULL
)
16798 symtab_hdr
= & elf_symtab_hdr (ibfd
);
16799 locsymcount
= symtab_hdr
->sh_info
;
16800 end_local_got
= local_got
+ locsymcount
;
16801 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
16802 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
16803 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
16804 local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (ibfd
);
16806 s
= htab
->root
.sgot
;
16807 srel
= htab
->root
.srelgot
;
16808 for (; local_got
< end_local_got
;
16809 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
16810 ++local_tlsdesc_gotent
, ++symndx
, ++local_fdpic_cnts
)
16812 if (symndx
>= elf32_arm_num_entries (ibfd
))
16815 *local_tlsdesc_gotent
= (bfd_vma
) -1;
16816 local_iplt
= *local_iplt_ptr
;
16818 /* FDPIC support. */
16819 if (local_fdpic_cnts
->gotofffuncdesc_cnt
> 0)
16821 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16823 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16826 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16827 if (bfd_link_pic (info
))
16828 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16830 htab
->srofixup
->size
+= 8;
16834 if (local_fdpic_cnts
->funcdesc_cnt
> 0)
16836 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16838 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16841 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16842 if (bfd_link_pic (info
))
16843 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16845 htab
->srofixup
->size
+= 8;
16848 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16849 if (bfd_link_pic (info
))
16850 elf32_arm_allocate_dynrelocs (info
, srel
, local_fdpic_cnts
->funcdesc_cnt
);
16852 htab
->srofixup
->size
+= 4 * local_fdpic_cnts
->funcdesc_cnt
;
16855 if (local_iplt
!= NULL
)
16857 struct elf_dyn_relocs
*p
;
16859 if (local_iplt
->root
.refcount
> 0)
16861 elf32_arm_allocate_plt_entry (info
, true,
16864 if (local_iplt
->arm
.noncall_refcount
== 0)
16865 /* All references to the PLT are calls, so all
16866 non-call references can resolve directly to the
16867 run-time target. This means that the .got entry
16868 would be the same as the .igot.plt entry, so there's
16869 no point creating both. */
16874 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
16875 local_iplt
->root
.offset
= (bfd_vma
) -1;
16878 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16882 psrel
= elf_section_data (p
->sec
)->sreloc
;
16883 if (local_iplt
->arm
.noncall_refcount
== 0)
16884 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
16886 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
16889 if (*local_got
> 0)
16891 Elf_Internal_Sym
*isym
;
16893 *local_got
= s
->size
;
16894 if (*local_tls_type
& GOT_TLS_GD
)
16895 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16897 if (*local_tls_type
& GOT_TLS_GDESC
)
16899 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
16900 - elf32_arm_compute_jump_table_size (htab
);
16901 htab
->root
.sgotplt
->size
+= 8;
16902 *local_got
= (bfd_vma
) -2;
16903 /* plt.got_offset needs to know there's a TLS_DESC
16904 reloc in the middle of .got.plt. */
16905 htab
->num_tls_desc
++;
16907 if (*local_tls_type
& GOT_TLS_IE
)
16910 if (*local_tls_type
& GOT_NORMAL
)
16912 /* If the symbol is both GD and GDESC, *local_got
16913 may have been overwritten. */
16914 *local_got
= s
->size
;
16918 isym
= bfd_sym_from_r_symndx (&htab
->root
.sym_cache
, ibfd
,
16923 /* If all references to an STT_GNU_IFUNC PLT are calls,
16924 then all non-call references, including this GOT entry,
16925 resolve directly to the run-time target. */
16926 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
16927 && (local_iplt
== NULL
16928 || local_iplt
->arm
.noncall_refcount
== 0))
16929 elf32_arm_allocate_irelocs (info
, srel
, 1);
16930 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
|| htab
->fdpic_p
)
16932 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
)))
16933 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16934 else if (htab
->fdpic_p
&& *local_tls_type
& GOT_NORMAL
)
16935 htab
->srofixup
->size
+= 4;
16937 if ((bfd_link_pic (info
) || htab
->fdpic_p
)
16938 && *local_tls_type
& GOT_TLS_GDESC
)
16940 elf32_arm_allocate_dynrelocs (info
,
16941 htab
->root
.srelplt
, 1);
16942 htab
->tls_trampoline
= -1;
16947 *local_got
= (bfd_vma
) -1;
16951 if (htab
->tls_ldm_got
.refcount
> 0)
16953 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16954 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16955 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
16956 htab
->root
.sgot
->size
+= 8;
16957 if (bfd_link_pic (info
))
16958 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16961 htab
->tls_ldm_got
.offset
= -1;
16963 /* At the very end of the .rofixup section is a pointer to the GOT,
16964 reserve space for it. */
16965 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
16966 htab
->srofixup
->size
+= 4;
16968 /* Allocate global sym .plt and .got entries, and space for global
16969 sym dynamic relocs. */
16970 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
16972 /* Here we rummage through the found bfds to collect glue information. */
16973 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16975 if (! is_arm_elf (ibfd
))
16978 /* Initialise mapping tables for code/data. */
16979 bfd_elf32_arm_init_maps (ibfd
);
16981 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
16982 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
16983 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
16984 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd
);
16987 /* Allocate space for the glue sections now that we've sized them. */
16988 bfd_elf32_arm_allocate_interworking_sections (info
);
16990 /* For every jump slot reserved in the sgotplt, reloc_count is
16991 incremented. However, when we reserve space for TLS descriptors,
16992 it's not incremented, so in order to compute the space reserved
16993 for them, it suffices to multiply the reloc count by the jump
16995 if (htab
->root
.srelplt
)
16996 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size (htab
);
16998 if (htab
->tls_trampoline
)
17000 if (htab
->root
.splt
->size
== 0)
17001 htab
->root
.splt
->size
+= htab
->plt_header_size
;
17003 htab
->tls_trampoline
= htab
->root
.splt
->size
;
17004 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
17006 /* If we're not using lazy TLS relocations, don't generate the
17007 PLT and GOT entries they require. */
17008 if ((info
->flags
& DF_BIND_NOW
))
17009 htab
->root
.tlsdesc_plt
= 0;
17012 htab
->root
.tlsdesc_got
= htab
->root
.sgot
->size
;
17013 htab
->root
.sgot
->size
+= 4;
17015 htab
->root
.tlsdesc_plt
= htab
->root
.splt
->size
;
17016 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
17020 /* The check_relocs and adjust_dynamic_symbol entry points have
17021 determined the sizes of the various dynamic sections. Allocate
17022 memory for them. */
17024 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
17028 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
17031 /* It's OK to base decisions on the section name, because none
17032 of the dynobj section names depend upon the input files. */
17033 name
= bfd_section_name (s
);
17035 if (s
== htab
->root
.splt
)
17037 /* Remember whether there is a PLT. */
17040 else if (startswith (name
, ".rel"))
17044 /* Remember whether there are any reloc sections other
17045 than .rel(a).plt and .rela.plt.unloaded. */
17046 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
17049 /* We use the reloc_count field as a counter if we need
17050 to copy relocs into the output file. */
17051 s
->reloc_count
= 0;
17054 else if (s
!= htab
->root
.sgot
17055 && s
!= htab
->root
.sgotplt
17056 && s
!= htab
->root
.iplt
17057 && s
!= htab
->root
.igotplt
17058 && s
!= htab
->root
.sdynbss
17059 && s
!= htab
->root
.sdynrelro
17060 && s
!= htab
->srofixup
)
17062 /* It's not one of our sections, so don't allocate space. */
17068 /* If we don't need this section, strip it from the
17069 output file. This is mostly to handle .rel(a).bss and
17070 .rel(a).plt. We must create both sections in
17071 create_dynamic_sections, because they must be created
17072 before the linker maps input sections to output
17073 sections. The linker does that before
17074 adjust_dynamic_symbol is called, and it is that
17075 function which decides whether anything needs to go
17076 into these sections. */
17077 s
->flags
|= SEC_EXCLUDE
;
17081 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
17084 /* Allocate memory for the section contents. */
17085 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
17086 if (s
->contents
== NULL
)
17090 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd
, info
,
17094 /* Size sections even though they're not dynamic. We use it to setup
17095 _TLS_MODULE_BASE_, if needed. */
17098 elf32_arm_always_size_sections (bfd
*output_bfd
,
17099 struct bfd_link_info
*info
)
17102 struct elf32_arm_link_hash_table
*htab
;
17104 htab
= elf32_arm_hash_table (info
);
17106 if (bfd_link_relocatable (info
))
17109 tls_sec
= elf_hash_table (info
)->tls_sec
;
17113 struct elf_link_hash_entry
*tlsbase
;
17115 tlsbase
= elf_link_hash_lookup
17116 (elf_hash_table (info
), "_TLS_MODULE_BASE_", true, true, false);
17120 struct bfd_link_hash_entry
*bh
= NULL
;
17121 const struct elf_backend_data
*bed
17122 = get_elf_backend_data (output_bfd
);
17124 if (!(_bfd_generic_link_add_one_symbol
17125 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
17126 tls_sec
, 0, NULL
, false,
17127 bed
->collect
, &bh
)))
17130 tlsbase
->type
= STT_TLS
;
17131 tlsbase
= (struct elf_link_hash_entry
*)bh
;
17132 tlsbase
->def_regular
= 1;
17133 tlsbase
->other
= STV_HIDDEN
;
17134 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, true);
17138 if (htab
->fdpic_p
&& !bfd_link_relocatable (info
)
17139 && !bfd_elf_stack_segment_size (output_bfd
, info
,
17140 "__stacksize", DEFAULT_STACK_SIZE
))
17146 /* Finish up dynamic symbol handling. We set the contents of various
17147 dynamic sections here. */
17150 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17151 struct bfd_link_info
* info
,
17152 struct elf_link_hash_entry
* h
,
17153 Elf_Internal_Sym
* sym
)
17155 struct elf32_arm_link_hash_table
*htab
;
17156 struct elf32_arm_link_hash_entry
*eh
;
17158 htab
= elf32_arm_hash_table (info
);
17162 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17164 if (h
->plt
.offset
!= (bfd_vma
) -1)
17168 BFD_ASSERT (h
->dynindx
!= -1);
17169 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17174 if (!h
->def_regular
)
17176 /* Mark the symbol as undefined, rather than as defined in
17177 the .plt section. */
17178 sym
->st_shndx
= SHN_UNDEF
;
17179 /* If the symbol is weak we need to clear the value.
17180 Otherwise, the PLT entry would provide a definition for
17181 the symbol even if the symbol wasn't defined anywhere,
17182 and so the symbol would never be NULL. Leave the value if
17183 there were any relocations where pointer equality matters
17184 (this is a clue for the dynamic linker, to make function
17185 pointer comparisons work between an application and shared
17187 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17190 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17192 /* At least one non-call relocation references this .iplt entry,
17193 so the .iplt entry is the function's canonical address. */
17194 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17195 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17196 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17197 (output_bfd
, htab
->root
.iplt
->output_section
));
17198 sym
->st_value
= (h
->plt
.offset
17199 + htab
->root
.iplt
->output_section
->vma
17200 + htab
->root
.iplt
->output_offset
);
17207 Elf_Internal_Rela rel
;
17209 /* This symbol needs a copy reloc. Set it up. */
17210 BFD_ASSERT (h
->dynindx
!= -1
17211 && (h
->root
.type
== bfd_link_hash_defined
17212 || h
->root
.type
== bfd_link_hash_defweak
));
17215 rel
.r_offset
= (h
->root
.u
.def
.value
17216 + h
->root
.u
.def
.section
->output_section
->vma
17217 + h
->root
.u
.def
.section
->output_offset
);
17218 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17219 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17220 s
= htab
->root
.sreldynrelro
;
17222 s
= htab
->root
.srelbss
;
17223 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17226 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17227 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17228 it is relative to the ".got" section. */
17229 if (h
== htab
->root
.hdynamic
17231 && htab
->root
.target_os
!= is_vxworks
17232 && h
== htab
->root
.hgot
))
17233 sym
->st_shndx
= SHN_ABS
;
17239 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17241 const unsigned long *template, unsigned count
)
17245 for (ix
= 0; ix
!= count
; ix
++)
17247 unsigned long insn
= template[ix
];
17249 /* Emit mov pc,rx if bx is not permitted. */
17250 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
17251 insn
= (insn
& 0xf000000f) | 0x01a0f000;
17252 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
17256 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17257 other variants, NaCl needs this entry in a static executable's
17258 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17259 zero. For .iplt really only the last bundle is useful, and .iplt
17260 could have a shorter first entry, with each individual PLT entry's
17261 relative branch calculated differently so it targets the last
17262 bundle instead of the instruction before it (labelled .Lplt_tail
17263 above). But it's simpler to keep the size and layout of PLT0
17264 consistent with the dynamic case, at the cost of some dead code at
17265 the start of .iplt and the one dead store to the stack at the start
17268 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17269 asection
*plt
, bfd_vma got_displacement
)
17273 put_arm_insn (htab
, output_bfd
,
17274 elf32_arm_nacl_plt0_entry
[0]
17275 | arm_movw_immediate (got_displacement
),
17276 plt
->contents
+ 0);
17277 put_arm_insn (htab
, output_bfd
,
17278 elf32_arm_nacl_plt0_entry
[1]
17279 | arm_movt_immediate (got_displacement
),
17280 plt
->contents
+ 4);
17282 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
17283 put_arm_insn (htab
, output_bfd
,
17284 elf32_arm_nacl_plt0_entry
[i
],
17285 plt
->contents
+ (i
* 4));
17288 /* Finish up the dynamic sections. */
17291 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17296 struct elf32_arm_link_hash_table
*htab
;
17298 htab
= elf32_arm_hash_table (info
);
17302 dynobj
= elf_hash_table (info
)->dynobj
;
17304 sgot
= htab
->root
.sgotplt
;
17305 /* A broken linker script might have discarded the dynamic sections.
17306 Catch this here so that we do not seg-fault later on. */
17307 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17309 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17311 if (elf_hash_table (info
)->dynamic_sections_created
)
17314 Elf32_External_Dyn
*dyncon
, *dynconend
;
17316 splt
= htab
->root
.splt
;
17317 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17318 BFD_ASSERT (sgot
!= NULL
);
17320 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17321 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17323 for (; dyncon
< dynconend
; dyncon
++)
17325 Elf_Internal_Dyn dyn
;
17329 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17334 if (htab
->root
.target_os
== is_vxworks
17335 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17336 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17351 name
= RELOC_SECTION (htab
, ".plt");
17353 s
= bfd_get_linker_section (dynobj
, name
);
17357 (_("could not find section %s"), name
);
17358 bfd_set_error (bfd_error_invalid_operation
);
17361 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17362 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17366 s
= htab
->root
.srelplt
;
17367 BFD_ASSERT (s
!= NULL
);
17368 dyn
.d_un
.d_val
= s
->size
;
17369 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17378 case DT_TLSDESC_PLT
:
17379 s
= htab
->root
.splt
;
17380 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17381 + htab
->root
.tlsdesc_plt
);
17382 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17385 case DT_TLSDESC_GOT
:
17386 s
= htab
->root
.sgot
;
17387 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17388 + htab
->root
.tlsdesc_got
);
17389 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17392 /* Set the bottom bit of DT_INIT/FINI if the
17393 corresponding function is Thumb. */
17395 name
= info
->init_function
;
17398 name
= info
->fini_function
;
17400 /* If it wasn't set by elf_bfd_final_link
17401 then there is nothing to adjust. */
17402 if (dyn
.d_un
.d_val
!= 0)
17404 struct elf_link_hash_entry
* eh
;
17406 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17407 false, false, true);
17409 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17410 == ST_BRANCH_TO_THUMB
)
17412 dyn
.d_un
.d_val
|= 1;
17413 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17420 /* Fill in the first entry in the procedure linkage table. */
17421 if (splt
->size
> 0 && htab
->plt_header_size
)
17423 const bfd_vma
*plt0_entry
;
17424 bfd_vma got_address
, plt_address
, got_displacement
;
17426 /* Calculate the addresses of the GOT and PLT. */
17427 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17428 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17430 if (htab
->root
.target_os
== is_vxworks
)
17432 /* The VxWorks GOT is relocated by the dynamic linker.
17433 Therefore, we must emit relocations rather than simply
17434 computing the values now. */
17435 Elf_Internal_Rela rel
;
17437 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17438 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17439 splt
->contents
+ 0);
17440 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17441 splt
->contents
+ 4);
17442 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17443 splt
->contents
+ 8);
17444 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17446 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17447 rel
.r_offset
= plt_address
+ 12;
17448 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17450 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17451 htab
->srelplt2
->contents
);
17453 else if (htab
->root
.target_os
== is_nacl
)
17454 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17455 got_address
+ 8 - (plt_address
+ 16));
17456 else if (using_thumb_only (htab
))
17458 got_displacement
= got_address
- (plt_address
+ 12);
17460 plt0_entry
= elf32_thumb2_plt0_entry
;
17461 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17462 splt
->contents
+ 0);
17463 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17464 splt
->contents
+ 4);
17465 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17466 splt
->contents
+ 8);
17468 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17472 got_displacement
= got_address
- (plt_address
+ 16);
17474 plt0_entry
= elf32_arm_plt0_entry
;
17475 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17476 splt
->contents
+ 0);
17477 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17478 splt
->contents
+ 4);
17479 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17480 splt
->contents
+ 8);
17481 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17482 splt
->contents
+ 12);
17484 #ifdef FOUR_WORD_PLT
17485 /* The displacement value goes in the otherwise-unused
17486 last word of the second entry. */
17487 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17489 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17494 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17495 really seem like the right value. */
17496 if (splt
->output_section
->owner
== output_bfd
)
17497 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17499 if (htab
->root
.tlsdesc_plt
)
17501 bfd_vma got_address
17502 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17503 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17504 + htab
->root
.sgot
->output_offset
);
17505 bfd_vma plt_address
17506 = splt
->output_section
->vma
+ splt
->output_offset
;
17508 arm_put_trampoline (htab
, output_bfd
,
17509 splt
->contents
+ htab
->root
.tlsdesc_plt
,
17510 dl_tlsdesc_lazy_trampoline
, 6);
17512 bfd_put_32 (output_bfd
,
17513 gotplt_address
+ htab
->root
.tlsdesc_got
17514 - (plt_address
+ htab
->root
.tlsdesc_plt
)
17515 - dl_tlsdesc_lazy_trampoline
[6],
17516 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24);
17517 bfd_put_32 (output_bfd
,
17518 got_address
- (plt_address
+ htab
->root
.tlsdesc_plt
)
17519 - dl_tlsdesc_lazy_trampoline
[7],
17520 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24 + 4);
17523 if (htab
->tls_trampoline
)
17525 arm_put_trampoline (htab
, output_bfd
,
17526 splt
->contents
+ htab
->tls_trampoline
,
17527 tls_trampoline
, 3);
17528 #ifdef FOUR_WORD_PLT
17529 bfd_put_32 (output_bfd
, 0x00000000,
17530 splt
->contents
+ htab
->tls_trampoline
+ 12);
17534 if (htab
->root
.target_os
== is_vxworks
17535 && !bfd_link_pic (info
)
17536 && htab
->root
.splt
->size
> 0)
17538 /* Correct the .rel(a).plt.unloaded relocations. They will have
17539 incorrect symbol indexes. */
17543 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17544 / htab
->plt_entry_size
);
17545 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17547 for (; num_plts
; num_plts
--)
17549 Elf_Internal_Rela rel
;
17551 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17552 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17553 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17554 p
+= RELOC_SIZE (htab
);
17556 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17557 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17558 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17559 p
+= RELOC_SIZE (htab
);
17564 if (htab
->root
.target_os
== is_nacl
17565 && htab
->root
.iplt
!= NULL
17566 && htab
->root
.iplt
->size
> 0)
17567 /* NaCl uses a special first entry in .iplt too. */
17568 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17570 /* Fill in the first three entries in the global offset table. */
17573 if (sgot
->size
> 0)
17576 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17578 bfd_put_32 (output_bfd
,
17579 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17581 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17582 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17585 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17588 /* At the very end of the .rofixup section is a pointer to the GOT. */
17589 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17591 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17593 bfd_vma got_value
= hgot
->root
.u
.def
.value
17594 + hgot
->root
.u
.def
.section
->output_section
->vma
17595 + hgot
->root
.u
.def
.section
->output_offset
;
17597 arm_elf_add_rofixup (output_bfd
, htab
->srofixup
, got_value
);
17599 /* Make sure we allocated and generated the same number of fixups. */
17600 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17607 elf32_arm_init_file_header (bfd
*abfd
, struct bfd_link_info
*link_info
)
17609 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17610 struct elf32_arm_link_hash_table
*globals
;
17611 struct elf_segment_map
*m
;
17613 if (!_bfd_elf_init_file_header (abfd
, link_info
))
17616 i_ehdrp
= elf_elfheader (abfd
);
17618 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17619 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17620 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17624 globals
= elf32_arm_hash_table (link_info
);
17625 if (globals
!= NULL
&& globals
->byteswap_code
)
17626 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17628 if (globals
->fdpic_p
)
17629 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17632 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17633 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17635 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17636 if (abi
== AEABI_VFP_args_vfp
)
17637 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17639 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17642 /* Scan segment to set p_flags attribute if it contains only sections with
17643 SHF_ARM_PURECODE flag. */
17644 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17650 for (j
= 0; j
< m
->count
; j
++)
17652 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17658 m
->p_flags_valid
= 1;
17664 static enum elf_reloc_type_class
17665 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
17666 const asection
*rel_sec ATTRIBUTE_UNUSED
,
17667 const Elf_Internal_Rela
*rela
)
17669 switch ((int) ELF32_R_TYPE (rela
->r_info
))
17671 case R_ARM_RELATIVE
:
17672 return reloc_class_relative
;
17673 case R_ARM_JUMP_SLOT
:
17674 return reloc_class_plt
;
17676 return reloc_class_copy
;
17677 case R_ARM_IRELATIVE
:
17678 return reloc_class_ifunc
;
17680 return reloc_class_normal
;
17685 arm_final_write_processing (bfd
*abfd
)
17687 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
17691 elf32_arm_final_write_processing (bfd
*abfd
)
17693 arm_final_write_processing (abfd
);
17694 return _bfd_elf_final_write_processing (abfd
);
17697 /* Return TRUE if this is an unwinding table entry. */
17700 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
17702 return (startswith (name
, ELF_STRING_ARM_unwind
)
17703 || startswith (name
, ELF_STRING_ARM_unwind_once
));
17707 /* Set the type and flags for an ARM section. We do this by
17708 the section name, which is a hack, but ought to work. */
17711 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
17715 name
= bfd_section_name (sec
);
17717 if (is_arm_elf_unwind_section_name (abfd
, name
))
17719 hdr
->sh_type
= SHT_ARM_EXIDX
;
17720 hdr
->sh_flags
|= SHF_LINK_ORDER
;
17723 if (sec
->flags
& SEC_ELF_PURECODE
)
17724 hdr
->sh_flags
|= SHF_ARM_PURECODE
;
17729 /* Handle an ARM specific section when reading an object file. This is
17730 called when bfd_section_from_shdr finds a section with an unknown
17734 elf32_arm_section_from_shdr (bfd
*abfd
,
17735 Elf_Internal_Shdr
* hdr
,
17739 /* There ought to be a place to keep ELF backend specific flags, but
17740 at the moment there isn't one. We just keep track of the
17741 sections by their name, instead. Fortunately, the ABI gives
17742 names for all the ARM specific sections, so we will probably get
17744 switch (hdr
->sh_type
)
17746 case SHT_ARM_EXIDX
:
17747 case SHT_ARM_PREEMPTMAP
:
17748 case SHT_ARM_ATTRIBUTES
:
17755 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
17761 static _arm_elf_section_data
*
17762 get_arm_elf_section_data (asection
* sec
)
17764 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
17765 return elf32_arm_section_data (sec
);
17773 struct bfd_link_info
*info
;
17776 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
17777 asection
*, struct elf_link_hash_entry
*);
17778 } output_arch_syminfo
;
17780 enum map_symbol_type
17788 /* Output a single mapping symbol. */
17791 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
17792 enum map_symbol_type type
,
17795 static const char *names
[3] = {"$a", "$t", "$d"};
17796 Elf_Internal_Sym sym
;
17798 sym
.st_value
= osi
->sec
->output_section
->vma
17799 + osi
->sec
->output_offset
17803 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
17804 sym
.st_shndx
= osi
->sec_shndx
;
17805 sym
.st_target_internal
= 0;
17806 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
17807 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
17810 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17811 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17814 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17815 bool is_iplt_entry_p
,
17816 union gotplt_union
*root_plt
,
17817 struct arm_plt_info
*arm_plt
)
17819 struct elf32_arm_link_hash_table
*htab
;
17820 bfd_vma addr
, plt_header_size
;
17822 if (root_plt
->offset
== (bfd_vma
) -1)
17825 htab
= elf32_arm_hash_table (osi
->info
);
17829 if (is_iplt_entry_p
)
17831 osi
->sec
= htab
->root
.iplt
;
17832 plt_header_size
= 0;
17836 osi
->sec
= htab
->root
.splt
;
17837 plt_header_size
= htab
->plt_header_size
;
17839 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
17840 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
17842 addr
= root_plt
->offset
& -2;
17843 if (htab
->root
.target_os
== is_vxworks
)
17845 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17847 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
17849 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
17851 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
17854 else if (htab
->root
.target_os
== is_nacl
)
17856 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17859 else if (htab
->fdpic_p
)
17861 enum map_symbol_type type
= using_thumb_only (htab
)
17865 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
17866 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17868 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
17870 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
17872 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
))
17873 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
17876 else if (using_thumb_only (htab
))
17878 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
17885 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
17888 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17891 #ifdef FOUR_WORD_PLT
17892 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17894 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
17897 /* A three-word PLT with no Thumb thunk contains only Arm code,
17898 so only need to output a mapping symbol for the first PLT entry and
17899 entries with thumb thunks. */
17900 if (thumb_stub_p
|| addr
== plt_header_size
)
17902 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17911 /* Output mapping symbols for PLT entries associated with H. */
17914 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
17916 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
17917 struct elf32_arm_link_hash_entry
*eh
;
17919 if (h
->root
.type
== bfd_link_hash_indirect
)
17922 if (h
->root
.type
== bfd_link_hash_warning
)
17923 /* When warning symbols are created, they **replace** the "real"
17924 entry in the hash table, thus we never get to see the real
17925 symbol in a hash traversal. So look at it now. */
17926 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
17928 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17929 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
17930 &h
->plt
, &eh
->plt
);
17933 /* Bind a veneered symbol to its veneer identified by its hash entry
17934 STUB_ENTRY. The veneered location thus loose its symbol. */
17937 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry
*stub_entry
)
17939 struct elf32_arm_link_hash_entry
*hash
= stub_entry
->h
;
17942 hash
->root
.root
.u
.def
.section
= stub_entry
->stub_sec
;
17943 hash
->root
.root
.u
.def
.value
= stub_entry
->stub_offset
;
17944 hash
->root
.size
= stub_entry
->stub_size
;
17947 /* Output a single local symbol for a generated stub. */
17950 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
17951 bfd_vma offset
, bfd_vma size
)
17953 Elf_Internal_Sym sym
;
17955 sym
.st_value
= osi
->sec
->output_section
->vma
17956 + osi
->sec
->output_offset
17958 sym
.st_size
= size
;
17960 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
17961 sym
.st_shndx
= osi
->sec_shndx
;
17962 sym
.st_target_internal
= 0;
17963 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
17967 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
17970 struct elf32_arm_stub_hash_entry
*stub_entry
;
17971 asection
*stub_sec
;
17974 output_arch_syminfo
*osi
;
17975 const insn_sequence
*template_sequence
;
17976 enum stub_insn_type prev_type
;
17979 enum map_symbol_type sym_type
;
17981 /* Massage our args to the form they really have. */
17982 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
17983 osi
= (output_arch_syminfo
*) in_arg
;
17985 stub_sec
= stub_entry
->stub_sec
;
17987 /* Ensure this stub is attached to the current section being
17989 if (stub_sec
!= osi
->sec
)
17992 addr
= (bfd_vma
) stub_entry
->stub_offset
;
17993 template_sequence
= stub_entry
->stub_template
;
17995 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
17996 arm_stub_claim_sym (stub_entry
);
17999 stub_name
= stub_entry
->output_name
;
18000 switch (template_sequence
[0].type
)
18003 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
18004 stub_entry
->stub_size
))
18009 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
18010 stub_entry
->stub_size
))
18019 prev_type
= DATA_TYPE
;
18021 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
18023 switch (template_sequence
[i
].type
)
18026 sym_type
= ARM_MAP_ARM
;
18031 sym_type
= ARM_MAP_THUMB
;
18035 sym_type
= ARM_MAP_DATA
;
18043 if (template_sequence
[i
].type
!= prev_type
)
18045 prev_type
= template_sequence
[i
].type
;
18046 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18050 switch (template_sequence
[i
].type
)
18074 /* Output mapping symbols for linker generated sections,
18075 and for those data-only sections that do not have a
18079 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18080 struct bfd_link_info
*info
,
18082 int (*func
) (void *, const char *,
18083 Elf_Internal_Sym
*,
18085 struct elf_link_hash_entry
*))
18087 output_arch_syminfo osi
;
18088 struct elf32_arm_link_hash_table
*htab
;
18090 bfd_size_type size
;
18093 htab
= elf32_arm_hash_table (info
);
18097 check_use_blx (htab
);
18099 osi
.flaginfo
= flaginfo
;
18103 /* Add a $d mapping symbol to data-only sections that
18104 don't have any mapping symbol. This may result in (harmless) redundant
18105 mapping symbols. */
18106 for (input_bfd
= info
->input_bfds
;
18108 input_bfd
= input_bfd
->link
.next
)
18110 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18111 for (osi
.sec
= input_bfd
->sections
;
18113 osi
.sec
= osi
.sec
->next
)
18115 if (osi
.sec
->output_section
!= NULL
18116 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18118 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18119 == SEC_HAS_CONTENTS
18120 && get_arm_elf_section_data (osi
.sec
) != NULL
18121 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18122 && osi
.sec
->size
> 0
18123 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18125 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18126 (output_bfd
, osi
.sec
->output_section
);
18127 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18128 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18133 /* ARM->Thumb glue. */
18134 if (htab
->arm_glue_size
> 0)
18136 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18137 ARM2THUMB_GLUE_SECTION_NAME
);
18139 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18140 (output_bfd
, osi
.sec
->output_section
);
18141 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
18142 || htab
->pic_veneer
)
18143 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18144 else if (htab
->use_blx
)
18145 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18147 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18149 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18151 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18152 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18156 /* Thumb->ARM glue. */
18157 if (htab
->thumb_glue_size
> 0)
18159 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18160 THUMB2ARM_GLUE_SECTION_NAME
);
18162 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18163 (output_bfd
, osi
.sec
->output_section
);
18164 size
= THUMB2ARM_GLUE_SIZE
;
18166 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18168 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18169 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18173 /* ARMv4 BX veneers. */
18174 if (htab
->bx_glue_size
> 0)
18176 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18177 ARM_BX_GLUE_SECTION_NAME
);
18179 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18180 (output_bfd
, osi
.sec
->output_section
);
18182 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18185 /* Long calls stubs. */
18186 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18188 asection
* stub_sec
;
18190 for (stub_sec
= htab
->stub_bfd
->sections
;
18192 stub_sec
= stub_sec
->next
)
18194 /* Ignore non-stub sections. */
18195 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18198 osi
.sec
= stub_sec
;
18200 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18201 (output_bfd
, osi
.sec
->output_section
);
18203 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18207 /* Finally, output mapping symbols for the PLT. */
18208 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18210 osi
.sec
= htab
->root
.splt
;
18211 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18212 (output_bfd
, osi
.sec
->output_section
));
18214 /* Output mapping symbols for the plt header. */
18215 if (htab
->root
.target_os
== is_vxworks
)
18217 /* VxWorks shared libraries have no PLT header. */
18218 if (!bfd_link_pic (info
))
18220 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18222 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18226 else if (htab
->root
.target_os
== is_nacl
)
18228 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18231 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18233 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18235 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18237 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18240 else if (!htab
->fdpic_p
)
18242 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18244 #ifndef FOUR_WORD_PLT
18245 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18250 if (htab
->root
.target_os
== is_nacl
18252 && htab
->root
.iplt
->size
> 0)
18254 /* NaCl uses a special first entry in .iplt too. */
18255 osi
.sec
= htab
->root
.iplt
;
18256 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18257 (output_bfd
, osi
.sec
->output_section
));
18258 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18261 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18262 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18264 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18265 for (input_bfd
= info
->input_bfds
;
18267 input_bfd
= input_bfd
->link
.next
)
18269 struct arm_local_iplt_info
**local_iplt
;
18270 unsigned int i
, num_syms
;
18272 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18273 if (local_iplt
!= NULL
)
18275 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18276 if (num_syms
> elf32_arm_num_entries (input_bfd
))
18278 _bfd_error_handler (_("\
18279 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18281 (unsigned long) elf32_arm_num_entries (input_bfd
),
18285 for (i
= 0; i
< num_syms
; i
++)
18286 if (local_iplt
[i
] != NULL
18287 && !elf32_arm_output_plt_map_1 (&osi
, true,
18288 &local_iplt
[i
]->root
,
18289 &local_iplt
[i
]->arm
))
18294 if (htab
->root
.tlsdesc_plt
!= 0)
18296 /* Mapping symbols for the lazy tls trampoline. */
18297 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
,
18298 htab
->root
.tlsdesc_plt
))
18301 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18302 htab
->root
.tlsdesc_plt
+ 24))
18305 if (htab
->tls_trampoline
!= 0)
18307 /* Mapping symbols for the tls trampoline. */
18308 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18310 #ifdef FOUR_WORD_PLT
18311 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18312 htab
->tls_trampoline
+ 12))
18320 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18321 the import library. All SYMCOUNT symbols of ABFD can be examined
18322 from their pointers in SYMS. Pointers of symbols to keep should be
18323 stored continuously at the beginning of that array.
18325 Returns the number of symbols to keep. */
18327 static unsigned int
18328 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18329 struct bfd_link_info
*info
,
18330 asymbol
**syms
, long symcount
)
18334 long src_count
, dst_count
= 0;
18335 struct elf32_arm_link_hash_table
*htab
;
18337 htab
= elf32_arm_hash_table (info
);
18338 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18342 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18343 BFD_ASSERT (cmse_name
);
18345 for (src_count
= 0; src_count
< symcount
; src_count
++)
18347 struct elf32_arm_link_hash_entry
*cmse_hash
;
18353 sym
= syms
[src_count
];
18354 flags
= sym
->flags
;
18355 name
= (char *) bfd_asymbol_name (sym
);
18357 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18359 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18362 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18363 if (namelen
> maxnamelen
)
18365 cmse_name
= (char *)
18366 bfd_realloc (cmse_name
, namelen
);
18367 maxnamelen
= namelen
;
18369 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18370 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18371 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, false, false, true);
18374 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18375 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18376 || cmse_hash
->root
.type
!= STT_FUNC
)
18379 syms
[dst_count
++] = sym
;
18383 syms
[dst_count
] = NULL
;
18388 /* Filter symbols of ABFD to include in the import library. All
18389 SYMCOUNT symbols of ABFD can be examined from their pointers in
18390 SYMS. Pointers of symbols to keep should be stored continuously at
18391 the beginning of that array.
18393 Returns the number of symbols to keep. */
18395 static unsigned int
18396 elf32_arm_filter_implib_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18397 struct bfd_link_info
*info
,
18398 asymbol
**syms
, long symcount
)
18400 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
18402 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18403 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18404 library to be a relocatable object file. */
18405 BFD_ASSERT (!(bfd_get_file_flags (info
->out_implib_bfd
) & EXEC_P
));
18406 if (globals
->cmse_implib
)
18407 return elf32_arm_filter_cmse_symbols (abfd
, info
, syms
, symcount
);
18409 return _bfd_elf_filter_global_symbols (abfd
, info
, syms
, symcount
);
18412 /* Allocate target specific section data. */
18415 elf32_arm_new_section_hook (bfd
*abfd
, asection
*sec
)
18417 if (!sec
->used_by_bfd
)
18419 _arm_elf_section_data
*sdata
;
18420 size_t amt
= sizeof (*sdata
);
18422 sdata
= (_arm_elf_section_data
*) bfd_zalloc (abfd
, amt
);
18425 sec
->used_by_bfd
= sdata
;
18428 return _bfd_elf_new_section_hook (abfd
, sec
);
18432 /* Used to order a list of mapping symbols by address. */
18435 elf32_arm_compare_mapping (const void * a
, const void * b
)
18437 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
18438 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
18440 if (amap
->vma
> bmap
->vma
)
18442 else if (amap
->vma
< bmap
->vma
)
18444 else if (amap
->type
> bmap
->type
)
18445 /* Ensure results do not depend on the host qsort for objects with
18446 multiple mapping symbols at the same address by sorting on type
18449 else if (amap
->type
< bmap
->type
)
18455 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18457 static unsigned long
18458 offset_prel31 (unsigned long addr
, bfd_vma offset
)
18460 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
18463 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18467 copy_exidx_entry (bfd
*output_bfd
, bfd_byte
*to
, bfd_byte
*from
, bfd_vma offset
)
18469 unsigned long first_word
= bfd_get_32 (output_bfd
, from
);
18470 unsigned long second_word
= bfd_get_32 (output_bfd
, from
+ 4);
18472 /* High bit of first word is supposed to be zero. */
18473 if ((first_word
& 0x80000000ul
) == 0)
18474 first_word
= offset_prel31 (first_word
, offset
);
18476 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18477 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18478 if ((second_word
!= 0x1) && ((second_word
& 0x80000000ul
) == 0))
18479 second_word
= offset_prel31 (second_word
, offset
);
18481 bfd_put_32 (output_bfd
, first_word
, to
);
18482 bfd_put_32 (output_bfd
, second_word
, to
+ 4);
18485 /* Data for make_branch_to_a8_stub(). */
18487 struct a8_branch_to_stub_data
18489 asection
*writing_section
;
18490 bfd_byte
*contents
;
18494 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18495 places for a particular section. */
18498 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18501 struct elf32_arm_stub_hash_entry
*stub_entry
;
18502 struct a8_branch_to_stub_data
*data
;
18503 bfd_byte
*contents
;
18504 unsigned long branch_insn
;
18505 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18506 bfd_signed_vma branch_offset
;
18510 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18511 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18513 if (stub_entry
->target_section
!= data
->writing_section
18514 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18517 contents
= data
->contents
;
18519 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18520 generated when both source and target are in the same section. */
18521 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18522 + stub_entry
->target_section
->output_offset
18523 + stub_entry
->source_value
;
18525 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18526 + stub_entry
->stub_sec
->output_offset
18527 + stub_entry
->stub_offset
;
18529 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18530 veneered_insn_loc
&= ~3u;
18532 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18534 abfd
= stub_entry
->target_section
->owner
;
18535 loc
= stub_entry
->source_value
;
18537 /* We attempt to avoid this condition by setting stubs_always_after_branch
18538 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18539 This check is just to be on the safe side... */
18540 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18542 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18543 "allocated in unsafe location"), abfd
);
18547 switch (stub_entry
->stub_type
)
18549 case arm_stub_a8_veneer_b
:
18550 case arm_stub_a8_veneer_b_cond
:
18551 branch_insn
= 0xf0009000;
18554 case arm_stub_a8_veneer_blx
:
18555 branch_insn
= 0xf000e800;
18558 case arm_stub_a8_veneer_bl
:
18560 unsigned int i1
, j1
, i2
, j2
, s
;
18562 branch_insn
= 0xf000d000;
18565 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18567 /* There's not much we can do apart from complain if this
18569 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18570 "of range (input file too large)"), abfd
);
18574 /* i1 = not(j1 eor s), so:
18576 j1 = (not i1) eor s. */
18578 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18579 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18580 i2
= (branch_offset
>> 22) & 1;
18581 i1
= (branch_offset
>> 23) & 1;
18582 s
= (branch_offset
>> 24) & 1;
18585 branch_insn
|= j2
<< 11;
18586 branch_insn
|= j1
<< 13;
18587 branch_insn
|= s
<< 26;
18596 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18597 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
18602 /* Beginning of stm32l4xx work-around. */
18604 /* Functions encoding instructions necessary for the emission of the
18605 fix-stm32l4xx-629360.
18606 Encoding is extracted from the
18607 ARM (C) Architecture Reference Manual
18608 ARMv7-A and ARMv7-R edition
18609 ARM DDI 0406C.b (ID072512). */
18611 static inline bfd_vma
18612 create_instruction_branch_absolute (int branch_offset
)
18614 /* A8.8.18 B (A8-334)
18615 B target_address (Encoding T4). */
18616 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18617 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18618 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18620 int s
= ((branch_offset
& 0x1000000) >> 24);
18621 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
18622 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
18624 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
18625 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18627 bfd_vma patched_inst
= 0xf0009000
18629 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
18630 | j1
<< 13 /* J1. */
18631 | j2
<< 11 /* J2. */
18632 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
18634 return patched_inst
;
18637 static inline bfd_vma
18638 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
18640 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18641 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18642 bfd_vma patched_inst
= 0xe8900000
18643 | (/*W=*/wback
<< 21)
18645 | (reg_mask
& 0x0000ffff);
18647 return patched_inst
;
18650 static inline bfd_vma
18651 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
18653 /* A8.8.60 LDMDB/LDMEA (A8-402)
18654 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18655 bfd_vma patched_inst
= 0xe9100000
18656 | (/*W=*/wback
<< 21)
18658 | (reg_mask
& 0x0000ffff);
18660 return patched_inst
;
18663 static inline bfd_vma
18664 create_instruction_mov (int target_reg
, int source_reg
)
18666 /* A8.8.103 MOV (register) (A8-486)
18667 MOV Rd, Rm (Encoding T1). */
18668 bfd_vma patched_inst
= 0x4600
18669 | (target_reg
& 0x7)
18670 | ((target_reg
& 0x8) >> 3) << 7
18671 | (source_reg
<< 3);
18673 return patched_inst
;
18676 static inline bfd_vma
18677 create_instruction_sub (int target_reg
, int source_reg
, int value
)
18679 /* A8.8.221 SUB (immediate) (A8-708)
18680 SUB Rd, Rn, #value (Encoding T3). */
18681 bfd_vma patched_inst
= 0xf1a00000
18682 | (target_reg
<< 8)
18683 | (source_reg
<< 16)
18685 | ((value
& 0x800) >> 11) << 26
18686 | ((value
& 0x700) >> 8) << 12
18689 return patched_inst
;
18692 static inline bfd_vma
18693 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
18696 /* A8.8.332 VLDM (A8-922)
18697 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18698 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
18699 | (/*W=*/wback
<< 21)
18701 | (num_words
& 0x000000ff)
18702 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
18703 | (first_reg
& 0x00000001) << 22;
18705 return patched_inst
;
18708 static inline bfd_vma
18709 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
18712 /* A8.8.332 VLDM (A8-922)
18713 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18714 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
18716 | (num_words
& 0x000000ff)
18717 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
18718 | (first_reg
& 0x00000001) << 22;
18720 return patched_inst
;
18723 static inline bfd_vma
18724 create_instruction_udf_w (int value
)
18726 /* A8.8.247 UDF (A8-758)
18727 Undefined (Encoding T2). */
18728 bfd_vma patched_inst
= 0xf7f0a000
18729 | (value
& 0x00000fff)
18730 | (value
& 0x000f0000) << 16;
18732 return patched_inst
;
18735 static inline bfd_vma
18736 create_instruction_udf (int value
)
18738 /* A8.8.247 UDF (A8-758)
18739 Undefined (Encoding T1). */
18740 bfd_vma patched_inst
= 0xde00
18743 return patched_inst
;
18746 /* Functions writing an instruction in memory, returning the next
18747 memory position to write to. */
18749 static inline bfd_byte
*
18750 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
18751 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18753 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
18757 static inline bfd_byte
*
18758 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
18759 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18761 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
18765 /* Function filling up a region in memory with T1 and T2 UDFs taking
18766 care of alignment. */
18769 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
18771 const bfd_byte
* const base_stub_contents
,
18772 bfd_byte
* const from_stub_contents
,
18773 const bfd_byte
* const end_stub_contents
)
18775 bfd_byte
*current_stub_contents
= from_stub_contents
;
18777 /* Fill the remaining of the stub with deterministic contents : UDF
18779 Check if realignment is needed on modulo 4 frontier using T1, to
18781 if ((current_stub_contents
< end_stub_contents
)
18782 && !((current_stub_contents
- base_stub_contents
) % 2)
18783 && ((current_stub_contents
- base_stub_contents
) % 4))
18784 current_stub_contents
=
18785 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18786 create_instruction_udf (0));
18788 for (; current_stub_contents
< end_stub_contents
;)
18789 current_stub_contents
=
18790 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18791 create_instruction_udf_w (0));
18793 return current_stub_contents
;
18796 /* Functions writing the stream of instructions equivalent to the
18797 derived sequence for ldmia, ldmdb, vldm respectively. */
18800 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
18802 const insn32 initial_insn
,
18803 const bfd_byte
*const initial_insn_addr
,
18804 bfd_byte
*const base_stub_contents
)
18806 int wback
= (initial_insn
& 0x00200000) >> 21;
18807 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
18808 int insn_all_registers
= initial_insn
& 0x0000ffff;
18809 int insn_low_registers
, insn_high_registers
;
18810 int usable_register_mask
;
18811 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18812 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18813 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18814 bfd_byte
*current_stub_contents
= base_stub_contents
;
18816 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
18818 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18819 smaller than 8 registers load sequences that do not cause the
18821 if (nb_registers
<= 8)
18823 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18824 current_stub_contents
=
18825 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18828 /* B initial_insn_addr+4. */
18830 current_stub_contents
=
18831 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18832 create_instruction_branch_absolute
18833 (initial_insn_addr
- current_stub_contents
));
18835 /* Fill the remaining of the stub with deterministic contents. */
18836 current_stub_contents
=
18837 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18838 base_stub_contents
, current_stub_contents
,
18839 base_stub_contents
+
18840 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18845 /* - reg_list[13] == 0. */
18846 BFD_ASSERT ((insn_all_registers
& (1 << 13))==0);
18848 /* - reg_list[14] & reg_list[15] != 1. */
18849 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
18851 /* - if (wback==1) reg_list[rn] == 0. */
18852 BFD_ASSERT (!wback
|| !restore_rn
);
18854 /* - nb_registers > 8. */
18855 BFD_ASSERT (elf32_arm_popcount (insn_all_registers
) > 8);
18857 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18859 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18860 - One with the 7 lowest registers (register mask 0x007F)
18861 This LDM will finally contain between 2 and 7 registers
18862 - One with the 7 highest registers (register mask 0xDF80)
18863 This ldm will finally contain between 2 and 7 registers. */
18864 insn_low_registers
= insn_all_registers
& 0x007F;
18865 insn_high_registers
= insn_all_registers
& 0xDF80;
18867 /* A spare register may be needed during this veneer to temporarily
18868 handle the base register. This register will be restored with the
18869 last LDM operation.
18870 The usable register may be any general purpose register (that
18871 excludes PC, SP, LR : register mask is 0x1FFF). */
18872 usable_register_mask
= 0x1FFF;
18874 /* Generate the stub function. */
18877 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18878 current_stub_contents
=
18879 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18880 create_instruction_ldmia
18881 (rn
, /*wback=*/1, insn_low_registers
));
18883 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18884 current_stub_contents
=
18885 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18886 create_instruction_ldmia
18887 (rn
, /*wback=*/1, insn_high_registers
));
18890 /* B initial_insn_addr+4. */
18891 current_stub_contents
=
18892 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18893 create_instruction_branch_absolute
18894 (initial_insn_addr
- current_stub_contents
));
18897 else /* if (!wback). */
18901 /* If Rn is not part of the high-register-list, move it there. */
18902 if (!(insn_high_registers
& (1 << rn
)))
18904 /* Choose a Ri in the high-register-list that will be restored. */
18905 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
18908 current_stub_contents
=
18909 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18910 create_instruction_mov (ri
, rn
));
18913 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18914 current_stub_contents
=
18915 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18916 create_instruction_ldmia
18917 (ri
, /*wback=*/1, insn_low_registers
));
18919 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18920 current_stub_contents
=
18921 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18922 create_instruction_ldmia
18923 (ri
, /*wback=*/0, insn_high_registers
));
18927 /* B initial_insn_addr+4. */
18928 current_stub_contents
=
18929 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18930 create_instruction_branch_absolute
18931 (initial_insn_addr
- current_stub_contents
));
18935 /* Fill the remaining of the stub with deterministic contents. */
18936 current_stub_contents
=
18937 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18938 base_stub_contents
, current_stub_contents
,
18939 base_stub_contents
+
18940 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
/* Emit the veneer that replaces an erratum-triggering Thumb-2 LDMDB
   INITIAL_INSN (located at INITIAL_INSN_ADDR) into BASE_STUB_CONTENTS.
   Wide (more than 8 registers) loads are split into two narrower LDMs
   so the STM32L4XX hardware issue is not triggered; narrow loads are
   copied through untouched.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original LDMDB encoding (T1).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* What we want to emit below relies on the architectural constraints
     of a valid LDMDB encoding:  */

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function, dispatching on the writeback/PC/base
     register combinations.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      /* No branch is emitted after this: loading PC here transfers
	 control back directly.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      /* If Rn is not part of the low-register-list, move it there.  */
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      /* If Rn is not part of the high-register-list, pick a spare.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
/* Emit the veneer replacing an erratum-triggering Thumb-2 VLDM
   INITIAL_INSN (at INITIAL_INSN_ADDR) into BASE_STUB_CONTENTS.  Wide
   (more than 8 words) FP loads are split into 8-word chunks; narrow
   loads are copied through untouched.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bool is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bool is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bool is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bool is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk may carry fewer than 8 words.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4 * num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
/* Dispatch creation of the STM32L4XX replacement veneer for WRONG_INSN
   (at WRONG_INSN_ADDR) into STUB_CONTENTS, based on which erratum-prone
   multiple-load encoding it is.  */

static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}
19331 /* End of stm32l4xx work-around. */
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  Also applies VFP11 and STM32L4XX erratum
   fixups recorded against SEC, and rewrites edited .ARM.exidx
   sections.  */

static bool
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return false;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return false;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma
		   - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      if (edited_contents == NULL)
	return false;
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd,
				    edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd,
				edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return true;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return false;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return false;
}
/* Mangle thumb function symbols as we read them in: decode the low
   address bit (or legacy STT_ARM_TFUNC type) into st_target_internal's
   branch-type field.  */

static bool
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return false;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  /* Strip the Thumb bit from the value and remember the mode.  */
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      /* Legacy encoding: convert to STT_FUNC + Thumb branch type.  */
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return true;
}
/* Mangle thumb function symbols as we write them out: re-encode the
   in-memory branch-type annotation as the EABI low-address-bit
   convention.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link type, the static
	     linker will simulate the work of dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for dynamic linker itself.
	  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
/* Add the PT_ARM_EXIDX program header covering the loadable .ARM.exidx
   section, unless one is already present.  */

static bool
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return false;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  /* Prepend the new segment to the map list.  */
	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return true;
}
19808 /* We may add a PT_ARM_EXIDX program header. */
19811 elf32_arm_additional_program_headers (bfd
*abfd
,
19812 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
19816 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
19817 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
/* Hook called by the linker routine which adds symbols from an object
   file.  Delegates to the VxWorks hook when targeting VxWorks.  */

static bool
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if (elf32_arm_hash_table (info) == NULL)
    return false;

  if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return false;

  return true;
}
/* We use this to override swap_symbol_in and swap_symbol_out.
   All other fields mirror the generic 32-bit ELF size info.
   NOTE(review): the scalar fields between the sizeof entries
   (arch_size etc.) were reconstructed from the generic layout —
   confirm against the upstream definition.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash_entry_size.  */
  1,		/* int_rels_per_ext_rel.  */
  32, 2,	/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: decodes Thumb bit.  */
  elf32_arm_swap_symbol_out,	/* Overridden: re-encodes Thumb bit.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19874 read_code32 (const bfd
*abfd
, const bfd_byte
*addr
)
19876 /* V7 BE8 code is always little endian. */
19877 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
19878 return bfd_getl32 (addr
);
19880 return bfd_get_32 (abfd
, addr
);
19884 read_code16 (const bfd
*abfd
, const bfd_byte
*addr
)
19886 /* V7 BE8 code is always little endian. */
19887 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
19888 return bfd_getl16 (addr
);
19890 return bfd_get_16 (abfd
, addr
);
/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size can not be determined.  The format is
   recognised by matching the first word against the known PLT0
   templates.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  Accounts for an
   optional leading Thumb stub and matches the first ARM instruction
   (with its immediate stripped) against the known templates.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size if fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Builds "name@plt" synthetic symbols from the .rel.plt section, sizing
   each PLT entry with elf32_arm_plt_size.  Returns the number of
   symbols created, 0 when inapplicable, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data)
	  || data == NULL)
	return -1;
      bfd_cache_section_contents ((asection *) plt, data);
    }

  count = relplt->size / hdr->sh_entsize;
  /* First pass: compute total storage for symbols plus name strings.  */
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Names are packed right after the asymbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes of the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20073 elf32_arm_section_flags (const Elf_Internal_Shdr
*hdr
)
20075 if (hdr
->sh_flags
& SHF_ARM_PURECODE
)
20076 hdr
->bfd_section
->flags
|= SEC_ELF_PURECODE
;
20081 elf32_arm_lookup_section_flags (char *flag_name
)
20083 if (!strcmp (flag_name
, "SHF_ARM_PURECODE"))
20084 return SHF_ARM_PURECODE
;
20086 return SEC_NO_FLAGS
;
20089 static unsigned int
20090 elf32_arm_count_additional_relocs (asection
*sec
)
20092 struct _arm_elf_section_data
*arm_data
;
20093 arm_data
= get_arm_elf_section_data (sec
);
20095 return arm_data
== NULL
? 0 : arm_data
->additional_reloc_count
;
20098 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20099 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20100 FALSE otherwise. ISECTION is the best guess matching section from the
20101 input bfd IBFD, but it might be NULL. */
20104 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
20105 bfd
*obfd ATTRIBUTE_UNUSED
,
20106 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
20107 Elf_Internal_Shdr
*osection
)
20109 switch (osection
->sh_type
)
20111 case SHT_ARM_EXIDX
:
20113 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
20114 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
20117 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
20118 osection
->sh_info
= 0;
20120 /* The sh_link field must be set to the text section associated with
20121 this index section. Unfortunately the ARM EHABI does not specify
20122 exactly how to determine this association. Our caller does try
20123 to match up OSECTION with its corresponding input section however
20124 so that is a good first guess. */
20125 if (isection
!= NULL
20126 && osection
->bfd_section
!= NULL
20127 && isection
->bfd_section
!= NULL
20128 && isection
->bfd_section
->output_section
!= NULL
20129 && isection
->bfd_section
->output_section
== osection
->bfd_section
20130 && iheaders
!= NULL
20131 && isection
->sh_link
> 0
20132 && isection
->sh_link
< elf_numsections (ibfd
)
20133 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
20134 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
20137 for (i
= elf_numsections (obfd
); i
-- > 0;)
20138 if (oheaders
[i
]->bfd_section
20139 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
20145 /* Failing that we have to find a matching section ourselves. If
20146 we had the output section name available we could compare that
20147 with input section names. Unfortunately we don't. So instead
20148 we use a simple heuristic and look for the nearest executable
20149 section before this one. */
20150 for (i
= elf_numsections (obfd
); i
-- > 0;)
20151 if (oheaders
[i
] == osection
)
20157 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
20158 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
20159 == (SHF_ALLOC
| SHF_EXECINSTR
))
20165 osection
->sh_link
= i
;
20166 /* If the text section was part of a group
20167 then the index section should be too. */
20168 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
20169 osection
->sh_flags
|= SHF_GROUP
;
20175 case SHT_ARM_PREEMPTMAP
:
20176 osection
->sh_flags
= SHF_ALLOC
;
20179 case SHT_ARM_ATTRIBUTES
:
20180 case SHT_ARM_DEBUGOVERLAY
:
20181 case SHT_ARM_OVERLAYSECTION
:
/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bool
is_arm_mapping_symbol (const char * name)
{
  /* Paranoia: reject a missing name outright.  */
  if (name == NULL || name[0] != '$')
    return false;

  /* Note: if objcopy --prefix-symbols has been used then the mapping
     symbols could have acquired a prefix.  We do not support this here,
     since such symbols no longer conform to the ARM ELF ABI.  */
  switch (name[1])
    {
    case 'a':
    case 'd':
    case 't':
    case 'x':
      return name[2] == 0 || name[2] == '.';
    default:
      return false;
    }
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}
20211 /* Make sure that mapping symbols in object files are not removed via the
20212 "strip --strip-unneeded" tool. These symbols are needed in order to
20213 correctly generate interworking veneers, and for byte swapping code
20214 regions. Once an object file has been linked, it is safe to remove the
20215 symbols as they will no longer be needed. */
20218 elf32_arm_backend_symbol_processing (bfd
*abfd
, asymbol
*sym
)
20220 if (((abfd
->flags
& (EXEC_P
| DYNAMIC
)) == 0)
20221 && sym
->section
!= bfd_abs_section_ptr
20222 && is_arm_mapping_symbol (sym
->name
))
20223 sym
->flags
|= BSF_KEEP
;
20226 #undef elf_backend_copy_special_section_fields
20227 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20229 #define ELF_ARCH bfd_arch_arm
20230 #define ELF_TARGET_ID ARM_ELF_DATA
20231 #define ELF_MACHINE_CODE EM_ARM
20232 #ifdef __QNXTARGET__
20233 #define ELF_MAXPAGESIZE 0x1000
20235 #define ELF_MAXPAGESIZE 0x10000
20237 #define ELF_COMMONPAGESIZE 0x1000
20239 #define bfd_elf32_mkobject elf32_arm_mkobject
20241 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20242 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20243 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20244 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20245 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20246 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20247 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20248 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20249 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20250 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20251 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20252 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20254 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20255 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20256 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20257 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20258 #define elf_backend_check_relocs elf32_arm_check_relocs
20259 #define elf_backend_update_relocs elf32_arm_update_relocs
20260 #define elf_backend_relocate_section elf32_arm_relocate_section
20261 #define elf_backend_write_section elf32_arm_write_section
20262 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20263 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20264 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20265 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20266 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20267 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20268 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20269 #define elf_backend_init_file_header elf32_arm_init_file_header
20270 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20271 #define elf_backend_object_p elf32_arm_object_p
20272 #define elf_backend_fake_sections elf32_arm_fake_sections
20273 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20274 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20275 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20276 #define elf_backend_size_info elf32_arm_size_info
20277 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20278 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20279 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20280 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20281 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20282 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20283 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20284 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20286 #define elf_backend_can_refcount 1
20287 #define elf_backend_can_gc_sections 1
20288 #define elf_backend_plt_readonly 1
20289 #define elf_backend_want_got_plt 1
20290 #define elf_backend_want_plt_sym 0
20291 #define elf_backend_want_dynrelro 1
20292 #define elf_backend_may_use_rel_p 1
20293 #define elf_backend_may_use_rela_p 0
20294 #define elf_backend_default_use_rela_p 0
20295 #define elf_backend_dtrel_excludes_plt 1
20297 #define elf_backend_got_header_size 12
20298 #define elf_backend_extern_protected_data 1
20300 #undef elf_backend_obj_attrs_vendor
20301 #define elf_backend_obj_attrs_vendor "aeabi"
20302 #undef elf_backend_obj_attrs_section
20303 #define elf_backend_obj_attrs_section ".ARM.attributes"
20304 #undef elf_backend_obj_attrs_arg_type
20305 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20306 #undef elf_backend_obj_attrs_section_type
20307 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20308 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20309 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20311 #undef elf_backend_section_flags
20312 #define elf_backend_section_flags elf32_arm_section_flags
20313 #undef elf_backend_lookup_section_flags_hook
20314 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20316 #define elf_backend_linux_prpsinfo32_ugid16 true
20318 #include "elf32-target.h"
20320 /* Native Client targets. */
20322 #undef TARGET_LITTLE_SYM
20323 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20324 #undef TARGET_LITTLE_NAME
20325 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20326 #undef TARGET_BIG_SYM
20327 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20328 #undef TARGET_BIG_NAME
20329 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20331 /* Like elf32_arm_link_hash_table_create -- but overrides
20332 appropriately for NaCl. */
20334 static struct bfd_link_hash_table
*
20335 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
20337 struct bfd_link_hash_table
*ret
;
20339 ret
= elf32_arm_link_hash_table_create (abfd
);
20342 struct elf32_arm_link_hash_table
*htab
20343 = (struct elf32_arm_link_hash_table
*) ret
;
20345 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
20346 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
20351 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20352 really need to use elf32_arm_modify_segment_map. But we do it
20353 anyway just to reduce gratuitous differences with the stock ARM backend. */
20356 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
20358 return (elf32_arm_modify_segment_map (abfd
, info
)
20359 && nacl_modify_segment_map (abfd
, info
));
20363 elf32_arm_nacl_final_write_processing (bfd
*abfd
)
20365 arm_final_write_processing (abfd
);
20366 return nacl_final_write_processing (abfd
);
20370 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
20371 const arelent
*rel ATTRIBUTE_UNUSED
)
20374 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
20375 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
20379 #define elf32_bed elf32_arm_nacl_bed
20380 #undef bfd_elf32_bfd_link_hash_table_create
20381 #define bfd_elf32_bfd_link_hash_table_create \
20382 elf32_arm_nacl_link_hash_table_create
20383 #undef elf_backend_plt_alignment
20384 #define elf_backend_plt_alignment 4
20385 #undef elf_backend_modify_segment_map
20386 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20387 #undef elf_backend_modify_headers
20388 #define elf_backend_modify_headers nacl_modify_headers
20389 #undef elf_backend_final_write_processing
20390 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20391 #undef bfd_elf32_get_synthetic_symtab
20392 #undef elf_backend_plt_sym_val
20393 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20394 #undef elf_backend_copy_special_section_fields
20396 #undef ELF_MINPAGESIZE
20397 #undef ELF_COMMONPAGESIZE
20399 #undef ELF_TARGET_OS
20400 #define ELF_TARGET_OS is_nacl
20402 #include "elf32-target.h"
20404 /* Reset to defaults. */
20405 #undef elf_backend_plt_alignment
20406 #undef elf_backend_modify_segment_map
20407 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20408 #undef elf_backend_modify_headers
20409 #undef elf_backend_final_write_processing
20410 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20411 #undef ELF_MINPAGESIZE
20412 #undef ELF_COMMONPAGESIZE
20413 #define ELF_COMMONPAGESIZE 0x1000
20416 /* FDPIC Targets. */
20418 #undef TARGET_LITTLE_SYM
20419 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20420 #undef TARGET_LITTLE_NAME
20421 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20422 #undef TARGET_BIG_SYM
20423 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20424 #undef TARGET_BIG_NAME
20425 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20426 #undef elf_match_priority
20427 #define elf_match_priority 128
20429 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20431 /* Like elf32_arm_link_hash_table_create -- but overrides
20432 appropriately for FDPIC. */
20434 static struct bfd_link_hash_table
*
20435 elf32_arm_fdpic_link_hash_table_create (bfd
*abfd
)
20437 struct bfd_link_hash_table
*ret
;
20439 ret
= elf32_arm_link_hash_table_create (abfd
);
20442 struct elf32_arm_link_hash_table
*htab
= (struct elf32_arm_link_hash_table
*) ret
;
20449 /* We need dynamic symbols for every section, since segments can
20450 relocate independently. */
20452 elf32_arm_fdpic_omit_section_dynsym (bfd
*output_bfd ATTRIBUTE_UNUSED
,
20453 struct bfd_link_info
*info
20455 asection
*p ATTRIBUTE_UNUSED
)
20457 switch (elf_section_data (p
)->this_hdr
.sh_type
)
20461 /* If sh_type is yet undecided, assume it could be
20462 SHT_PROGBITS/SHT_NOBITS. */
20466 /* There shouldn't be section relative relocations
20467 against any other section. */
20474 #define elf32_bed elf32_arm_fdpic_bed
20476 #undef bfd_elf32_bfd_link_hash_table_create
20477 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20479 #undef elf_backend_omit_section_dynsym
20480 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20482 #undef ELF_TARGET_OS
20484 #include "elf32-target.h"
20486 #undef elf_match_priority
20488 #undef elf_backend_omit_section_dynsym
20490 /* VxWorks Targets. */
20492 #undef TARGET_LITTLE_SYM
20493 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20494 #undef TARGET_LITTLE_NAME
20495 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20496 #undef TARGET_BIG_SYM
20497 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20498 #undef TARGET_BIG_NAME
20499 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20501 /* Like elf32_arm_link_hash_table_create -- but overrides
20502 appropriately for VxWorks. */
20504 static struct bfd_link_hash_table
*
20505 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
20507 struct bfd_link_hash_table
*ret
;
20509 ret
= elf32_arm_link_hash_table_create (abfd
);
20512 struct elf32_arm_link_hash_table
*htab
20513 = (struct elf32_arm_link_hash_table
*) ret
;
20520 elf32_arm_vxworks_final_write_processing (bfd
*abfd
)
20522 arm_final_write_processing (abfd
);
20523 return elf_vxworks_final_write_processing (abfd
);
20527 #define elf32_bed elf32_arm_vxworks_bed
20529 #undef bfd_elf32_bfd_link_hash_table_create
20530 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20531 #undef elf_backend_final_write_processing
20532 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20533 #undef elf_backend_emit_relocs
20534 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20536 #undef elf_backend_may_use_rel_p
20537 #define elf_backend_may_use_rel_p 0
20538 #undef elf_backend_may_use_rela_p
20539 #define elf_backend_may_use_rela_p 1
20540 #undef elf_backend_default_use_rela_p
20541 #define elf_backend_default_use_rela_p 1
20542 #undef elf_backend_want_plt_sym
20543 #define elf_backend_want_plt_sym 1
20544 #undef ELF_MAXPAGESIZE
20545 #define ELF_MAXPAGESIZE 0x1000
20546 #undef ELF_TARGET_OS
20547 #define ELF_TARGET_OS is_vxworks
20549 #include "elf32-target.h"
20552 /* Merge backend specific data from an object file to the output
20553 object file when linking. */
20556 elf32_arm_merge_private_bfd_data (bfd
*ibfd
, struct bfd_link_info
*info
)
20558 bfd
*obfd
= info
->output_bfd
;
20559 flagword out_flags
;
20561 bool flags_compatible
= true;
20564 /* Check if we have the same endianness. */
20565 if (! _bfd_generic_verify_endian_match (ibfd
, info
))
20568 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
20571 if (!elf32_arm_merge_eabi_attributes (ibfd
, info
))
20574 /* The input BFD must have had its flags initialised. */
20575 /* The following seems bogus to me -- The flags are initialized in
20576 the assembler but I don't think an elf_flags_init field is
20577 written into the object. */
20578 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20580 in_flags
= elf_elfheader (ibfd
)->e_flags
;
20581 out_flags
= elf_elfheader (obfd
)->e_flags
;
20583 /* In theory there is no reason why we couldn't handle this. However
20584 in practice it isn't even close to working and there is no real
20585 reason to want it. */
20586 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
20587 && !(ibfd
->flags
& DYNAMIC
)
20588 && (in_flags
& EF_ARM_BE8
))
20590 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20595 if (!elf_flags_init (obfd
))
20597 /* If the input is the default architecture and had the default
20598 flags then do not bother setting the flags for the output
20599 architecture, instead allow future merges to do this. If no
20600 future merges ever set these flags then they will retain their
20601 uninitialised values, which surprise surprise, correspond
20602 to the default values. */
20603 if (bfd_get_arch_info (ibfd
)->the_default
20604 && elf_elfheader (ibfd
)->e_flags
== 0)
20607 elf_flags_init (obfd
) = true;
20608 elf_elfheader (obfd
)->e_flags
= in_flags
;
20610 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
20611 && bfd_get_arch_info (obfd
)->the_default
)
20612 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
20617 /* Determine what should happen if the input ARM architecture
20618 does not match the output ARM architecture. */
20619 if (! bfd_arm_merge_machines (ibfd
, obfd
))
20622 /* Identical flags must be compatible. */
20623 if (in_flags
== out_flags
)
20626 /* Check to see if the input BFD actually contains any sections. If
20627 not, its flags may not have been initialised either, but it
20628 cannot actually cause any incompatiblity. Do not short-circuit
20629 dynamic objects; their section list may be emptied by
20630 elf_link_add_object_symbols.
20632 Also check to see if there are no code sections in the input.
20633 In this case there is no need to check for code specific flags.
20634 XXX - do we need to worry about floating-point format compatability
20635 in data sections ? */
20636 if (!(ibfd
->flags
& DYNAMIC
))
20638 bool null_input_bfd
= true;
20639 bool only_data_sections
= true;
20641 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
20643 /* Ignore synthetic glue sections. */
20644 if (strcmp (sec
->name
, ".glue_7")
20645 && strcmp (sec
->name
, ".glue_7t"))
20647 if ((bfd_section_flags (sec
)
20648 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20649 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20650 only_data_sections
= false;
20652 null_input_bfd
= false;
20657 if (null_input_bfd
|| only_data_sections
)
20661 /* Complain about various flag mismatches. */
20662 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
20663 EF_ARM_EABI_VERSION (out_flags
)))
20666 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20667 ibfd
, (in_flags
& EF_ARM_EABIMASK
) >> 24,
20668 obfd
, (out_flags
& EF_ARM_EABIMASK
) >> 24);
20672 /* Not sure what needs to be checked for EABI versions >= 1. */
20673 /* VxWorks libraries do not use these flags. */
20674 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
20675 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
20676 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
20678 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
20681 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20682 ibfd
, in_flags
& EF_ARM_APCS_26
? 26 : 32,
20683 obfd
, out_flags
& EF_ARM_APCS_26
? 26 : 32);
20684 flags_compatible
= false;
20687 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
20689 if (in_flags
& EF_ARM_APCS_FLOAT
)
20691 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20695 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20698 flags_compatible
= false;
20701 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
20703 if (in_flags
& EF_ARM_VFP_FLOAT
)
20705 (_("error: %pB uses %s instructions, whereas %pB does not"),
20706 ibfd
, "VFP", obfd
);
20709 (_("error: %pB uses %s instructions, whereas %pB does not"),
20710 ibfd
, "FPA", obfd
);
20712 flags_compatible
= false;
20715 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
20717 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
20719 (_("error: %pB uses %s instructions, whereas %pB does not"),
20720 ibfd
, "Maverick", obfd
);
20723 (_("error: %pB does not use %s instructions, whereas %pB does"),
20724 ibfd
, "Maverick", obfd
);
20726 flags_compatible
= false;
20729 #ifdef EF_ARM_SOFT_FLOAT
20730 if ((in_flags
& EF_ARM_SOFT_FLOAT
) != (out_flags
& EF_ARM_SOFT_FLOAT
))
20732 /* We can allow interworking between code that is VFP format
20733 layout, and uses either soft float or integer regs for
20734 passing floating point arguments and results. We already
20735 know that the APCS_FLOAT flags match; similarly for VFP
20737 if ((in_flags
& EF_ARM_APCS_FLOAT
) != 0
20738 || (in_flags
& EF_ARM_VFP_FLOAT
) == 0)
20740 if (in_flags
& EF_ARM_SOFT_FLOAT
)
20742 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20746 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20749 flags_compatible
= false;
20754 /* Interworking mismatch is only a warning. */
20755 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
20757 if (in_flags
& EF_ARM_INTERWORK
)
20760 (_("warning: %pB supports interworking, whereas %pB does not"),
20766 (_("warning: %pB does not support interworking, whereas %pB does"),
20772 return flags_compatible
;